diff --git a/.github/actions/cleanup_runner/action.yml b/.github/actions/cleanup_runner/action.yml new file mode 100644 index 000000000000..3d251fdd7a45 --- /dev/null +++ b/.github/actions/cleanup_runner/action.yml @@ -0,0 +1,21 @@ +name: 'Cleanup Runner' +runs: + using: "composite" + steps: + - name: Before + shell: bash + run: df -h + + - name: Free disk space + uses: endersonmenezes/free-disk-space@v3 + with: + rm_cmd: "rmz" # apparently faster + remove_android: true + remove_dotnet: true + remove_haskell: true + remove_tool_cache: true + remove_folders: /opt/az /opt/ghc /opt/google /opt/hostedtoolcache /opt/microsoft /usr/lib/firefox /usr/lib/jvm /usr/local/.ghcup /usr/local/julia* /usr/local/lib/android /usr/local/share/boost /usr/local/share/chromium /usr/local/share/powershell /usr/share/dotnet /usr/share/swift /var/lib/apt/lists/* + + - name: After + shell: bash + run: df -h diff --git a/.github/config/extensions/avro.cmake b/.github/config/extensions/avro.cmake index 60e34c756b79..be7ad5a46d9d 100644 --- a/.github/config/extensions/avro.cmake +++ b/.github/config/extensions/avro.cmake @@ -2,6 +2,6 @@ if (NOT MINGW) duckdb_extension_load(avro LOAD_TESTS DONT_LINK GIT_URL https://github.com/duckdb/duckdb-avro - GIT_TAG 7b75062f6345d11c5342c09216a75c57342c2e82 + GIT_TAG 93da8a19b41eb577add83d0552c6946a16e97c83 ) endif() diff --git a/.github/config/extensions/aws.cmake b/.github/config/extensions/aws.cmake index ee0547d8089e..de112cafd77a 100644 --- a/.github/config/extensions/aws.cmake +++ b/.github/config/extensions/aws.cmake @@ -2,6 +2,6 @@ if (NOT MINGW AND NOT ${WASM_ENABLED}) duckdb_extension_load(aws ### TODO: re-enable LOAD_TESTS GIT_URL https://github.com/duckdb/duckdb-aws - GIT_TAG 18803d5e55b9f9f6dda5047d0fdb4f4238b6801d + GIT_TAG 55bf3621fb7db254b473c94ce6360643ca38fac0 ) endif() diff --git a/.github/config/extensions/azure.cmake b/.github/config/extensions/azure.cmake index fe6dcfd19914..612c68143fef 100644 --- a/.github/config/extensions/azure.cmake +++ b/.github/config/extensions/azure.cmake @@ -1,7 +1,7 @@ if (NOT MINGW AND NOT ${WASM_ENABLED}) - duckdb_extension_load(azure - LOAD_TESTS - GIT_URL https://github.com/duckdb/duckdb-azure - GIT_TAG 0709c0fa1cf67a668b58b1f06ff3e5fc1696e10a - ) + duckdb_extension_load(azure + LOAD_TESTS + GIT_URL https://github.com/duckdb/duckdb-azure + GIT_TAG f4d55b658c14d3de40bac9d1c68f08f3cf116374 + ) endif() diff --git a/.github/config/extensions/delta.cmake b/.github/config/extensions/delta.cmake index eed25eb333b9..e2b542317930 100644 --- a/.github/config/extensions/delta.cmake +++ b/.github/config/extensions/delta.cmake @@ -1,7 +1,7 @@ if (NOT MINGW AND NOT ${WASM_ENABLED}) duckdb_extension_load(delta GIT_URL https://github.com/duckdb/duckdb-delta - GIT_TAG 03aaf0f073bc622ade27c158d32473588b32aa8b + GIT_TAG 50de51108cc4d2c09a549e022fce4f74e17bf360 SUBMODULES extension-ci-tools ) endif() \ No newline at end of file diff --git a/.github/config/extensions/ducklake.cmake b/.github/config/extensions/ducklake.cmake index 42ff34e3c3e6..15435118ed69 100644 --- a/.github/config/extensions/ducklake.cmake +++ b/.github/config/extensions/ducklake.cmake @@ -1,5 +1,5 @@ duckdb_extension_load(ducklake DONT_LINK GIT_URL https://github.com/duckdb/ducklake - GIT_TAG f134ad86f2f6e7cdf4133086c38ecd9c48f1a772 + GIT_TAG de813ff4d052bffe3e9e7ffcdc31d18ca38e5ecd ) diff --git a/.github/config/extensions/httpfs.cmake b/.github/config/extensions/httpfs.cmake index 1d8e88c2e586..c96890e13799 100644 --- a/.github/config/extensions/httpfs.cmake +++ 
b/.github/config/extensions/httpfs.cmake @@ -1,6 +1,6 @@ duckdb_extension_load(httpfs LOAD_TESTS GIT_URL https://github.com/duckdb/duckdb-httpfs - GIT_TAG 8356a9017444f54018159718c8017ff7db4ea756 + GIT_TAG 9c7d34977b10346d0b4cbbde5df807d1dab0b2bf INCLUDE_DIR src/include ) diff --git a/.github/config/extensions/iceberg.cmake b/.github/config/extensions/iceberg.cmake index 18235f51fbb5..6b312de4d5d5 100644 --- a/.github/config/extensions/iceberg.cmake +++ b/.github/config/extensions/iceberg.cmake @@ -8,6 +8,6 @@ if (NOT MINGW AND NOT ${WASM_ENABLED}) duckdb_extension_load(iceberg # ${LOAD_ICEBERG_TESTS} TODO: re-enable once autoloading test is fixed GIT_URL https://github.com/duckdb/duckdb-iceberg - GIT_TAG 4f3c5499e5feec9fe17a69a8ca74d81aaf472fd2 + GIT_TAG 1c0c4c60818f58b603fc50d5267b1f1202fe5484 ) endif() diff --git a/.github/config/extensions/inet.cmake b/.github/config/extensions/inet.cmake index 7b112317c3fa..c40773a4a5a9 100644 --- a/.github/config/extensions/inet.cmake +++ b/.github/config/extensions/inet.cmake @@ -1,8 +1,7 @@ duckdb_extension_load(inet LOAD_TESTS GIT_URL https://github.com/duckdb/duckdb-inet - GIT_TAG f6a2a14f061d2dfccdb4283800b55fef3fcbb128 + GIT_TAG fe7f60bb60245197680fb07ecd1629a1dc3d91c8 INCLUDE_DIR src/include TEST_DIR test/sql - APPLY_PATCHES ) diff --git a/.github/config/extensions/mysql_scanner.cmake b/.github/config/extensions/mysql_scanner.cmake index 581cac266d26..5efe4ab4b657 100644 --- a/.github/config/extensions/mysql_scanner.cmake +++ b/.github/config/extensions/mysql_scanner.cmake @@ -3,6 +3,6 @@ if (NOT MINGW AND NOT ${WASM_ENABLED} AND NOT ${MUSL_ENABLED}) DONT_LINK LOAD_TESTS GIT_URL https://github.com/duckdb/duckdb-mysql - GIT_TAG c80647b33972c150f0bd0001c35085cefdc82d1e + GIT_TAG ecb76de715dfe0b5bba52f4fc8bde87186dd486f ) endif() diff --git a/.github/config/extensions/postgres_scanner.cmake b/.github/config/extensions/postgres_scanner.cmake index d99e014da81d..58d70ec5548f 100644 --- a/.github/config/extensions/postgres_scanner.cmake +++ b/.github/config/extensions/postgres_scanner.cmake @@ -4,6 +4,6 @@ if (NOT MINGW AND NOT ${WASM_ENABLED}) duckdb_extension_load(postgres_scanner DONT_LINK GIT_URL https://github.com/duckdb/duckdb-postgres - GIT_TAG f012a4f99cea1d276d1787d0dc84b1f1a0e0f0b2 + GIT_TAG b63ef4b1eb007320840b6d1760f3c9b139bb3b49 ) endif() diff --git a/.github/config/extensions/spatial.cmake b/.github/config/extensions/spatial.cmake index bc9b60e22643..da5375e0cde1 100644 --- a/.github/config/extensions/spatial.cmake +++ b/.github/config/extensions/spatial.cmake @@ -3,7 +3,7 @@ if (${BUILD_COMPLETE_EXTENSION_SET}) duckdb_extension_load(spatial DONT_LINK LOAD_TESTS GIT_URL https://github.com/duckdb/duckdb-spatial - GIT_TAG a6a607fe3a98ef9ad4bed218490b770f725fbc12 + GIT_TAG 2f2668d211c0cf759f460403a108f24eb8b887e3 INCLUDE_DIR src/spatial TEST_DIR test/sql ) diff --git a/.github/config/extensions/unity_catalog.cmake b/.github/config/extensions/unity_catalog.cmake new file mode 100644 index 000000000000..62026df3a68a --- /dev/null +++ b/.github/config/extensions/unity_catalog.cmake @@ -0,0 +1,7 @@ +if (NOT MINGW AND NOT ${WASM_ENABLED} AND NOT ${MUSL_ENABLED}) + duckdb_extension_load(unity_catalog + GIT_URL https://github.com/duckdb/unity_catalog + GIT_TAG 1ad4f0b1fb12f661ff58fc91c2c9e7022591ea4b + LOAD_TESTS + ) +endif() diff --git a/.github/config/extensions/vortex.cmake b/.github/config/extensions/vortex.cmake new file mode 100644 index 000000000000..f69b20cedcb0 --- /dev/null +++ b/.github/config/extensions/vortex.cmake @@
-0,0 +1,10 @@ +if (NOT WIN32 AND NOT ${WASM_ENABLED} AND NOT ${MUSL_ENABLED}) + + duckdb_extension_load(vortex + GIT_URL https://github.com/vortex-data/duckdb-vortex + GIT_TAG dae36cd56988da2b47f06a1f63df0cfb47a97a50 + SUBMODULES vortex + APPLY_PATCHES + LOAD_TESTS + ) +endif() diff --git a/.github/config/external_extensions.cmake b/.github/config/external_extensions.cmake new file mode 100644 index 000000000000..9885cdc102e5 --- /dev/null +++ b/.github/config/external_extensions.cmake @@ -0,0 +1,7 @@ +# +# This is the extension configuration for extensions that are maintained externally +# +# + +################## VORTEX +include("${EXTENSION_CONFIG_BASE_DIR}/vortex.cmake") diff --git a/.github/config/uncovered_files.csv b/.github/config/uncovered_files.csv deleted file mode 100644 index 000773130f51..000000000000 --- a/.github/config/uncovered_files.csv +++ /dev/null @@ -1,843 +0,0 @@ -catalog/catalog.cpp 49 -catalog/catalog_entry.cpp 11 -catalog/catalog_entry/duck_schema_entry.cpp 10 -catalog/catalog_entry/duck_table_entry.cpp 7 -catalog/catalog_entry/index_catalog_entry.cpp 2 -catalog/catalog_entry/scalar_function_catalog_entry.cpp 4 -catalog/catalog_entry/schema_catalog_entry.cpp 10 -catalog/catalog_entry/table_catalog_entry.cpp 4 -catalog/catalog_entry/table_function_catalog_entry.cpp 5 -catalog/catalog_entry/type_catalog_entry.cpp 2 -catalog/catalog_search_path.cpp 9 -catalog/catalog_set.cpp 20 -catalog/catalog_transaction.cpp 3 -catalog/default/default_functions.cpp 9 -catalog/dependency_manager.cpp 5 -common/allocator.cpp 20 -common/arrow/arrow_appender.cpp 23 -common/arrow/appender/map_data.cpp 7 -common/arrow/arrow_converter.cpp 13 -common/arrow/arrow_wrapper.cpp 113 -common/assert.cpp 2 -common/bind_helpers.cpp 12 -common/box_renderer.cpp 36 -common/checksum.cpp 2 -common/compressed_file_system.cpp 14 -common/crypto/md5.cpp 2 -common/enum_util.cpp 3526 -common/enums/expression_type.cpp 185 -common/enums/join_type.cpp 3 -common/exception.cpp 106 -common/exception_format_value.cpp 9 -common/field_writer.cpp 3 -common/file_buffer.cpp 5 -common/file_system.cpp 20 -common/fsst.cpp 7 -common/gzip_file_system.cpp 40 -common/hive_partitioning.cpp 55 -common/http_state.cpp 36 -common/local_file_system.cpp 22 -common/multi_file_reader.cpp 15 -common/operator/cast_operators.cpp 81 -common/operator/convert_to_string.cpp 7 -common/pipe_file_system.cpp 4 -common/preserved_error.cpp 13 -common/progress_bar/progress_bar.cpp 16 -common/progress_bar/terminal_progress_bar_display.cpp 32 -common/radix_partitioning.cpp 15 -common/re2_regex.cpp 37 -common/row_operations/row_aggregate.cpp 2 -common/row_operations/row_external.cpp 26 -common/row_operations/row_heap_gather.cpp 3 -common/row_operations/row_heap_scatter.cpp 3 -common/row_operations/row_gather.cpp 11 -common/row_operations/row_matcher.cpp 14 -common/serializer/deserializer.cpp 13 -common/serializer/serializer.cpp 21 -common/serializer/buffered_file_reader.cpp 11 -common/serializer/buffered_file_writer.cpp 2 -common/serializer/binary_serializer.cpp 11 -common/serializer/memory_stream.cpp 5 -common/sort/comparators.cpp 103 -common/sort/merge_sorter.cpp 100 -common/sort/partition_state.cpp 44 -common/sort/sort_state.cpp 10 -common/sort/sorted_block.cpp 20 -common/string_util.cpp 15 -common/tree_renderer.cpp 9 -common/types.cpp 88 -common/extra_type_info.cpp 34 -common/virtual_file_system.cpp 32 -common/types/batched_data_collection.cpp 11 -common/types/bit.cpp 9 -common/types/blob.cpp 3 -common/types/cast_helpers.cpp 2 
-common/types/column/column_data_allocator.cpp 13 -common/types/column/column_data_collection.cpp 55 -common/types/column/partitioned_column_data.cpp 8 -common/types/conflict_info.cpp 2 -common/types/conflict_manager.cpp 3 -common/types/data_chunk.cpp 45 -common/types/date.cpp 24 -common/types/hash.cpp 5 -common/types/hugeint.cpp 35 -common/types/hyperloglog.cpp 22 -common/types/list_segment.cpp 1 -common/types/interval.cpp 31 -common/types/row/partitioned_tuple_data.cpp 15 -common/types/row/row_data_collection.cpp 8 -common/types/row/row_data_collection_scanner.cpp 61 -common/types/row/row_layout.cpp 4 -common/types/row/tuple_data_allocator.cpp 23 -common/types/row/tuple_data_collection.cpp 76 -common/types/row/tuple_data_iterator.cpp 5 -common/types/row/tuple_data_scatter_gather.cpp 19 -common/types/row/tuple_data_segment.cpp 10 -common/types/string_heap.cpp 11 -common/types/time.cpp 25 -common/types/timestamp.cpp 19 -common/types/uuid.cpp 3 -common/types/validity_mask.cpp 13 -common/types/value.cpp 180 -common/types/vector.cpp 153 -common/value_operations/comparison_operations.cpp 33 -common/vector_operations/generators.cpp 31 -common/vector_operations/is_distinct_from.cpp 49 -common/vector_operations/null_operations.cpp 15 -common/vector_operations/numeric_inplace_operators.cpp 6 -common/vector_operations/vector_copy.cpp 2 -common/vector_operations/vector_hash.cpp 7 -common/vector_operations/vector_storage.cpp 13 -core_functions/aggregate/distributive/approx_count.cpp 1 -core_functions/aggregate/distributive/arg_min_max.cpp 10 -core_functions/aggregate/distributive/bitagg.cpp 8 -core_functions/aggregate/distributive/bitstring_agg.cpp 8 -core_functions/aggregate/distributive/entropy.cpp 9 -core_functions/aggregate/distributive/kurtosis.cpp 3 -core_functions/aggregate/distributive/minmax.cpp 3 -core_functions/aggregate/distributive/skew.cpp 2 -core_functions/aggregate/distributive/sum.cpp 10 -core_functions/aggregate/holistic/approximate_quantile.cpp 29 -core_functions/aggregate/holistic/mode.cpp 11 -core_functions/aggregate/holistic/quantile.cpp 14 -core_functions/aggregate/holistic/reservoir_quantile.cpp 39 -core_functions/aggregate/nested/histogram.cpp 1 -core_functions/aggregate/nested/list.cpp 4 -core_functions/scalar/array/array_value.cpp 2 -core_functions/scalar/string/length.cpp 2 -core_functions/lambda_functions.cpp 1 -core_functions/scalar/bit/bitstring.cpp 3 -core_functions/scalar/date/date_diff.cpp 124 -core_functions/scalar/date/date_part.cpp 17 -core_functions/scalar/date/date_sub.cpp 209 -core_functions/scalar/date/date_trunc.cpp 23 -core_functions/scalar/date/strftime.cpp 10 -core_functions/scalar/date/time_bucket.cpp 3 -core_functions/scalar/enum/enum_functions.cpp 10 -core_functions/scalar/generic/current_setting.cpp 1 -core_functions/scalar/generic/least.cpp 2 -core_functions/scalar/generic/system_functions.cpp 4 -core_functions/scalar/list/array_slice.cpp 3 -core_functions/scalar/list/flatten.cpp 6 -core_functions/scalar/list/list_aggregates.cpp 6 -core_functions/scalar/list/list_lambdas.cpp 2 -core_functions/scalar/list/list_sort.cpp 7 -core_functions/scalar/list/range.cpp 6 -core_functions/scalar/map/cardinality.cpp 3 -core_functions/scalar/map/map.cpp 8 -core_functions/scalar/map/map_concat.cpp 6 -core_functions/scalar/map/map_entries.cpp 6 -core_functions/scalar/map/map_extract.cpp 3 -core_functions/scalar/map/map_keys_values.cpp 3 -core_functions/scalar/math/numeric.cpp 8 -core_functions/scalar/string/hex.cpp 13 -core_functions/scalar/string/jaro_winkler.cpp 11 
-core_functions/scalar/string/printf.cpp 4 -core_functions/scalar/string/string_split.cpp 2 -core_functions/scalar/struct/struct_insert.cpp 2 -core_functions/scalar/union/union_extract.cpp 11 -core_functions/scalar/union/union_tag.cpp 4 -core_functions/scalar/union/union_value.cpp 2 -execution/aggregate_hashtable.cpp 14 -execution/column_binding_resolver.cpp 4 -execution/expression_executor.cpp 12 -execution/expression_executor/execute_between.cpp 30 -execution/expression_executor/execute_case.cpp 17 -execution/expression_executor/execute_comparison.cpp 5 -execution/expression_executor/execute_conjunction.cpp 3 -execution/expression_executor/execute_operator.cpp 3 -execution/expression_executor_state.cpp 2 -execution/index/art/art.cpp 7 -execution/index/art/art_key.cpp 6 -execution/index/art/iterator.cpp 2 -execution/index/art/leaf.cpp 10 -execution/index/art/node.cpp 11 -execution/index/art/node256.cpp 2 -execution/index/art/node48.cpp 10 -execution/index/art/prefix.cpp 2 -execution/index/fixed_size_allocator.cpp 2 -execution/index/fixed_size_buffer.cpp 6 -execution/join_hashtable.cpp 23 -execution/nested_loop_join/nested_loop_join_inner.cpp 24 -execution/nested_loop_join/nested_loop_join_mark.cpp 35 -execution/operator/aggregate/aggregate_object.cpp 2 -execution/operator/aggregate/distinct_aggregate_data.cpp 9 -execution/operator/aggregate/physical_hash_aggregate.cpp 30 -execution/operator/aggregate/physical_perfecthash_aggregate.cpp 13 -execution/operator/aggregate/physical_streaming_window.cpp 4 -execution/operator/aggregate/physical_ungrouped_aggregate.cpp 4 -execution/operator/aggregate/physical_window.cpp 32 -execution/operator/helper/physical_limit.cpp 4 -execution/operator/helper/physical_limit_percent.cpp 4 -execution/operator/helper/physical_load.cpp 2 -execution/operator/helper/physical_reservoir_sample.cpp 8 -execution/operator/helper/physical_reset.cpp 25 -execution/operator/helper/physical_set.cpp 3 -execution/operator/helper/physical_streaming_sample.cpp 16 -execution/operator/join/outer_join_marker.cpp 3 -execution/operator/join/perfect_hash_join_executor.cpp 2 -execution/operator/join/physical_asof_join.cpp 38 -execution/operator/join/physical_blockwise_nl_join.cpp 2 -execution/operator/join/physical_hash_join.cpp 8 -execution/operator/join/physical_iejoin.cpp 32 -execution/operator/join/physical_nested_loop_join.cpp 4 -execution/operator/join/physical_piecewise_merge_join.cpp 142 -execution/operator/join/physical_range_join.cpp 62 -execution/operator/order/physical_top_n.cpp 5 -execution/operator/csv_scanner/base_csv_reader.cpp 31 -execution/operator/csv_scanner/buffered_csv_reader.cpp 47 -execution/operator/csv_scanner/csv_reader_options.cpp 41 -execution/operator/persistent/csv_buffer.cpp 2 -execution/operator/csv_scanner/csv_file_handle.cpp 7 -execution/operator/csv_scanner/parallel_csv_reader.cpp 54 -execution/operator/persistent/physical_batch_copy_to_file.cpp 7 -execution/operator/persistent/physical_batch_insert.cpp 43 -execution/operator/persistent/physical_copy_to_file.cpp 2 -execution/operator/persistent/physical_export.cpp 2 -execution/operator/persistent/physical_fixed_batch_copy.cpp 7 -execution/operator/persistent/physical_insert.cpp 5 -execution/operator/projection/physical_projection.cpp 16 -execution/operator/projection/physical_tableinout_function.cpp 8 -execution/operator/scan/physical_expression_scan.cpp 2 -execution/operator/scan/physical_positional_scan.cpp 20 -execution/operator/schema/physical_attach.cpp 6 
-execution/operator/schema/physical_create_art_index.cpp 1 -execution/operator/schema/physical_create_type.cpp 2 -execution/operator/set/physical_cte.cpp 1 -execution/partitionable_hashtable.cpp 5 -execution/perfect_aggregate_hashtable.cpp 4 -execution/physical_operator.cpp 2 -execution/physical_plan/plan_aggregate.cpp 2 -execution/physical_plan/plan_asof_join.cpp 37 -execution/physical_plan/plan_column_data_get.cpp 1 -execution/physical_plan/plan_comparison_join.cpp 14 -execution/physical_plan/plan_explain.cpp 5 -execution/physical_plan/plan_export.cpp 2 -execution/physical_plan/plan_expression_get.cpp 1 -execution/physical_plan/plan_positional_join.cpp 2 -execution/physical_plan/plan_recursive_cte.cpp 3 -execution/physical_plan/plan_sample.cpp 2 -execution/physical_plan/plan_set_operation.cpp 2 -execution/physical_plan/plan_show_select.cpp 3 -execution/physical_plan_generator.cpp 6 -execution/radix_partitioned_hashtable.cpp 12 -execution/reservoir_sample.cpp 60 -execution/window_executor.cpp 29 -execution/window_segment_tree.cpp 12 -function/aggregate/sorted_aggregate_function.cpp 85 -function/built_in_functions.cpp 6 -function/cast/bit_cast.cpp 2 -function/cast/cast_function_set.cpp 3 -function/cast/enum_casts.cpp 15 -function/cast/map_cast.cpp 6 -function/cast/pointer_cast.cpp 2 -function/cast/string_cast.cpp 2 -function/cast/time_casts.cpp 8 -function/cast/union_casts.cpp 4 -function/cast/uuid_casts.cpp 2 -function/cast/union/from_struct.cpp 0 -function/cast/vector_cast_helpers.cpp 9 -function/cast_rules.cpp 19 -function/function.cpp 8 -function/function_binder.cpp 18 -function/function_set.cpp 8 -function/pragma/pragma_functions.cpp 10 -function/pragma/pragma_queries.cpp 3 -function/pragma_function.cpp 6 -function/scalar/generic/constant_or_null.cpp 5 -function/scalar/list/list_concat.cpp 5 -function/scalar/list/list_extract.cpp 10 -function/scalar/operators/add.cpp 8 -function/scalar/operators/arithmetic.cpp 23 -function/scalar/operators/multiply.cpp 4 -function/scalar/operators/subtract.cpp 5 -function/scalar/strftime_format.cpp 31 -function/scalar/string/length.cpp 2 -function/scalar/string/regexp.cpp 5 -function/scalar/string/regexp/regexp_extract_all.cpp 5 -function/scalar/string/substring.cpp 8 -function/scalar/struct/struct_extract.cpp 3 -function/scalar/system/aggregate_export.cpp 10 -function/scalar_function.cpp 8 -function/table/arrow.cpp 61 -function/table/arrow_conversion.cpp 231 -function/table/arrow/arrow_duck_schema.cpp 3 -function/table/checkpoint.cpp 7 -function/table/copy_csv.cpp 16 -function/table/pragma_detailed_profiling_output.cpp 86 -function/table/pragma_last_profiling_output.cpp 51 -function/table/range.cpp 5 -function/table/read_csv.cpp 53 -function/table/system/duckdb_constraints.cpp 3 -function/table/system/duckdb_dependencies.cpp 4 -function/table/system/duckdb_extensions.cpp 20 -function/table/system/duckdb_functions.cpp 7 -function/table/system/duckdb_indexes.cpp 3 -function/table/system/duckdb_temporary_files.cpp 25 -function/table/system/pragma_table_info.cpp 2 -function/table/table_scan.cpp 12 -function/table/unnest.cpp 8 -function/table/version/pragma_version.cpp 3 -function/table_function.cpp 9 -function/udf_function.cpp 6 -extension/json/json_reader.cpp 35 -extension/json/include/json_common.hpp 8 -extension/json/include/json_executors.hpp 3 -extension/json/json_deserializer.cpp 136 -extension/json/json_extension.cpp 7 -extension/json/json_functions.cpp 9 -extension/json/json_functions/copy_json.cpp 32 
-extension/json/json_functions/json_array_length.cpp 4 -extension/json/json_functions/json_contains.cpp 8 -extension/json/json_functions/json_create.cpp 6 -extension/json/json_functions/json_extract.cpp 4 -extension/json/json_functions/json_keys.cpp 4 -extension/json/json_functions/json_merge_patch.cpp 2 -extension/json/json_functions/json_serialize_sql.cpp 18 -extension/json/json_functions/json_structure.cpp 24 -extension/json/json_functions/json_transform.cpp 70 -extension/json/json_functions/read_json.cpp 18 -extension/json/json_functions/read_json_objects.cpp 3 -extension/json/json_scan.cpp 100 -extension/json/json_serializer.cpp 96 -extension/parquet/column_reader.cpp 108 -extension/parquet/column_writer.cpp 28 -extension/parquet/include/boolean_column_reader.hpp 6 -extension/parquet/include/cast_column_reader.hpp 6 -extension/parquet/include/column_writer.hpp 3 -extension/parquet/include/decode_utils.hpp 3 -extension/parquet/include/list_column_reader.hpp 4 -extension/parquet/include/parquet_dbp_decoder.hpp 3 -extension/parquet/include/parquet_reader.hpp 4 -extension/parquet/include/parquet_rle_bp_decoder.hpp 8 -extension/parquet/include/row_number_column_reader.hpp 10 -extension/parquet/include/templated_column_reader.hpp 5 -extension/parquet/include/thrift_tools.hpp 27 -extension/parquet/include/zstd_file_system.hpp 3 -extension/parquet/parquet_extension.cpp 32 -extension/parquet/parquet_metadata.cpp 29 -extension/parquet/parquet_reader.cpp 113 -extension/parquet/parquet_statistics.cpp 12 -extension/parquet/parquet_timestamp.cpp 12 -extension/parquet/parquet_writer.cpp 9 -extension/parquet/zstd_file_system.cpp 6 -include/duckdb/catalog/catalog.hpp 3 -include/duckdb/catalog/catalog_entry/table_catalog_entry.hpp 3 -include/duckdb/catalog/catalog_set.hpp 2 -include/duckdb/catalog/mapping_value.hpp 2 -include/duckdb/common/allocator.hpp 2 -include/duckdb/common/serializer/deserializer.hpp 2 -include/duckdb/common/serializer/deserialization_data.hpp 10 -include/duckdb/common/serializer/binary_deserializer.hpp 1 -include/duckdb/common/bit_utils.hpp 4 -include/duckdb/common/bitpacking.hpp 11 -include/duckdb/common/dl.hpp 3 -include/duckdb/common/enum_util.hpp 3 -include/duckdb/common/exception.hpp 22 -include/duckdb/common/field_writer.hpp 5 -include/duckdb/common/gzip_file_system.hpp 3 -include/duckdb/common/hive_partitioning.hpp 3 -include/duckdb/common/local_file_system.hpp 5 -include/duckdb/common/multi_file/multi_file_options.hpp 3 -include/duckdb/common/opener_file_system.hpp 21 -include/duckdb/common/arrow/appender/append_data.hpp 1 -include/duckdb/common/arrow/appender/scalar_data.hpp 2 -include/duckdb/common/arrow/appender/varchar_data.hpp 2 -include/duckdb/common/operator/abs.hpp 1 -include/duckdb/common/operator/add.hpp 2 -include/duckdb/common/operator/decimal_cast_operators.hpp 3 -include/duckdb/common/operator/convert_to_string.hpp 1 -include/duckdb/common/operator/multiply.hpp 2 -include/duckdb/common/operator/numeric_cast.hpp 2 -include/duckdb/common/operator/subtract.hpp 2 -include/duckdb/common/pipe_file_system.hpp 3 -include/duckdb/common/printer.hpp 2 -include/duckdb/common/radix.hpp 2 -include/duckdb/common/radix_partitioning.hpp 5 -include/duckdb/common/re2_regex.hpp 21 -include/duckdb/common/serializer.hpp 4 -include/duckdb/common/serializer/binary_deserializer.cpp 2 -include/duckdb/common/serializer/deserializer.hpp 37 -include/duckdb/common/serializer/serializer.hpp 34 -include/duckdb/common/sort/duckdb_pdqsort.hpp 3 -include/duckdb/common/sort/sorted_block.hpp 1 
-include/duckdb/common/string_util.hpp 9 -include/duckdb/common/type_util.hpp 2 -include/duckdb/common/types.hpp 5 -include/duckdb/common/types/column/column_data_allocator.hpp 3 -include/duckdb/common/types/column/partitioned_column_data.hpp 2 -include/duckdb/common/types/datetime.hpp 13 -include/duckdb/common/types/hugeint.hpp 2 -include/duckdb/common/types/row/partitioned_tuple_data.hpp 11 -include/duckdb/common/types/string_type.hpp 5 -include/duckdb/common/types/uuid.hpp 4 -include/duckdb/common/types/uhugeint.hpp 2 -include/duckdb/common/types/validity_mask.hpp 5 -include/duckdb/common/types/value.hpp 6 -include/duckdb/common/types/vector_buffer.hpp 1 -include/duckdb/common/vector_operations/aggregate_executor.hpp 17 -include/duckdb/common/vector_operations/binary_executor.hpp 15 -include/duckdb/common/vector_operations/generic_executor.hpp 9 -include/duckdb/common/vector_operations/senary_executor.hpp 8 -include/duckdb/common/vector_operations/septenary_executor.hpp 20 -include/duckdb/common/vector_operations/ternary_executor.hpp 12 -include/duckdb/core_functions/aggregate/algebraic/covar.hpp 12 -include/duckdb/core_functions/aggregate/algebraic/stddev.hpp 10 -include/duckdb/core_functions/aggregate/regression/regr_slope.hpp 2 -include/duckdb/core_functions/aggregate/sum_helpers.hpp 6 -include/duckdb/execution/aggregate_hashtable.hpp 3 -include/duckdb/execution/operator/helper/physical_limit.hpp 3 -include/duckdb/execution/operator/helper/physical_streaming_sample.hpp 3 -include/duckdb/execution/operator/join/physical_delim_join.hpp 3 -include/duckdb/execution/operator/join/physical_piecewise_merge_join.hpp 3 -include/duckdb/execution/operator/join/physical_range_join.hpp 2 -include/duckdb/execution/operator/persistent/buffered_csv_reader.hpp 2 -include/duckdb/execution/operator/persistent/parallel_csv_reader.hpp 3 -include/duckdb/function/aggregate_function.hpp 2 -include/duckdb/function/function_serialization.hpp 2 -include/duckdb/function/function_set.hpp 3 -include/duckdb/function/scalar/list/contains_or_position.hpp 16 -include/duckdb/function/table_function.hpp 6 -include/duckdb/function/table/arrow/arrow_duck_schema.hpp 3 -include/duckdb/function/udf_function.hpp 108 -include/duckdb/main/appender.hpp 4 -include/duckdb/main/capi/cast/from_decimal.hpp 2 -include/duckdb/main/capi/cast/to_decimal.hpp 38 -include/duckdb/main/client_context.hpp 2 -include/duckdb/main/client_context_file_opener.hpp 3 -include/duckdb/main/chunk_scan_state.hpp 4 -include/duckdb/main/connection.hpp 32 -include/duckdb/main/connection_manager.hpp 3 -include/duckdb/main/database.hpp 2 -include/duckdb/main/prepared_statement.hpp 9 -include/duckdb/main/query_profiler.hpp 9 -include/duckdb/main/query_result.hpp 12 -include/duckdb/main/relation.hpp 3 -include/duckdb/main/relation/subquery_relation.hpp 3 -include/duckdb/main/relation/write_parquet_relation.hpp 3 -include/duckdb/optimizer/matcher/function_matcher.hpp 2 -include/duckdb/optimizer/matcher/set_matcher.hpp 2 -include/duckdb/parallel/base_pipeline_event.hpp 2 -include/duckdb/parallel/event.hpp 2 -include/duckdb/parallel/task.hpp 3 -include/duckdb/parser/expression/default_expression.hpp 3 -include/duckdb/parser/expression/operator_expression.hpp 3 -include/duckdb/parser/expression/parameter_expression.hpp 3 -include/duckdb/parser/expression/positional_reference_expression.hpp 3 -include/duckdb/parser/expression/window_expression.hpp 8 -include/duckdb/parser/parsed_data/create_database_info.hpp 12 
-include/duckdb/parser/parsed_data/create_function_info.hpp 4 -include/duckdb/parser/parser_extension.hpp 3 -include/duckdb/parser/query_node/recursive_cte_node.hpp 3 -include/duckdb/parser/sql_statement.hpp 3 -include/duckdb/parser/statement/logical_plan_statement.hpp 4 -include/duckdb/planner/constraints/bound_unique_constraint.hpp 2 -include/duckdb/planner/expression/bound_default_expression.hpp 3 -include/duckdb/planner/expression/bound_lambdaref_expression.hpp 5 -include/duckdb/planner/expression/bound_parameter_data.hpp 2 -include/duckdb/planner/expression/bound_subquery_expression.hpp 5 -include/duckdb/planner/filter/conjunction_filter.hpp 3 -include/duckdb/planner/table_filter.hpp 3 -include/duckdb/planner/extension_callback.hpp 2 -include/duckdb/storage/buffer/block_handle.hpp 7 -include/duckdb/storage/compression/chimp/algorithm/chimp128.hpp 10 -include/duckdb/storage/compression/chimp/algorithm/leading_zero_buffer.hpp 3 -include/duckdb/storage/compression/chimp/algorithm/output_bit_stream.hpp 2 -include/duckdb/storage/compression/chimp/chimp_analyze.hpp 2 -include/duckdb/storage/compression/chimp/chimp_compress.hpp 5 -include/duckdb/storage/compression/chimp/chimp_fetch.hpp 10 -include/duckdb/storage/compression/chimp/chimp_scan.hpp 12 -include/duckdb/storage/compression/patas/patas_fetch.hpp 11 -include/duckdb/storage/compression/patas/patas_scan.hpp 24 -include/duckdb/storage/partial_block_manager.hpp 3 -include/duckdb/storage/standard_buffer_manager.hpp 3 -include/duckdb/storage/table/segment_tree.hpp 17 -include/duckdb/transaction/meta_transaction.hpp 3 -include/duckdb/transaction/transaction.hpp 3 -include/duckdb/transaction/transaction_manager.hpp 5 -include/duckdb/verification/no_operator_caching_verifier.hpp 3 -include/duckdb/verification/parsed_statement_verifier.hpp 3 -include/duckdb/execution/operator/scan/csv/parallel_csv_reader.hpp 2 -main/appender.cpp 23 -main/attached_database.cpp 14 -main/chunk_scan_state.cpp 25 -main/capi/appender-c.cpp 3 -main/capi/arrow-c.cpp 8 -main/capi/cast/from_decimal-c.cpp 5 -main/capi/cast/utils-c.cpp 3 -main/chunk_scan_state/query_result.cpp 28 -main/capi/data_chunk-c.cpp 30 -main/capi/duckdb-c.cpp 3 -main/capi/duckdb_value-c.cpp 14 -main/capi/helper-c.cpp 52 -main/capi/hugeint-c.cpp 5 -main/capi/logical_types-c.cpp 45 -main/capi/pending-c.cpp 32 -main/capi/prepared-c.cpp 7 -main/capi/replacement_scan-c.cpp 4 -main/capi/result-c.cpp 36 -main/capi/stream-c.cpp 5 -main/capi/table_function-c.cpp 85 -main/capi/threading-c.cpp 52 -main/capi/value-c.cpp 3 -main/client_context.cpp 85 -main/client_context_file_opener.cpp 9 -main/client_verify.cpp 3 -main/config.cpp 35 -main/connection.cpp 38 -main/database.cpp 39 -main/database_manager.cpp 3 -main/database_path_and_type.cpp 5 -main/db_instance_cache.cpp 27 -main/error_manager.cpp 9 -main/extension/extension_alias.cpp 2 -main/extension/extension_helper.cpp 8 -main/extension/extension_install.cpp 125 -main/extension/extension_load.cpp 70 -main/extension/extension_util.cpp 24 -main/materialized_query_result.cpp 9 -main/pending_query_result.cpp 5 -main/prepared_statement.cpp 16 -main/prepared_statement_data.cpp 8 -main/query_profiler.cpp 32 -main/query_result.cpp 20 -main/relation.cpp 31 -main/relation/create_view_relation.cpp 7 -main/relation/cross_product_relation.cpp 2 -main/relation/read_json_relation.cpp 10 -main/relation/write_parquet_relation.cpp 22 -main/relation/aggregate_relation.cpp 14 -main/settings/settings.cpp 157 -main/stream_query_result.cpp 7 
-optimizer/column_lifetime_analyzer.cpp 2 -optimizer/deliminator.cpp 7 -optimizer/expression_heuristics.cpp 6 -optimizer/expression_rewriter.cpp 3 -optimizer/filter_combiner.cpp 37 -optimizer/filter_pullup.cpp 2 -optimizer/filter_pushdown.cpp 3 -optimizer/join_order/cardinality_estimator.cpp 43 -optimizer/join_order/join_node.cpp 18 -optimizer/join_order/join_order_optimizer.cpp 96 -optimizer/join_order/query_graph.cpp 2 -optimizer/join_order/relation_manager.cpp 4 -optimizer/join_order/query_graph_manager.cpp 3 -optimizer/join_order/plan_enumerator.cpp 11 -optimizer/join_order/relation_statistics_helper.cpp 2 -optimizer/matcher/expression_matcher.cpp 2 -optimizer/pullup/pullup_projection.cpp 4 -optimizer/pushdown/pushdown_inner_join.cpp 2 -optimizer/pushdown/pushdown_set_operation.cpp 2 -optimizer/regex_range_filter.cpp 2 -optimizer/remove_unused_columns.cpp 2 -optimizer/rule/arithmetic_simplification.cpp 1 -optimizer/rule/date_part_simplification.cpp 2 -optimizer/rule/distributivity.cpp 3 -optimizer/rule/empty_needle_removal.cpp 2 -optimizer/rule/equal_or_null_simplification.cpp 6 -optimizer/rule/in_clause_simplification_rule.cpp 5 -optimizer/rule/like_optimizations.cpp 3 -optimizer/rule/move_constants.cpp 5 -optimizer/rule/regex_optimizations.cpp 4 -optimizer/statistics/expression/propagate_and_compress.cpp 2 -optimizer/statistics/expression/propagate_operator.cpp 5 -optimizer/statistics/operator/propagate_aggregate.cpp 3 -optimizer/statistics/operator/propagate_join.cpp 27 -optimizer/statistics/operator/propagate_limit.cpp 4 -optimizer/statistics/operator/propagate_set_operation.cpp 4 -optimizer/unnest_rewriter.cpp 3 -parallel/executor.cpp 25 -parallel/executor_task.cpp 9 -parallel/interrupt.cpp 22 -parallel/pipeline.cpp 14 -parallel/pipeline_event.cpp 5 -parallel/pipeline_executor.cpp 22 -parallel/pipeline_finish_event.cpp 1 -parallel/task_scheduler.cpp 42 -parser/base_expression.cpp 3 -parser/column_list.cpp 13 -parser/constraint.cpp 2 -parser/constraints/foreign_key_constraint.cpp 5 -parser/expression/between_expression.cpp 3 -parser/expression/case_expression.cpp 3 -parser/expression/collate_expression.cpp 3 -parser/expression/default_expression.cpp 8 -parser/expression/function_expression.cpp 2 -parser/expression/lambdaref_expression.cpp 3 -parser/expression/star_expression.cpp 5 -parser/expression/subquery_expression.cpp 5 -parser/expression/window_expression.cpp 8 -parser/expression_util.cpp 2 -parser/parsed_data/alter_info.cpp 7 -parser/parsed_data/alter_scalar_function_info.cpp 17 -parser/parsed_data/alter_table_function_info.cpp 17 -parser/parsed_data/alter_table_info.cpp 21 -parser/parsed_data/create_aggregate_function_info.cpp 11 -parser/parsed_data/create_collation_info.cpp 6 -parser/parsed_data/create_copy_function_info.cpp 6 -parser/parsed_data/create_info.cpp 6 -parser/parsed_data/create_index_info.cpp 7 -parser/parsed_data/create_pragma_function_info.cpp 5 -parser/parsed_data/create_scalar_function_info.cpp 13 -parser/parsed_data/create_table_function_info.cpp 13 -parser/parsed_data/create_view_info.cpp 15 -parser/parsed_data/sample_options.cpp 5 -parser/parsed_expression.cpp 15 -parser/parsed_expression_iterator.cpp 3 -parser/parser.cpp 5 -parser/query_node.cpp 8 -parser/query_node/cte_node.cpp 4 -parser/query_node/recursive_cte_node.cpp 6 -parser/query_node/select_node.cpp 18 -parser/query_node/set_operation_node.cpp 6 -parser/result_modifier.cpp 12 -parser/statement/copy_statement.cpp 4 -parser/statement/delete_statement.cpp 2 -parser/statement/insert_statement.cpp 
5 -parser/statement/multi_statement.cpp 7 -parser/statement/select_statement.cpp 2 -parser/statement/set_statement.cpp 3 -parser/statement/update_statement.cpp 2 -parser/tableref.cpp 9 -parser/tableref/basetableref.cpp 2 -parser/tableref/emptytableref.cpp 3 -parser/tableref/expressionlistref.cpp 5 -parser/tableref/joinref.cpp 6 -parser/tableref/pivotref.cpp 26 -parser/tableref/subqueryref.cpp 2 -parser/tableref/table_function.cpp 2 -parser/transform/constraint/transform_constraint.cpp 4 -parser/transform/expression/transform_array_access.cpp 2 -parser/transform/expression/transform_boolean_test.cpp 7 -parser/transform/expression/transform_columnref.cpp 3 -parser/transform/expression/transform_expression.cpp 4 -parser/transform/expression/transform_function.cpp 21 -parser/transform/expression/transform_interval.cpp 4 -parser/transform/expression/transform_param_ref.cpp 2 -parser/transform/helpers/transform_cte.cpp 1 -parser/transform/helpers/transform_sample.cpp 5 -parser/transform/helpers/transform_typename.cpp 14 -parser/transform/statement/transform_alter_sequence.cpp 2 -parser/transform/statement/transform_alter_table.cpp 6 -parser/transform/statement/transform_copy.cpp 2 -parser/transform/statement/transform_create_function.cpp 4 -parser/transform/statement/transform_create_index.cpp 2 -parser/transform/statement/transform_create_schema.cpp 4 -parser/transform/statement/transform_create_sequence.cpp 8 -parser/transform/statement/transform_create_table.cpp 4 -parser/transform/statement/transform_create_type.cpp 2 -parser/transform/statement/transform_create_view.cpp 2 -parser/transform/statement/transform_delete.cpp 2 -parser/transform/statement/transform_drop.cpp 6 -parser/transform/statement/transform_load.cpp 7 -parser/transform/statement/transform_pragma.cpp 11 -parser/transform/statement/transform_prepare.cpp 3 -parser/transform/statement/transform_rename.cpp 2 -parser/transform/statement/transform_select_node.cpp 4 -parser/transform/statement/transform_set.cpp 4 -parser/transform/statement/transform_upsert.cpp 4 -parser/transform/tableref/transform_base_tableref.cpp 2 -parser/transform/tableref/transform_pivot.cpp 4 -parser/transform/tableref/transform_subquery.cpp 3 -parser/transform/tableref/transform_table_function.cpp 6 -parser/transformer.cpp 5 -planner/bind_context.cpp 6 -planner/binder.cpp 7 -planner/binder/expression/bind_aggregate_expression.cpp 15 -planner/binder/expression/bind_between_expression.cpp 2 -planner/binder/expression/bind_collate_expression.cpp 2 -planner/binder/expression/bind_columnref_expression.cpp 6 -planner/binder/expression/bind_comparison_expression.cpp 2 -planner/binder/query_node/bind_cte_node.cpp 1 -planner/binder/expression/bind_function_expression.cpp 5 -planner/binder/expression/bind_operator_expression.cpp 3 -planner/binder/expression/bind_star_expression.cpp 3 -planner/binder/expression/bind_subquery_expression.cpp 7 -planner/binder/expression/bind_unnest_expression.cpp 3 -planner/binder/expression/bind_window_expression.cpp 3 -planner/binder/query_node/bind_recursive_cte_node.cpp 2 -planner/binder/query_node/bind_select_node.cpp 6 -planner/binder/query_node/bind_setop_node.cpp 3 -planner/binder/query_node/plan_query_node.cpp 2 -planner/binder/query_node/plan_subquery.cpp 2 -planner/binder/statement/bind_copy.cpp 3 -planner/binder/statement/bind_create.cpp 21 -planner/binder/statement/bind_create_table.cpp 2 -planner/binder/statement/bind_drop.cpp 20 -planner/binder/statement/bind_export.cpp 3 -planner/binder/statement/bind_insert.cpp 19 
-planner/binder/statement/bind_logical_plan.cpp 21 -planner/binder/statement/bind_pragma.cpp 3 -planner/binder/statement/bind_update.cpp 4 -planner/binder/statement/bind_vacuum.cpp 3 -planner/binder/tableref/bind_basetableref.cpp 15 -planner/binder/tableref/bind_joinref.cpp 3 -planner/binder/tableref/bind_pivot.cpp 6 -planner/binder/tableref/bind_table_function.cpp 18 -planner/binder/tableref/plan_joinref.cpp 2 -planner/binder/tableref/plan_subqueryref.cpp 2 -planner/bound_result_modifier.cpp 6 -planner/expression.cpp 5 -planner/expression/bound_aggregate_expression.cpp 2 -planner/expression/bound_between_expression.cpp 3 -planner/expression/bound_case_expression.cpp 2 -planner/expression/bound_cast_expression.cpp 5 -planner/expression/bound_columnref_expression.cpp 3 -planner/expression/bound_conjunction_expression.cpp 3 -planner/expression/bound_expression.cpp 4 -planner/expression/bound_lambda_expression.cpp 20 -planner/expression/bound_lambdaref_expression.cpp 38 -planner/expression/bound_reference_expression.cpp 12 -planner/expression/bound_subquery_expression.cpp 5 -planner/expression/bound_window_expression.cpp 7 -planner/expression_binder.cpp 9 -planner/expression_binder/aggregate_binder.cpp 3 -planner/expression_binder/alter_binder.cpp 3 -planner/expression_binder/base_select_binder.cpp 3 -planner/expression_binder/check_binder.cpp 5 -planner/expression_binder/constant_binder.cpp 3 -planner/expression_binder/group_binder.cpp 5 -planner/expression_binder/having_binder.cpp 4 -planner/expression_binder/index_binder.cpp 7 -planner/expression_binder/insert_binder.cpp 5 -planner/expression_binder/lateral_binder.cpp 26 -planner/expression_binder/order_binder.cpp 3 -planner/expression_binder/relation_binder.cpp 18 -planner/expression_binder/returning_binder.cpp 3 -planner/expression_binder/table_function_binder.cpp 7 -planner/expression_binder/update_binder.cpp 5 -planner/expression_iterator.cpp 26 -planner/filter/conjunction_filter.cpp 46 -planner/filter/constant_filter.cpp 6 -planner/filter/null_filter.cpp 16 -planner/joinside.cpp 12 -planner/logical_operator.cpp 30 -planner/operator/logical_aggregate.cpp 3 -planner/operator/logical_column_data_get.cpp 2 -planner/operator/logical_copy_to_file.cpp 37 -planner/operator/logical_create_index.cpp 2 -planner/operator/logical_cross_product.cpp 2 -planner/operator/logical_cteref.cpp 2 -planner/operator/logical_delete.cpp 2 -planner/operator/logical_dependent_join.cpp 1 -planner/operator/logical_delim_get.cpp 2 -planner/operator/logical_distinct.cpp 3 -planner/operator/logical_dummy_scan.cpp 2 -planner/operator/logical_execute.cpp 3 -planner/operator/logical_export.cpp 3 -planner/operator/logical_expression_get.cpp 2 -planner/operator/logical_extension_operator.cpp 25 -planner/operator/logical_get.cpp 10 -planner/operator/logical_insert.cpp 4 -planner/operator/logical_pivot.cpp 3 -planner/operator/logical_positional_join.cpp 3 -planner/operator/logical_pragma.cpp 3 -planner/operator/logical_prepare.cpp 3 -planner/operator/logical_projection.cpp 2 -planner/operator/logical_recursive_cte.cpp 2 -planner/operator/logical_reset.cpp 9 -planner/operator/logical_sample.cpp 2 -planner/operator/logical_set_operation.cpp 2 -planner/operator/logical_simple.cpp 2 -planner/operator/logical_unnest.cpp 2 -planner/operator/logical_update.cpp 2 -planner/operator/logical_window.cpp 2 -planner/planner.cpp 8 -planner/pragma_handler.cpp 2 -planner/subquery/flatten_dependent_join.cpp 45 -planner/subquery/has_correlated_expressions.cpp 4 
-planner/subquery/rewrite_correlated_expressions.cpp 2 -planner/table_binding.cpp 5 -planner/table_filter.cpp 7 -storage/arena_allocator.cpp 19 -storage/block.cpp 6 -storage/buffer/block_handle.cpp 18 -storage/buffer/buffer_pool.cpp 6 -storage/buffer/buffer_pool_reservation.cpp 5 -storage/buffer_manager.cpp 20 -storage/checkpoint/row_group_writer.cpp 6 -storage/checkpoint/table_data_writer.cpp 3 -storage/checkpoint/write_overflow_strings_to_disk.cpp 5 -storage/checkpoint_manager.cpp 9 -storage/compression/bitpacking.cpp 23 -storage/compression/dictionary_compression.cpp 2 -storage/compression/fsst.cpp 11 -storage/compression/numeric_constant.cpp 11 -storage/compression/string_uncompressed.cpp 15 -storage/compression/validity_uncompressed.cpp 16 -storage/data_table.cpp 46 -storage/index.cpp 3 -storage/local_storage.cpp 10 -storage/magic_bytes.cpp 2 -storage/meta_block_reader.cpp 3 -storage/optimistic_data_writer.cpp 4 -storage/partial_block_manager.cpp 26 -storage/single_file_block_manager.cpp 15 -storage/standard_buffer_manager.cpp 128 -storage/statistics/base_statistics.cpp 48 -storage/statistics/column_statistics.cpp 8 -storage/statistics/distinct_statistics.cpp 12 -storage/statistics/array_stats.cpp 1 -storage/statistics/list_stats.cpp 13 -storage/statistics/numeric_stats.cpp 117 -storage/statistics/numeric_stats_union.cpp 3 -storage/statistics/string_stats.cpp 24 -storage/statistics/struct_stats.cpp 23 -storage/storage_manager.cpp 6 -storage/serialization/serialize_expression.cpp 35 -storage/serialization/serialize_logical_operator.cpp 23 -storage/serialization/serialize_parse_info.cpp 61 -storage/serialization/serialize_table_filter.cpp 20 -storage/serialization/serialize_types.cpp 2 -storage/metadata/metadata_manager.cpp 7 -storage/table/chunk_info.cpp 62 -storage/table/column_checkpoint_state.cpp 20 -storage/table/column_data.cpp 16 -storage/table/column_data_checkpointer.cpp 4 -storage/table/column_segment.cpp 62 -storage/table/list_column_data.cpp 28 -storage/table/array_column_data.cpp 25 -storage/table/row_group.cpp 54 -storage/table/row_group_collection.cpp 7 -storage/table/row_version_manager.cpp 6 -storage/table/scan_state.cpp 7 -storage/table/standard_column_data.cpp 2 -storage/table/struct_column_data.cpp 24 -storage/table/table_statistics.cpp 27 -storage/table/update_segment.cpp 27 -storage/table/validity_column_data.cpp 3 -storage/table_index_list.cpp 4 -storage/wal_replay.cpp 3 -storage/write_ahead_log.cpp 8 -transaction/cleanup_state.cpp 2 -transaction/commit_state.cpp 31 -transaction/duck_transaction_manager.cpp 5 -transaction/meta_transaction.cpp 2 -transaction/rollback_state.cpp 5 -transaction/transaction.cpp 6 -transaction/transaction_context.cpp 6 -transaction/undo_buffer.cpp 21 -verification/no_operator_caching_verifier.cpp 6 -verification/parsed_statement_verifier.cpp 2 -verification/prepared_statement_verifier.cpp 4 -verification/statement_verifier.cpp 10 diff --git a/.github/patches/extensions/inet/hugeint_fixes.patch b/.github/patches/extensions/inet/hugeint_fixes.patch deleted file mode 100644 index 4b4375d116d7..000000000000 --- a/.github/patches/extensions/inet/hugeint_fixes.patch +++ /dev/null @@ -1,19 +0,0 @@ -diff --git a/src/inet_functions.cpp b/src/inet_functions.cpp -index da92a4c..afa7446 100644 ---- a/src/inet_functions.cpp -+++ b/src/inet_functions.cpp -@@ -185,11 +185,12 @@ static INET_TYPE AddImplementation(INET_TYPE ip, hugeint_t val) { - if (val > 0) { - address_out = - AddOperatorOverflowCheck::Operation( -- address_in, val); -+ address_in, 
(uhugeint_t)val); - } else { -+ // TODO: this is off for when val is the minimal uhugeint_t value - address_out = - SubtractOperatorOverflowCheck::Operation(address_in, -val); -+ uhugeint_t>(address_in, (uhugeint_t)(-val)); - } - - if (addr_type == IPAddressType::IP_ADDRESS_V4 && diff --git a/.github/patches/extensions/vortex/pass-variable-through-cmake-not-make.patch b/.github/patches/extensions/vortex/pass-variable-through-cmake-not-make.patch new file mode 100644 index 000000000000..474b5ab6ca28 --- /dev/null +++ b/.github/patches/extensions/vortex/pass-variable-through-cmake-not-make.patch @@ -0,0 +1,36 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 9f92376..20d6bf8 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -3,6 +3,11 @@ cmake_minimum_required(VERSION 3.22) + set(TARGET_NAME vortex) + project(${TARGET_NAME}_project) + ++set(CMAKE_OSX_DEPLOYMENT_TARGET 12.0) ++if(UNIX AND NOT APPLE) ++ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -ftls-model=global-dynamic") ++endif() ++ + set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + set(CMAKE_CXX_STANDARD 17) + +diff --git a/Makefile b/Makefile +index 3017d3f..808f019 100644 +--- a/Makefile ++++ b/Makefile +@@ -2,15 +2,7 @@ PROJ_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + + EXT_NAME=vortex_duckdb + EXT_CONFIG=${PROJ_DIR}extension_config.cmake +-EXT_FLAGS=-DCMAKE_OSX_DEPLOYMENT_TARGET=12.0 +-export MACOSX_DEPLOYMENT_TARGET=12.0 + export VCPKG_FEATURE_FLAGS=-binarycaching +-export VCPKG_OSX_DEPLOYMENT_TARGET=12.0 + export VCPKG_TOOLCHAIN_PATH := ${PROJ_DIR}vcpkg/scripts/buildsystems/vcpkg.cmake + +-# This is not needed on macOS, we don't see a tls error on load there. +-ifeq ($(shell uname), Linux) +- export CFLAGS=-ftls-model=global-dynamic +-endif +- + include extension-ci-tools/makefiles/duckdb_extension.Makefile diff --git a/.github/workflows/Android.yml b/.github/workflows/Android.yml index 29b82d9649f7..fc3a3c373197 100644 --- a/.github/workflows/Android.yml +++ b/.github/workflows/Android.yml @@ -89,6 +89,7 @@ jobs: - name: Deploy shell: bash env: + AWS_ENDPOINT_URL: ${{ secrets.S3_DUCKDB_STAGING_ENDPOINT }} AWS_ACCESS_KEY_ID: ${{ secrets.S3_DUCKDB_STAGING_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DUCKDB_STAGING_KEY }} run: | diff --git a/.github/workflows/BundleStaticLibs.yml b/.github/workflows/BundleStaticLibs.yml index 7b25b5eb04a0..d0a3730c6f9f 100644 --- a/.github/workflows/BundleStaticLibs.yml +++ b/.github/workflows/BundleStaticLibs.yml @@ -88,6 +88,7 @@ jobs: - name: Deploy shell: bash env: + AWS_ENDPOINT_URL: ${{ secrets.S3_DUCKDB_STAGING_ENDPOINT }} AWS_ACCESS_KEY_ID: ${{ secrets.S3_DUCKDB_STAGING_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DUCKDB_STAGING_KEY }} run: | @@ -148,6 +149,7 @@ jobs: - name: Deploy shell: bash env: + AWS_ENDPOINT_URL: ${{ secrets.S3_DUCKDB_STAGING_ENDPOINT }} AWS_ACCESS_KEY_ID: ${{ secrets.S3_DUCKDB_STAGING_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DUCKDB_STAGING_KEY }} run: | @@ -208,6 +210,7 @@ jobs: - name: Deploy shell: bash env: + AWS_ENDPOINT_URL: ${{ secrets.S3_DUCKDB_STAGING_ENDPOINT }} AWS_ACCESS_KEY_ID: ${{ secrets.S3_DUCKDB_STAGING_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DUCKDB_STAGING_KEY }} run: | diff --git a/.github/workflows/CodeQuality.yml b/.github/workflows/CodeQuality.yml index 67f410030ba6..6809cb6983b0 100644 --- a/.github/workflows/CodeQuality.yml +++ b/.github/workflows/CodeQuality.yml @@ -155,4 +155,4 @@ jobs: - name: Tidy Check Diff shell: bash if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/feature' }} - run: make tidy-check-diff + 
run: DUCKDB_GIT_BASE_BRANCH=${{ github.base_ref }} make tidy-check-diff diff --git a/.github/workflows/Extensions.yml b/.github/workflows/Extensions.yml index 6944f533881b..5223ccbbb64b 100644 --- a/.github/workflows/Extensions.yml +++ b/.github/workflows/Extensions.yml @@ -14,6 +14,15 @@ on: type: string run_all: type: string + # disabling this can result in faster builds, but may run out of space on some runners + run_disk_clean_step: + type: boolean + required: false + default: true + opt_in_archs: + type: string + required: false + default: '' workflow_dispatch: inputs: override_git_describe: @@ -29,6 +38,11 @@ on: type: string required: false default: '' + opt_in_archs: + description: 'Semicolon-separated list of architectures to opt into' + type: string + required: false + default: 'windows_arm64;' skip_tests: description: 'Set to true to skip all testing' type: boolean @@ -38,6 +52,11 @@ on: type: string required: false default: 'true' + run_disk_clean_step: + description: 'Disabling this can result in faster builds, but may run out of space on some runners' + type: boolean + required: false + default: true push: branches-ignore: - 'main' @@ -86,12 +105,17 @@ jobs: outputs: main_extensions_config: ${{ steps.set-main-extensions.outputs.extension_config }} main_extensions_exclude_archs: ${{ steps.set-main-extensions.outputs.exclude_archs }} + main_extensions_opt_in_archs: ${{ steps.set-main-extensions.outputs.opt_in_archs }} rust_based_extensions_config: ${{ steps.set-rust-based-extensions.outputs.extension_config }} rust_based_extensions_exclude_archs: ${{ steps.set-rust-based-extensions.outputs.exclude_archs }} + external_extensions_config: ${{ steps.set-external-extensions.outputs.extension_config }} + external_extensions_exclude_archs: ${{ steps.set-external-extensions.outputs.exclude_archs }} + env: # NOTE: on PRs we exclude some archs to speed things up - BASE_EXCLUDE_ARCHS: ${{ (github.event_name == 'pull_request' || inputs.run_all != 'true') && 'wasm_eh;wasm_threads;windows_amd64_mingw;osx_amd64;linux_arm64;linux_amd64_musl;' || '' }} + BASE_EXCLUDE_ARCHS: ${{ (github.event_name == 'pull_request' || inputs.run_all != 'true') && 'wasm_mvp;wasm_threads;windows_amd64_mingw;osx_amd64;linux_arm64;linux_amd64_musl;' || '' }} EXTRA_EXCLUDE_ARCHS: ${{ inputs.extra_exclude_archs }} + OPT_IN_ARCHS: ${{ inputs.opt_in_archs }} steps: - uses: actions/checkout@v4 with: @@ -107,6 +131,7 @@ run: | # Set config echo exclude_archs="$DEFAULT_EXCLUDE_ARCHS;$BASE_EXCLUDE_ARCHS;$EXTRA_EXCLUDE_ARCHS" >> $GITHUB_OUTPUT + echo opt_in_archs="$OPT_IN_ARCHS" >> $GITHUB_OUTPUT in_tree_extensions="`cat $IN_TREE_CONFIG_FILE`" out_of_tree_extensions="`cat $OUT_OF_TREE_CONFIG_FILE`" echo "extension_config<<EOF" >> $GITHUB_OUTPUT @@ -129,6 +154,19 @@ echo "EOF" >> $GITHUB_OUTPUT cat $GITHUB_OUTPUT + - id: set-external-extensions + name: Configure External extensions + env: + CONFIG_FILE: .github/config/external_extensions.cmake + DEFAULT_EXCLUDE_ARCHS: 'wasm_mvp;wasm_eh;wasm_threads;windows_amd64_mingw;windows_amd64;linux_amd64_musl' + run: | + echo exclude_archs="$DEFAULT_EXCLUDE_ARCHS;$BASE_EXCLUDE_ARCHS;$EXTRA_EXCLUDE_ARCHS" >> $GITHUB_OUTPUT + external_extensions="`cat $CONFIG_FILE`" + echo "extension_config<<EOF" >> $GITHUB_OUTPUT + echo "$external_extensions" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + cat $GITHUB_OUTPUT + # Build the extensions from .github/config/in_tree_extensions.cmake main-extensions: name: Main Extensions @@ -138,9 +176,10 @@ with:
artifact_prefix: main-extensions exclude_archs: ${{ needs.load-extension-configs.outputs.main_extensions_exclude_archs }} + opt_in_archs: ${{ needs.load-extension-configs.outputs.main_extensions_opt_in_archs }} extension_config: ${{ needs.load-extension-configs.outputs.main_extensions_config }} override_tag: ${{ inputs.override_git_describe }} - duckdb_ref: ${{ inputs.git_ref }} + override_duckdb_version: ${{ inputs.git_ref }} skip_tests: ${{ inputs.skip_tests && true || false }} save_cache: ${{ vars.BRANCHES_TO_BE_CACHED == '' || contains(vars.BRANCHES_TO_BE_CACHED, github.ref) }} @@ -156,7 +195,23 @@ jobs: extension_config: ${{ needs.load-extension-configs.outputs.rust_based_extensions_config }} extra_toolchains: 'rust' override_tag: ${{ inputs.override_git_describe }} - duckdb_ref: ${{ inputs.git_ref }} + override_duckdb_version: ${{ inputs.git_ref }} + skip_tests: ${{ inputs.skip_tests && true || false }} + save_cache: ${{ vars.BRANCHES_TO_BE_CACHED == '' || contains(vars.BRANCHES_TO_BE_CACHED, github.ref) }} + + # Build the extensions from .github/config/external_extensions.cmake + external-extensions: + name: External Extensions + needs: + - load-extension-configs + uses: ./.github/workflows/_extension_distribution.yml + with: + exclude_archs: ${{ needs.load-extension-configs.outputs.external_extensions_exclude_archs }} + artifact_prefix: external-extensions + extension_config: ${{ needs.load-extension-configs.outputs.external_extensions_config }} + extra_toolchains: 'rust' + override_tag: ${{ inputs.override_git_describe }} + override_duckdb_version: ${{ inputs.git_ref }} skip_tests: ${{ inputs.skip_tests && true || false }} save_cache: ${{ vars.BRANCHES_TO_BE_CACHED == '' || contains(vars.BRANCHES_TO_BE_CACHED, github.ref) }} @@ -167,23 +222,32 @@ jobs: needs: - main-extensions - rust-based-extensions + - external-extensions steps: - uses: actions/checkout@v4 with: fetch-depth: 0 + - uses: ./.github/actions/cleanup_runner + - uses: actions/download-artifact@v4 name: Download main extensions with: - pattern: main-extensions-${{ github.sha }}* + pattern: main-extensions* path: /tmp/repository_generation/main-extensions - uses: actions/download-artifact@v4 name: Download rust-based extensions with: - pattern: rust-based-extensions-${{ github.sha }}* + pattern: rust-based-extensions* path: /tmp/repository_generation/rust-based-extensions + - uses: actions/download-artifact@v4 + name: Download external extensions + with: + pattern: external-extensions* + path: /tmp/repository_generation/external-extensions + - name: Print all extensions run: | tree /tmp/repository_generation @@ -197,7 +261,7 @@ jobs: - uses: actions/upload-artifact@v4 with: if-no-files-found: error - name: extension-repository-${{ github.sha }} + name: extension-repository path: | /tmp/merged_repository/**/*.duckdb_extension* @@ -208,19 +272,29 @@ jobs: - create-extension-repository steps: + - name: Free disk space + uses: endersonmenezes/free-disk-space@v3 + continue-on-error: true + if: inputs.run_disk_clean_step + with: + remove_haskell: true + remove_tool_cache: true + remove_folders: "/usr/local/share/powershell /usr/share/swift" + testing: false + - uses: actions/checkout@v4 with: fetch-depth: 0 - uses: actions/download-artifact@v4 with: - pattern: extension-repository-${{ github.sha }} + pattern: extension-repository path: /tmp - name: List extensions to deploy shell: bash run: | - tree /tmp/extension-repository-${{ github.sha }} + tree /tmp/extension-repository - name: Deploy extensions shell: bash @@ -232,7 
+306,7 @@ jobs: DUCKDB_EXTENSION_SIGNING_PK: ${{ secrets.DUCKDB_EXTENSION_SIGNING_PK }} run: | pip install awscli - ./scripts/extension-upload-repository.sh /tmp/extension-repository-${{ github.sha }} + ./scripts/extension-upload-repository.sh /tmp/extension-repository autoload-tests: name: Extension Autoloading Tests @@ -259,13 +333,13 @@ - uses: actions/download-artifact@v4 with: - pattern: extension-repository-${{ github.sha }} + pattern: extension-repository path: /tmp - name: List extensions to test with shell: bash run: | - tree /tmp/extension-repository-${{ github.sha }} + tree /tmp/extension-repository - name: Build DuckDB env: @@ -281,7 +355,7 @@ - name: Run Tests env: - LOCAL_EXTENSION_REPO: /tmp/extension-repository-${{ github.sha }} + LOCAL_EXTENSION_REPO: /tmp/extension-repository run: | ./build/release/test/unittest --autoloading available --skip-compiled @@ -326,13 +400,13 @@ - uses: actions/download-artifact@v4 name: Download extension repository artifact with: - pattern: extension-repository-${{ github.sha }} + pattern: extension-repository path: /tmp - name: Copy over local extension repository shell: bash run: | - cp -r /tmp/extension-repository-${{ github.sha }} build/release/repository + cp -r /tmp/extension-repository build/release/repository tree build/release/repository find build/release/repository -type f ! -path "build/release/repository/*/linux_amd64/*" -delete tree build/release/repository @@ -363,4 +437,4 @@ if: failure() run: | echo "There are differences in src/include/duckdb/main/extension_entries.hpp" - echo "Check the uploaded extension_entries.hpp (in the workflow Summary), and check that in instead of src/include/duckdb/main/extension_entries.hpp" \ No newline at end of file + echo "Check the uploaded extension_entries.hpp (in the workflow Summary), and check it in to replace src/include/duckdb/main/extension_entries.hpp" diff --git a/.github/workflows/InvokeCI.yml b/.github/workflows/InvokeCI.yml index a869c545fbd1..22627a09cd59 100644 --- a/.github/workflows/InvokeCI.yml +++ b/.github/workflows/InvokeCI.yml @@ -27,6 +27,7 @@ jobs: git_ref: ${{ inputs.git_ref }} skip_tests: ${{ inputs.skip_tests }} run_all: ${{ inputs.run_all }} + opt_in_archs: 'windows_arm64;' osx: uses: ./.github/workflows/OSX.yml diff --git a/.github/workflows/LinuxRelease.yml b/.github/workflows/LinuxRelease.yml index 0ab52bc2ef3f..a35bc1e86bb0 100644 --- a/.github/workflows/LinuxRelease.yml +++ b/.github/workflows/LinuxRelease.yml @@ -117,6 +117,7 @@ jobs: - name: Deploy shell: bash env: + AWS_ENDPOINT_URL: ${{ secrets.S3_DUCKDB_STAGING_ENDPOINT }} AWS_ACCESS_KEY_ID: ${{ secrets.S3_DUCKDB_STAGING_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DUCKDB_STAGING_KEY }} run: | @@ -140,15 +141,18 @@ run: | python3 scripts/run_tests_one_by_one.py build/release/test/unittest "*" --time_execution + - name: Release specific tests + shell: bash + run: | + ./build/release/test/unittest --select-tag release + - name: Tools Tests shell: bash - if: ${{ inputs.skip_tests != 'true' }} run: | python3 -m pytest tools/shell/tests --shell-binary build/release/duckdb - name: Examples shell: bash - if: ${{ inputs.skip_tests != 'true' }} run: | build/release/benchmark/benchmark_runner benchmark/micro/update/update_with_join.benchmark build/release/duckdb -c "COPY (SELECT 42) TO '/dev/stdout' (FORMAT PARQUET)" | cat @@ -167,6 +171,7 @@ - name: Deploy shell: bash env: + AWS_ENDPOINT_URL: ${{ secrets.S3_DUCKDB_STAGING_ENDPOINT }} AWS_ACCESS_KEY_ID: ${{ 
secrets.S3_DUCKDB_STAGING_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DUCKDB_STAGING_KEY }} run: | diff --git a/.github/workflows/Main.yml b/.github/workflows/Main.yml index 1b345244f0ea..82f7b388dfd1 100644 --- a/.github/workflows/Main.yml +++ b/.github/workflows/Main.yml @@ -49,7 +49,7 @@ jobs: steps: - name: Preliminary checks on CI run: echo "Event name is ${{ github.event_name }}" - + linux-debug: name: Linux Debug # This tests release build while enabling slow verifiers (masked by #ifdef DEBUG) and sanitizers @@ -102,7 +102,7 @@ jobs: linux-release: name: Linux Release (full suite) - needs: check-draft + needs: linux-debug runs-on: ubuntu-24.04 env: GEN: ninja @@ -134,7 +134,7 @@ jobs: no-string-inline: name: No String Inline / Destroy Unpinned Blocks runs-on: ubuntu-24.04 - needs: linux-configs + needs: linux-debug env: GEN: ninja CORE_EXTENSIONS: "icu;parquet;tpch;tpcds;fts;json;inet" @@ -171,7 +171,7 @@ jobs: vector-sizes: name: Vector Sizes runs-on: ubuntu-22.04 - needs: linux-configs + needs: linux-debug env: CC: gcc-10 CXX: g++-10 @@ -179,19 +179,12 @@ jobs: DUCKDB_TEST_DESCRIPTION: 'Compiled with STANDARD_VECTOR_SIZE=2. Use require vector_size 2048 to skip tests.' steps: - - name: Clean up the disc space - shell: bash - run: | - echo "Disk usage before clean up:" - df -h - rm -rf /opt/hostedtoolcache/CodeQL Java* Pypy Ruby go node - echo "Disk usage after clean up:" - df -h - - uses: actions/checkout@v3 with: fetch-depth: 0 + - uses: ./.github/actions/cleanup_runner + - name: Install shell: bash run: sudo apt-get update -y -qq && sudo apt-get install -y -qq ninja-build @@ -215,7 +208,7 @@ jobs: name: Valgrind if: ${{ !startsWith(github.ref, 'refs/tags/v') }} runs-on: ubuntu-24.04 - needs: linux-configs + needs: linux-debug env: CC: clang CXX: clang++ @@ -251,7 +244,7 @@ jobs: threadsan: name: Thread Sanitizer - needs: linux-configs + needs: linux-debug runs-on: ubuntu-24.04 env: CC: clang @@ -295,7 +288,7 @@ jobs: amalgamation-tests: name: Amalgamation Tests runs-on: ubuntu-22.04 - needs: check-draft + needs: linux-debug env: CC: clang CXX: clang++ @@ -321,12 +314,12 @@ jobs: python scripts/amalgamation.py --extended clang++ -std=c++17 -Isrc/amalgamation src/amalgamation/duckdb.cpp -emit-llvm -S -O0 - # TODO: Bring back BLOCK_VERIFICATION: 1, and consider bringing back fts + # TODO: Consider bringing back fts # TODO: DEBUG_STACKTRACE: 1 + reldebug ? 
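+ # The Linux jobs below all gate on linux-debug, so a broken debug build stops the expensive configurations early (assumed rationale; not stated in the diff).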
linux-configs: name: Tests a release build with different configurations runs-on: ubuntu-24.04 - needs: check-draft + needs: linux-debug env: BASE_BRANCH: ${{ github.base_ref || 'main' }} @@ -339,9 +332,9 @@ jobs: with: python-version: '3.12' - - name: Install Ninja + - name: Install shell: bash - run: sudo apt-get update -y -qq && sudo apt-get install -y -qq ninja-build + run: sudo apt-get update -y -qq && sudo apt-get install -y -qq ninja-build libcurl4-openssl-dev - name: Setup Ccache uses: hendrikmuhs/ccache-action@main @@ -357,12 +350,6 @@ jobs: GEN: ninja run: make - - name: test/configs/encryption.json - if: (success() || failure()) && steps.build.conclusion == 'success' - shell: bash - run: | - ./build/release/test/unittest --test-config test/configs/encryption.json - - name: test/configs/force_storage.json if: (success() || failure()) && steps.build.conclusion == 'success' shell: bash @@ -381,6 +368,12 @@ jobs: run: | ./build/release/test/unittest --test-config test/configs/latest_storage.json + - name: test/configs/block_verification.json + if: (success() || failure()) && steps.build.conclusion == 'success' + shell: bash + run: | + ./build/release/test/unittest --test-config test/configs/block_verification.json + - name: test/configs/verify_fetch_row.json if: (success() || failure()) && steps.build.conclusion == 'success' shell: bash @@ -464,3 +457,27 @@ jobs: shell: bash run: | ./build/release/test/unittest --test-config test/configs/compressed_in_memory.json + + - name: Forwards compatibility tests + if: (success() || failure()) && steps.build.conclusion == 'success' + shell: bash + run: | + python3 scripts/test_storage_compatibility.py --versions "1.2.1|1.3.2" --new-unittest build/release/test/unittest + + # TODO: clean this up: we should probably be able to run this whole test suite with httpfs + # We want to run the remainder of tests with httpfs + - name: Build with httpfs extension + id: build-httpfs + shell: bash + if: (success() || failure()) && steps.build.conclusion == 'success' + env: + CORE_EXTENSIONS: "json;parquet;icu;tpch;tpcds;httpfs" + GEN: ninja + run: + make + + - name: test/configs/encryption.json + if: (success() || failure()) && steps.build.conclusion == 'success' + shell: bash + run: | + ./build/release/test/unittest --test-config test/configs/encryption.json diff --git a/.github/workflows/NightlyTests.yml b/.github/workflows/NightlyTests.yml index 6954372383c3..f0140670d4aa 100644 --- a/.github/workflows/NightlyTests.yml +++ b/.github/workflows/NightlyTests.yml @@ -107,43 +107,6 @@ jobs: run: | python3 scripts/run_tests_one_by_one.py build/relassert/test/unittest "*" --no-exit --timeout 1200 - release-assert-osx: - name: Release Assertions OSX - runs-on: macos-latest - needs: linux-memory-leaks - env: - GEN: ninja - CORE_EXTENSIONS: "icu;tpch;tpcds;fts;json;inet;httpfs" - DISABLE_SANITIZER: 1 - CRASH_ON_ASSERT: 1 - RUN_SLOW_VERIFIERS: 1 - - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - uses: actions/setup-python@v5 - with: - python-version: '3.12' - - - name: Install Ninja - run: brew install ninja llvm - - - name: Setup Ccache - uses: hendrikmuhs/ccache-action@main - with: - key: ${{ github.job }} - save: ${{ env.CCACHE_SAVE }} - - - name: Build - shell: bash - run: CMAKE_LLVM_PATH='/opt/homebrew/opt/llvm' UNSAFE_NUMERIC_CAST=1 make relassert - - - name: Test - shell: bash - run: | - python3 scripts/run_tests_one_by_one.py build/relassert/test/unittest "*" --no-exit --timeout 1200 - release-assert-osx-storage: name: Release Assertions 
OSX Storage runs-on: macos-latest @@ -660,46 +623,3 @@ jobs: - name: Test shell: bash run: build/relassert/test/unittest --test-config test/configs/hash_zero.json - - codecov: - name: Code Coverage - runs-on: ubuntu-22.04 - needs: linux-memory-leaks - env: - GEN: ninja - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Install - shell: bash - run: sudo apt-get update -y -qq && sudo apt-get install -y -qq ninja-build lcov curl g++ zip - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: '3.9' - - - name: Install pytest - run: | - python3 -m pip install pytest - - - name: Check Coverage - shell: bash - continue-on-error: true - run: | - make coverage-check - - - name: Create Archive - if: ${{ success() || failure() }} - shell: bash - run: | - zip -r coverage.zip coverage_html - - - uses: actions/upload-artifact@v4 - if: ${{ success() || failure() }} - with: - name: coverage - path: coverage.zip - if-no-files-found: error diff --git a/.github/workflows/OSX.yml b/.github/workflows/OSX.yml index 7e296f1bbb49..333a58197b16 100644 --- a/.github/workflows/OSX.yml +++ b/.github/workflows/OSX.yml @@ -173,14 +173,26 @@ jobs: - name: Deploy shell: bash env: + AWS_ENDPOINT_URL: ${{ secrets.S3_DUCKDB_STAGING_ENDPOINT }} AWS_ACCESS_KEY_ID: ${{ secrets.S3_DUCKDB_STAGING_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DUCKDB_STAGING_KEY }} run: | python scripts/amalgamation.py + zip -j -y libduckdb-osx-universal.zip build/release/src/libduckdb*.dylib src/amalgamation/duckdb.hpp src/include/duckdb.h + + mkdir build/release_arm64 build/release_amd64 + lipo -extract arm64 -output build/release_arm64/duckdb build/release/duckdb + lipo -extract x86_64 -output build/release_amd64/duckdb build/release/duckdb + zip -j duckdb_cli-osx-universal.zip build/release/duckdb + zip -j duckdb_cli-osx-arm64.zip build/release_arm64/duckdb + zip -j duckdb_cli-osx-amd64.zip build/release_amd64/duckdb + gzip -9 -k -n -c build/release/duckdb > duckdb_cli-osx-universal.gz - zip -j libduckdb-osx-universal.zip build/release/src/libduckdb*.dylib src/amalgamation/duckdb.hpp src/include/duckdb.h - ./scripts/upload-assets-to-staging.sh github_release libduckdb-osx-universal.zip duckdb_cli-osx-universal.zip duckdb_cli-osx-universal.gz + gzip -9 -k -n -c build/release_arm64/duckdb > duckdb_cli-osx-arm64.gz + gzip -9 -k -n -c build/release_amd64/duckdb > duckdb_cli-osx-amd64.gz + + ./scripts/upload-assets-to-staging.sh github_release libduckdb-osx-universal.zip duckdb_cli-osx-universal.zip duckdb_cli-osx-universal.gz duckdb_cli-osx-arm64.zip duckdb_cli-osx-arm64.gz duckdb_cli-osx-amd64.zip duckdb_cli-osx-amd64.gz - uses: actions/upload-artifact@v4 with: diff --git a/.github/workflows/Regression.yml b/.github/workflows/Regression.yml index 1145a155090a..6451681ddc26 100644 --- a/.github/workflows/Regression.yml +++ b/.github/workflows/Regression.yml @@ -137,55 +137,55 @@ jobs: if: ${{ github.repository == 'duckdb/duckdb' && github.ref == 'refs/heads/main' }} shell: bash run: | - python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks benchmark/fivetran/benchmark_list.csv --verbose --threads 2 + python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks benchmark/fivetran/benchmark_list.csv --verbose --threads 2 --clear-benchmark-cache - name: Regression Test Micro if: always() shell: bash run: 
| - python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/micro.csv --verbose --threads 2 + python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/micro.csv --verbose --threads 2 --clear-benchmark-cache - name: Regression Test Ingestion Perf if: always() shell: bash run: | - python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/ingestion.csv --verbose --threads 2 + python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/ingestion.csv --verbose --threads 2 --clear-benchmark-cache - name: Regression Test TPCH if: always() shell: bash run: | - python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/tpch.csv --verbose --threads 2 + python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/tpch.csv --verbose --threads 2 --clear-benchmark-cache - name: Regression Test TPCH-PARQUET if: always() shell: bash run: | - python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/tpch_parquet.csv --verbose --threads 2 + python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/tpch_parquet.csv --verbose --threads 2 --clear-benchmark-cache - name: Regression Test TPCDS if: always() shell: bash run: | - python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/tpcds.csv --verbose --threads 2 + python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/tpcds.csv --verbose --threads 2 --clear-benchmark-cache - name: Regression Test H2OAI if: always() shell: bash run: | - python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/h2oai.csv --verbose --threads 2 + python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/h2oai.csv --verbose --threads 2 --clear-benchmark-cache - name: Regression Test IMDB if: always() shell: bash run: | - python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/imdb.csv --verbose --threads 2 + python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/imdb.csv --verbose --threads 2 --clear-benchmark-cache - name: Regression Test CSV if: always() shell: bash run: | - python 
scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/csv.csv --verbose --threads 2 + python scripts/regression/test_runner.py --old duckdb/build/release/benchmark/benchmark_runner --new build/release/benchmark/benchmark_runner --benchmarks .github/regression/csv.csv --verbose --threads 2 --clear-benchmark-cache - name: Regression Test RealNest if: always() diff --git a/.github/workflows/StagedUpload.yml b/.github/workflows/StagedUpload.yml index 74753cca977c..49d0589a0067 100644 --- a/.github/workflows/StagedUpload.yml +++ b/.github/workflows/StagedUpload.yml @@ -9,9 +9,6 @@ on: target_git_describe: type: string -env: - GH_TOKEN: ${{ secrets.GH_TOKEN }} - jobs: staged-upload: runs-on: ubuntu-latest @@ -32,14 +29,22 @@ jobs: - name: Download from staging bucket shell: bash env: + AWS_ENDPOINT_URL: ${{ secrets.S3_DUCKDB_STAGING_ENDPOINT }} AWS_ACCESS_KEY_ID: ${{ secrets.S3_DUCKDB_STAGING_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DUCKDB_STAGING_KEY }} run: | TARGET=$(git log -1 --format=%h) mkdir to_be_uploaded - aws s3 cp --recursive "s3://duckdb-staging/$TARGET/${{ inputs.target_git_describe }}/$GITHUB_REPOSITORY/github_release" to_be_uploaded --region us-east-2 + aws s3 cp --recursive "s3://duckdb-staging/$TARGET/${{ inputs.target_git_describe }}/$GITHUB_REPOSITORY/github_release" to_be_uploaded - name: Deploy + if: ${{ inputs.target_git_describe != '' }} shell: bash + env: + AWS_ENDPOINT_URL: ${{ secrets.DUCKDB_INSTALL_S3_ENDPOINT }} + AWS_ACCESS_KEY_ID: ${{ secrets.DUCKDB_INSTALL_S3_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.DUCKDB_INSTALL_S3_SECRET }} + GH_TOKEN: ${{ secrets.GH_TOKEN }} run: | python3 scripts/asset-upload-gha.py to_be_uploaded/* + aws s3 cp --recursive to_be_uploaded "s3://duckdb-install/${{ inputs.target_git_describe }}/" \ No newline at end of file diff --git a/.github/workflows/Windows.yml b/.github/workflows/Windows.yml index fb0c10f4d125..7d796cca4c23 100644 --- a/.github/workflows/Windows.yml +++ b/.github/workflows/Windows.yml @@ -150,6 +150,7 @@ jobs: - name: Deploy shell: bash env: + AWS_ENDPOINT_URL: ${{ secrets.S3_DUCKDB_STAGING_ENDPOINT }} AWS_ACCESS_KEY_ID: ${{ secrets.S3_DUCKDB_STAGING_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DUCKDB_STAGING_KEY }} run: | @@ -263,6 +264,7 @@ jobs: - name: Deploy shell: bash env: + AWS_ENDPOINT_URL: ${{ secrets.S3_DUCKDB_STAGING_ENDPOINT }} AWS_ACCESS_KEY_ID: ${{ secrets.S3_DUCKDB_STAGING_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DUCKDB_STAGING_KEY }} run: | diff --git a/.github/workflows/_extension_distribution.yml b/.github/workflows/_extension_distribution.yml index b58063992d11..83c4449c1b3b 100644 --- a/.github/workflows/_extension_distribution.yml +++ b/.github/workflows/_extension_distribution.yml @@ -12,11 +12,15 @@ on: required: false type: string default: '' + opt_in_archs: + required: false + type: string + default: '' extra_toolchains: required: false type: string default: "" - duckdb_ref: + override_duckdb_version: required: false type: string default: "" @@ -48,13 +52,14 @@ jobs: # DuckDB version is overridden to the current commit of the current repository set_caller_as_duckdb: true - duckdb_version: ${{ github.sha }} + duckdb_version: ${{ inputs.override_duckdb_version != '' && inputs.override_duckdb_version || github.sha }} # CI tools is pinned to main override_ci_tools_repository: duckdb/extension-ci-tools ci_tools_version: main exclude_archs: ${{ inputs.exclude_archs }} + opt_in_archs: ${{
inputs.opt_in_archs }} extra_toolchains: ${{ inputs.extra_toolchains }} use_merged_vcpkg_manifest: '1' @@ -64,5 +69,7 @@ jobs: skip_tests: ${{ inputs.skip_tests }} save_cache: ${{ inputs.save_cache }} + # This forces tests to be run for all extensions in the config + extensions_test_selection: 'complete' # The extension_config.cmake configuration that gets built extra_extension_config: ${{ inputs.extension_config }} diff --git a/.github/workflows/_manual_extension_deploy.yml b/.github/workflows/_manual_extension_deploy.yml new file mode 100644 index 000000000000..42f59f9b0139 --- /dev/null +++ b/.github/workflows/_manual_extension_deploy.yml @@ -0,0 +1,154 @@ +# Note: *will* override existing extensions, tread with care + +name: Single Extension Build and Deploy on: workflow_dispatch: inputs: extension_name: description: 'extension to deploy (using current config)' required: true type: string override_git_describe: description: 'Override git describe (use for tagged releases)' type: string duckdb_ref: description: 'DuckDB version (empty for current commit)' type: string required: false default: '' extension_template_ref: description: 'The extension-template version to use (empty for vx.y-codename)' type: string required: false default: 'v1.4-andium' extra_exclude_archs: description: 'Inject more architectures to skip (optional)' type: string required: false default: '' skip_tests: description: 'Set to true to skip all testing (optional)' type: boolean required: false default: false extra_toolchains: description: 'extra toolchains (optional)' required: false type: string default: "" + +jobs: + load-extension-configs: + name: Load Extension Config + runs-on: ubuntu-latest + outputs: + extension_config: ${{ steps.set-extension.outputs.extension_config }} + extension_exclude_archs: ${{ steps.set-extension.outputs.exclude_archs }} + env: + BASE_EXCLUDE_ARCHS: '' + EXTRA_EXCLUDE_ARCHS: ${{ inputs.extra_exclude_archs }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - id: set-extension + name: Configure extension + env: + EXT_CONFIG: .github/config/extensions/${{ inputs.extension_name }}.cmake + run: | + # Set config + echo exclude_archs="$DEFAULT_EXCLUDE_ARCHS;$BASE_EXCLUDE_ARCHS;$EXTRA_EXCLUDE_ARCHS" >> $GITHUB_OUTPUT + ext_config="`cat $EXT_CONFIG`" + echo "extension_config<<EOF" >> $GITHUB_OUTPUT + echo "$ext_config" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + cat $GITHUB_OUTPUT + + build: + name: Build Extension + needs: + - load-extension-configs + uses: ./.github/workflows/_extension_distribution.yml + with: + artifact_prefix: extensions + exclude_archs: ${{ needs.load-extension-configs.outputs.extension_exclude_archs }} + extension_config: ${{ needs.load-extension-configs.outputs.extension_config }} + override_tag: ${{ inputs.override_git_describe }} + override_duckdb_version: ${{ inputs.duckdb_ref }} + skip_tests: ${{ inputs.skip_tests && true || false }} + save_cache: ${{ vars.BRANCHES_TO_BE_CACHED == '' || contains(vars.BRANCHES_TO_BE_CACHED, github.ref) }} + extra_toolchains: 'rust' + + # Merge all extensions into a single, versioned repository + create-extension-repository: + name: Create Extension Repository + runs-on: ubuntu-latest + needs: + - build + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/download-artifact@v4 + name: Download extensions + with: + pattern: extensions-${{ inputs.duckdb_ref }}* + path: /tmp/repository_generation/extensions + + -
name: Print all extensions + run: | + tree /tmp/repository_generation + + - name: Merge into single repository + run: | + mkdir /tmp/merged_repository + cp -r /tmp/repository_generation/*/*/* /tmp/merged_repository + tree /tmp/merged_repository + + - name: Remove all non-target extensions + run: | + find /tmp/merged_repository -type f ! -name "${{ inputs.extension_name }}.duckdb_extension*" -delete + tree /tmp/merged_repository + + - uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: extension-repository-${{ inputs.duckdb_ref }} + path: | + /tmp/merged_repository/**/*.duckdb_extension* + + upload-extensions: + name: Upload Extensions + runs-on: ubuntu-latest + needs: + - create-extension-repository + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/download-artifact@v4 + with: + pattern: extension-repository-${{ inputs.duckdb_ref }} + path: /tmp + + - name: List extensions to deploy + shell: bash + run: | + tree /tmp/extension-repository-${{ inputs.duckdb_ref }} + + - name: Deploy extensions + shell: bash + env: + AWS_ENDPOINT_URL: ${{ secrets.DUCKDB_CORE_EXTENSION_S3_ENDPOINT }} + AWS_ACCESS_KEY_ID: ${{secrets.DUCKDB_CORE_EXTENSION_S3_ID}} + AWS_SECRET_ACCESS_KEY: ${{secrets.DUCKDB_CORE_EXTENSION_S3_SECRET}} + DUCKDB_DEPLOY_SCRIPT_MODE: for_real + DUCKDB_EXTENSION_SIGNING_PK: ${{ secrets.DUCKDB_EXTENSION_SIGNING_PK }} + run: | + pip install awscli + ./scripts/extension-upload-repository.sh /tmp/extension-repository-${{ inputs.duckdb_ref }} duckdb-staging \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index b513696eff61..32462652350d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -541,10 +541,6 @@ if(LATEST_STORAGE) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDUCKDB_LATEST_STORAGE") endif() -if(BLOCK_VERIFICATION) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDUCKDB_BLOCK_VERIFICATION") -endif() - if(DEBUG_ALLOCATION) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDUCKDB_DEBUG_ALLOCATION") endif() @@ -850,7 +846,7 @@ else() endif() set(LOCAL_EXTENSION_REPO FALSE) -if (NOT EXTENSION_CONFIG_BUILD AND NOT ${EXTENSION_TESTS_ONLY} AND NOT CLANG_TIDY) +if (NOT ${EXTENSION_CONFIG_BUILD} AND NOT ${EXTENSION_TESTS_ONLY} AND NOT CLANG_TIDY) if (NOT Python3_FOUND) add_custom_target( duckdb_local_extension_repo ALL) diff --git a/Makefile b/Makefile index f0bd8b793c4a..fb6adb1d094d 100644 --- a/Makefile +++ b/Makefile @@ -58,6 +58,10 @@ endif ifeq (${STATIC_LIBCPP}, 1) STATIC_LIBCPP=-DSTATIC_LIBCPP=TRUE endif +GIT_BASE_BRANCH:=main +ifneq (${DUCKDB_GIT_BASE_BRANCH}, ) + GIT_BASE_BRANCH:=${DUCKDB_GIT_BASE_BRANCH} +endif COMMON_CMAKE_VARS ?= CMAKE_VARS_BUILD ?= @@ -149,11 +153,11 @@ ifdef CORE_EXTENSIONS BUILD_EXTENSIONS:=${BUILD_EXTENSIONS};${CORE_EXTENSIONS} endif ifeq (${BUILD_ALL_EXT}, 1) - CMAKE_VARS:=${CMAKE_VARS} -DDUCKDB_EXTENSION_CONFIGS=".github/config/in_tree_extensions.cmake;.github/config/out_of_tree_extensions.cmake;.github/config/rust_based_extensions.cmake" + CMAKE_VARS:=${CMAKE_VARS} -DDUCKDB_EXTENSION_CONFIGS=".github/config/in_tree_extensions.cmake;.github/config/out_of_tree_extensions.cmake;.github/config/external_extensions.cmake;.github/config/rust_based_extensions.cmake" else ifeq (${BUILD_ALL_IT_EXT}, 1) CMAKE_VARS:=${CMAKE_VARS} -DDUCKDB_EXTENSION_CONFIGS=".github/config/in_tree_extensions.cmake" else ifeq (${BUILD_ALL_OOT_EXT}, 1) - CMAKE_VARS:=${CMAKE_VARS} -DDUCKDB_EXTENSION_CONFIGS=".github/config/out_of_tree_extensions.cmake" + CMAKE_VARS:=${CMAKE_VARS} 
-DDUCKDB_EXTENSION_CONFIGS=".github/config/out_of_tree_extensions.cmake;.github/config/external_extensions.cmake;" endif ifeq (${STATIC_OPENSSL}, 1) CMAKE_VARS:=${CMAKE_VARS} -DOPENSSL_USE_STATIC_LIBS=1 @@ -236,9 +240,6 @@ endif ifeq (${LATEST_STORAGE}, 1) CMAKE_VARS:=${CMAKE_VARS} -DLATEST_STORAGE=1 endif -ifeq (${BLOCK_VERIFICATION}, 1) - CMAKE_VARS:=${CMAKE_VARS} -DBLOCK_VERIFICATION=1 -endif ifneq (${DISABLE_CPP_UNITTESTS}, ) CMAKE_VARS:=${CMAKE_VARS} -DENABLE_UNITTEST_CPP_TESTS=0 endif @@ -315,7 +316,7 @@ ifeq (${EXPORT_DYNAMIC_SYMBOLS}, 1) CMAKE_VARS:=${CMAKE_VARS} -DEXPORT_DYNAMIC_SYMBOLS=1 endif ifneq ("${CMAKE_LLVM_PATH}", "") - CMAKE_VARS:=${CMAKE_VARS} -DCMAKE_RANLIB='${CMAKE_LLVM_PATH}/bin/llvm-ranlib' -DCMAKE_AR='${CMAKE_LLVM_PATH}/bin/llvm-ar' -DCMAKE_CXX_COMPILER='${CMAKE_LLVM_PATH}/bin/clang++' -DCMAKE_C_COMPILER='${CMAKE_LLVM_PATH}/bin/clang' + CMAKE_VARS:=${CMAKE_VARS} -DCMAKE_RANLIB='${CMAKE_LLVM_PATH}/bin/llvm-ranlib' -DCMAKE_AR='${CMAKE_LLVM_PATH}/bin/llvm-ar' -DCMAKE_CXX_COMPILER='${CMAKE_LLVM_PATH}/bin/clang++' -DCMAKE_C_COMPILER='${CMAKE_LLVM_PATH}/bin/clang' -DCMAKE_EXE_LINKER_FLAGS_INIT='-L${CMAKE_LLVM_PATH}/lib -L${CMAKE_LLVM_PATH}/lib/c++' -DCMAKE_SHARED_LINKER_FLAGS_INIT='-L${CMAKE_LLVM_PATH}/lib -L${CMAKE_LLVM_PATH}/lib/c++' -DCMAKE_MODULE_LINKER_FLAGS_INIT='-L${CMAKE_LLVM_PATH}/lib -L${CMAKE_LLVM_PATH}/lib/c++' endif CMAKE_VARS:=${CMAKE_VARS} ${COMMON_CMAKE_VARS} @@ -441,7 +442,7 @@ tidy-check-diff: cd build/tidy && \ cmake -DCLANG_TIDY=1 -DDISABLE_UNITY=1 -DBUILD_EXTENSIONS=parquet -DBUILD_SHELL=0 ../.. && \ cd ../../ && \ - git diff origin/main . ':(exclude)tools' ':(exclude)extension' ':(exclude)test' ':(exclude)benchmark' ':(exclude)third_party' ':(exclude)src/common/adbc' ':(exclude)src/main/capi' | $(PYTHON) scripts/clang-tidy-diff.py -path build/tidy -quiet ${TIDY_THREAD_PARAMETER} ${TIDY_BINARY_PARAMETER} ${TIDY_PERFORM_CHECKS} -p1 + git diff origin/${GIT_BASE_BRANCH} . 
':(exclude)tools' ':(exclude)extension' ':(exclude)test' ':(exclude)benchmark' ':(exclude)third_party' ':(exclude)src/common/adbc' ':(exclude)src/main/capi' | $(PYTHON) scripts/clang-tidy-diff.py -path build/tidy -quiet ${TIDY_THREAD_PARAMETER} ${TIDY_BINARY_PARAMETER} ${TIDY_PERFORM_CHECKS} -p1 tidy-fix: mkdir -p ./build/tidy && \ diff --git a/data/19578.csv b/data/19578.csv new file mode 100644 index 000000000000..f2fba2260688 --- /dev/null +++ b/data/19578.csv @@ -0,0 +1,2 @@ +Country;Location;Count;Date +China;"35.86166;104.19397";97100,7;04/07/20 \ No newline at end of file diff --git a/data/attach_test/attach.db b/data/attach_test/attach.db new file mode 100644 index 000000000000..b0f1e9174683 Binary files /dev/null and b/data/attach_test/attach.db differ diff --git a/data/attach_test/encrypted_ctr_key=abcde.db b/data/attach_test/encrypted_ctr_key=abcde.db new file mode 100644 index 000000000000..e3853a47fd37 Binary files /dev/null and b/data/attach_test/encrypted_ctr_key=abcde.db differ diff --git a/data/attach_test/encrypted_gcm_key=abcde.db b/data/attach_test/encrypted_gcm_key=abcde.db new file mode 100644 index 000000000000..77dd1f4353c2 Binary files /dev/null and b/data/attach_test/encrypted_gcm_key=abcde.db differ diff --git a/extension/core_functions/aggregate/holistic/approximate_quantile.cpp b/extension/core_functions/aggregate/holistic/approximate_quantile.cpp index 35336383b264..9aa5e94064fd 100644 --- a/extension/core_functions/aggregate/holistic/approximate_quantile.cpp +++ b/extension/core_functions/aggregate/holistic/approximate_quantile.cpp @@ -355,11 +355,11 @@ AggregateFunction GetApproxQuantileListAggregateFunction(const LogicalType &type return GetTypedApproxQuantileListAggregateFunction(type); case LogicalTypeId::INTEGER: case LogicalTypeId::DATE: - case LogicalTypeId::TIME: return GetTypedApproxQuantileListAggregateFunction(type); case LogicalTypeId::BIGINT: case LogicalTypeId::TIMESTAMP: case LogicalTypeId::TIMESTAMP_TZ: + case LogicalTypeId::TIME: return GetTypedApproxQuantileListAggregateFunction(type); case LogicalTypeId::TIME_TZ: // Not binary comparable diff --git a/extension/extension_config.cmake b/extension/extension_config.cmake index a8eab7443aa5..5f957f4000b2 100644 --- a/extension/extension_config.cmake +++ b/extension/extension_config.cmake @@ -14,6 +14,6 @@ duckdb_extension_load(parquet) # The Linux allocator has issues so we use jemalloc, but only on x86 because page sizes are fixed at 4KB. 
# Configuring jemalloc properly for 32bit is a hassle, and not worth it so we only enable on 64bit -if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT FORCE_32_BIT AND OS_NAME STREQUAL "linux" AND NOT WASM_LOADABLE_EXTENSIONS AND NOT CLANG_TIDY AND NOT ANDROID AND NOT ZOS AND NOT ${WASM_ENABLED} AND NOT ${MUSL_ENABLED}) +if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT FORCE_32_BIT AND OS_NAME STREQUAL "linux" AND NOT WASM_LOADABLE_EXTENSIONS AND NOT CLANG_TIDY AND NOT ANDROID AND NOT ZOS AND NOT BSD AND NOT ${WASM_ENABLED} AND NOT ${MUSL_ENABLED}) duckdb_extension_load(jemalloc) endif() diff --git a/extension/icu/icu_extension.cpp b/extension/icu/icu_extension.cpp index 006283576005..c18b75efd785 100644 --- a/extension/icu/icu_extension.cpp +++ b/extension/icu/icu_extension.cpp @@ -5,11 +5,8 @@ #include "duckdb/function/scalar_function.hpp" #include "duckdb/main/config.hpp" #include "duckdb/main/connection.hpp" -#include "duckdb/main/database.hpp" #include "duckdb/main/extension/extension_loader.hpp" #include "duckdb/parser/parsed_data/create_collation_info.hpp" -#include "duckdb/parser/parsed_data/create_scalar_function_info.hpp" -#include "duckdb/parser/parsed_data/create_table_function_info.hpp" #include "duckdb/planner/expression/bound_function_expression.hpp" #include "include/icu-current.hpp" #include "include/icu-dateadd.hpp" @@ -25,8 +22,6 @@ #include "include/icu_extension.hpp" #include "unicode/calendar.h" #include "unicode/coll.h" -#include "unicode/errorcode.h" -#include "unicode/sortkey.h" #include "unicode/stringpiece.h" #include "unicode/timezone.h" #include "unicode/ucol.h" @@ -209,7 +204,7 @@ static ScalarFunction GetICUCollateFunction(const string &collation, const strin return result; } -unique_ptr GetTimeZoneInternal(string &tz_str, vector &candidates) { +unique_ptr GetKnownTimeZone(const string &tz_str) { icu::StringPiece tz_name_utf8(tz_str); const auto uid = icu::UnicodeString::fromUTF8(tz_name_utf8); duckdb::unique_ptr tz(icu::TimeZone::createTimeZone(uid)); @@ -217,6 +212,74 @@ unique_ptr GetTimeZoneInternal(string &tz_str, vector &ca return tz; } + return nullptr; +} + +static string NormalizeTimeZone(const string &tz_str) { + if (GetKnownTimeZone(tz_str)) { + return tz_str; + } + + // Map UTC±NN00 to Etc/UTC±N + do { + if (tz_str.size() <= 4) { + break; + } + if (tz_str.compare(0, 3, "UTC")) { + break; + } + + idx_t pos = 3; + const auto utc = tz_str[pos++]; + // Invert the sign (UTC and Etc use opposite sign conventions) + // https://en.wikipedia.org/wiki/Tz_database#Area + auto sign = utc; + if (utc == '+') { + sign = '-'; + ; + } else if (utc == '-') { + sign = '+'; + } else { + break; + } + + string mapped = "Etc/GMT"; + mapped += sign; + const auto base_len = mapped.size(); + for (; pos < tz_str.size(); ++pos) { + const auto digit = tz_str[pos]; + // We could get fancy here and count colons and their locations, but I doubt anyone cares. 
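+ // Illustrative examples (added for clarity, not part of the original patch): "UTC+0100" keeps only the digit '1' and becomes "Etc/GMT-1", while "UTC-0500" becomes "Etc/GMT+5".
+ // The sign is inverted because Etc/GMT names use the POSIX convention (positive means west of Greenwich), the opposite of UTC offset notation; see the tz database link above.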
+ if (digit == '0' || digit == ':') { + continue; + } + if (!StringUtil::CharacterIsDigit(digit)) { + break; + } + mapped += digit; + } + if (pos < tz_str.size()) { + break; + } + // If we didn't add anything, then make it +0 + if (mapped.size() == base_len) { + mapped.back() = '+'; + mapped += '0'; + } + // Final sanity check + if (GetKnownTimeZone(mapped)) { + return mapped; + } + } while (false); + + return tz_str; +} + +unique_ptr GetTimeZoneInternal(string &tz_str, vector &candidates) { + auto tz = GetKnownTimeZone(tz_str); + if (tz) { + return tz; + } + // Try to be friendlier // Go through all the zone names and look for a case insensitive match // If we don't find one, make a suggestion @@ -269,6 +332,7 @@ unique_ptr ICUHelpers::GetTimeZone(string &tz_str, string *error_ static void SetICUTimeZone(ClientContext &context, SetScope scope, Value ¶meter) { auto tz_str = StringValue::Get(parameter); + tz_str = NormalizeTimeZone(tz_str); ICUHelpers::GetTimeZone(tz_str); parameter = Value(tz_str); } @@ -368,12 +432,13 @@ static void LoadInternal(ExtensionLoader &loader) { auto locales = icu::Collator::getAvailableLocales(count); for (int32_t i = 0; i < count; i++) { string collation; - if (string(locales[i].getCountry()).empty()) { + const auto &locale = locales[i]; // NOLINT + if (string(locale.getCountry()).empty()) { // language only - collation = locales[i].getLanguage(); + collation = locale.getLanguage(); } else { // language + country - collation = locales[i].getLanguage() + string("_") + locales[i].getCountry(); + collation = locale.getLanguage() + string("_") + locale.getCountry(); } collation = StringUtil::Lower(collation); @@ -405,6 +470,11 @@ static void LoadInternal(ExtensionLoader &loader) { icu::UnicodeString tz_id; std::string tz_string; tz->getID(tz_id).toUTF8String(tz_string); + // If the environment TZ is invalid, look for some alternatives + tz_string = NormalizeTimeZone(tz_string); + if (!GetKnownTimeZone(tz_string)) { + tz_string = "UTC"; + } config.AddExtensionOption("TimeZone", "The current time zone", LogicalType::VARCHAR, Value(tz_string), SetICUTimeZone); diff --git a/extension/json/include/json_serializer.hpp b/extension/json/include/json_serializer.hpp index aa17f3ffd16b..e856bff798fb 100644 --- a/extension/json/include/json_serializer.hpp +++ b/extension/json/include/json_serializer.hpp @@ -39,6 +39,18 @@ struct JsonSerializer : Serializer { return serializer.GetRootObject(); } + template + static string SerializeToString(T &value) { + auto doc = yyjson_mut_doc_new(nullptr); + JsonSerializer serializer(doc, false, false, false); + value.Serialize(serializer); + auto result_obj = serializer.GetRootObject(); + idx_t len = 0; + auto data = yyjson_mut_val_write_opts(result_obj, JSONCommon::WRITE_PRETTY_FLAG, nullptr, + reinterpret_cast(&len), nullptr); + return string(data, len); + } + yyjson_mut_val *GetRootObject() { D_ASSERT(stack.size() == 1); // or we forgot to pop somewhere return stack.front(); diff --git a/extension/parquet/column_writer.cpp b/extension/parquet/column_writer.cpp index 7983b6a239e0..63fc1cd57eb9 100644 --- a/extension/parquet/column_writer.cpp +++ b/extension/parquet/column_writer.cpp @@ -534,10 +534,10 @@ ColumnWriter::CreateWriterRecursive(ClientContext &context, ParquetWriter &write template <> struct NumericLimits { static constexpr float Minimum() { - return std::numeric_limits::lowest(); + return NumericLimits::Minimum(); }; static constexpr float Maximum() { - return std::numeric_limits::max(); + return NumericLimits::Maximum(); }; 
static constexpr bool IsSigned() { return std::is_signed::value; @@ -550,10 +550,10 @@ struct NumericLimits { template <> struct NumericLimits { static constexpr double Minimum() { - return std::numeric_limits::lowest(); + return NumericLimits::Minimum(); }; static constexpr double Maximum() { - return std::numeric_limits::max(); + return NumericLimits::Maximum(); }; static constexpr bool IsSigned() { return std::is_signed::value; diff --git a/extension/parquet/decoder/delta_length_byte_array_decoder.cpp b/extension/parquet/decoder/delta_length_byte_array_decoder.cpp index 9a0c1eac5452..a2fd7abd9a56 100644 --- a/extension/parquet/decoder/delta_length_byte_array_decoder.cpp +++ b/extension/parquet/decoder/delta_length_byte_array_decoder.cpp @@ -34,13 +34,21 @@ void DeltaLengthByteArrayDecoder::InitializePage() { void DeltaLengthByteArrayDecoder::Read(shared_ptr &block_ref, uint8_t *defines, idx_t read_count, Vector &result, idx_t result_offset) { if (defines) { - ReadInternal(block_ref, defines, read_count, result, result_offset); + if (reader.Type().IsJSONType()) { + ReadInternal(block_ref, defines, read_count, result, result_offset); + } else { + ReadInternal(block_ref, defines, read_count, result, result_offset); + } } else { - ReadInternal(block_ref, defines, read_count, result, result_offset); + if (reader.Type().IsJSONType()) { + ReadInternal(block_ref, defines, read_count, result, result_offset); + } else { + ReadInternal(block_ref, defines, read_count, result, result_offset); + } } } -template +template void DeltaLengthByteArrayDecoder::ReadInternal(shared_ptr &block_ref, uint8_t *const defines, const idx_t read_count, Vector &result, const idx_t result_offset) { auto &block = *block_ref; @@ -58,6 +66,8 @@ void DeltaLengthByteArrayDecoder::ReadInternal(shared_ptr &blo } } + const auto &string_column_reader = reader.Cast(); + const auto start_ptr = block.ptr; for (idx_t row_idx = 0; row_idx < read_count; row_idx++) { const auto result_idx = result_offset + row_idx; @@ -75,11 +85,15 @@ void DeltaLengthByteArrayDecoder::ReadInternal(shared_ptr &blo } const auto &str_len = length_data[length_idx++]; result_data[result_idx] = string_t(char_ptr_cast(block.ptr), str_len); + if (VALIDATE_INDIVIDUAL_STRINGS) { + string_column_reader.VerifyString(char_ptr_cast(block.ptr), str_len); + } block.unsafe_inc(str_len); } - // Verify that the strings we read are valid UTF-8 - reader.Cast().VerifyString(char_ptr_cast(start_ptr), block.ptr - start_ptr); + if (!VALIDATE_INDIVIDUAL_STRINGS) { + string_column_reader.VerifyString(char_ptr_cast(start_ptr), NumericCast(block.ptr - start_ptr)); + } StringColumnReader::ReferenceBlock(result, block_ref); } diff --git a/extension/parquet/include/decoder/delta_length_byte_array_decoder.hpp b/extension/parquet/include/decoder/delta_length_byte_array_decoder.hpp index f8141e26e35b..9f304da255e8 100644 --- a/extension/parquet/include/decoder/delta_length_byte_array_decoder.hpp +++ b/extension/parquet/include/decoder/delta_length_byte_array_decoder.hpp @@ -27,7 +27,7 @@ class DeltaLengthByteArrayDecoder { void Skip(uint8_t *defines, idx_t skip_count); private: - template + template void ReadInternal(shared_ptr &block, uint8_t *defines, idx_t read_count, Vector &result, idx_t result_offset); template diff --git a/extension/parquet/include/reader/string_column_reader.hpp b/extension/parquet/include/reader/string_column_reader.hpp index bfc0692af873..d0d18b80cde0 100644 --- a/extension/parquet/include/reader/string_column_reader.hpp +++ 
b/extension/parquet/include/reader/string_column_reader.hpp @@ -14,6 +14,7 @@ namespace duckdb { class StringColumnReader : public ColumnReader { +public: enum class StringColumnType : uint8_t { VARCHAR, JSON, OTHER }; static StringColumnType GetStringColumnType(const LogicalType &type) { @@ -36,7 +37,7 @@ class StringColumnReader : public ColumnReader { public: static void VerifyString(const char *str_data, uint32_t str_len, const bool isVarchar); - void VerifyString(const char *str_data, uint32_t str_len); + void VerifyString(const char *str_data, uint32_t str_len) const; static void ReferenceBlock(Vector &result, shared_ptr &block); diff --git a/extension/parquet/include/writer/templated_column_writer.hpp b/extension/parquet/include/writer/templated_column_writer.hpp index c035bba43a0c..0ed8543d34be 100644 --- a/extension/parquet/include/writer/templated_column_writer.hpp +++ b/extension/parquet/include/writer/templated_column_writer.hpp @@ -126,7 +126,8 @@ class StandardColumnWriter : public PrimitiveColumnWriter { public: unique_ptr InitializeWriteState(duckdb_parquet::RowGroup &row_group) override { auto result = make_uniq>(writer, row_group, row_group.columns.size()); - result->encoding = duckdb_parquet::Encoding::RLE_DICTIONARY; + result->encoding = writer.GetParquetVersion() == ParquetVersion::V1 ? duckdb_parquet::Encoding::PLAIN_DICTIONARY + : duckdb_parquet::Encoding::RLE_DICTIONARY; RegisterToRowGroup(row_group); return std::move(result); } @@ -150,6 +151,8 @@ class StandardColumnWriter : public PrimitiveColumnWriter { } page_state.dbp_encoder.FinishWrite(temp_writer); break; + case duckdb_parquet::Encoding::PLAIN_DICTIONARY: + // PLAIN_DICTIONARY can be treated the same as RLE_DICTIONARY case duckdb_parquet::Encoding::RLE_DICTIONARY: D_ASSERT(page_state.dict_bit_width != 0); if (!page_state.dict_written_value) { @@ -265,7 +268,8 @@ class StandardColumnWriter : public PrimitiveColumnWriter { bool HasDictionary(PrimitiveColumnWriterState &state_p) override { auto &state = state_p.Cast>(); - return state.encoding == duckdb_parquet::Encoding::RLE_DICTIONARY; + return state.encoding == duckdb_parquet::Encoding::RLE_DICTIONARY || + state.encoding == duckdb_parquet::Encoding::PLAIN_DICTIONARY; } idx_t DictionarySize(PrimitiveColumnWriterState &state_p) override { @@ -285,7 +289,8 @@ class StandardColumnWriter : public PrimitiveColumnWriter { void FlushDictionary(PrimitiveColumnWriterState &state_p, ColumnWriterStatistics *stats) override { auto &state = state_p.Cast>(); - D_ASSERT(state.encoding == duckdb_parquet::Encoding::RLE_DICTIONARY); + D_ASSERT(state.encoding == duckdb_parquet::Encoding::RLE_DICTIONARY || + state.encoding == duckdb_parquet::Encoding::PLAIN_DICTIONARY); if (writer.EnableBloomFilters()) { state.bloom_filter = @@ -310,7 +315,8 @@ class StandardColumnWriter : public PrimitiveColumnWriter { idx_t GetRowSize(const Vector &vector, const idx_t index, const PrimitiveColumnWriterState &state_p) const override { auto &state = state_p.Cast>(); - if (state.encoding == duckdb_parquet::Encoding::RLE_DICTIONARY) { + if (state.encoding == duckdb_parquet::Encoding::RLE_DICTIONARY || + state.encoding == duckdb_parquet::Encoding::PLAIN_DICTIONARY) { return (state.key_bit_width + 7) / 8; } else { return OP::template GetRowSize(vector, index); @@ -328,6 +334,8 @@ class StandardColumnWriter : public PrimitiveColumnWriter { const auto *data_ptr = FlatVector::GetData(input_column); switch (page_state.encoding) { + case duckdb_parquet::Encoding::PLAIN_DICTIONARY: + // PLAIN_DICTIONARY 
can be treated the same as RLE_DICTIONARY case duckdb_parquet::Encoding::RLE_DICTIONARY: { idx_t r = chunk_start; if (!page_state.dict_written_value) { diff --git a/extension/parquet/parquet_writer.cpp b/extension/parquet/parquet_writer.cpp index fe072f7ccfae..3c6d11d9ad1a 100644 --- a/extension/parquet/parquet_writer.cpp +++ b/extension/parquet/parquet_writer.cpp @@ -12,6 +12,7 @@ #include "duckdb/common/serializer/write_stream.hpp" #include "duckdb/common/string_util.hpp" #include "duckdb/function/table_function.hpp" +#include "duckdb/main/extension_helper.hpp" #include "duckdb/main/client_context.hpp" #include "duckdb/main/connection.hpp" #include "duckdb/parser/parsed_data/create_copy_function_info.hpp" @@ -374,6 +375,12 @@ ParquetWriter::ParquetWriter(ClientContext &context, FileSystem &fs, string file if (encryption_config) { auto &config = DBConfig::GetConfig(context); + + // To ensure we can write, we need to autoload httpfs + if (!config.encryption_util || !config.encryption_util->SupportsEncryption()) { + ExtensionHelper::TryAutoLoadExtension(context, "httpfs"); + } + if (config.encryption_util && debug_use_openssl) { // Use OpenSSL encryption_util = config.encryption_util; @@ -562,7 +569,7 @@ void ParquetWriter::FlushRowGroup(PreparedRowGroup &prepared) { row_group.__isset.total_compressed_size = true; if (encryption_config) { - auto row_group_ordinal = num_row_groups.load(); + const auto row_group_ordinal = file_meta_data.row_groups.size(); if (row_group_ordinal > std::numeric_limits::max()) { throw InvalidInputException("RowGroup ordinal exceeds 32767 when encryption enabled"); } @@ -583,6 +590,14 @@ void ParquetWriter::Flush(ColumnDataCollection &buffer) { return; } + // "total_written" is only used for the FILE_SIZE_BYTES flag, and only when threads are writing in parallel. + // We pre-emptively increase it here to try to reduce overshooting when many threads are writing in parallel. + // However, waiting for the exact value (PrepareRowGroup) takes too long, and would cause overshoots to happen. + // So, we guess the compression ratio. We guess 3x, but this will be off depending on the data. + // "total_written" is restored to the exact number of written bytes at the end of FlushRowGroup. + // PhysicalCopyToFile should be reworked to use prepare/flush batch separately for better accuracy. 
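+ // Illustration (editorial, not from the original patch): a thread about to flush a buffer reserves a fraction of its uncompressed size in "total_written", so concurrent writers comparing against FILE_SIZE_BYTES already account for the pending row group; FlushRowGroup later replaces the estimate with the exact byte count.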
+ total_written += buffer.SizeInBytes() / 2; + PreparedRowGroup prepared_row_group; PrepareRowGroup(buffer, prepared_row_group); buffer.Reset(); diff --git a/extension/parquet/reader/string_column_reader.cpp b/extension/parquet/reader/string_column_reader.cpp index 867dbb4d890c..019abd71a480 100644 --- a/extension/parquet/reader/string_column_reader.cpp +++ b/extension/parquet/reader/string_column_reader.cpp @@ -31,7 +31,7 @@ void StringColumnReader::VerifyString(const char *str_data, uint32_t str_len, co } } -void StringColumnReader::VerifyString(const char *str_data, uint32_t str_len) { +void StringColumnReader::VerifyString(const char *str_data, uint32_t str_len) const { switch (string_column_type) { case StringColumnType::VARCHAR: VerifyString(str_data, str_len, true); diff --git a/scripts/extension-upload-repository.sh b/scripts/extension-upload-repository.sh index aa90338d361e..687ae98778d6 100755 --- a/scripts/extension-upload-repository.sh +++ b/scripts/extension-upload-repository.sh @@ -15,7 +15,11 @@ else BASE_DIR="$1" fi -echo $BASE_DIR +if [ -z "$2" ]; then + TARGET_BUCKET="duckdb-core-extensions" +else + TARGET_BUCKET="$2" +fi set -e @@ -23,9 +27,9 @@ set -e shopt -s nullglob if [ "$DUCKDB_DEPLOY_SCRIPT_MODE" == "for_real" ]; then - echo "Deploying extensions.." + echo "Deploying extensions from '$BASE_DIR' to bucket '$TARGET_BUCKET' .." else - echo "Deploying extensions.. (DRY RUN)" + echo "Deploying extensions from '$BASE_DIR' to bucket '$TARGET_BUCKET'.. (DRY RUN)" fi for version_dir in $BASE_DIR/*; do @@ -38,6 +42,8 @@ for version_dir in $BASE_DIR/*; do FILES="$arch_dir/*.duckdb_extension" fi + echo "" + for f in $FILES; do if [[ $architecture == wasm* ]]; then ext_name=`basename $f .duckdb_extension.wasm` @@ -48,7 +54,9 @@ for version_dir in $BASE_DIR/*; do echo "Processing extension: $ext_name (architecture: $architecture, version: $duckdb_version, path: $f)" # args: [] - $script_dir/extension-upload-single.sh $ext_name "" "$duckdb_version" "$architecture" "duckdb-core-extensions" true false "$(dirname "$f")" + $script_dir/extension-upload-single.sh $ext_name "" "$duckdb_version" "$architecture" "$TARGET_BUCKET" true false "$(dirname "$f")" + + echo "" done echo "" done diff --git a/scripts/extension-upload-single.sh b/scripts/extension-upload-single.sh index 30f5c0c673a7..d3d0b15fc12c 100755 --- a/scripts/extension-upload-single.sh +++ b/scripts/extension-upload-single.sh @@ -48,6 +48,7 @@ fi # append signature to extension binary cat $ext.sign >> $ext.append +rm $ext.sign # compress extension binary if [[ $4 == wasm_* ]]; then @@ -55,12 +56,14 @@ if [[ $4 == wasm_* ]]; then else gzip < $ext.append > "$ext.compressed" fi +rm $ext.append set -e # Abort if AWS key is not set if [ -z "$AWS_ACCESS_KEY_ID" ]; then echo "No AWS key found, skipping.." 
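+ # also remove the temporary compressed artifact on this early-exit path, mirroring the cleanup at the end of the script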
+ rm "$ext.compressed" exit 0 fi @@ -74,6 +77,7 @@ fi if [[ $7 = 'true' ]]; then if [ -z "$3" ]; then echo "extension-upload-single.sh called with upload_versioned=true but no extension version was passed" + rm "$ext.compressed" exit 1 fi @@ -92,3 +96,6 @@ if [[ $6 = 'true' ]]; then aws s3 cp $ext.compressed s3://$5/$3/$4/$1.duckdb_extension.gz $DRY_RUN_PARAM --acl public-read fi fi + +# clean up +rm "$ext.compressed" diff --git a/scripts/generate_enum_util.py b/scripts/generate_enum_util.py index 6bae0889f573..74c91537eb61 100644 --- a/scripts/generate_enum_util.py +++ b/scripts/generate_enum_util.py @@ -15,6 +15,7 @@ "DictionaryAppendState", "DictFSSTMode", "ComplexJSONType", + "UnavailableReason", ] enum_util_header_file = os.path.join("..", "src", "include", "duckdb", "common", "enum_util.hpp") diff --git a/scripts/regression/test_runner.py b/scripts/regression/test_runner.py index abaf3cd35338..c374596013b5 100644 --- a/scripts/regression/test_runner.py +++ b/scripts/regression/test_runner.py @@ -45,6 +45,9 @@ def geomean(xs): parser.add_argument("--max-timeout", type=int, default=3600, help="Set maximum timeout in seconds (default: 3600).") parser.add_argument("--root-dir", type=str, default="", help="Root directory.") parser.add_argument("--no-summary", type=str, default=False, help="No summary in the end.") +parser.add_argument( + "--clear-benchmark-cache", action="store_true", help="Clear benchmark caches prior to running", default=False +) parser.add_argument( "--regression-threshold-seconds", type=float, @@ -85,6 +88,18 @@ def geomean(xs): print(f"Failed to find new runner {new_runner_path}") exit(1) +if args.clear_benchmark_cache: + old_cache_path = os.path.join(os.path.dirname(old_runner_path), '..', '..', '..', 'duckdb_benchmark_data') + new_cache_path = os.path.join(os.path.dirname(new_runner_path), '..', '..', '..', 'duckdb_benchmark_data') + try: + shutil.rmtree(old_cache_path) + except: + pass + try: + shutil.rmtree(new_cache_path) + except: + pass + config_dict = vars(args) old_runner = BenchmarkRunner(BenchmarkRunnerConfig.from_params(old_runner_path, benchmark_file, **config_dict)) new_runner = BenchmarkRunner(BenchmarkRunnerConfig.from_params(new_runner_path, benchmark_file, **config_dict)) diff --git a/scripts/test_config_compare.py b/scripts/test_config_compare.py new file mode 100644 index 000000000000..220f1c6698ec --- /dev/null +++ b/scripts/test_config_compare.py @@ -0,0 +1,53 @@ +import json +from collections import defaultdict +import sys + + +def load_skip_dict(path): + """Load a skip_tests list into a dict: reason -> set(paths).""" + with open(path) as f: + data = json.load(f) + + out = {} + for block in data.get("skip_tests", []): + reason = block["reason"] + paths = set(block.get("paths", [])) + out[reason] = paths + return out + + +def compare_files(file_a, file_b): + a = load_skip_dict(file_a) + b = load_skip_dict(file_b) + + all_reasons = set(a.keys()) | set(b.keys()) + + for reason in sorted(all_reasons): + paths_a = a.get(reason, set()) + paths_b = b.get(reason, set()) + + added = sorted(paths_b - paths_a) + removed = sorted(paths_a - paths_b) + + if not added and not removed: + continue + + print(f"\n=== Reason: {reason} ===") + + if removed: + print(" - Present in A but NOT in B:") + for p in removed: + print(f" {p}") + + if added: + print(" + Present in B but NOT in A:") + for p in added: + print(f" {p}") + + +if __name__ == "__main__": + if len(sys.argv) != 3: + print("Usage: python compare_skip_tests.py ") + exit(1) + + compare_files(sys.argv[1], 
sys.argv[2]) diff --git a/scripts/test_storage_compatibility.py b/scripts/test_storage_compatibility.py new file mode 100644 index 000000000000..86e4cf0bc9dc --- /dev/null +++ b/scripts/test_storage_compatibility.py @@ -0,0 +1,229 @@ +import argparse +import os +import subprocess +import re +import csv +from pathlib import Path + +parser = argparse.ArgumentParser(description='Test storage compatibility: write databases with the new build, then read them back with older DuckDB versions.') +group = parser.add_mutually_exclusive_group(required=True) +group.add_argument('--old-cli', action='store', help='Path to the CLI of the old DuckDB version to test') +group.add_argument('--versions', type=str, action='store', help='DuckDB versions to test, separated by "|"') +parser.add_argument('--new-unittest', action='store', help='Path to the new unittest binary to run', required=True) +parser.add_argument('--new-cli', action='store', help='Path to the new CLI to run (derived from --new-unittest if not given)', default=None) +parser.add_argument('--compatibility', action='store', help='Storage compatibility version', default='v1.0.0') +parser.add_argument( + '--test-config', action='store', help='Test config script to run', default='test/configs/storage_compatibility.json' +) +parser.add_argument('--db-name', action='store', help='Database name to write to', default='bwc_storage_test.db') +parser.add_argument('--abort-on-failure', action='store_true', help='Abort on first failure', default=False) +parser.add_argument('--start-offset', type=int, action='store', help='Test start offset', default=None) +parser.add_argument('--end-offset', type=int, action='store', help='Test end offset', default=None) +parser.add_argument('--no-summarize-failures', action='store_true', help='Skip failure summary', default=False) +parser.add_argument('--list-versions', action='store_true', help='Only list versions to test', default=False) +parser.add_argument( + '--run-empty-tests', + action='store_true', + help="Run tests that don't have a CREATE TABLE or CREATE VIEW statement", + default=False, +) + +args, extra_args = parser.parse_known_args() + +programs_to_test = [] +if args.versions is not None: + version_splits = args.versions.split('|') + for version in version_splits: + cli_path = os.path.join(Path.home(), '.duckdb', 'cli', version, 'duckdb') + if not os.path.isfile(cli_path): + os.system(f'curl https://install.duckdb.org | DUCKDB_VERSION={version} sh') + programs_to_test.append(cli_path) +else: + programs_to_test.append(args.old_cli) + +unittest_program = args.new_unittest +db_name = args.db_name +new_cli = args.new_unittest.replace('test/unittest', 'duckdb') if args.new_cli is None else args.new_cli +summarize_failures = not args.no_summarize_failures + +# Use the '-l' parameter to output the list of tests to run +proc = subprocess.run( + [unittest_program, '--test-config', args.test_config, '-l'] + extra_args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, +) +stdout = proc.stdout.decode('utf8').strip() +stderr = proc.stderr.decode('utf8').strip() +if len(stderr) > 0: + print("Failed to run program " + unittest_program) + print("Returncode:", proc.returncode) + print(stdout) + print(stderr) + exit(1) + + +# The output is in the format of 'PATH\tGROUP', we're only interested in the PATH portion +test_cases = [] +first_line = True +for line in stdout.splitlines(): + if first_line: + first_line = False + continue + if len(line.strip()) == 0: + continue + splits = line.rsplit('\t', 1) + test_cases.append(splits[0]) + +test_cases.sort() +if args.compatibility != 'v1.0.0': + raise Exception("Only v1.0.0 is supported
for now (FIXME)") + + +def escape_cmd_arg(arg): + if '"' in arg or '\'' in arg or ' ' in arg or '\\' in arg: + arg = arg.replace('\\', '\\\\') + arg = arg.replace('"', '\\"') + arg = arg.replace("'", "\\'") + return f'"{arg}"' + return arg + + +error_container = [] + + +def handle_failure(test, cmd, msg, stdout, stderr, returncode): + print(f"==============FAILURE============") + print(test) + print(f"==============MESSAGE============") + print(msg) + print(f"==============COMMAND============") + cmd_str = '' + for entry in cmd: + cmd_str += escape_cmd_arg(entry) + ' ' + print(cmd_str.strip()) + print(f"==============RETURNCODE=========") + print(str(returncode)) + print(f"==============STDOUT=============") + print(stdout) + print(f"==============STDERR=============") + print(stderr) + print(f"=================================") + if args.abort_on_failure: + exit(1) + else: + error_container.append({'test': test, 'stderr': stderr}) + + +def run_program(cmd, description): + proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout = proc.stdout.decode('utf8').strip() + stderr = proc.stderr.decode('utf8').strip() + if proc.returncode != 0: + return { + 'test': test, + 'cmd': cmd, + 'msg': f'Failed to {description}', + 'stdout': stdout, + 'stderr': stderr, + 'returncode': proc.returncode, + } + return None + + +def try_run_program(cmd, description): + result = run_program(cmd, description) + if result is None: + return True + handle_failure(**result) + return False + + +index = 0 +start = 0 if args.start_offset is None else args.start_offset +end = len(test_cases) if args.end_offset is None else args.end_offset +for i in range(start, end): + test = test_cases[i] + skipped = '' + if not args.run_empty_tests: + with open(test, 'r') as f: + test_contents = f.read().lower() + if 'create table' not in test_contents and 'create view' not in test_contents: + skipped = ' (SKIPPED)' + + print(f'[{i}/{len(test_cases)}]: {test}{skipped}') + if skipped != '': + continue + # remove the old db + try: + os.remove(db_name) + except: + pass + cmd = [unittest_program, '--test-config', args.test_config, test] + if not try_run_program(cmd, 'Run Test'): + continue + + if not os.path.isfile(db_name): + # db not created + continue + + cmd = [ + programs_to_test[-1], + db_name, + '-c', + '.headers off', + '-csv', + '-c', + '.output table_list.csv', + '-c', + 'SHOW ALL TABLES', + ] + if not try_run_program(cmd, 'List Tables'): + continue + + tables = [] + with open('table_list.csv', newline='') as f: + reader = csv.reader(f) + for row in reader: + tables.append((row[1], row[2])) + # no tables / views + if len(tables) == 0: + continue + + # read all tables / views + failures = [] + for cli in programs_to_test: + cmd = [cli, db_name] + for table in tables: + schema_name = table[0].replace('"', '""') + table_name = table[1].replace('"', '""') + cmd += ['-c', f'FROM "{schema_name}"."{table_name}"'] + failure = run_program(cmd, 'Query Tables') + if failure is not None: + failures.append(failure) + if len(failures) > 0: + # we failed to query the tables + # this MIGHT be expected - e.g. we might have views that reference stale state (e.g. 
files that are deleted) + # try to run it with the new CLI - if this succeeds we have a problem + new_cmd = [new_cli] + cmd[1:] + new_failure = run_program(new_cmd, 'Query Tables (New)') + if new_failure is None: + # we succeeded with the new CLI - report the failure + for failure in failures: + handle_failure(**failure) + continue + +if len(error_container) == 0: + exit(0) + +if summarize_failures: + print( + '''\n\n==================================================== +================ FAILURES SUMMARY ================ +====================================================\n +''' + ) + for i, error in enumerate(error_container, start=1): + print(f"\n{i}:", error["test"], "\n") + print(error["stderr"]) + +exit(1) diff --git a/scripts/upload-assets-to-staging.sh b/scripts/upload-assets-to-staging.sh index d2111d894800..156b1d1ac9f3 100755 --- a/scripts/upload-assets-to-staging.sh +++ b/scripts/upload-assets-to-staging.sh @@ -60,5 +60,5 @@ python3 -m pip install awscli for var in "${@: 2}" do - aws s3 cp $var s3://duckdb-staging/$TARGET/$GITHUB_REPOSITORY/$FOLDER/ $DRY_RUN_PARAM --region us-east-2 + aws s3 cp $var s3://duckdb-staging/$TARGET/$GITHUB_REPOSITORY/$FOLDER/ $DRY_RUN_PARAM done diff --git a/src/catalog/default/default_table_functions.cpp b/src/catalog/default/default_table_functions.cpp index c0778647405a..94079bbcb05d 100644 --- a/src/catalog/default/default_table_functions.cpp +++ b/src/catalog/default/default_table_functions.cpp @@ -69,7 +69,7 @@ FROM histogram_values(source, col_name, bin_count := bin_count, technique := tec {DEFAULT_SCHEMA, "duckdb_logs_parsed", {"log_type"}, {}, R"( SELECT * EXCLUDE (message), UNNEST(parse_duckdb_log_message(log_type, message)) FROM duckdb_logs(denormalized_table=1) -WHERE type = log_type +WHERE type ILIKE log_type )"}, {nullptr, nullptr, {nullptr}, {{nullptr, nullptr}}, nullptr} }; diff --git a/src/common/adbc/adbc.cpp b/src/common/adbc/adbc.cpp index 0ed31eb39081..2b6535055e11 100644 --- a/src/common/adbc/adbc.cpp +++ b/src/common/adbc/adbc.cpp @@ -537,7 +537,8 @@ static int get_schema(struct ArrowArrayStream *stream, struct ArrowSchema *out) auto count = duckdb_column_count(&result_wrapper->result); std::vector types(count); - std::vector owned_names(count); + std::vector owned_names; + owned_names.reserve(count); duckdb::vector names(count); for (idx_t i = 0; i < count; i++) { types[i] = duckdb_column_logical_type(&result_wrapper->result, i); @@ -793,7 +794,8 @@ AdbcStatusCode StatementGetParameterSchema(struct AdbcStatement *statement, stru count = 1; } std::vector types(count); - std::vector owned_names(count); + std::vector owned_names; + owned_names.reserve(count); duckdb::vector names(count); for (idx_t i = 0; i < count; i++) { diff --git a/src/common/encryption_key_manager.cpp b/src/common/encryption_key_manager.cpp index 482c4a006877..e20f2208b51f 100644 --- a/src/common/encryption_key_manager.cpp +++ b/src/common/encryption_key_manager.cpp @@ -19,7 +19,8 @@ EncryptionKey::EncryptionKey(data_ptr_t encryption_key_p) { D_ASSERT(memcmp(key, encryption_key_p, MainHeader::DEFAULT_ENCRYPTION_KEY_LENGTH) == 0); // zero out the encryption key in memory - memset(encryption_key_p, 0, MainHeader::DEFAULT_ENCRYPTION_KEY_LENGTH); + duckdb_mbedtls::MbedTlsWrapper::AESStateMBEDTLS::SecureClearData(encryption_key_p, + MainHeader::DEFAULT_ENCRYPTION_KEY_LENGTH); LockEncryptionKey(key); } @@ -37,7 +38,7 @@ void EncryptionKey::LockEncryptionKey(data_ptr_t key, idx_t key_len) { } void EncryptionKey::UnlockEncryptionKey(data_ptr_t key, idx_t key_len) { 
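Aside: the memset-to-SecureClearData swap running through encryption_key_manager.cpp (continued in the hunk below) exists because a plain memset on a buffer that is never read again is a dead store the optimizer is allowed to delete, leaving key material in memory. A minimal sketch of the usual countermeasure, assuming the real SecureClearData (whose body lives in the mbedtls wrapper and is not shown in this diff) does something equivalent:

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for MbedTlsWrapper::AESStateMBEDTLS::SecureClearData:
// a dead-store-resistant wipe. Writing through a volatile pointer forces the
// compiler to keep every store, unlike a plain memset before free/return.
static void SecureClearData(uint8_t *data, size_t len) {
	volatile uint8_t *p = data;
	while (len--) {
		*p++ = 0;
	}
}

int main() {
	uint8_t key[32] = {0xAB};
	SecureClearData(key, sizeof(key));
	return key[0]; // 0 after the wipe
}
```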
- memset(key, 0, key_len); + duckdb_mbedtls::MbedTlsWrapper::AESStateMBEDTLS::SecureClearData(key, key_len); #if defined(_WIN32) VirtualUnlock(key, key_len); #else @@ -64,27 +65,32 @@ EncryptionKeyManager &EncryptionKeyManager::Get(DatabaseInstance &db) { string EncryptionKeyManager::GenerateRandomKeyID() { uint8_t key_id[KEY_ID_BYTES]; - duckdb_mbedtls::MbedTlsWrapper::AESStateMBEDTLS::GenerateRandomDataStatic(key_id, KEY_ID_BYTES); + RandomEngine engine; + engine.RandomData(key_id, KEY_ID_BYTES); string key_id_str(reinterpret_cast(key_id), KEY_ID_BYTES); return key_id_str; } void EncryptionKeyManager::AddKey(const string &key_name, data_ptr_t key) { + lock_guard guard(lock); derived_keys.emplace(key_name, EncryptionKey(key)); // Zero-out the encryption key - std::memset(key, 0, DERIVED_KEY_LENGTH); + duckdb_mbedtls::MbedTlsWrapper::AESStateMBEDTLS::SecureClearData(key, DERIVED_KEY_LENGTH); } bool EncryptionKeyManager::HasKey(const string &key_name) const { + lock_guard guard(lock); return derived_keys.find(key_name) != derived_keys.end(); } const_data_ptr_t EncryptionKeyManager::GetKey(const string &key_name) const { D_ASSERT(HasKey(key_name)); + lock_guard guard(lock); return derived_keys.at(key_name).GetPtr(); } void EncryptionKeyManager::DeleteKey(const string &key_name) { + lock_guard guard(lock); derived_keys.erase(key_name); } @@ -107,7 +113,7 @@ string EncryptionKeyManager::Base64Decode(const string &key) { auto output = duckdb::unique_ptr(new unsigned char[result_size]); Blob::FromBase64(key, output.get(), result_size); string decoded_key(reinterpret_cast(output.get()), result_size); - memset(output.get(), 0, result_size); + duckdb_mbedtls::MbedTlsWrapper::AESStateMBEDTLS::SecureClearData(output.get(), result_size); return decoded_key; } @@ -124,10 +130,9 @@ void EncryptionKeyManager::DeriveKey(string &user_key, data_ptr_t salt, data_ptr KeyDerivationFunctionSHA256(reinterpret_cast(decoded_key.data()), decoded_key.size(), salt, derived_key); - - // wipe the original and decoded key - std::fill(user_key.begin(), user_key.end(), 0); - std::fill(decoded_key.begin(), decoded_key.end(), 0); + duckdb_mbedtls::MbedTlsWrapper::AESStateMBEDTLS::SecureClearData(data_ptr_cast(&user_key[0]), user_key.size()); + duckdb_mbedtls::MbedTlsWrapper::AESStateMBEDTLS::SecureClearData(data_ptr_cast(&decoded_key[0]), + decoded_key.size()); user_key.clear(); decoded_key.clear(); } diff --git a/src/common/enum_util.cpp b/src/common/enum_util.cpp index d66c2b29127f..12308f453787 100644 --- a/src/common/enum_util.cpp +++ b/src/common/enum_util.cpp @@ -100,6 +100,7 @@ #include "duckdb/execution/index/art/art_scanner.hpp" #include "duckdb/execution/index/art/node.hpp" #include "duckdb/execution/index/bound_index.hpp" +#include "duckdb/execution/index/unbound_index.hpp" #include "duckdb/execution/operator/csv_scanner/csv_option.hpp" #include "duckdb/execution/operator/csv_scanner/csv_state.hpp" #include "duckdb/execution/reservoir_sample.hpp" @@ -710,6 +711,24 @@ BlockState EnumUtil::FromString(const char *value) { return static_cast(StringUtil::StringToEnum(GetBlockStateValues(), 2, "BlockState", value)); } +const StringUtil::EnumStringLiteral *GetBufferedIndexReplayValues() { + static constexpr StringUtil::EnumStringLiteral values[] { + { static_cast(BufferedIndexReplay::INSERT_ENTRY), "INSERT_ENTRY" }, + { static_cast(BufferedIndexReplay::DEL_ENTRY), "DEL_ENTRY" } + }; + return values; +} + +template<> +const char* EnumUtil::ToChars(BufferedIndexReplay value) { + return 
StringUtil::EnumToString(GetBufferedIndexReplayValues(), 2, "BufferedIndexReplay", static_cast(value)); +} + +template<> +BufferedIndexReplay EnumUtil::FromString(const char *value) { + return static_cast(StringUtil::StringToEnum(GetBufferedIndexReplayValues(), 2, "BufferedIndexReplay", value)); +} + const StringUtil::EnumStringLiteral *GetCAPIResultSetTypeValues() { static constexpr StringUtil::EnumStringLiteral values[] { { static_cast(CAPIResultSetType::CAPI_RESULT_TYPE_NONE), "CAPI_RESULT_TYPE_NONE" }, diff --git a/src/common/enums/compression_type.cpp b/src/common/enums/compression_type.cpp index ec551eff18dc..427cfbe91261 100644 --- a/src/common/enums/compression_type.cpp +++ b/src/common/enums/compression_type.cpp @@ -17,25 +17,60 @@ vector ListCompressionTypes(void) { return compression_types; } -bool CompressionTypeIsDeprecated(CompressionType compression_type, optional_ptr storage_manager) { - vector types({CompressionType::COMPRESSION_PATAS, CompressionType::COMPRESSION_CHIMP}); - if (storage_manager) { - if (storage_manager->GetStorageVersion() >= 5) { - //! NOTE: storage_manager is an optional_ptr because it's called from ForceCompressionSetting, which doesn't - //! have guaranteed access to a StorageManager The introduction of DICT_FSST deprecates Dictionary and FSST - //! compression methods - types.emplace_back(CompressionType::COMPRESSION_DICTIONARY); - types.emplace_back(CompressionType::COMPRESSION_FSST); - } else { - types.emplace_back(CompressionType::COMPRESSION_DICT_FSST); - } +namespace { +struct CompressionMethodRequirements { + CompressionType type; + optional_idx minimum_storage_version; + optional_idx maximum_storage_version; +}; +} // namespace + +CompressionAvailabilityResult CompressionTypeIsAvailable(CompressionType compression_type, + optional_ptr storage_manager) { + //! Max storage compatibility + vector candidates({{CompressionType::COMPRESSION_PATAS, optional_idx(), 0}, + {CompressionType::COMPRESSION_CHIMP, optional_idx(), 0}, + {CompressionType::COMPRESSION_DICTIONARY, 0, 4}, + {CompressionType::COMPRESSION_FSST, 0, 4}, + {CompressionType::COMPRESSION_DICT_FSST, 5, optional_idx()}}); + + optional_idx current_storage_version; + if (storage_manager && storage_manager->HasStorageVersion()) { + current_storage_version = storage_manager->GetStorageVersion(); } - for (auto &type : types) { - if (type == compression_type) { - return true; + for (auto &candidate : candidates) { + auto &type = candidate.type; + if (type != compression_type) { + continue; + } + auto &min = candidate.minimum_storage_version; + auto &max = candidate.maximum_storage_version; + + if (!min.IsValid()) { + //! Used to signal: always deprecated + return CompressionAvailabilityResult::Deprecated(); + } + + if (!current_storage_version.IsValid()) { + //! Can't determine in this call whether it's available or not, default to available + return CompressionAvailabilityResult(); + } + + auto current_version = current_storage_version.GetIndex(); + D_ASSERT(min.IsValid()); + if (min.GetIndex() > current_version) { + //! Minimum required storage version is higher than the current storage version, this method isn't available + //! yet + return CompressionAvailabilityResult::NotAvailableYet(); + } + if (max.IsValid() && max.GetIndex() < current_version) { + //! Maximum supported storage version is lower than the current storage version, this method is no longer + //! 
available + return CompressionAvailabilityResult::Deprecated(); } + return CompressionAvailabilityResult(); } - return false; + return CompressionAvailabilityResult(); } CompressionType CompressionTypeFromString(const string &str) { diff --git a/src/common/exception/binder_exception.cpp b/src/common/exception/binder_exception.cpp index 62dca06fb5af..bac17ec83d1a 100644 --- a/src/common/exception/binder_exception.cpp +++ b/src/common/exception/binder_exception.cpp @@ -18,9 +18,14 @@ BinderException BinderException::ColumnNotFound(const string &name, const vector extra_info["name"] = name; if (!similar_bindings.empty()) { extra_info["candidates"] = StringUtil::Join(similar_bindings, ","); + return BinderException( + StringUtil::Format("Referenced column \"%s\" not found in FROM clause!%s", name, candidate_str), + extra_info); + } else { + return BinderException( + StringUtil::Format("Referenced column \"%s\" was not found because the FROM clause is missing", name), + extra_info); } - return BinderException( - StringUtil::Format("Referenced column \"%s\" not found in FROM clause!%s", name, candidate_str), extra_info); } BinderException BinderException::NoMatchingFunction(const string &catalog_name, const string &schema_name, diff --git a/src/common/local_file_system.cpp b/src/common/local_file_system.cpp index 8733e0162046..bb7a7b04d773 100644 --- a/src/common/local_file_system.cpp +++ b/src/common/local_file_system.cpp @@ -1283,6 +1283,29 @@ bool LocalFileSystem::OnDiskFile(FileHandle &handle) { return true; } +string LocalFileSystem::GetVersionTag(FileHandle &handle) { + // TODO: Fix using FileSystem::Stats for v1.5, which should also fix it for Windows +#ifdef _WIN32 + return ""; +#else + int fd = handle.Cast().fd; + struct stat s; + if (fstat(fd, &s) == -1) { + throw IOException("Failed to get file size for file \"%s\": %s", {{"errno", std::to_string(errno)}}, + handle.path, strerror(errno)); + } + + // dev/ino should be enough, but to guard against in-place writes we also add file size and modification time + uint64_t version_tag[4]; + Store(NumericCast(s.st_dev), data_ptr_cast(&version_tag[0])); + Store(NumericCast(s.st_ino), data_ptr_cast(&version_tag[1])); + Store(NumericCast(s.st_size), data_ptr_cast(&version_tag[2])); + Store(Timestamp::FromEpochSeconds(s.st_mtime).value, data_ptr_cast(&version_tag[3])); + + return string(char_ptr_cast(version_tag), sizeof(uint64_t) * 4); +#endif +} + void LocalFileSystem::Seek(FileHandle &handle, idx_t location) { if (!CanSeek()) { throw IOException("Cannot seek in files of this type"); diff --git a/src/common/random_engine.cpp b/src/common/random_engine.cpp index 78403e0301af..156b4baec583 100644 --- a/src/common/random_engine.cpp +++ b/src/common/random_engine.cpp @@ -82,4 +82,14 @@ void RandomEngine::SetSeed(uint64_t seed) { random_state->pcg.seed(seed); } +void RandomEngine::RandomData(duckdb::data_ptr_t data, duckdb::idx_t len) { + while (len) { + const auto random_integer = NextRandomInteger(); + const auto next = duckdb::MinValue(len, sizeof(random_integer)); + memcpy(data, duckdb::const_data_ptr_cast(&random_integer), next); + data += next; + len -= next; + } +} + } // namespace duckdb diff --git a/src/common/settings.json b/src/common/settings.json index ac0698b16be3..c79b520a1901 100644 --- a/src/common/settings.json +++ b/src/common/settings.json @@ -225,6 +225,13 @@ "default_scope": "global", "default_value": "false" }, + { + "name": "debug_verify_blocks", + "description": "DEBUG SETTING: verify block metadata during checkpointing", + 
"type": "BOOLEAN", + "default_scope": "global", + "default_value": "false" + }, { "name": "debug_verify_vector", "description": "DEBUG SETTING: enable vector verification", diff --git a/src/common/types/column/column_data_collection.cpp b/src/common/types/column/column_data_collection.cpp index b53e07d681f8..89890b3258ba 100644 --- a/src/common/types/column/column_data_collection.cpp +++ b/src/common/types/column/column_data_collection.cpp @@ -1036,6 +1036,7 @@ void ColumnDataCollection::InitializeScan(ColumnDataParallelScanState &state, ve bool ColumnDataCollection::Scan(ColumnDataParallelScanState &state, ColumnDataLocalScanState &lstate, DataChunk &result) const { + D_ASSERT(result.GetTypes() == types); result.Reset(); idx_t chunk_index; @@ -1129,6 +1130,10 @@ void ColumnDataCollection::ScanAtIndex(ColumnDataParallelScanState &state, Colum } bool ColumnDataCollection::Scan(ColumnDataScanState &state, DataChunk &result) const { + for (idx_t i = 0; i < state.column_ids.size(); i++) { + D_ASSERT(result.GetTypes()[i] == types[state.column_ids[i]]); + } + result.Reset(); idx_t chunk_index; @@ -1213,6 +1218,7 @@ idx_t ColumnDataCollection::ChunkCount() const { } void ColumnDataCollection::FetchChunk(idx_t chunk_idx, DataChunk &result) const { + D_ASSERT(result.GetTypes() == types); D_ASSERT(chunk_idx < ChunkCount()); for (auto &segment : segments) { if (chunk_idx >= segment->ChunkCount()) { diff --git a/src/common/types/conflict_manager.cpp b/src/common/types/conflict_manager.cpp index 49d5d11864a7..9348fd5c0398 100644 --- a/src/common/types/conflict_manager.cpp +++ b/src/common/types/conflict_manager.cpp @@ -87,7 +87,7 @@ optional_idx ConflictManager::GetFirstInvalidIndex(const idx_t count, const bool for (idx_t i = 0; i < count; i++) { if (negate && !validity.RowIsValid(i)) { return i; - } else if (validity.RowIsValid(i)) { + } else if (!negate && validity.RowIsValid(i)) { return i; } } diff --git a/src/common/types/row/CMakeLists.txt b/src/common/types/row/CMakeLists.txt index 384de7fd9bcc..be13f78288e4 100644 --- a/src/common/types/row/CMakeLists.txt +++ b/src/common/types/row/CMakeLists.txt @@ -6,7 +6,6 @@ endif() add_library_unity( duckdb_common_types_row OBJECT - block_iterator.cpp partitioned_tuple_data.cpp row_data_collection.cpp row_data_collection_scanner.cpp diff --git a/src/common/types/row/block_iterator.cpp b/src/common/types/row/block_iterator.cpp deleted file mode 100644 index bebba60e11f7..000000000000 --- a/src/common/types/row/block_iterator.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include "duckdb/common/types/row/block_iterator.hpp" - -namespace duckdb { - -BlockIteratorStateType GetBlockIteratorStateType(const bool &external) { - return external ? 
BlockIteratorStateType::EXTERNAL : BlockIteratorStateType::IN_MEMORY; -} - -InMemoryBlockIteratorState::InMemoryBlockIteratorState(const TupleDataCollection &key_data) - : block_ptrs(ConvertBlockPointers(key_data.GetRowBlockPointers())), fast_mod(key_data.TuplesPerBlock()), - tuple_count(key_data.Count()) { -} - -unsafe_vector InMemoryBlockIteratorState::ConvertBlockPointers(const vector &block_ptrs) { - unsafe_vector converted_block_ptrs; - converted_block_ptrs.reserve(block_ptrs.size()); - for (const auto &block_ptr : block_ptrs) { - converted_block_ptrs.emplace_back(block_ptr); - } - return converted_block_ptrs; -} - -ExternalBlockIteratorState::ExternalBlockIteratorState(TupleDataCollection &key_data_p, - optional_ptr payload_data_p) - : tuple_count(key_data_p.Count()), current_chunk_idx(DConstants::INVALID_INDEX), key_data(key_data_p), - key_ptrs(FlatVector::GetData(key_scan_state.chunk_state.row_locations)), payload_data(payload_data_p), - keep_pinned(false), pin_payload(false) { - key_data.InitializeScan(key_scan_state); - if (payload_data) { - payload_data->InitializeScan(payload_scan_state); - } -} - -} // namespace duckdb diff --git a/src/execution/expression_executor/execute_comparison.cpp b/src/execution/expression_executor/execute_comparison.cpp index 6e78de49c08e..f8fb8dc68e1d 100644 --- a/src/execution/expression_executor/execute_comparison.cpp +++ b/src/execution/expression_executor/execute_comparison.cpp @@ -138,8 +138,19 @@ static idx_t TemplatedSelectOperation(Vector &left, Vector &right, optional_ptr< false_sel.get()); case PhysicalType::LIST: case PhysicalType::STRUCT: - case PhysicalType::ARRAY: - return NestedSelectOperation(left, right, sel, count, true_sel, false_sel, null_mask); + case PhysicalType::ARRAY: { + auto result_count = NestedSelectOperation(left, right, sel, count, true_sel, false_sel, null_mask); + if (true_sel && result_count > 0) { + std::sort(true_sel->data(), true_sel->data() + result_count); + } + if (false_sel) { + idx_t false_count = count - result_count; + if (false_count > 0) { + std::sort(false_sel->data(), false_sel->data() + false_count); + } + } + return result_count; + } default: throw InternalException("Invalid type for comparison"); } diff --git a/src/execution/index/art/art.cpp b/src/execution/index/art/art.cpp index 87c9cbf9b772..cede73d8ed00 100644 --- a/src/execution/index/art/art.cpp +++ b/src/execution/index/art/art.cpp @@ -522,7 +522,9 @@ ErrorData ART::Insert(IndexLock &l, DataChunk &chunk, Vector &row_ids, IndexAppe if (keys[i].Empty()) { continue; } - D_ASSERT(ARTOperator::Lookup(*this, tree, keys[i], 0)); + auto leaf = ARTOperator::Lookup(*this, tree, keys[i], 0); + D_ASSERT(leaf); + D_ASSERT(ARTOperator::LookupInLeaf(*this, *leaf, row_id_keys[i])); } #endif return ErrorData(); @@ -602,8 +604,9 @@ void ART::Delete(IndexLock &state, DataChunk &input, Vector &row_ids) { continue; } auto leaf = ARTOperator::Lookup(*this, tree, keys[i], 0); - if (leaf && leaf->GetType() == NType::LEAF_INLINED) { - D_ASSERT(leaf->GetRowId() != row_id_keys[i].GetRowId()); + if (leaf) { + auto contains_row_id = ARTOperator::LookupInLeaf(*this, *leaf, row_id_keys[i]); + D_ASSERT(!contains_row_id); } } #endif diff --git a/src/execution/index/art/base_node.cpp b/src/execution/index/art/base_node.cpp index 94d5c0fe1943..a59297c2c622 100644 --- a/src/execution/index/art/base_node.cpp +++ b/src/execution/index/art/base_node.cpp @@ -95,7 +95,9 @@ void Node4::DeleteChild(ART &art, Node &node, Node &parent, const uint8_t byte, auto prev_node4_status = 
node.GetGateStatus(); Node::FreeNode(art, node); - Prefix::Concat(art, parent, node, child, remaining_byte, prev_node4_status); + // Propagate both the prev_node_4 status and the general gate status (if the gate was earlier on), + // since the concatenation logic depends on both. + Prefix::Concat(art, parent, node, child, remaining_byte, prev_node4_status, status); } void Node4::ShrinkNode16(ART &art, Node &node4, Node &node16) { diff --git a/src/execution/index/art/prefix.cpp b/src/execution/index/art/prefix.cpp index 1d7861135d91..148f68d0f24f 100644 --- a/src/execution/index/art/prefix.cpp +++ b/src/execution/index/art/prefix.cpp @@ -65,8 +65,8 @@ void Prefix::New(ART &art, reference &ref, const ARTKey &key, const idx_t } } -void Prefix::Concat(ART &art, Node &parent, Node &node4, const Node child, uint8_t byte, - const GateStatus node4_status) { +void Prefix::Concat(ART &art, Node &parent, Node &node4, const Node child, uint8_t byte, const GateStatus node4_status, + const GateStatus status) { // We have four situations from which we enter here: // 1: PREFIX (parent) - Node4 (prev_node4) - PREFIX (child) - INLINED_LEAF, or // 2: PREFIX (parent) - Node4 (prev_node4) - INLINED_LEAF (child), or @@ -90,10 +90,7 @@ void Prefix::Concat(ART &art, Node &parent, Node &node4, const Node child, uint8 ConcatChildIsGate(art, parent, node4, child, byte); return; } - - auto inside_gate = parent.GetGateStatus() == GateStatus::GATE_SET; - ConcatInternal(art, parent, node4, child, byte, inside_gate); - return; + ConcatInternal(art, parent, node4, child, byte, status); } void Prefix::Reduce(ART &art, Node &node, const idx_t pos) { @@ -286,9 +283,9 @@ Prefix Prefix::GetTail(ART &art, const Node &node) { } void Prefix::ConcatInternal(ART &art, Node &parent, Node &node4, const Node child, uint8_t byte, - const bool inside_gate) { + const GateStatus status) { if (child.GetType() == NType::LEAF_INLINED) { - if (inside_gate) { + if (status == GateStatus::GATE_SET) { if (parent.GetType() == NType::PREFIX) { // The parent only contained the Node4, so we can now inline 'all the way up', // and the gate is no longer nested. 
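The Node4::DeleteChild and Prefix::Concat hunks above thread both gate statuses through as explicit parameters because they are captured before Node::FreeNode runs; afterwards the callee could only read freed state. A standalone toy sketch of that capture-before-free pattern (all names hypothetical, not DuckDB's actual ART API):

```cpp
#include <cassert>

enum class GateStatus { GATE_NOT_SET, GATE_SET };

struct ToyNode {
	GateStatus gate = GateStatus::GATE_NOT_SET;
	bool freed = false;
};

// The callee decides based on the snapshots, never on the freed node.
static void Concat(ToyNode &parent, GateStatus node_status, GateStatus status) {
	if (status == GateStatus::GATE_SET || node_status == GateStatus::GATE_SET) {
		parent.gate = GateStatus::GATE_SET;
	}
}

int main() {
	ToyNode parent, node;
	node.gate = GateStatus::GATE_SET;
	// Capture both statuses before freeing, mirroring DeleteChild above.
	auto prev_status = node.gate;
	auto status = parent.gate;
	node.freed = true; // "FreeNode": node.gate is no longer meaningful
	Concat(parent, prev_status, status);
	assert(parent.gate == GateStatus::GATE_SET);
	return 0;
}
```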
diff --git a/src/execution/index/bound_index.cpp b/src/execution/index/bound_index.cpp index 2c0d43d9133e..8573da471091 100644 --- a/src/execution/index/bound_index.cpp +++ b/src/execution/index/bound_index.cpp @@ -1,11 +1,13 @@ #include "duckdb/execution/index/bound_index.hpp" +#include "duckdb/common/array.hpp" #include "duckdb/common/radix.hpp" #include "duckdb/common/serializer/serializer.hpp" #include "duckdb/planner/expression/bound_columnref_expression.hpp" #include "duckdb/planner/expression/bound_reference_expression.hpp" #include "duckdb/planner/expression_iterator.hpp" #include "duckdb/storage/table/append_state.hpp" +#include "duckdb/common/types/selection_vector.hpp" namespace duckdb { @@ -154,28 +156,80 @@ string BoundIndex::AppendRowError(DataChunk &input, idx_t index) { return error; } -void BoundIndex::ApplyBufferedAppends(const vector &table_types, ColumnDataCollection &buffered_appends, - const vector &mapped_column_ids) { - IndexAppendInfo index_append_info(IndexAppendMode::INSERT_DUPLICATES, nullptr); +namespace { + +struct BufferedReplayState { + optional_ptr buffer = nullptr; + ColumnDataScanState scan_state; + DataChunk current_chunk; + bool scan_initialized = false; +}; +} // namespace - ColumnDataScanState state; - buffered_appends.InitializeScan(state); +void BoundIndex::ApplyBufferedReplays(const vector &table_types, BufferedIndexReplays &buffered_replays, + const vector &mapped_column_ids) { + if (!buffered_replays.HasBufferedReplays()) { + return; + } - DataChunk scan_chunk; - buffered_appends.InitializeScanChunk(scan_chunk); + // We have two replay states: one for inserts and one for deletes. These are indexed into using the + // replay_type. Both scans are interleaved, so the state maintains the position of each scan. + array replay_states; DataChunk table_chunk; table_chunk.InitializeEmpty(table_types); - while (buffered_appends.Scan(state, scan_chunk)) { - for (idx_t i = 0; i < scan_chunk.ColumnCount() - 1; i++) { - auto col_id = mapped_column_ids[i].GetPrimaryIndex(); - table_chunk.data[col_id].Reference(scan_chunk.data[i]); + for (const auto &replay_range : buffered_replays.ranges) { + const auto type_idx = static_cast(replay_range.type); + auto &state = replay_states[type_idx]; + + // Initialize the scan state if necessary. Take ownership of buffered operations, since we won't need + // them after replaying anyways. + if (!state.scan_initialized) { + state.buffer = buffered_replays.GetBuffer(replay_range.type); + state.buffer->InitializeScan(state.scan_state); + state.buffer->InitializeScanChunk(state.current_chunk); + state.scan_initialized = true; } - table_chunk.SetCardinality(scan_chunk.size()); - auto error = Append(table_chunk, scan_chunk.data.back(), index_append_info); - if (error.HasError()) { - throw InternalException("error while applying buffered appends: " + error.Message()); + idx_t current_row = replay_range.start; + while (current_row < replay_range.end) { + // Scan the next DataChunk from the ColumnDataCollection buffer if the current row is on or after + // that chunk's starting row index. + if (current_row >= state.scan_state.next_row_index) { + if (!state.buffer->Scan(state.scan_state, state.current_chunk)) { + throw InternalException("Buffered index data exhausted during replay"); + } + } + + // We need to process the remaining rows in the current chunk, which is the minimum of the available + // rows in the chunk and the remaining rows in the current range. 
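The arithmetic continued in the lines below advances two interleaved scans by min(rows left in the current chunk, rows left in the replay range). A self-contained sketch of that positioning logic, with a fixed chunk size standing in for whatever the ColumnDataCollection scan actually yields:

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>

int main() {
	const size_t chunk_size = 2048;
	size_t current_row = 1000, end = 5000; // replay range [1000, 5000)
	size_t chunk_start = 0;                // row index where the current chunk begins
	size_t processed = 0;
	while (current_row < end) {
		if (current_row >= chunk_start + chunk_size) {
			chunk_start += chunk_size; // "Scan" the next chunk
		}
		const size_t offset_in_chunk = current_row - chunk_start;
		const size_t available_in_chunk = chunk_size - offset_in_chunk;
		const size_t rows_to_process = std::min(available_in_chunk, end - current_row);
		processed += rows_to_process;
		current_row += rows_to_process;
	}
	assert(processed == 4000);
	return 0;
}
```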
+ const auto offset_in_chunk = current_row - state.scan_state.current_row_index; + const auto available_in_chunk = state.current_chunk.size() - offset_in_chunk; + // [start, end) in ReplayRange is [inclusive, exclusive). + const auto range_remaining = replay_range.end - current_row; + const auto rows_to_process = MinValue(available_in_chunk, range_remaining); + + SelectionVector sel(offset_in_chunk, rows_to_process); + + for (idx_t col_idx = 0; col_idx < state.current_chunk.ColumnCount() - 1; col_idx++) { + const auto col_id = mapped_column_ids[col_idx].GetPrimaryIndex(); + table_chunk.data[col_id].Reference(state.current_chunk.data[col_idx]); + table_chunk.data[col_id].Slice(sel, rows_to_process); + } + table_chunk.SetCardinality(rows_to_process); + Vector row_ids(state.current_chunk.data.back(), sel, rows_to_process); + + if (replay_range.type == BufferedIndexReplay::INSERT_ENTRY) { + IndexAppendInfo append_info(IndexAppendMode::INSERT_DUPLICATES, nullptr); + const auto error = Append(table_chunk, row_ids, append_info); + if (error.HasError()) { + throw InternalException("error while applying buffered appends: " + error.Message()); + } + current_row += rows_to_process; + continue; + } + Delete(table_chunk, row_ids); + current_row += rows_to_process; } } } diff --git a/src/execution/index/unbound_index.cpp b/src/execution/index/unbound_index.cpp index 0d117ca92bc7..bd8e707a9873 100644 --- a/src/execution/index/unbound_index.cpp +++ b/src/execution/index/unbound_index.cpp @@ -35,26 +35,48 @@ void UnboundIndex::CommitDrop() { } } -void UnboundIndex::BufferChunk(DataChunk &chunk, Vector &row_ids, const vector &mapped_column_ids_p) { +void UnboundIndex::BufferChunk(DataChunk &index_column_chunk, Vector &row_ids, + const vector &mapped_column_ids_p, const BufferedIndexReplay replay_type) { D_ASSERT(!column_ids.empty()); - auto types = chunk.GetTypes(); + auto types = index_column_chunk.GetTypes(); // column types types.push_back(LogicalType::ROW_TYPE); - if (!buffered_appends) { - auto &allocator = Allocator::Get(db); - buffered_appends = make_uniq(allocator, types); + auto &allocator = Allocator::Get(db); + + //! First time we are buffering data, canonical column_id mapping is stored. + //! This should be a sorted list of all the physical offsets of Indexed columns on this table. + if (mapped_column_ids.empty()) { mapped_column_ids = mapped_column_ids_p; } D_ASSERT(mapped_column_ids == mapped_column_ids_p); + // combined_chunk has all the indexed columns according to mapped_column_ids ordering, as well as a rowid column. DataChunk combined_chunk; combined_chunk.InitializeEmpty(types); - for (idx_t i = 0; i < chunk.ColumnCount(); i++) { - combined_chunk.data[i].Reference(chunk.data[i]); + for (idx_t i = 0; i < index_column_chunk.ColumnCount(); i++) { + combined_chunk.data[i].Reference(index_column_chunk.data[i]); } combined_chunk.data.back().Reference(row_ids); - combined_chunk.SetCardinality(chunk.size()); - buffered_appends->Append(combined_chunk); + combined_chunk.SetCardinality(index_column_chunk.size()); + + auto &buffer = buffered_replays.GetBuffer(replay_type); + if (buffer == nullptr) { + buffer = make_uniq(allocator, types); + } + // The starting index of the buffer range is the size of the buffer. + const idx_t start = buffer->Count(); + const idx_t end = start + combined_chunk.size(); + auto &ranges = buffered_replays.ranges; + + if (ranges.empty() || ranges.back().type != replay_type) { + // If there are no buffered ranges, or the replay types don't match, append a new range. 
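As the emplace_back/merge branch just below shows, consecutive chunks of the same replay type extend the previous range rather than opening a new one, so replay later walks only a few ranges. A standalone sketch of that run-merging bookkeeping (hypothetical names, and a single shared offset for brevity where the real code tracks one count per buffer):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

enum class ReplayType { INSERT_ENTRY, DEL_ENTRY };

struct ReplayRange {
	ReplayType type;
	size_t start; // inclusive
	size_t end;   // exclusive
};

static void BufferRange(std::vector<ReplayRange> &ranges, ReplayType type, size_t count) {
	const size_t start = ranges.empty() ? 0 : ranges.back().end;
	if (!ranges.empty() && ranges.back().type == type) {
		ranges.back().end = start + count; // merge with the previous range
		return;
	}
	ranges.push_back({type, start, start + count});
}

int main() {
	std::vector<ReplayRange> ranges;
	BufferRange(ranges, ReplayType::INSERT_ENTRY, 100);
	BufferRange(ranges, ReplayType::INSERT_ENTRY, 50); // merged: [0, 150)
	BufferRange(ranges, ReplayType::DEL_ENTRY, 10);    // new range: [150, 160)
	assert(ranges.size() == 2 && ranges[0].end == 150);
	return 0;
}
```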
+ ranges.emplace_back(replay_type, start, end); + buffer->Append(combined_chunk); + return; + } + // Otherwise merge the range with the previous one. + ranges.back().end = end; + buffer->Append(combined_chunk); } } // namespace duckdb diff --git a/src/execution/join_hashtable.cpp b/src/execution/join_hashtable.cpp index cfa845a88863..aca687132db6 100644 --- a/src/execution/join_hashtable.cpp +++ b/src/execution/join_hashtable.cpp @@ -888,6 +888,7 @@ idx_t ScanStructure::ResolvePredicates(DataChunk &keys, SelectionVector &match_s } // If there is a matcher for the probing side because of non-equality predicates, use it + idx_t result_count; if (ht.needs_chain_matcher) { idx_t no_match_count = 0; auto &matcher = no_match_sel ? ht.row_matcher_probe_no_match_sel : ht.row_matcher_probe; @@ -895,12 +896,17 @@ idx_t ScanStructure::ResolvePredicates(DataChunk &keys, SelectionVector &match_s // we need to only use the vectors with the indices of the columns that are used in the probe phase, namely // the non-equality columns - return matcher->Match(keys, key_state.vector_data, match_sel, this->count, pointers, no_match_sel, - no_match_count); + result_count = + matcher->Match(keys, key_state.vector_data, match_sel, this->count, pointers, no_match_sel, no_match_count); } else { // no match sel is the opposite of match sel - return this->count; + result_count = this->count; } + + // Update total probe match count + ht.total_probe_matches.fetch_add(result_count, std::memory_order_relaxed); + + return result_count; } idx_t ScanStructure::ScanInnerJoin(DataChunk &keys, SelectionVector &result_vector) { diff --git a/src/execution/operator/csv_scanner/scanner/base_scanner.cpp b/src/execution/operator/csv_scanner/scanner/base_scanner.cpp index 1e186fa974ee..179c5bcbfebf 100644 --- a/src/execution/operator/csv_scanner/scanner/base_scanner.cpp +++ b/src/execution/operator/csv_scanner/scanner/base_scanner.cpp @@ -26,6 +26,10 @@ BaseScanner::BaseScanner(shared_ptr buffer_manager_p, shared_p } } +void BaseScanner::Print() const { + state_machine->Print(); +} + string BaseScanner::RemoveSeparator(const char *value_ptr, const idx_t size, char thousands_separator) { string result; result.reserve(size); diff --git a/src/execution/operator/csv_scanner/scanner/string_value_scanner.cpp b/src/execution/operator/csv_scanner/scanner/string_value_scanner.cpp index 5ed14a992a37..a495d22a6613 100644 --- a/src/execution/operator/csv_scanner/scanner/string_value_scanner.cpp +++ b/src/execution/operator/csv_scanner/scanner/string_value_scanner.cpp @@ -22,7 +22,7 @@ StringValueResult::StringValueResult(CSVStates &states, CSVStateMachine &state_m idx_t result_size_p, idx_t buffer_position, CSVErrorHandler &error_hander_p, CSVIterator &iterator_p, bool store_line_size_p, shared_ptr csv_file_scan_p, idx_t &lines_read_p, bool sniffing_p, - string path_p, idx_t scan_id) + const string &path_p, idx_t scan_id, bool &used_unstrictness) : ScannerResult(states, state_machine, result_size_p), number_of_columns(NumericCast(state_machine.dialect_options.num_cols)), null_padding(state_machine.options.null_padding), ignore_errors(state_machine.options.ignore_errors.GetValue()), @@ -30,8 +30,8 @@ StringValueResult::StringValueResult(CSVStates &states, CSVStateMachine &state_m ? 
0 : state_machine.dialect_options.state_machine_options.delimiter.GetValue().size() - 1), error_handler(error_hander_p), iterator(iterator_p), store_line_size(store_line_size_p), - csv_file_scan(std::move(csv_file_scan_p)), lines_read(lines_read_p), - current_errors(scan_id, state_machine.options.IgnoreErrors()), sniffing(sniffing_p), path(std::move(path_p)) { + csv_file_scan(std::move(csv_file_scan_p)), lines_read(lines_read_p), used_unstrictness(used_unstrictness), + current_errors(scan_id, state_machine.options.IgnoreErrors()), sniffing(sniffing_p), path(path_p) { // Vector information D_ASSERT(number_of_columns > 0); if (!buffer_handle) { @@ -154,23 +154,26 @@ inline bool IsValueNull(const char *null_str_ptr, const char *value_ptr, const i } bool StringValueResult::HandleTooManyColumnsError(const char *value_ptr, const idx_t size) { - if (cur_col_id >= number_of_columns && state_machine.state_machine_options.strict_mode.GetValue()) { - bool error = true; - if (cur_col_id == number_of_columns && ((quoted && state_machine.options.allow_quoted_nulls) || !quoted)) { - // we make an exception if the first over-value is null - bool is_value_null = false; - for (idx_t i = 0; i < null_str_count; i++) { - is_value_null = is_value_null || IsValueNull(null_str_ptr[i], value_ptr, size); + if (cur_col_id >= number_of_columns) { + if (state_machine.state_machine_options.strict_mode.GetValue()) { + bool error = true; + if (cur_col_id == number_of_columns && ((quoted && state_machine.options.allow_quoted_nulls) || !quoted)) { + // we make an exception if the first over-value is null + bool is_value_null = false; + for (idx_t i = 0; i < null_str_count; i++) { + is_value_null = is_value_null || IsValueNull(null_str_ptr[i], value_ptr, size); + } + error = !is_value_null; } - error = !is_value_null; - } - if (error) { - // We error pointing to the current value error. - current_errors.Insert(TOO_MANY_COLUMNS, cur_col_id, chunk_col_id, last_position); - cur_col_id++; + if (error) { + // We error pointing to the current value error. 
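Condensing the branch that continues below: in strict mode an overflow column is an error unless the first extra value is NULL, while non-strict mode drops it and records that leniency was needed. A hedged standalone model (not the real scanner API):

```cpp
#include <cassert>
#include <cstddef>

struct OverflowResult {
	bool error;
	bool used_unstrictness;
};

// Hypothetical helper mirroring HandleTooManyColumnsError's decision table.
static OverflowResult HandleOverflowValue(bool strict_mode, bool value_is_null, size_t col_id,
                                          size_t number_of_columns) {
	if (col_id < number_of_columns) {
		return {false, false}; // value fits, nothing to do
	}
	if (!strict_mode) {
		return {false, true}; // dropped, but leniency was required
	}
	// Strict: tolerate only a NULL directly after the last column.
	const bool tolerated = (col_id == number_of_columns) && value_is_null;
	return {!tolerated, false};
}

int main() {
	assert(HandleOverflowValue(true, false, 5, 5).error);
	assert(!HandleOverflowValue(true, true, 5, 5).error);
	assert(HandleOverflowValue(false, false, 7, 5).used_unstrictness);
	return 0;
}
```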
+ current_errors.Insert(TOO_MANY_COLUMNS, cur_col_id, chunk_col_id, last_position); + cur_col_id++; + } + // We had an error + return true; } - // We had an error - return true; + used_unstrictness = true; } return false; } @@ -231,6 +234,7 @@ void StringValueResult::AddValueToVector(const char *value_ptr, idx_t size, bool } if (cur_col_id >= number_of_columns) { if (!state_machine.state_machine_options.strict_mode.GetValue()) { + used_unstrictness = true; return; } bool error = true; @@ -549,6 +553,7 @@ void StringValueResult::AddPossiblyEscapedValue(StringValueResult &result, const } if (result.cur_col_id >= result.number_of_columns && !result.state_machine.state_machine_options.strict_mode.GetValue()) { + result.used_unstrictness = true; return; } if (!result.HandleTooManyColumnsError(value_ptr, length)) { @@ -980,7 +985,7 @@ StringValueScanner::StringValueScanner(idx_t scanner_idx_p, const shared_ptrcontext), result_size, iterator.pos.buffer_pos, *error_handler, iterator, buffer_manager->context.client_data->debug_set_max_line_length, csv_file_scan, lines_read, sniffing, - buffer_manager->GetFilePath(), scanner_idx_p), + buffer_manager->GetFilePath(), scanner_idx_p, used_unstrictness), start_pos(0) { if (scanner_idx == 0 && csv_file_scan) { lines_read += csv_file_scan->skipped_rows; @@ -997,7 +1002,7 @@ StringValueScanner::StringValueScanner(const shared_ptr &buffe result(states, *state_machine, cur_buffer_handle, Allocator::DefaultAllocator(), result_size, iterator.pos.buffer_pos, *error_handler, iterator, buffer_manager->context.client_data->debug_set_max_line_length, csv_file_scan, lines_read, sniffing, - buffer_manager->GetFilePath(), 0), + buffer_manager->GetFilePath(), 0, used_unstrictness), start_pos(0) { if (scanner_idx == 0 && csv_file_scan) { lines_read += csv_file_scan->skipped_rows; @@ -1939,14 +1944,17 @@ void StringValueScanner::FinalizeChunkProcess() { if (result.current_errors.HandleErrors(result)) { result.number_of_rows++; } - if (states.IsQuotedCurrent() && !found_error && - state_machine->dialect_options.state_machine_options.strict_mode.GetValue()) { - type = UNTERMINATED_QUOTES; - // If we finish the execution of a buffer, and we end in a quoted state, it means we have unterminated - // quotes - result.current_errors.Insert(type, result.cur_col_id, result.chunk_col_id, result.last_position); - if (result.current_errors.HandleErrors(result)) { - result.number_of_rows++; + if (states.IsQuotedCurrent() && !found_error) { + if (state_machine->dialect_options.state_machine_options.strict_mode.GetValue()) { + type = UNTERMINATED_QUOTES; + // If we finish the execution of a buffer, and we end in a quoted state, it means we have unterminated + // quotes + result.current_errors.Insert(type, result.cur_col_id, result.chunk_col_id, result.last_position); + if (result.current_errors.HandleErrors(result)) { + result.number_of_rows++; + } + } else { + used_unstrictness = true; } } if (!iterator.done) { diff --git a/src/execution/operator/csv_scanner/sniffer/csv_sniffer.cpp b/src/execution/operator/csv_scanner/sniffer/csv_sniffer.cpp index bcaed8e5fa7b..9cf0878719db 100644 --- a/src/execution/operator/csv_scanner/sniffer/csv_sniffer.cpp +++ b/src/execution/operator/csv_scanner/sniffer/csv_sniffer.cpp @@ -14,7 +14,7 @@ CSVSniffer::CSVSniffer(CSVReaderOptions &options_p, const MultiFileOptions &file auto &logical_type = format_template.first; best_format_candidates[logical_type].clear(); } - // Initialize max columns found to either 0 or however many were set + // Initialize max 
columns found to either 0, or however many were set max_columns_found = set_columns.Size(); error_handler = make_shared_ptr(options.ignore_errors.GetValue()); detection_error_handler = make_shared_ptr(true); @@ -193,7 +193,8 @@ SnifferResult CSVSniffer::SniffCSV(const bool force_match) { buffer_manager->ResetBufferManager(); } buffer_manager->sniffing = false; - if (best_candidate->error_handler->AnyErrors() && !options.ignore_errors.GetValue()) { + if (best_candidate->error_handler->AnyErrors() && !options.ignore_errors.GetValue() && + best_candidate->state_machine->dialect_options.state_machine_options.strict_mode.GetValue()) { best_candidate->error_handler->ErrorIfTypeExists(MAXIMUM_LINE_SIZE); } D_ASSERT(best_sql_types_candidates_per_column_idx.size() == names.size()); diff --git a/src/execution/operator/csv_scanner/sniffer/type_detection.cpp b/src/execution/operator/csv_scanner/sniffer/type_detection.cpp index 2df6bbe8c5f5..347c0fd1b920 100644 --- a/src/execution/operator/csv_scanner/sniffer/type_detection.cpp +++ b/src/execution/operator/csv_scanner/sniffer/type_detection.cpp @@ -462,24 +462,30 @@ void CSVSniffer::DetectTypes() { idx_t varchar_cols = 0; for (idx_t col = 0; col < info_sql_types_candidates.size(); col++) { auto &col_type_candidates = info_sql_types_candidates[col]; - // check number of varchar columns + // check the number of varchar columns const auto &col_type = col_type_candidates.back(); if (col_type == LogicalType::VARCHAR) { varchar_cols++; } } - // it's good if the dialect creates more non-varchar columns, but only if we sacrifice < 30% of - // best_num_cols. + // it's good if the dialect creates more non-varchar columns + const bool has_less_varchar_cols = varchar_cols < min_varchar_cols; + // but only if we sacrifice < 30% of best_num_cols. + const bool acceptable_best_num_cols = + static_cast(info_sql_types_candidates.size()) > static_cast(max_columns_found) * 0.7; const idx_t number_of_errors = candidate->error_handler->GetSize(); - if (!best_candidate || (varchar_cols(info_sql_types_candidates.size())>( - static_cast(max_columns_found) * 0.7) && - (!options.ignore_errors.GetValue() || number_of_errors < min_errors))) { + const bool better_strictness = best_candidate_is_strict ? !candidate->used_unstrictness : true; + const bool acceptable_candidate = has_less_varchar_cols && acceptable_best_num_cols && better_strictness; + // If we escaped an unquoted character when strict is false. 
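The comparison resumed below combines three predicates: a dialect candidate wins if it produces fewer VARCHAR columns, keeps more than 70% of the best column count found so far, and does not trade a strict parse for one that needed leniency. A standalone sketch of that scoring (the Candidate struct is hypothetical):

```cpp
#include <cassert>
#include <cstddef>

struct Candidate {
	size_t varchar_cols;
	size_t num_cols;
	bool used_unstrictness;
};

static bool IsBetter(const Candidate &cand, size_t min_varchar_cols, size_t max_columns_found,
                     bool best_is_strict) {
	const bool has_less_varchar_cols = cand.varchar_cols < min_varchar_cols;
	const bool acceptable_num_cols =
	    static_cast<double>(cand.num_cols) > static_cast<double>(max_columns_found) * 0.7;
	const bool better_strictness = best_is_strict ? !cand.used_unstrictness : true;
	return has_less_varchar_cols && acceptable_num_cols && better_strictness;
}

int main() {
	// Fewer VARCHARs and enough columns, parsed strictly: accepted.
	assert(IsBetter({2, 8, false}, 4, 10, true));
	// Sacrifices too many columns (6 <= 0.7 * 10): rejected.
	assert(!IsBetter({2, 6, false}, 4, 10, true));
	// Needed lenient parsing while the best candidate was strict: rejected.
	assert(!IsBetter({2, 8, true}, 4, 10, true));
	return 0;
}
```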
+ if (!best_candidate || + (acceptable_candidate && (!options.ignore_errors.GetValue() || number_of_errors < min_errors))) { min_errors = number_of_errors; best_header_row.clear(); // we have a new best_options candidate best_candidate = std::move(candidate); min_varchar_cols = varchar_cols; + best_candidate_is_strict = !best_candidate->used_unstrictness; best_sql_types_candidates_per_column_idx = info_sql_types_candidates; for (auto &format_candidate : format_candidates) { best_format_candidates[format_candidate.first] = format_candidate.second.format; diff --git a/src/execution/operator/join/physical_hash_join.cpp b/src/execution/operator/join/physical_hash_join.cpp index 9513bded8fd7..da26325cc7e2 100644 --- a/src/execution/operator/join/physical_hash_join.cpp +++ b/src/execution/operator/join/physical_hash_join.cpp @@ -164,6 +164,11 @@ class HashJoinGlobalSinkState : public GlobalSinkState { } } + ~HashJoinGlobalSinkState() override { + DUCKDB_LOG(context, PhysicalOperatorLogType, op, "PhysicalHashJoin", "GetData", + {{"total_probe_matches", to_string(hash_table->total_probe_matches)}}); + } + void ScheduleFinalize(Pipeline &pipeline, Event &event); void InitializeProbeSpill(); diff --git a/src/execution/operator/scan/physical_positional_scan.cpp b/src/execution/operator/scan/physical_positional_scan.cpp index bff24d7857fe..6e4bfcaf3db7 100644 --- a/src/execution/operator/scan/physical_positional_scan.cpp +++ b/src/execution/operator/scan/physical_positional_scan.cpp @@ -67,10 +67,14 @@ class PositionalTableScanner { InterruptState interrupt_state; OperatorSourceInput source_input {global_state, *local_state, interrupt_state}; - auto source_result = table.GetData(context, source, source_input); - if (source_result == SourceResultType::BLOCKED) { - throw NotImplementedException( - "Unexpected interrupt from table Source in PositionalTableScanner refill"); + auto source_result = SourceResultType::HAVE_MORE_OUTPUT; + while (source_result == SourceResultType::HAVE_MORE_OUTPUT && source.size() == 0) { + // TODO: this could as well just be propagated further, but for now iterating it is + source_result = table.GetData(context, source, source_input); + if (source_result == SourceResultType::BLOCKED) { + throw NotImplementedException( + "Unexpected interrupt from table Source in PositionalTableScanner refill"); + } } } source_offset = 0; diff --git a/src/execution/operator/scan/physical_table_scan.cpp b/src/execution/operator/scan/physical_table_scan.cpp index e9f66bea4bc5..a777689b16c9 100644 --- a/src/execution/operator/scan/physical_table_scan.cpp +++ b/src/execution/operator/scan/physical_table_scan.cpp @@ -259,7 +259,7 @@ bool PhysicalTableScan::Equals(const PhysicalOperator &other_p) const { return false; } auto &other = other_p.Cast(); - if (function.function != other.function.function) { + if (function != other.function) { return false; } if (column_ids != other.column_ids) { diff --git a/src/execution/physical_plan/plan_aggregate.cpp b/src/execution/physical_plan/plan_aggregate.cpp index fcb2aaef545e..b22997068d34 100644 --- a/src/execution/physical_plan/plan_aggregate.cpp +++ b/src/execution/physical_plan/plan_aggregate.cpp @@ -236,7 +236,7 @@ PhysicalOperator &PhysicalPlanGenerator::CreatePlan(LogicalAggregate &op) { D_ASSERT(op.children.size() == 1); reference plan = CreatePlan(*op.children[0]); - plan = ExtractAggregateExpressions(plan, op.expressions, op.groups); + plan = ExtractAggregateExpressions(plan, op.expressions, op.groups, op.grouping_sets); bool 
can_use_simple_aggregation = true; for (auto &expression : op.expressions) { @@ -305,7 +305,8 @@ PhysicalOperator &PhysicalPlanGenerator::CreatePlan(LogicalAggregate &op) { PhysicalOperator &PhysicalPlanGenerator::ExtractAggregateExpressions(PhysicalOperator &child, vector> &aggregates, - vector> &groups) { + vector> &groups, + optional_ptr> grouping_sets) { vector> expressions; vector types; @@ -314,7 +315,7 @@ PhysicalOperator &PhysicalPlanGenerator::ExtractAggregateExpressions(PhysicalOpe auto &bound_aggr = aggr->Cast(); if (bound_aggr.order_bys) { // sorted aggregate! - FunctionBinder::BindSortedAggregate(context, bound_aggr, groups); + FunctionBinder::BindSortedAggregate(context, bound_aggr, groups, grouping_sets); } } for (auto &group : groups) { diff --git a/src/execution/physical_plan/plan_distinct.cpp b/src/execution/physical_plan/plan_distinct.cpp index f1b8aec4741b..39b6d96e8a89 100644 --- a/src/execution/physical_plan/plan_distinct.cpp +++ b/src/execution/physical_plan/plan_distinct.cpp @@ -65,7 +65,8 @@ PhysicalOperator &PhysicalPlanGenerator::CreatePlan(LogicalDistinct &op) { if (ClientConfig::GetConfig(context).enable_optimizer) { bool changes_made = false; - auto new_expr = OrderedAggregateOptimizer::Apply(context, *first_aggregate, groups, changes_made); + auto new_expr = + OrderedAggregateOptimizer::Apply(context, *first_aggregate, groups, nullptr, changes_made); if (new_expr) { D_ASSERT(new_expr->return_type == first_aggregate->return_type); D_ASSERT(new_expr->GetExpressionType() == ExpressionType::BOUND_AGGREGATE); @@ -81,7 +82,7 @@ PhysicalOperator &PhysicalPlanGenerator::CreatePlan(LogicalDistinct &op) { } } - child = ExtractAggregateExpressions(child, aggregates, groups); + child = ExtractAggregateExpressions(child, aggregates, groups, nullptr); // we add a physical hash aggregation in the plan to select the distinct groups auto &group_by = Make(context, aggregate_types, std::move(aggregates), std::move(groups), diff --git a/src/execution/physical_plan/plan_filter.cpp b/src/execution/physical_plan/plan_filter.cpp index 292fe1bc8410..796e4aeb38af 100644 --- a/src/execution/physical_plan/plan_filter.cpp +++ b/src/execution/physical_plan/plan_filter.cpp @@ -14,7 +14,6 @@ PhysicalOperator &PhysicalPlanGenerator::CreatePlan(LogicalFilter &op) { D_ASSERT(op.children.size() == 1); reference plan = CreatePlan(*op.children[0]); if (!op.expressions.empty()) { - D_ASSERT(!plan.get().GetTypes().empty()); // create a filter if there is anything to filter auto &filter = Make(plan.get().GetTypes(), std::move(op.expressions), op.estimated_cardinality); filter.children.push_back(plan); diff --git a/src/execution/physical_plan/plan_window.cpp b/src/execution/physical_plan/plan_window.cpp index c9cab9e8c6b7..ace9b5c1a5c1 100644 --- a/src/execution/physical_plan/plan_window.cpp +++ b/src/execution/physical_plan/plan_window.cpp @@ -2,13 +2,11 @@ #include "duckdb/execution/operator/aggregate/physical_window.hpp" #include "duckdb/execution/operator/projection/physical_projection.hpp" #include "duckdb/execution/physical_plan_generator.hpp" -#include "duckdb/main/client_context.hpp" +#include "duckdb/main/client_config.hpp" #include "duckdb/planner/expression/bound_reference_expression.hpp" #include "duckdb/planner/expression/bound_window_expression.hpp" #include "duckdb/planner/operator/logical_window.hpp" -#include - namespace duckdb { PhysicalOperator &PhysicalPlanGenerator::CreatePlan(LogicalWindow &op) { @@ -44,12 +42,12 @@ PhysicalOperator 
&PhysicalPlanGenerator::CreatePlan(LogicalWindow &op) { // Process the window functions by sharing the partition/order definitions unordered_map projection_map; vector> window_expressions; - idx_t blocking_count = 0; + idx_t streaming_count = 0; auto output_pos = input_width; while (!blocking_windows.empty() || !streaming_windows.empty()) { - const bool process_streaming = blocking_windows.empty(); - auto &remaining = process_streaming ? streaming_windows : blocking_windows; - blocking_count += process_streaming ? 0 : 1; + const bool process_blocking = streaming_windows.empty(); + auto &remaining = process_blocking ? blocking_windows : streaming_windows; + streaming_count += process_blocking ? 0 : 1; // Find all functions that share the partitioning of the first remaining expression auto over_idx = remaining[0]; @@ -122,7 +120,7 @@ PhysicalOperator &PhysicalPlanGenerator::CreatePlan(LogicalWindow &op) { } // Chain the new window operator on top of the plan - if (i < blocking_count) { + if (i >= streaming_count) { auto &window = Make(types, std::move(select_list), op.estimated_cardinality); window.children.push_back(plan); plan = window; diff --git a/src/function/aggregate/sorted_aggregate_function.cpp b/src/function/aggregate/sorted_aggregate_function.cpp index bde4c1479ba8..c4fd191b74d3 100644 --- a/src/function/aggregate/sorted_aggregate_function.cpp +++ b/src/function/aggregate/sorted_aggregate_function.cpp @@ -677,14 +677,15 @@ struct SortedAggregateFunction { } // namespace void FunctionBinder::BindSortedAggregate(ClientContext &context, BoundAggregateExpression &expr, - const vector> &groups) { + const vector> &groups, + optional_ptr> grouping_sets) { if (!expr.order_bys || expr.order_bys->orders.empty() || expr.children.empty()) { // not a sorted aggregate: return return; } // Remove unnecessary ORDER BY clauses and return if nothing remains if (context.config.enable_optimizer) { - if (expr.order_bys->Simplify(groups)) { + if (expr.order_bys->Simplify(groups, grouping_sets)) { expr.order_bys.reset(); return; } @@ -741,7 +742,7 @@ void FunctionBinder::BindSortedAggregate(ClientContext &context, BoundWindowExpr } // Remove unnecessary ORDER BY clauses and return if nothing remains if (context.config.enable_optimizer) { - if (BoundOrderModifier::Simplify(expr.arg_orders, expr.partitions)) { + if (BoundOrderModifier::Simplify(expr.arg_orders, expr.partitions, nullptr)) { expr.arg_orders.clear(); return; } diff --git a/src/function/cast/cast_function_set.cpp b/src/function/cast/cast_function_set.cpp index 4e6ed8b998db..606fa9010447 100644 --- a/src/function/cast/cast_function_set.cpp +++ b/src/function/cast/cast_function_set.cpp @@ -184,7 +184,9 @@ int64_t CastFunctionSet::ImplicitCastCost(optional_ptr context, c old_implicit_casting = DBConfig::GetSetting(*config); } if (old_implicit_casting) { - score = 149; + // very high cost to avoid choosing this cast if any other option is available + // (it should be more costly than casting to TEMPLATE if that is available) + score = 10000000000; } } return score; diff --git a/src/function/macro_function.cpp b/src/function/macro_function.cpp index 66e36181b5be..8487827ecfde 100644 --- a/src/function/macro_function.cpp +++ b/src/function/macro_function.cpp @@ -48,13 +48,31 @@ MacroBindResult MacroFunction::BindMacroFunction( ExpressionBinder expr_binder(binder, binder.context); expr_binder.lambda_bindings = binder.lambda_bindings; + + // Figure out whether we even need to bind arguments + bool requires_bind = false; + for (auto &function : 
functions) { + for (const auto &type : function->types) { + if (type.id() != LogicalTypeId::UNKNOWN) { + requires_bind = true; + break; + } + } + if (requires_bind) { + break; + } + } + // Find argument types and separate positional and default arguments vector positional_arg_types; InsertionOrderPreservingMap named_arg_types; for (auto &arg : function_expr.children) { auto arg_copy = arg->Copy(); - const auto arg_bind_result = expr_binder.BindExpression(arg_copy, depth + 1); - auto arg_type = arg_bind_result.HasError() ? LogicalType::UNKNOWN : arg_bind_result.expression->return_type; + LogicalType arg_type = LogicalType::UNKNOWN; + if (requires_bind) { + const auto arg_bind_result = expr_binder.BindExpression(arg_copy, depth + 1); + arg_type = arg_bind_result.HasError() ? LogicalType::UNKNOWN : arg_bind_result.expression->return_type; + } if (!arg->GetAlias().empty()) { // Default argument if (named_arguments.find(arg->GetAlias()) != named_arguments.end()) { diff --git a/src/function/scalar/system/parse_log_message.cpp b/src/function/scalar/system/parse_log_message.cpp index d5e336165874..e2f2eefee163 100644 --- a/src/function/scalar/system/parse_log_message.cpp +++ b/src/function/scalar/system/parse_log_message.cpp @@ -77,8 +77,10 @@ void ParseLogMessageFunction(DataChunk &args, ExpressionState &state, Vector &re } // namespace ScalarFunction ParseLogMessage::GetFunction() { - return ScalarFunction({LogicalType::VARCHAR, LogicalType::VARCHAR}, LogicalType::ANY, ParseLogMessageFunction, - ParseLogMessageBind, nullptr, nullptr, nullptr, LogicalType(LogicalTypeId::INVALID)); + auto fun = ScalarFunction({LogicalType::VARCHAR, LogicalType::VARCHAR}, LogicalType::ANY, ParseLogMessageFunction, + ParseLogMessageBind, nullptr, nullptr, nullptr, LogicalType(LogicalTypeId::INVALID)); + fun.errors = FunctionErrors::CAN_THROW_RUNTIME_ERROR; + return fun; } } // namespace duckdb diff --git a/src/function/table/direct_file_reader.cpp b/src/function/table/direct_file_reader.cpp index 8aa6aba352e1..e5ed5d7e6529 100644 --- a/src/function/table/direct_file_reader.cpp +++ b/src/function/table/direct_file_reader.cpp @@ -52,6 +52,8 @@ void DirectFileReader::Scan(ClientContext &context, GlobalTableFunctionState &gl } auto files = state.file_list; + + auto ®ular_fs = FileSystem::GetFileSystem(context); auto fs = CachingFileSystem::Get(context); idx_t out_idx = 0; @@ -65,6 +67,14 @@ void DirectFileReader::Scan(ClientContext &context, GlobalTableFunctionState &gl flags |= FileFlags::FILE_FLAGS_DIRECT_IO; } file_handle = fs.OpenFile(QueryContext(context), file, flags); + } else { + // At least verify that the file exist + // The globbing behavior in remote filesystems can lead to files being listed that do not actually exist + if (FileSystem::IsRemoteFile(file.path) && !regular_fs.FileExists(file.path)) { + output.SetCardinality(0); + done = true; + return; + } } for (idx_t col_idx = 0; col_idx < state.column_ids.size(); col_idx++) { diff --git a/src/function/table/read_file.cpp b/src/function/table/read_file.cpp index d0481cc230d2..d929e807439d 100644 --- a/src/function/table/read_file.cpp +++ b/src/function/table/read_file.cpp @@ -10,10 +10,43 @@ namespace duckdb { +namespace { + //------------------------------------------------------------------------------ // DirectMultiFileInfo //------------------------------------------------------------------------------ +template +struct DirectMultiFileInfo : MultiFileReaderInterface { + static unique_ptr CreateInterface(ClientContext &context); + unique_ptr 
InitializeOptions(ClientContext &context, + optional_ptr info) override; + bool ParseCopyOption(ClientContext &context, const string &key, const vector &values, + BaseFileReaderOptions &options, vector &expected_names, + vector &expected_types) override; + bool ParseOption(ClientContext &context, const string &key, const Value &val, MultiFileOptions &file_options, + BaseFileReaderOptions &options) override; + unique_ptr InitializeBindData(MultiFileBindData &multi_file_data, + unique_ptr options) override; + void BindReader(ClientContext &context, vector &return_types, vector &names, + MultiFileBindData &bind_data) override; + optional_idx MaxThreads(const MultiFileBindData &bind_data_p, const MultiFileGlobalState &global_state, + FileExpandResult expand_result) override; + unique_ptr InitializeGlobalState(ClientContext &context, MultiFileBindData &bind_data, + MultiFileGlobalState &global_state) override; + unique_ptr InitializeLocalState(ExecutionContext &, GlobalTableFunctionState &) override; + shared_ptr CreateReader(ClientContext &context, GlobalTableFunctionState &gstate, + BaseUnionData &union_data, const MultiFileBindData &bind_data_p) override; + shared_ptr CreateReader(ClientContext &context, GlobalTableFunctionState &gstate, + const OpenFileInfo &file, idx_t file_idx, + const MultiFileBindData &bind_data) override; + shared_ptr CreateReader(ClientContext &context, const OpenFileInfo &file, + BaseFileReaderOptions &options, + const MultiFileOptions &file_options) override; + unique_ptr GetCardinality(const MultiFileBindData &bind_data, idx_t file_count) override; + FileGlobInput GetGlobInput() override; +}; + template unique_ptr DirectMultiFileInfo::CreateInterface(ClientContext &context) { return make_uniq(); @@ -132,14 +165,45 @@ FileGlobInput DirectMultiFileInfo::GetGlobInput() { } //------------------------------------------------------------------------------ -// Register +// Operations //------------------------------------------------------------------------------ + +struct ReadBlobOperation { + static constexpr const char *NAME = "read_blob"; + static constexpr const char *FILE_TYPE = "blob"; + + static inline LogicalType TYPE() { + return LogicalType::BLOB; + } +}; + +struct ReadTextOperation { + static constexpr const char *NAME = "read_text"; + static constexpr const char *FILE_TYPE = "text"; + + static inline LogicalType TYPE() { + return LogicalType::VARCHAR; + } +}; + template static TableFunction GetFunction() { MultiFileFunction> table_function(OP::NAME); + // Erase extra multi file reader options + table_function.named_parameters.erase("filename"); + table_function.named_parameters.erase("hive_partitioning"); + table_function.named_parameters.erase("union_by_name"); + table_function.named_parameters.erase("hive_types"); + table_function.named_parameters.erase("hive_types_autocast"); return table_function; } +} // namespace + +//------------------------------------------------------------------------------ +// Register +//------------------------------------------------------------------------------ + void ReadBlobFunction::RegisterFunction(BuiltinFunctions &set) { auto scan_fun = GetFunction(); set.AddFunction(MultiFileReader::CreateFunctionSet(scan_fun)); diff --git a/src/function/table/system/duckdb_log.cpp b/src/function/table/system/duckdb_log.cpp index f84cb405afab..96c35853ff3c 100644 --- a/src/function/table/system/duckdb_log.cpp +++ b/src/function/table/system/duckdb_log.cpp @@ -62,6 +62,9 @@ unique_ptr DuckDBLogBindReplace(ClientContext &context, 
diff --git a/src/function/table/system/duckdb_log.cpp index f84cb405afab..96c35853ff3c 100644 --- a/src/function/table/system/duckdb_log.cpp +++ b/src/function/table/system/duckdb_log.cpp @@ -62,6 +62,9 @@ unique_ptr<TableRef> DuckDBLogBindReplace(ClientContext &context, TableFunctionB bool denormalized_table = false; auto denormalized_table_setting = input.named_parameters.find("denormalized_table"); if (denormalized_table_setting != input.named_parameters.end()) { + if (denormalized_table_setting->second.IsNull()) { + throw InvalidInputException("denormalized_table cannot be NULL"); + } denormalized_table = denormalized_table_setting->second.GetValue<bool>(); } diff --git a/src/function/table/system/test_all_types.cpp index cd4ba3964be7..c1c126019d57 100644 --- a/src/function/table/system/test_all_types.cpp +++ b/src/function/table/system/test_all_types.cpp @@ -19,9 +19,10 @@ struct TestAllTypesData : public GlobalTableFunctionState { idx_t offset; }; -vector<TestType> TestAllTypesFun::GetTestTypes(bool use_large_enum, bool use_large_bignum) { +vector<TestType> TestAllTypesFun::GetTestTypes(const bool use_large_enum, const bool use_large_bignum) { vector<TestType> result; - // scalar types/numerics + + // Numeric types. result.emplace_back(LogicalType::BOOLEAN, "bool"); result.emplace_back(LogicalType::TINYINT, "tinyint"); result.emplace_back(LogicalType::SMALLINT, "smallint"); @@ -33,24 +34,31 @@ vector<TestType> TestAllTypesFun::GetTestTypes(bool use_large_enum, bool use_lar result.emplace_back(LogicalType::USMALLINT, "usmallint"); result.emplace_back(LogicalType::UINTEGER, "uint"); result.emplace_back(LogicalType::UBIGINT, "ubigint"); + + // BIGNUM. if (use_large_bignum) { string data; - idx_t total_data_size = Bignum::BIGNUM_HEADER_SIZE + Bignum::MAX_DATA_SIZE; + constexpr idx_t total_data_size = Bignum::BIGNUM_HEADER_SIZE + Bignum::MAX_DATA_SIZE; data.resize(total_data_size); - // Let's set our header + + // Let's set the max header. Bignum::SetHeader(&data[0], Bignum::MAX_DATA_SIZE, false); - // Set all our other bits + // Set all other max bits. memset(&data[Bignum::BIGNUM_HEADER_SIZE], 0xFF, Bignum::MAX_DATA_SIZE); auto max = Value::BIGNUM(data); - // Let's set our header + + // Let's set the min header. Bignum::SetHeader(&data[0], Bignum::MAX_DATA_SIZE, true); - // Set all our other bits + // Set all other min bits. memset(&data[Bignum::BIGNUM_HEADER_SIZE], 0x00, Bignum::MAX_DATA_SIZE); auto min = Value::BIGNUM(data); result.emplace_back(LogicalType::BIGNUM, "bignum", min, max); + } else { result.emplace_back(LogicalType::BIGNUM, "bignum"); } + + // Time types. result.emplace_back(LogicalType::DATE, "date"); result.emplace_back(LogicalType::TIME, "time"); result.emplace_back(LogicalType::TIMESTAMP, "timestamp"); @@ -59,15 +67,19 @@ vector<TestType> TestAllTypesFun::GetTestTypes(bool use_large_enum, bool use_lar result.emplace_back(LogicalType::TIMESTAMP_NS, "timestamp_ns"); result.emplace_back(LogicalType::TIME_TZ, "time_tz"); result.emplace_back(LogicalType::TIMESTAMP_TZ, "timestamp_tz"); - result.emplace_back(LogicalType::FLOAT, "float"); - result.emplace_back(LogicalType::DOUBLE, "double"); + + // More complex numeric types. + result.emplace_back(LogicalType::FLOAT, "float", Value::FLOAT(std::numeric_limits<float>::lowest()), + Value::FLOAT(std::numeric_limits<float>::max())); + result.emplace_back(LogicalType::DOUBLE, "double", Value::DOUBLE(std::numeric_limits<double>::lowest()), + Value::DOUBLE(std::numeric_limits<double>::max())); result.emplace_back(LogicalType::DECIMAL(4, 1), "dec_4_1"); result.emplace_back(LogicalType::DECIMAL(9, 4), "dec_9_4"); result.emplace_back(LogicalType::DECIMAL(18, 6), "dec_18_6"); result.emplace_back(LogicalType::DECIMAL(38, 10), "dec38_10"); result.emplace_back(LogicalType::UUID, "uuid");
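The new float and double test bounds above rely on a subtlety of <limits>: for floating-point types, std::numeric_limits<T>::min() is the smallest positive normal value, so lowest() is the correct lower bound. A self-contained check (illustrative only, not part of the patch):

#include <cassert>
#include <limits>

int main() {
	constexpr float lo = std::numeric_limits<float>::lowest();
	constexpr float hi = std::numeric_limits<float>::max();
	static_assert(lo == -hi, "IEEE-754 floats are symmetric around zero");
	// min() is NOT a lower bound for floats; it is the smallest positive normal.
	assert(std::numeric_limits<float>::min() > 0.0f);
	return 0;
}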
- // interval + // Interval. interval_t min_interval; min_interval.months = 0; min_interval.days = 0; @@ -79,14 +91,15 @@ vector<TestType> TestAllTypesFun::GetTestTypes(bool use_large_enum, bool use_lar max_interval.micros = 999999999; result.emplace_back(LogicalType::INTERVAL, "interval", Value::INTERVAL(min_interval), Value::INTERVAL(max_interval)); - // strings/blobs/bitstrings + + // VARCHAR / BLOB / Bitstrings. result.emplace_back(LogicalType::VARCHAR, "varchar", Value("🦆🦆🦆🦆🦆🦆"), Value(string("goo\x00se", 6))); result.emplace_back(LogicalType::BLOB, "blob", Value::BLOB("thisisalongblob\\x00withnullbytes"), Value::BLOB("\\x00\\x00\\x00a")); result.emplace_back(LogicalType::BIT, "bit", Value::BIT("0010001001011100010101011010111"), Value::BIT("10101")); - // enums + // ENUMs. Vector small_enum(LogicalType::VARCHAR, 2); auto small_enum_ptr = FlatVector::GetData<string_t>(small_enum); small_enum_ptr[0] = StringVector::AddStringOrBlob(small_enum, "DUCK_DUCK_ENUM"); @@ -116,7 +129,7 @@ vector<TestType> TestAllTypesFun::GetTestTypes(bool use_large_enum, bool use_lar result.emplace_back(LogicalType::ENUM(large_enum, 2), "large_enum"); } - // arrays + // ARRAYs. auto int_list_type = LogicalType::LIST(LogicalType::INTEGER); auto empty_int_list = Value::LIST(LogicalType::INTEGER, vector<Value>()); auto int_list = diff --git a/src/function/table/table_scan.cpp index 99a9bcf7976d..1edd97e6444f 100644 --- a/src/function/table/table_scan.cpp +++ b/src/function/table/table_scan.cpp @@ -54,6 +54,7 @@ struct IndexScanLocalState : public LocalTableFunctionState { TableScanState scan_state; //! The column IDs of the local storage scan. vector<StorageIndex> column_ids; + bool in_charge_of_final_stretch {false}; }; static StorageIndex TransformStorageIndex(const ColumnIndex &column_id) { @@ -114,7 +115,7 @@ class DuckIndexScanState : public TableScanGlobalState { public: DuckIndexScanState(ClientContext &context, const FunctionData *bind_data_p) : TableScanGlobalState(context, bind_data_p), next_batch_index(0), arena(Allocator::Get(context)), - row_ids(nullptr), row_id_count(0), finished(false) { + row_ids(nullptr), row_id_count(0), finished_first_phase(false), started_last_phase(false) { } //! The batch index of the next Sink. @@ -129,7 +130,8 @@ class DuckIndexScanState : public TableScanGlobalState { //! The column IDs of the to-be-scanned columns. vector<StorageIndex> column_ids; //! True, if no more row IDs must be scanned. - bool finished; + bool finished_first_phase; + bool started_last_phase; //! Synchronize changes to the global index scan state. mutex index_scan_lock; @@ -163,44 +165,75 @@ auto &storage = duck_table.GetStorage(); auto &l_state = data_p.local_state->Cast<IndexScanLocalState>(); - idx_t scan_count = 0; - idx_t offset = 0; - - { - // Synchronize changes to the shared global state. - lock_guard<mutex> l(index_scan_lock); - if (!finished) { - l_state.batch_index = next_batch_index; - next_batch_index++; - - offset = l_state.batch_index * STANDARD_VECTOR_SIZE; - auto remaining = row_id_count - offset; - scan_count = remaining < STANDARD_VECTOR_SIZE ? remaining : STANDARD_VECTOR_SIZE; - finished = remaining < STANDARD_VECTOR_SIZE ? true : false; + enum class ExecutionPhase { NONE = 0, STORAGE = 1, LOCAL_STORAGE = 2 }; + + // We might need to loop back, hence the while (true) + while (true) { + idx_t scan_count = 0; + idx_t offset = 0; + + // Phase selection + auto phase_to_be_performed = ExecutionPhase::NONE; + { + // Synchronize changes to the shared global state.
+ lock_guard<mutex> l(index_scan_lock); + if (!finished_first_phase) { + l_state.batch_index = next_batch_index; + next_batch_index++; + + offset = l_state.batch_index * STANDARD_VECTOR_SIZE; + auto remaining = row_id_count - offset; + scan_count = remaining <= STANDARD_VECTOR_SIZE ? remaining : STANDARD_VECTOR_SIZE; + finished_first_phase = remaining <= STANDARD_VECTOR_SIZE ? true : false; + phase_to_be_performed = ExecutionPhase::STORAGE; + } else if (!started_last_phase) { + // First thread to reach the last phase: set l_state's in_charge_of_final_stretch, + // so that the same thread picks it up again + started_last_phase = true; + l_state.in_charge_of_final_stretch = true; + phase_to_be_performed = ExecutionPhase::LOCAL_STORAGE; + } else if (l_state.in_charge_of_final_stretch) { + phase_to_be_performed = ExecutionPhase::LOCAL_STORAGE; + } } - } - if (scan_count != 0) { - auto row_id_data = reinterpret_cast<data_ptr_t>(row_ids + offset); - Vector local_vector(LogicalType::ROW_TYPE, row_id_data); - - if (CanRemoveFilterColumns()) { - l_state.all_columns.Reset(); - storage.Fetch(tx, l_state.all_columns, column_ids, local_vector, scan_count, l_state.fetch_state); - output.ReferenceColumns(l_state.all_columns, projection_ids); - } else { - storage.Fetch(tx, output, column_ids, local_vector, scan_count, l_state.fetch_state); + switch (phase_to_be_performed) { + case ExecutionPhase::NONE: { + // No work to be picked up + return; + } + case ExecutionPhase::STORAGE: { + // Scan (in parallel) storage + auto row_id_data = reinterpret_cast<data_ptr_t>(row_ids + offset); + Vector local_vector(LogicalType::ROW_TYPE, row_id_data); + + if (CanRemoveFilterColumns()) { + l_state.all_columns.Reset(); + storage.Fetch(tx, l_state.all_columns, column_ids, local_vector, scan_count, l_state.fetch_state); + output.ReferenceColumns(l_state.all_columns, projection_ids); + } else { + storage.Fetch(tx, output, column_ids, local_vector, scan_count, l_state.fetch_state); + } + if (output.size() == 0) { + // The output is empty; loop back, since the LOCAL_STORAGE phase might still produce results + continue; + } + return; + } + case ExecutionPhase::LOCAL_STORAGE: { + // Scan (sequentially, always on the same logical thread) local_storage + auto &local_storage = LocalStorage::Get(tx); + { + if (CanRemoveFilterColumns()) { + l_state.all_columns.Reset(); + local_storage.Scan(l_state.scan_state.local_state, column_ids, l_state.all_columns); + output.ReferenceColumns(l_state.all_columns, projection_ids); + } else { + local_storage.Scan(l_state.scan_state.local_state, column_ids, output); + } + } + return; } - } - - if (output.size() == 0) { - auto &local_storage = LocalStorage::Get(tx); - if (CanRemoveFilterColumns()) { - l_state.all_columns.Reset(); - local_storage.Scan(l_state.scan_state.local_state, column_ids, l_state.all_columns); - output.ReferenceColumns(l_state.all_columns, projection_ids); - } else { - local_storage.Scan(l_state.scan_state.local_state, column_ids, output); } } }
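The rewritten Scan above splits index-scan execution into two phases: row-id batches are fetched from storage in parallel, after which exactly one thread is elected to drain the transaction-local storage sequentially. A simplified, self-contained model of the phase selection (the real code uses DuckDB's own state classes; these structs are illustrative):

#include <cstddef>
#include <mutex>

enum class ExecutionPhase { NONE, STORAGE, LOCAL_STORAGE };

struct GlobalState {
	std::mutex index_scan_lock;
	std::size_t next_batch_index = 0;
	bool finished_first_phase = false;
	bool started_last_phase = false;
};

struct LocalState {
	std::size_t batch_index = 0;
	bool in_charge_of_final_stretch = false;
};

static ExecutionPhase PickPhase(GlobalState &g, LocalState &l) {
	std::lock_guard<std::mutex> guard(g.index_scan_lock);
	if (!g.finished_first_phase) {
		l.batch_index = g.next_batch_index++; // parallel: claim the next row-id batch
		return ExecutionPhase::STORAGE;
	}
	if (!g.started_last_phase) {
		g.started_last_phase = true;          // elect exactly one thread...
		l.in_charge_of_final_stretch = true;  // ...to own the sequential tail
		return ExecutionPhase::LOCAL_STORAGE;
	}
	// Only the elected thread keeps returning to local storage; others are done.
	return l.in_charge_of_final_stretch ? ExecutionPhase::LOCAL_STORAGE : ExecutionPhase::NONE;
}

Keeping the final stretch on a single thread matters because the local-storage scan carries sequential state that cannot be shared between workers.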
@@ -350,7 +383,8 @@ unique_ptr<GlobalTableFunctionState> DuckTableScanInitGlobal(ClientContext &cont unique_ptr<GlobalTableFunctionState> DuckIndexScanInitGlobal(ClientContext &context, TableFunctionInitInput &input, const TableScanBindData &bind_data, set<row_t> &row_ids) { auto g_state = make_uniq<DuckIndexScanState>(context, input.bind_data.get()); - g_state->finished = row_ids.empty() ? true : false; + g_state->finished_first_phase = row_ids.empty() ? true : false; + g_state->started_last_phase = false; if (!row_ids.empty()) { auto row_id_ptr = g_state->arena.AllocateAligned(row_ids.size() * sizeof(row_t)); diff --git a/src/function/table_function.cpp index 310f75b5871f..a87a2d234c15 100644 --- a/src/function/table_function.cpp +++ b/src/function/table_function.cpp @@ -37,6 +37,30 @@ TableFunction::TableFunction(const vector<LogicalType> &arguments, table_functio TableFunction::TableFunction() : TableFunction("", {}, nullptr, nullptr, nullptr, nullptr) { } +bool TableFunction::operator==(const TableFunction &rhs) const { + return name == rhs.name && arguments == rhs.arguments && varargs == rhs.varargs && bind == rhs.bind && + bind_replace == rhs.bind_replace && bind_operator == rhs.bind_operator && init_global == rhs.init_global && + init_local == rhs.init_local && function == rhs.function && in_out_function == rhs.in_out_function && + in_out_function_final == rhs.in_out_function_final && statistics == rhs.statistics && + dependency == rhs.dependency && cardinality == rhs.cardinality && + pushdown_complex_filter == rhs.pushdown_complex_filter && pushdown_expression == rhs.pushdown_expression && + to_string == rhs.to_string && dynamic_to_string == rhs.dynamic_to_string && + table_scan_progress == rhs.table_scan_progress && get_partition_data == rhs.get_partition_data && + get_bind_info == rhs.get_bind_info && type_pushdown == rhs.type_pushdown && + get_multi_file_reader == rhs.get_multi_file_reader && supports_pushdown_type == rhs.supports_pushdown_type && + get_partition_info == rhs.get_partition_info && get_partition_stats == rhs.get_partition_stats && + get_virtual_columns == rhs.get_virtual_columns && get_row_id_columns == rhs.get_row_id_columns && + serialize == rhs.serialize && deserialize == rhs.deserialize && + verify_serialization == rhs.verify_serialization && projection_pushdown == rhs.projection_pushdown && + filter_pushdown == rhs.filter_pushdown && filter_prune == rhs.filter_prune && + sampling_pushdown == rhs.sampling_pushdown && late_materialization == rhs.late_materialization && + global_initialization == rhs.global_initialization; +} + +bool TableFunction::operator!=(const TableFunction &rhs) const { + return !(*this == rhs); +} + bool TableFunction::Equal(const TableFunction &rhs) const { // number of types if (this->arguments.size() != rhs.arguments.size()) { diff --git a/src/include/duckdb/common/encryption_key_manager.hpp index 55c3aed758cb..fa256eab6985 100644 --- a/src/include/duckdb/common/encryption_key_manager.hpp +++ b/src/include/duckdb/common/encryption_key_manager.hpp @@ -66,6 +66,8 @@ class EncryptionKeyManager : public ObjectCacheEntry { static void KeyDerivationFunctionSHA256(data_ptr_t user_key, idx_t user_key_size, data_ptr_t salt, data_ptr_t derived_key); static string Base64Decode(const string &key); + + //! Generate a (non-cryptographically secure) random key ID static string GenerateRandomKeyID(); public: @@ -74,6 +76,7 @@ class EncryptionKeyManager : public ObjectCacheEntry { static constexpr idx_t DERIVED_KEY_LENGTH = 32; private: + mutable mutex lock; std::unordered_map<string, EncryptionKey> derived_keys; }; diff --git a/src/include/duckdb/common/encryption_state.hpp index 32c0597a9c49..845a45a9d361 100644 --- a/src/include/duckdb/common/encryption_state.hpp +++ b/src/include/duckdb/common/encryption_state.hpp @@ -59,6 +59,11 @@ class EncryptionUtil { virtual ~EncryptionUtil() { } + + //!
Whether the EncryptionUtil supports encryption (some may only support decryption) + DUCKDB_API virtual bool SupportsEncryption() { + return true; + } }; } // namespace duckdb diff --git a/src/include/duckdb/common/enum_util.hpp index ad94d76597ab..0a7429684aab 100644 --- a/src/include/duckdb/common/enum_util.hpp +++ b/src/include/duckdb/common/enum_util.hpp @@ -86,6 +86,8 @@ enum class BlockIteratorStateType : int8_t; enum class BlockState : uint8_t; +enum class BufferedIndexReplay : uint8_t; + enum class CAPIResultSetType : uint8_t; enum class CSVState : uint8_t; @@ -534,6 +536,9 @@ const char* EnumUtil::ToChars<BlockIteratorStateType>(BlockIteratorStateType val template<> const char* EnumUtil::ToChars<BlockState>(BlockState value); +template<> +const char* EnumUtil::ToChars<BufferedIndexReplay>(BufferedIndexReplay value); + template<> const char* EnumUtil::ToChars<CAPIResultSetType>(CAPIResultSetType value); @@ -1165,6 +1170,9 @@ BlockIteratorStateType EnumUtil::FromString<BlockIteratorStateType>(const char * template<> BlockState EnumUtil::FromString<BlockState>(const char *value); +template<> +BufferedIndexReplay EnumUtil::FromString<BufferedIndexReplay>(const char *value); + template<> CAPIResultSetType EnumUtil::FromString<CAPIResultSetType>(const char *value); diff --git a/src/include/duckdb/common/enums/compression_type.hpp index 1dda5ee64117..5198f7627985 100644 --- a/src/include/duckdb/common/enums/compression_type.hpp +++ b/src/include/duckdb/common/enums/compression_type.hpp @@ -36,8 +36,48 @@ enum class CompressionType : uint8_t { COMPRESSION_COUNT // This has to stay the last entry of the type! }; -bool CompressionTypeIsDeprecated(CompressionType compression_type, - optional_ptr<StorageManager> storage_manager = nullptr); +struct CompressionAvailabilityResult { +private: + enum class UnavailableReason : uint8_t { + AVAILABLE, + //! Introduced later, not available to this version + NOT_AVAILABLE_YET, + //! Used to be available, but isn't anymore + DEPRECATED + }; + +public: + CompressionAvailabilityResult() = default; + static CompressionAvailabilityResult Deprecated() { + return CompressionAvailabilityResult(UnavailableReason::DEPRECATED); + } + static CompressionAvailabilityResult NotAvailableYet() { + return CompressionAvailabilityResult(UnavailableReason::NOT_AVAILABLE_YET); + } + +public: + bool IsAvailable() const { + return reason == UnavailableReason::AVAILABLE; + } + bool IsDeprecated() { + D_ASSERT(!IsAvailable()); + return reason == UnavailableReason::DEPRECATED; + } + bool IsNotAvailableYet() { + D_ASSERT(!IsAvailable()); + return reason == UnavailableReason::NOT_AVAILABLE_YET; + } + +private: + explicit CompressionAvailabilityResult(UnavailableReason reason) : reason(reason) { + } + +public: + UnavailableReason reason = UnavailableReason::AVAILABLE; +}; + +CompressionAvailabilityResult CompressionTypeIsAvailable(CompressionType compression_type, + optional_ptr<StorageManager> storage_manager = nullptr); vector<string> ListCompressionTypes(void); CompressionType CompressionTypeFromString(const string &str); string CompressionTypeToString(CompressionType type);
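CompressionAvailabilityResult replaces the old boolean "is deprecated" check with a three-state answer, so callers can distinguish a format that was removed from one that is too new for the current storage version. A hypothetical call site (not from this patch) using only the methods declared above:

// Sketch only: maps the three states onto user-facing errors.
void CheckCompressionWritable(CompressionType type, optional_ptr<StorageManager> storage_manager) {
	auto availability = CompressionTypeIsAvailable(type, storage_manager);
	if (availability.IsAvailable()) {
		return;
	}
	if (availability.IsDeprecated()) {
		throw InvalidInputException("compression type is deprecated and can no longer be written");
	}
	// The only remaining state: introduced by a newer storage version.
	throw InvalidInputException("compression type is not available for this storage version");
}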
diff --git a/src/include/duckdb/common/http_util.hpp index 51127179dd22..11fc26c48e37 100644 --- a/src/include/duckdb/common/http_util.hpp +++ b/src/include/duckdb/common/http_util.hpp @@ -11,6 +11,7 @@ #include "duckdb/common/types.hpp" #include "duckdb/common/case_insensitive_map.hpp" #include "duckdb/common/enums/http_status_code.hpp" +#include "duckdb/common/types/timestamp.hpp" #include namespace duckdb { @@ -143,6 +144,11 @@ struct BaseRequest { //! Whether or not to return failed requests (instead of throwing) bool try_request = false; + // Requests will optionally contain their timings + bool have_request_timing = false; + timestamp_t request_start; + timestamp_t request_end; + template <class TARGET> TARGET &Cast() { return reinterpret_cast<TARGET &>(*this); } @@ -210,6 +216,7 @@ struct PostRequestInfo : public BaseRequest { class HTTPClient { public: virtual ~HTTPClient() = default; + virtual void Initialize(HTTPParams &http_params) = 0; virtual unique_ptr<HTTPResponse> Get(GetRequestInfo &info) = 0; virtual unique_ptr<HTTPResponse> Put(PutRequestInfo &info) = 0; diff --git a/src/include/duckdb/common/limits.hpp index 0662579effb1..67a98daf06df 100644 --- a/src/include/duckdb/common/limits.hpp +++ b/src/include/duckdb/common/limits.hpp @@ -24,10 +24,12 @@ namespace duckdb { template <class T> struct NumericLimits { static constexpr T Minimum() { - return std::numeric_limits<T>::lowest(); + return std::numeric_limits<T>::has_infinity ? -std::numeric_limits<T>::infinity() + : std::numeric_limits<T>::lowest(); } static constexpr T Maximum() { - return std::numeric_limits<T>::max(); + return std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity() + : std::numeric_limits<T>::max(); } static constexpr bool IsSigned() { return std::is_signed<T>::value; diff --git a/src/include/duckdb/common/local_file_system.hpp index 8b3f7aaf21f8..354886e50e28 100644 --- a/src/include/duckdb/common/local_file_system.hpp +++ b/src/include/duckdb/common/local_file_system.hpp @@ -38,6 +38,8 @@ class LocalFileSystem : public FileSystem { int64_t GetFileSize(FileHandle &handle) override; //! Returns the file last modified time of a file handle, returns timespec with zero on all attributes on error timestamp_t GetLastModifiedTime(FileHandle &handle) override; + //! Returns a tag that uniquely identifies the version of the file + string GetVersionTag(FileHandle &handle) override; //! Returns the file type of a file handle FileType GetFileType(FileHandle &handle) override; //! Truncate a file to a maximum size of new_size, new_size should be smaller than or equal to the current size of diff --git a/src/include/duckdb/common/random_engine.hpp index 8a5a3097e4f7..ec14b42e555f 100644 --- a/src/include/duckdb/common/random_engine.hpp +++ b/src/include/duckdb/common/random_engine.hpp @@ -38,6 +38,8 @@ class RandomEngine { void SetSeed(uint64_t seed); + void RandomData(duckdb::data_ptr_t data, duckdb::idx_t len); + static RandomEngine &Get(ClientContext &context); mutex lock; diff --git a/src/include/duckdb/common/types/row/block_iterator.hpp index c29b094a86cf..1e0ccf305f65 100644 --- a/src/include/duckdb/common/types/row/block_iterator.hpp +++ b/src/include/duckdb/common/types/row/block_iterator.hpp @@ -23,64 +23,100 @@ enum class BlockIteratorStateType : int8_t { EXTERNAL, }; -BlockIteratorStateType GetBlockIteratorStateType(const bool &external); +static BlockIteratorStateType GetBlockIteratorStateType(const bool &external) { + return external ? BlockIteratorStateType::EXTERNAL : BlockIteratorStateType::IN_MEMORY; +} -//! State for iterating over blocks of an in-memory TupleDataCollection -//!
Multiple iterators can share the same state, everything is const -class InMemoryBlockIteratorState { -public: - explicit InMemoryBlockIteratorState(const TupleDataCollection &key_data); +template +class BlockIteratorStateBase { +protected: + friend BLOCK_ITERATOR_STATE; -public: - template - T &GetValueAtIndex(const idx_t &block_idx, const idx_t &tuple_idx) const { - D_ASSERT(GetIndex(block_idx, tuple_idx) < tuple_count); - return reinterpret_cast(block_ptrs[block_idx])[tuple_idx]; +private: + explicit BlockIteratorStateBase(const idx_t tuple_count_p) : tuple_count(tuple_count_p) { } - template - T &GetValueAtIndex(const idx_t &n) const { - const auto quotient = fast_mod.Div(n); - return GetValueAtIndex(quotient, fast_mod.Mod(n, quotient)); +public: + idx_t GetDivisor() const { + const auto &state = static_cast(*this); + return state.GetDivisor(); } - void RandomAccess(idx_t &block_idx, idx_t &tuple_idx, const idx_t &index) const { - block_idx = fast_mod.Div(index); - tuple_idx = fast_mod.Mod(index, block_idx); + void RandomAccess(idx_t &block_or_chunk_idx, idx_t &tuple_idx, const idx_t &index) const { + const auto &state = static_cast(*this); + state.RandomAccessInternal(block_or_chunk_idx, tuple_idx, index); } - void Add(idx_t &block_idx, idx_t &tuple_idx, const idx_t &value) const { + void Add(idx_t &block_or_chunk_idx, idx_t &tuple_idx, const idx_t &value) const { tuple_idx += value; - if (tuple_idx >= fast_mod.GetDivisor()) { - const auto div = fast_mod.Div(tuple_idx); - tuple_idx -= div * fast_mod.GetDivisor(); - block_idx += div; + if (tuple_idx >= GetDivisor()) { + RandomAccess(block_or_chunk_idx, tuple_idx, GetIndex(block_or_chunk_idx, tuple_idx)); } } - void Subtract(idx_t &block_idx, idx_t &tuple_idx, const idx_t &value) const { + void Subtract(idx_t &block_or_chunk_idx, idx_t &tuple_idx, const idx_t &value) const { tuple_idx -= value; - if (tuple_idx >= fast_mod.GetDivisor()) { - const auto div = fast_mod.Div(-tuple_idx); - tuple_idx += (div + 1) * fast_mod.GetDivisor(); - block_idx -= div + 1; + if (tuple_idx >= GetDivisor()) { + RandomAccess(block_or_chunk_idx, tuple_idx, GetIndex(block_or_chunk_idx, tuple_idx)); } } - void Increment(idx_t &block_idx, idx_t &tuple_idx) const { - const auto passed_boundary = ++tuple_idx == fast_mod.GetDivisor(); - block_idx += passed_boundary; - tuple_idx *= !passed_boundary; + void Increment(idx_t &block_or_chunk_idx, idx_t &tuple_idx) const { + const auto crossed_boundary = ++tuple_idx == GetDivisor(); + block_or_chunk_idx += crossed_boundary; + tuple_idx *= !crossed_boundary; } - void Decrement(idx_t &block_idx, idx_t &tuple_idx) const { + void Decrement(idx_t &block_or_chunk_idx, idx_t &tuple_idx) const { const auto crossed_boundary = tuple_idx-- == 0; - block_idx -= crossed_boundary; - tuple_idx += crossed_boundary * fast_mod.GetDivisor(); + block_or_chunk_idx -= crossed_boundary; + tuple_idx += crossed_boundary * GetDivisor(); + } + + idx_t GetIndex(const idx_t &block_or_chunk_idx, const idx_t &tuple_idx) const { + return block_or_chunk_idx * GetDivisor() + tuple_idx; + } + +protected: + const idx_t tuple_count; +}; + +template +class BlockIteratorState; + +//! State for iterating over blocks of an in-memory TupleDataCollection +//! 
Multiple iterators can share the same state, everything is const +template <> +class BlockIteratorState + : public BlockIteratorStateBase> { +public: + explicit BlockIteratorState(const TupleDataCollection &key_data) + : BlockIteratorStateBase(key_data.Count()), block_ptrs(ConvertBlockPointers(key_data.GetRowBlockPointers())), + fast_mod(key_data.TuplesPerBlock()) { + } + +public: + idx_t GetDivisor() const { + return fast_mod.GetDivisor(); + } + + void RandomAccessInternal(idx_t &block_idx, idx_t &tuple_idx, const idx_t &index) const { + block_idx = fast_mod.Div(index); + tuple_idx = fast_mod.Mod(index, block_idx); + } + + template + T &GetValueAtIndex(const idx_t &block_idx, const idx_t &tuple_idx) const { + D_ASSERT(GetIndex(block_idx, tuple_idx) < tuple_count); + return reinterpret_cast(block_ptrs[block_idx])[tuple_idx]; } - idx_t GetIndex(const idx_t &block_idx, const idx_t &tuple_idx) const { - return block_idx * fast_mod.GetDivisor() + tuple_idx; + template + T &GetValueAtIndex(const idx_t &index) const { + idx_t block_idx; + idx_t tuple_idx; + RandomAccess(block_idx, tuple_idx, index); + return GetValueAtIndex(block_idx, tuple_idx); } void SetKeepPinned(const bool &) { @@ -92,72 +128,63 @@ class InMemoryBlockIteratorState { } private: - static unsafe_vector ConvertBlockPointers(const vector &block_ptrs); + static unsafe_vector ConvertBlockPointers(const vector &block_ptrs) { + unsafe_vector converted_block_ptrs; + converted_block_ptrs.reserve(block_ptrs.size()); + for (const auto &block_ptr : block_ptrs) { + converted_block_ptrs.emplace_back(block_ptr); + } + return converted_block_ptrs; + } private: const unsafe_vector block_ptrs; const FastMod fast_mod; - const idx_t tuple_count; }; +using InMemoryBlockIteratorState = BlockIteratorState; + //! State for iterating over blocks of an external (larger-than-memory) TupleDataCollection //! 
This state cannot be shared by multiple iterators, it is stateful -class ExternalBlockIteratorState { +template <> +class BlockIteratorState + : public BlockIteratorStateBase> { public: - explicit ExternalBlockIteratorState(TupleDataCollection &key_data, optional_ptr payload_data); - -public: - template - T &GetValueAtIndex(const idx_t &chunk_idx, const idx_t &tuple_idx) { - if (chunk_idx != current_chunk_idx) { - InitializeChunk(chunk_idx); + explicit BlockIteratorState(TupleDataCollection &key_data_p, optional_ptr payload_data_p) + : BlockIteratorStateBase(key_data_p.Count()), current_chunk_idx(DConstants::INVALID_INDEX), + key_data(key_data_p), key_ptrs(FlatVector::GetData(key_scan_state.chunk_state.row_locations)), + payload_data(payload_data_p), keep_pinned(false), pin_payload(false) { + key_data.InitializeScan(key_scan_state); + if (payload_data) { + payload_data->InitializeScan(payload_scan_state); } - return *reinterpret_cast(key_ptrs)[tuple_idx]; } - template - T &GetValueAtIndex(const idx_t &n) { - D_ASSERT(n < tuple_count); - return GetValueAtIndex(n / STANDARD_VECTOR_SIZE, n % STANDARD_VECTOR_SIZE); +public: + static constexpr idx_t GetDivisor() { + return STANDARD_VECTOR_SIZE; } - static void RandomAccess(idx_t &chunk_idx, idx_t &tuple_idx, const idx_t &index) { + static void RandomAccessInternal(idx_t &chunk_idx, idx_t &tuple_idx, const idx_t &index) { chunk_idx = index / STANDARD_VECTOR_SIZE; tuple_idx = index % STANDARD_VECTOR_SIZE; } - static void Add(idx_t &chunk_idx, idx_t &tuple_idx, const idx_t &value) { - tuple_idx += value; - if (tuple_idx >= STANDARD_VECTOR_SIZE) { - const auto div = tuple_idx / STANDARD_VECTOR_SIZE; - tuple_idx -= div * STANDARD_VECTOR_SIZE; - chunk_idx += div; - } - } - - static void Subtract(idx_t &chunk_idx, idx_t &tuple_idx, const idx_t &value) { - tuple_idx -= value; - if (tuple_idx >= STANDARD_VECTOR_SIZE) { - const auto div = -tuple_idx / STANDARD_VECTOR_SIZE; - tuple_idx += (div + 1) * STANDARD_VECTOR_SIZE; - chunk_idx -= div + 1; + template + T &GetValueAtIndex(const idx_t &chunk_idx, const idx_t &tuple_idx) { + D_ASSERT(GetIndex(chunk_idx, tuple_idx) < tuple_count); + if (chunk_idx != current_chunk_idx) { + InitializeChunk(chunk_idx); } + return *reinterpret_cast(key_ptrs)[tuple_idx]; } - static void Increment(idx_t &chunk_idx, idx_t &tuple_idx) { - const auto passed_boundary = ++tuple_idx == STANDARD_VECTOR_SIZE; - chunk_idx += passed_boundary; - tuple_idx *= !passed_boundary; - } - - static void Decrement(idx_t &chunk_idx, idx_t &tuple_idx) { - const auto crossed_boundary = tuple_idx-- == 0; - chunk_idx -= crossed_boundary; - tuple_idx += crossed_boundary * static_cast(STANDARD_VECTOR_SIZE); - } - - static idx_t GetIndex(const idx_t &chunk_idx, const idx_t &tuple_idx) { - return chunk_idx * STANDARD_VECTOR_SIZE + tuple_idx; + template + T &GetValueAtIndex(const idx_t &index) { + idx_t chunk_idx; + idx_t tuple_idx; + RandomAccess(chunk_idx, tuple_idx, index); + return GetValueAtIndex(chunk_idx, tuple_idx); } void SetKeepPinned(const bool &enable) { @@ -201,7 +228,6 @@ class ExternalBlockIteratorState { } private: - const idx_t tuple_count; idx_t current_chunk_idx; TupleDataCollection &key_data; @@ -216,13 +242,7 @@ class ExternalBlockIteratorState { vector pins; }; -//! 
Utility so we can get the state using the type -template -using BlockIteratorState = typename std::conditional< - T == BlockIteratorStateType::IN_MEMORY, InMemoryBlockIteratorState, - typename std::conditional::type>::type; +using ExternalBlockIteratorState = BlockIteratorState<BlockIteratorStateType::EXTERNAL>; //! Iterator for data spread out over multiple blocks template @@ -305,16 +325,16 @@ class block_iterator_t { // NOLINT: match stl case return *this; } block_iterator_t operator+(const difference_type &n) const { - idx_t new_block_idx = block_or_chunk_idx; + idx_t new_block_or_chunk_idx = block_or_chunk_idx; idx_t new_tuple_idx = tuple_idx; - state->Add(new_block_idx, new_tuple_idx, n); - return block_iterator_t(*state, new_block_idx, new_tuple_idx); + state->Add(new_block_or_chunk_idx, new_tuple_idx, n); + return block_iterator_t(*state, new_block_or_chunk_idx, new_tuple_idx); } block_iterator_t operator-(const difference_type &n) const { - idx_t new_block_idx = block_or_chunk_idx; + idx_t new_block_or_chunk_idx = block_or_chunk_idx; idx_t new_tuple_idx = tuple_idx; - state->Subtract(new_block_idx, new_tuple_idx, n); - return block_iterator_t(*state, new_block_idx, new_tuple_idx); + state->Subtract(new_block_or_chunk_idx, new_tuple_idx, n); + return block_iterator_t(*state, new_block_or_chunk_idx, new_tuple_idx); } reference operator[](const difference_type &n) const { diff --git a/src/include/duckdb/execution/index/art/art_operator.hpp index 62903b198c2c..a71d9362a4da 100644 --- a/src/include/duckdb/execution/index/art/art_operator.hpp +++ b/src/include/duckdb/execution/index/art/art_operator.hpp @@ -62,6 +62,60 @@ class ARTOperator { return nullptr; } + //! LookupInLeaf returns true if the rowid is in the leaf: + //! 1) If the leaf is an inlined leaf, check if the rowid matches. + //! 2) If the leaf is a gate node, perform a search in the nested ART for the rowid. + static bool LookupInLeaf(ART &art, const Node &node, const ARTKey &rowid) { + reference<const Node> ref(node); + idx_t depth = 0; + + while (ref.get().HasMetadata()) { + const auto type = ref.get().GetType(); + switch (type) { + case NType::LEAF_INLINED: { + return ref.get().GetRowId() == rowid.GetRowId(); + } + case NType::LEAF: { + throw InternalException("Invalid node type (LEAF) for ARTOperator::LookupInLeaf."); + } + case NType::NODE_7_LEAF: + case NType::NODE_15_LEAF: + case NType::NODE_256_LEAF: { + D_ASSERT(depth + 1 == Prefix::ROW_ID_SIZE); + const auto byte = rowid[Prefix::ROW_ID_COUNT]; + return ref.get().HasByte(art, byte); + } + case NType::NODE_4: + case NType::NODE_16: + case NType::NODE_48: + case NType::NODE_256: { + D_ASSERT(depth < Prefix::ROW_ID_SIZE); + auto child = ref.get().GetChild(art, rowid[depth]); + if (child) { + // Continue in the child. + ref = *child; + depth++; + D_ASSERT(ref.get().HasMetadata()); + continue; + } + return false; + } + case NType::PREFIX: { + Prefix prefix(art, ref.get()); + for (idx_t i = 0; i < prefix.data[Prefix::Count(art)]; i++) { + if (prefix.data[i] != rowid[depth]) { + // The key and the prefix don't match. + return false; + } + depth++; + } + ref = *prefix.ptr; + } + } + } + return false; + }
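LookupInLeaf walks one node per loop iteration, consuming key bytes as it descends and comparing any stored prefix in bulk before following a child pointer. The same control flow on a toy byte-trie, fully self-contained (purely illustrative; none of this is DuckDB code):

#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>

struct ToyNode {
	std::string prefix; // bytes shared by everything below this node
	bool is_leaf = false;
	std::array<std::unique_ptr<ToyNode>, 256> children;
};

static bool Lookup(const ToyNode *node, const std::string &key) {
	std::size_t depth = 0;
	while (node) {
		for (char c : node->prefix) { // bulk prefix comparison, as in NType::PREFIX
			if (depth >= key.size() || key[depth] != c) {
				return false; // key and prefix diverge
			}
			depth++;
		}
		if (depth == key.size()) {
			return node->is_leaf; // consumed the whole key
		}
		node = node->children[static_cast<uint8_t>(key[depth++])].get();
	}
	return false; // fell off the trie: not present
}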
+ //! Insert a key and its row ID into the node. //! Starts at depth (in the key). //! status indicates if the insert happens inside a gate or not. @@ -202,6 +256,8 @@ class ARTOperator { if (parent.get().GetType() == NType::PREFIX) { // We might have to compress: // PREFIX (greatgrandparent) - Node4 (grandparent) - PREFIX - INLINED_LEAF. + // The parent does not have to be passed in, as it is a child of the Node4 that may get compressed. + // Then, when we delete that child, we also free it. Node::DeleteChild(art, grandparent, greatgrandparent, current_key.get()[grandparent_depth], status, row_id); return; diff --git a/src/include/duckdb/execution/index/art/prefix.hpp index 835e32c0fbc1..109497f2a2f5 100644 --- a/src/include/duckdb/execution/index/art/prefix.hpp +++ b/src/include/duckdb/execution/index/art/prefix.hpp @@ -48,7 +48,7 @@ class Prefix { //! Concatenates parent -> prev_node4 -> child. static void Concat(ART &art, Node &parent, Node &node4, const Node child, uint8_t byte, - const GateStatus node4_status); + const GateStatus node4_status, const GateStatus status); //! Removes up to pos bytes from the prefix. //! Shifts all subsequent bytes by pos. Frees empty nodes. @@ -72,7 +72,7 @@ class Prefix { static Prefix GetTail(ART &art, const Node &node); static void ConcatInternal(ART &art, Node &parent, Node &node4, const Node child, uint8_t byte, - const bool inside_gate); + const GateStatus status); static void ConcatNode4WasGate(ART &art, Node &node4, const Node child, uint8_t byte); static void ConcatChildIsGate(ART &art, Node &parent, Node &node4, const Node child, uint8_t byte); static void ConcatOutsideGate(ART &art, Node &parent, Node &node4, const Node child, uint8_t byte); diff --git a/src/include/duckdb/execution/index/bound_index.hpp index 914288bfa62e..d09e664e5442 100644 --- a/src/include/duckdb/execution/index/bound_index.hpp +++ b/src/include/duckdb/execution/index/bound_index.hpp @@ -8,6 +8,7 @@ #pragma once +#include "duckdb/execution/index/unbound_index.hpp" #include "duckdb/common/enums/index_constraint_type.hpp" #include "duckdb/common/types/constraint_conflict_info.hpp" #include "duckdb/common/types/data_chunk.hpp" @@ -60,6 +61,16 @@ class BoundIndex : public Index { //! The index constraint type IndexConstraintType index_constraint_type; + //! The vector of unbound expressions, which are later turned into bound expressions. + //! We need to store the unbound expressions, as we might not always have the context + //! available to bind directly. + //! The leaves of these unbound expressions are BoundColumnRefExpressions. + //! These BoundColumnRefExpressions contain a binding (ColumnBinding), + //! and that contains a table_index and a column_index. + //! The table_index is a dummy placeholder. + //! The column_index indexes the column_ids vector in the Index base class. + //! Those column_ids store the physical table indexes of the Index, + //! and we use them when binding the unbound expressions. vector<unique_ptr<Expression>> unbound_expressions; public: @@ -155,14 +166,22 @@ class BoundIndex : public Index { virtual string GetConstraintViolationMessage(VerifyExistenceType verify_type, idx_t failed_index, DataChunk &input) = 0; - void ApplyBufferedAppends(const vector<LogicalType> &table_types, ColumnDataCollection &buffered_appends, + //! Replay index insert and delete operations buffered during WAL replay. + //! table_types has the physical types of the table in the order they appear, not logical (no generated columns). + //! mapped_column_ids contains the sorted order of indexed physical column IDs (see unbound_index.hpp comments). + void ApplyBufferedReplays(const vector<LogicalType> &table_types, BufferedIndexReplays &buffered_replays, const vector<column_t> &mapped_column_ids); protected: //!
Lock used for any changes to the index mutex lock; - //! Bound expressions used during expression execution + //! The vector of bound expressions to generate the index keys from a data chunk. + //! The leaves of the bound expressions are BoundReferenceExpressions. + //! These BoundReferenceExpressions contain offsets into the DataChunk to retrieve the columns + //! for the expression. + //! With these offsets into the DataChunk, the expression executor can now evaluate the expression + //! on incoming data chunks to generate the keys. vector<unique_ptr<Expression>> bound_expressions; private: diff --git a/src/include/duckdb/execution/index/unbound_index.hpp index ec2fc3cfdf02..0ca4aa9d293b 100644 --- a/src/include/duckdb/execution/index/unbound_index.hpp +++ b/src/include/duckdb/execution/index/unbound_index.hpp @@ -16,15 +16,61 @@ namespace duckdb { class ColumnDataCollection; +enum class BufferedIndexReplay : uint8_t { INSERT_ENTRY = 0, DEL_ENTRY = 1 }; + +struct ReplayRange { + BufferedIndexReplay type; + // [start, end) - start is inclusive, end is exclusive for the range within the ColumnDataCollection + // buffer for operations to replay for this range. + idx_t start; + idx_t end; + explicit ReplayRange(const BufferedIndexReplay replay_type, const idx_t start_p, const idx_t end_p) + : type(replay_type), start(start_p), end(end_p) { + } +}; + +// All inserts and deletes to be replayed are stored in their respective buffers. +// Since the inserts and deletes may be interleaved, however, ranges stores the ordering of operations +// and their offsets in the respective buffer. +// Simple example: +// ranges[0] - INSERT_ENTRY, [0,6) +// ranges[1] - DEL_ENTRY, [0,3) +// ranges[2] - INSERT_ENTRY, [6,12) +// So even though buffered_inserts holds all the insert data from [0,12), ranges gives us the intervals for +// replaying the index operations in the right order. +struct BufferedIndexReplays { + vector<ReplayRange> ranges; + unique_ptr<ColumnDataCollection> buffered_inserts; + unique_ptr<ColumnDataCollection> buffered_deletes; + + BufferedIndexReplays() = default; + + unique_ptr<ColumnDataCollection> &GetBuffer(const BufferedIndexReplay replay_type) { + if (replay_type == BufferedIndexReplay::INSERT_ENTRY) { + return buffered_inserts; + } + return buffered_deletes; + } + + bool HasBufferedReplays() const { + return !ranges.empty(); + } +};
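The ranges vector is what preserves operation order across the two buffers. A sketch of how a buffering call could record an interleaved operation (illustrative; the coalescing of adjacent ranges of the same type is an assumption, not taken from this patch):

// Assumed helper: logs `count` rows appended to the buffer selected by `type`.
void RecordReplay(BufferedIndexReplays &replays, BufferedIndexReplay type, idx_t count) {
	auto &buffer = replays.GetBuffer(type);
	const idx_t start = buffer ? buffer->Count() : 0;
	// ... append `count` rows to `buffer` here ...
	if (!replays.ranges.empty() && replays.ranges.back().type == type) {
		replays.ranges.back().end += count; // extend the still-open interval
	} else {
		replays.ranges.emplace_back(type, start, start + count);
	}
}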
class UnboundIndex final : public Index { private: //! The CreateInfo of the index. unique_ptr<CreateInfo> create_info; //! The serialized storage information of the index. IndexStorageInfo storage_info; - //! Buffer for WAL replay appends. - unique_ptr<ColumnDataCollection> buffered_appends; - //! Maps the column IDs in the buffered appends to the table columns. + + //! Buffered index operations during WAL replay. They are replayed upon index binding. + BufferedIndexReplays buffered_replays; + + //! Maps the column IDs in the buffered replays to a physical table offset. + //! For example, column [i] in a buffered ColumnDataCollection is the data for an indexed column with + //! physical table index mapped_column_ids[i]. + //! This is in sorted order of physical column IDs. vector<column_t> mapped_column_ids; public: @@ -59,13 +105,19 @@ class UnboundIndex final : public Index { void CommitDrop() override; - void BufferChunk(DataChunk &chunk, Vector &row_ids, const vector<column_t> &mapped_column_ids_p); - bool HasBufferedAppends() const { - return buffered_appends != nullptr; + //! Buffer index delete or insert (replay_type) data chunks. + //! See the note above on mapped_column_ids; this function assumes that index_column_chunk maps into + //! mapped_column_ids_p to get the physical column index for each indexed column in the chunk. + void BufferChunk(DataChunk &index_column_chunk, Vector &row_ids, const vector<column_t> &mapped_column_ids_p, + BufferedIndexReplay replay_type); + bool HasBufferedReplays() const { + return buffered_replays.HasBufferedReplays(); } - ColumnDataCollection &GetBufferedAppends() const { - return *buffered_appends; + + BufferedIndexReplays &GetBufferedReplays() { + return buffered_replays; } + const vector<column_t> &GetMappedColumnIds() const { return mapped_column_ids; } diff --git a/src/include/duckdb/execution/join_hashtable.hpp index d2a55529a949..57b4243b3f9f 100644 --- a/src/include/duckdb/execution/join_hashtable.hpp +++ b/src/include/duckdb/execution/join_hashtable.hpp @@ -277,6 +277,8 @@ class JoinHashTable { uint64_t bitmask = DConstants::INVALID_INDEX; //! Whether or not we error on multiple rows found per match in a SINGLE join bool single_join_error_on_multiple_rows = true; + //! Number of probe matches + atomic<idx_t> total_probe_matches {0}; struct { mutex mj_lock; diff --git a/src/include/duckdb/execution/operator/csv_scanner/base_scanner.hpp index 2a123827cf17..5e36690eeddb 100644 --- a/src/include/duckdb/execution/operator/csv_scanner/base_scanner.hpp +++ b/src/include/duckdb/execution/operator/csv_scanner/base_scanner.hpp @@ -121,6 +121,8 @@ class BaseScanner { virtual ~BaseScanner() = default; + void Print() const; + //! Returns true if the scanner is finished bool FinishedFile() const; @@ -164,10 +166,15 @@ //! States CSVStates states; + //! If the scanner ever entered a quoted state bool ever_quoted = false; + //! If the scanner ever entered an escaped state. bool ever_escaped = false; + //! Whether the scanner ever took advantage of non-strict mode. + bool used_unstrictness = false; + //! Shared pointer to the buffer_manager, this is shared across multiple scanners shared_ptr<CSVBufferManager> buffer_manager; @@ -302,6 +309,9 @@ !state_machine->dialect_options.state_machine_options.strict_mode.GetValue())) { // We only set the ever escaped variable if this is either a quote char OR strict mode is off ever_escaped = true; + if (states.states[0] == CSVState::UNQUOTED_ESCAPE) { + used_unstrictness = true; + } } ever_quoted = true; T::SetQuoted(result, iterator.pos.buffer_pos); @@ -332,11 +342,15 @@ break; } case CSVState::ESCAPE: - case CSVState::UNQUOTED_ESCAPE: case CSVState::ESCAPED_RETURN: T::SetEscaped(result); iterator.pos.buffer_pos++; break; + case CSVState::UNQUOTED_ESCAPE: + T::SetEscaped(result); + iterator.pos.buffer_pos++; + used_unstrictness = true; + break; case CSVState::STANDARD: { iterator.pos.buffer_pos++; while (iterator.pos.buffer_pos + 8 < to_pos) { diff --git a/src/include/duckdb/execution/operator/csv_scanner/sniffer/csv_sniffer.hpp index 5b985e05cb71..8ab081ae3188 100644 --- a/src/include/duckdb/execution/operator/csv_scanner/sniffer/csv_sniffer.hpp +++ b/src/include/duckdb/execution/operator/csv_scanner/sniffer/csv_sniffer.hpp @@ -116,6 +116,7 @@ class CSVSniffer { //! Highest number of columns found idx_t max_columns_found = 0; idx_t max_columns_found_error = 0; + bool best_candidate_is_strict = false; //! Current candidates being considered vector> candidates; //! Reference to original CSV Options, it will be modified as a result of the sniffer.
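best_candidate_is_strict ties the sniffer to the scanner flags introduced in base_scanner.hpp above: a dialect candidate only counts as strict if its scanner never exercised a lenient code path. A self-contained model of that bookkeeping (names illustrative, not DuckDB's API):

struct ScannerFlags {
	bool ever_quoted = false;
	bool ever_escaped = false;
	bool used_unstrictness = false; // a lenient path fired, e.g. an unquoted escape
};

// A candidate that parsed the sample without leniency is preferred when
// breaking ties between otherwise equally plausible dialects.
static bool IsStrictCandidate(const ScannerFlags &flags) {
	return !flags.used_unstrictness;
}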
diff --git a/src/include/duckdb/execution/operator/csv_scanner/string_value_scanner.hpp index bacabfc4fca3..158c7ec12983 100644 --- a/src/include/duckdb/execution/operator/csv_scanner/string_value_scanner.hpp +++ b/src/include/duckdb/execution/operator/csv_scanner/string_value_scanner.hpp @@ -176,7 +176,7 @@ class StringValueResult : public ScannerResult { const shared_ptr &buffer_handle, Allocator &buffer_allocator, idx_t result_size_p, idx_t buffer_position, CSVErrorHandler &error_handler, CSVIterator &iterator, bool store_line_size, shared_ptr<CSVFileScan> csv_file_scan, idx_t &lines_read, bool sniffing, - string path, idx_t scan_id); + const string &path, idx_t scan_id, bool &used_unstrictness); ~StringValueResult(); @@ -225,6 +225,7 @@ class StringValueResult : public ScannerResult { shared_ptr<CSVFileScan> csv_file_scan; idx_t &lines_read; + bool &used_unstrictness; //! Information regarding projected columns unsafe_unique_array projected_columns; bool projecting_columns = false; diff --git a/src/include/duckdb/execution/physical_plan_generator.hpp index 5d9e0aa46fec..1a1080e7b85e 100644 --- a/src/include/duckdb/execution/physical_plan_generator.hpp +++ b/src/include/duckdb/execution/physical_plan_generator.hpp @@ -10,6 +10,7 @@ #include "duckdb/common/common.hpp" #include "duckdb/execution/physical_operator.hpp" +#include "duckdb/parser/group_by_node.hpp" #include "duckdb/planner/logical_operator.hpp" #include "duckdb/planner/logical_tokens.hpp" #include "duckdb/planner/joinside.hpp" @@ -152,7 +153,8 @@ class PhysicalPlanGenerator { PhysicalOperator &PlanComparisonJoin(LogicalComparisonJoin &op); PhysicalOperator &PlanDelimJoin(LogicalComparisonJoin &op); PhysicalOperator &ExtractAggregateExpressions(PhysicalOperator &child, vector<unique_ptr<Expression>> &expressions, - vector<unique_ptr<Expression>> &groups); + vector<unique_ptr<Expression>> &groups, + optional_ptr<vector<GroupingSet>> grouping_sets); private: ClientContext &context; diff --git a/src/include/duckdb/function/function_binder.hpp index 6eba740ab867..6b43a0777b58 100644 --- a/src/include/duckdb/function/function_binder.hpp +++ b/src/include/duckdb/function/function_binder.hpp @@ -70,7 +70,8 @@ class FunctionBinder { AggregateType aggr_type = AggregateType::NON_DISTINCT); DUCKDB_API static void BindSortedAggregate(ClientContext &context, BoundAggregateExpression &expr, - const vector<unique_ptr<Expression>> &groups); + const vector<unique_ptr<Expression>> &groups, + optional_ptr<vector<GroupingSet>> grouping_sets); DUCKDB_API static void BindSortedAggregate(ClientContext &context, BoundWindowExpression &expr); //!
Cast a set of expressions to the arguments of this function diff --git a/src/include/duckdb/function/table/read_file.hpp b/src/include/duckdb/function/table/read_file.hpp index 966fea5ef1b7..a0ef222fea17 100644 --- a/src/include/duckdb/function/table/read_file.hpp +++ b/src/include/duckdb/function/table/read_file.hpp @@ -31,53 +31,4 @@ struct ReadFileGlobalState : public GlobalTableFunctionState { bool requires_file_open = false; }; -struct ReadBlobOperation { - static constexpr const char *NAME = "read_blob"; - static constexpr const char *FILE_TYPE = "blob"; - - static inline LogicalType TYPE() { - return LogicalType::BLOB; - } -}; - -struct ReadTextOperation { - static constexpr const char *NAME = "read_text"; - static constexpr const char *FILE_TYPE = "text"; - - static inline LogicalType TYPE() { - return LogicalType::VARCHAR; - } -}; - -template -struct DirectMultiFileInfo : MultiFileReaderInterface { - static unique_ptr CreateInterface(ClientContext &context); - unique_ptr InitializeOptions(ClientContext &context, - optional_ptr info) override; - bool ParseCopyOption(ClientContext &context, const string &key, const vector &values, - BaseFileReaderOptions &options, vector &expected_names, - vector &expected_types) override; - bool ParseOption(ClientContext &context, const string &key, const Value &val, MultiFileOptions &file_options, - BaseFileReaderOptions &options) override; - unique_ptr InitializeBindData(MultiFileBindData &multi_file_data, - unique_ptr options) override; - void BindReader(ClientContext &context, vector &return_types, vector &names, - MultiFileBindData &bind_data) override; - optional_idx MaxThreads(const MultiFileBindData &bind_data_p, const MultiFileGlobalState &global_state, - FileExpandResult expand_result) override; - unique_ptr InitializeGlobalState(ClientContext &context, MultiFileBindData &bind_data, - MultiFileGlobalState &global_state) override; - unique_ptr InitializeLocalState(ExecutionContext &, GlobalTableFunctionState &) override; - shared_ptr CreateReader(ClientContext &context, GlobalTableFunctionState &gstate, - BaseUnionData &union_data, const MultiFileBindData &bind_data_p) override; - shared_ptr CreateReader(ClientContext &context, GlobalTableFunctionState &gstate, - const OpenFileInfo &file, idx_t file_idx, - const MultiFileBindData &bind_data) override; - shared_ptr CreateReader(ClientContext &context, const OpenFileInfo &file, - BaseFileReaderOptions &options, - const MultiFileOptions &file_options) override; - unique_ptr GetCardinality(const MultiFileBindData &bind_data, idx_t file_count) override; - FileGlobInput GetGlobInput() override; -}; - } // namespace duckdb diff --git a/src/include/duckdb/function/table_function.hpp b/src/include/duckdb/function/table_function.hpp index f6c9cc55e113..6da688d04bf6 100644 --- a/src/include/duckdb/function/table_function.hpp +++ b/src/include/duckdb/function/table_function.hpp @@ -432,6 +432,8 @@ class TableFunction : public SimpleNamedParameterFunction { // NOLINT: work-arou TableFunctionInitialization global_initialization = TableFunctionInitialization::INITIALIZE_ON_EXECUTE; DUCKDB_API bool Equal(const TableFunction &rhs) const; + DUCKDB_API bool operator==(const TableFunction &rhs) const; + DUCKDB_API bool operator!=(const TableFunction &rhs) const; }; } // namespace duckdb diff --git a/src/include/duckdb/logging/log_manager.hpp b/src/include/duckdb/logging/log_manager.hpp index 6ee88aeda547..54f623a55e44 100644 --- a/src/include/duckdb/logging/log_manager.hpp +++ 
b/src/include/duckdb/logging/log_manager.hpp @@ -21,7 +21,7 @@ class LogType; // - Creates Loggers with cached configuration // - Main sink for logs (either by logging directly into this, or by syncing a pre-cached set of log entries) // - Holds the log storage -class LogManager : public enable_shared_from_this { +class LogManager { friend class ThreadSafeLogger; friend class ThreadLocalLogger; friend class MutableLogger; diff --git a/src/include/duckdb/logging/log_type.hpp b/src/include/duckdb/logging/log_type.hpp index 23d901c4ea57..7ce97e5abcab 100644 --- a/src/include/duckdb/logging/log_type.hpp +++ b/src/include/duckdb/logging/log_type.hpp @@ -20,6 +20,7 @@ class PhysicalOperator; class AttachedDatabase; class RowGroup; struct DataTableInfo; +enum class MetricsType : uint8_t; //! Log types provide some structure to the formats that the different log messages can have //! For now, this holds a type that the VARCHAR value will be auto-cast into. @@ -106,6 +107,19 @@ class PhysicalOperatorLogType : public LogType { const vector> &info); }; +class MetricsLogType : public LogType { +public: + static constexpr const char *NAME = "Metrics"; + static constexpr LogLevel LEVEL = LogLevel::LOG_INFO; + + //! Construct the log type + MetricsLogType(); + + static LogicalType GetLogType(); + + static string ConstructLogMessage(const MetricsType &type, const Value &value); +}; + class CheckpointLogType : public LogType { public: static constexpr const char *NAME = "Checkpoint"; diff --git a/src/include/duckdb/main/database.hpp b/src/include/duckdb/main/database.hpp index 11936d5f74d7..7e43de6005c0 100644 --- a/src/include/duckdb/main/database.hpp +++ b/src/include/duckdb/main/database.hpp @@ -69,7 +69,7 @@ class DatabaseInstance : public enable_shared_from_this { DUCKDB_API SettingLookupResult TryGetCurrentSetting(const string &key, Value &result) const; - DUCKDB_API shared_ptr GetEncryptionUtil() const; + DUCKDB_API shared_ptr GetEncryptionUtil(); shared_ptr CreateAttachedDatabase(ClientContext &context, AttachInfo &info, AttachOptions &options); @@ -90,7 +90,7 @@ class DatabaseInstance : public enable_shared_from_this { unique_ptr extension_manager; ValidChecker db_validity; unique_ptr db_file_system; - shared_ptr log_manager; + unique_ptr log_manager; unique_ptr external_file_cache; duckdb_ext_api_v1 (*create_api_v1)(); diff --git a/src/include/duckdb/main/db_instance_cache.hpp b/src/include/duckdb/main/db_instance_cache.hpp index 8fa226b2d10e..a8a14c416930 100644 --- a/src/include/duckdb/main/db_instance_cache.hpp +++ b/src/include/duckdb/main/db_instance_cache.hpp @@ -26,6 +26,8 @@ struct DatabaseCacheEntry { mutex update_database_mutex; }; +enum class CacheBehavior { AUTOMATIC, ALWAYS_CACHE, NEVER_CACHE }; + class DBInstanceCache { public: DBInstanceCache(); @@ -41,6 +43,9 @@ class DBInstanceCache { //! 
Either returns an existing entry, or creates and caches a new DB Instance shared_ptr<DuckDB> GetOrCreateInstance(const string &database, DBConfig &config_dict, bool cache_instance, const std::function<void(DuckDB &)> &on_create = nullptr); + shared_ptr<DuckDB> GetOrCreateInstance(const string &database, DBConfig &config_dict, + CacheBehavior cache_behavior = CacheBehavior::AUTOMATIC, + const std::function<void(DuckDB &)> &on_create = nullptr); private: shared_ptr<DatabasePathManager> path_manager; diff --git a/src/include/duckdb/main/extension_entries.hpp b/src/include/duckdb/main/extension_entries.hpp index a32331c9b448..1d5a8510e771 100644 --- a/src/include/duckdb/main/extension_entries.hpp +++ b/src/include/duckdb/main/extension_entries.hpp @@ -227,6 +227,7 @@ static constexpr ExtensionFunctionEntry EXTENSION_FUNCTIONS[] = { {"iceberg_metadata", "iceberg", CatalogType::TABLE_FUNCTION_ENTRY}, {"iceberg_scan", "iceberg", CatalogType::TABLE_FUNCTION_ENTRY}, {"iceberg_snapshots", "iceberg", CatalogType::TABLE_FUNCTION_ENTRY}, + {"iceberg_table_properties", "iceberg", CatalogType::TABLE_FUNCTION_ENTRY}, {"iceberg_to_ducklake", "iceberg", CatalogType::TABLE_FUNCTION_ENTRY}, {"icu_calendar_names", "icu", CatalogType::TABLE_FUNCTION_ENTRY}, {"icu_collate_af", "icu", CatalogType::SCALAR_FUNCTION_ENTRY}, @@ -525,6 +526,7 @@ static constexpr ExtensionFunctionEntry EXTENSION_FUNCTIONS[] = { {"regr_sxx", "core_functions", CatalogType::AGGREGATE_FUNCTION_ENTRY}, {"regr_sxy", "core_functions", CatalogType::AGGREGATE_FUNCTION_ENTRY}, {"regr_syy", "core_functions", CatalogType::AGGREGATE_FUNCTION_ENTRY}, + {"remove_iceberg_table_properties", "iceberg", CatalogType::TABLE_FUNCTION_ENTRY}, {"repeat", "core_functions", CatalogType::SCALAR_FUNCTION_ENTRY}, {"replace", "core_functions", CatalogType::SCALAR_FUNCTION_ENTRY}, {"replace_type", "core_functions", CatalogType::SCALAR_FUNCTION_ENTRY}, @@ -540,6 +542,7 @@ static constexpr ExtensionFunctionEntry EXTENSION_FUNCTIONS[] = { {"rtrim", "core_functions", CatalogType::SCALAR_FUNCTION_ENTRY}, {"sem", "core_functions", CatalogType::AGGREGATE_FUNCTION_ENTRY}, {"set_bit", "core_functions", CatalogType::SCALAR_FUNCTION_ENTRY}, + {"set_iceberg_table_properties", "iceberg", CatalogType::TABLE_FUNCTION_ENTRY}, {"setseed", "core_functions", CatalogType::SCALAR_FUNCTION_ENTRY}, {"shapefile_meta", "spatial", CatalogType::TABLE_FUNCTION_ENTRY}, {"sign", "core_functions", CatalogType::SCALAR_FUNCTION_ENTRY}, @@ -599,6 +602,7 @@ static constexpr ExtensionFunctionEntry EXTENSION_FUNCTIONS[] = { {"st_envelope", "spatial", CatalogType::SCALAR_FUNCTION_ENTRY}, {"st_envelope_agg", "spatial", CatalogType::AGGREGATE_FUNCTION_ENTRY}, {"st_equals", "spatial", CatalogType::SCALAR_FUNCTION_ENTRY}, + {"st_expand", "spatial", CatalogType::SCALAR_FUNCTION_ENTRY}, {"st_extent", "spatial", CatalogType::SCALAR_FUNCTION_ENTRY}, {"st_extent_agg", "spatial", CatalogType::AGGREGATE_FUNCTION_ENTRY}, {"st_extent_approx", "spatial", CatalogType::SCALAR_FUNCTION_ENTRY}, @@ -1041,6 +1045,8 @@ static constexpr ExtensionEntry EXTENSION_SETTINGS[] = { {"http_retry_wait_ms", "httpfs"}, {"http_timeout", "httpfs"}, {"httpfs_client_implementation", "httpfs"}, + {"iceberg_via_aws_sdk_for_catalog_interactions", "iceberg"}, + {"merge_http_secret_into_s3_request", "httpfs"}, {"mysql_bit1_as_boolean", "mysql_scanner"}, {"mysql_debug_show_queries", "mysql_scanner"}, {"mysql_experimental_filter_pushdown", "mysql_scanner"}, diff --git a/src/include/duckdb/main/profiling_info.hpp b/src/include/duckdb/main/profiling_info.hpp index 904f0205dbe8..8016e4780067 100644 ---
a/src/include/duckdb/main/profiling_info.hpp +++ b/src/include/duckdb/main/profiling_info.hpp @@ -56,6 +56,7 @@ class ProfilingInfo { public: string GetMetricAsString(const MetricsType metric) const; + void WriteMetricsToLog(ClientContext &context); void WriteMetricsToJSON(duckdb_yyjson::yyjson_mut_doc *doc, duckdb_yyjson::yyjson_mut_val *destination); public: diff --git a/src/include/duckdb/main/query_profiler.hpp b/src/include/duckdb/main/query_profiler.hpp index 0f7b8812d812..5f25a5cba4cf 100644 --- a/src/include/duckdb/main/query_profiler.hpp +++ b/src/include/duckdb/main/query_profiler.hpp @@ -183,6 +183,7 @@ class QueryProfiler { static InsertionOrderPreservingMap<string> JSONSanitize(const InsertionOrderPreservingMap<string> &input); static string JSONSanitize(const string &text); static string DrawPadded(const string &str, idx_t width); + DUCKDB_API void ToLog() const; DUCKDB_API string ToJSON() const; DUCKDB_API void WriteToFile(const char *path, string &info) const; diff --git a/src/include/duckdb/main/relation.hpp b/src/include/duckdb/main/relation.hpp index bc383ffe0467..94450b0be021 100644 --- a/src/include/duckdb/main/relation.hpp +++ b/src/include/duckdb/main/relation.hpp @@ -162,19 +162,27 @@ class Relation : public enable_shared_from_this<Relation> { //! Insert the data from this relation into a table DUCKDB_API shared_ptr<Relation> InsertRel(const string &schema_name, const string &table_name); + DUCKDB_API shared_ptr<Relation> InsertRel(const string &catalog_name, const string &schema_name, + const string &table_name); DUCKDB_API void Insert(const string &table_name); DUCKDB_API void Insert(const string &schema_name, const string &table_name); + DUCKDB_API void Insert(const string &catalog_name, const string &schema_name, const string &table_name); //! Insert a row (i.e.,list of values) into a table - DUCKDB_API void Insert(const vector<vector<Value>> &values); - DUCKDB_API void Insert(vector<vector<unique_ptr<ParsedExpression>>> &&expressions); + DUCKDB_API virtual void Insert(const vector<vector<Value>> &values); + DUCKDB_API virtual void Insert(vector<vector<unique_ptr<ParsedExpression>>> &&expressions); //! Create a table and insert the data from this relation into that table DUCKDB_API shared_ptr<Relation> CreateRel(const string &schema_name, const string &table_name, bool temporary = false, OnCreateConflict on_conflict = OnCreateConflict::ERROR_ON_CONFLICT); + DUCKDB_API shared_ptr<Relation> CreateRel(const string &catalog_name, const string &schema_name, + const string &table_name, bool temporary = false, + OnCreateConflict on_conflict = OnCreateConflict::ERROR_ON_CONFLICT); DUCKDB_API void Create(const string &table_name, bool temporary = false, OnCreateConflict on_conflict = OnCreateConflict::ERROR_ON_CONFLICT); DUCKDB_API void Create(const string &schema_name, const string &table_name, bool temporary = false, OnCreateConflict on_conflict = OnCreateConflict::ERROR_ON_CONFLICT); + DUCKDB_API void Create(const string &catalog_name, const string &schema_name, const string &table_name, + bool temporary = false, OnCreateConflict on_conflict = OnCreateConflict::ERROR_ON_CONFLICT); //!
Write a relation to a CSV file DUCKDB_API shared_ptr<Relation> diff --git a/src/include/duckdb/main/relation/create_table_relation.hpp b/src/include/duckdb/main/relation/create_table_relation.hpp index 8df59b8d251e..cfc0e243aeac 100644 --- a/src/include/duckdb/main/relation/create_table_relation.hpp +++ b/src/include/duckdb/main/relation/create_table_relation.hpp @@ -16,8 +16,11 @@ class CreateTableRelation : public Relation { public: CreateTableRelation(shared_ptr<Relation> child, string schema_name, string table_name, bool temporary, OnCreateConflict on_conflict); + CreateTableRelation(shared_ptr<Relation> child, string catalog_name, string schema_name, string table_name, + bool temporary, OnCreateConflict on_conflict); shared_ptr<Relation> child; + string catalog_name; string schema_name; string table_name; vector<ColumnDefinition> columns; diff --git a/src/include/duckdb/main/relation/insert_relation.hpp b/src/include/duckdb/main/relation/insert_relation.hpp index fccb0ae929b3..41756488fcc2 100644 --- a/src/include/duckdb/main/relation/insert_relation.hpp +++ b/src/include/duckdb/main/relation/insert_relation.hpp @@ -15,8 +15,10 @@ namespace duckdb { class InsertRelation : public Relation { public: InsertRelation(shared_ptr<Relation> child, string schema_name, string table_name); + InsertRelation(shared_ptr<Relation> child, string catalog_name, string schema_name, string table_name); shared_ptr<Relation> child; + string catalog_name; string schema_name; string table_name; vector<ColumnDefinition> columns; diff --git a/src/include/duckdb/main/relation/table_relation.hpp b/src/include/duckdb/main/relation/table_relation.hpp index 9c2fddcecf0a..a3184dafa92a 100644 --- a/src/include/duckdb/main/relation/table_relation.hpp +++ b/src/include/duckdb/main/relation/table_relation.hpp @@ -29,6 +29,8 @@ class TableRelation : public Relation { unique_ptr<TableRef> GetTableRef() override; + void Insert(const vector<vector<Value>> &values) override; + void Insert(vector<vector<unique_ptr<ParsedExpression>>> &&expressions) override; void Update(const string &update, const string &condition = string()) override; void Update(vector<string> column_names, vector<unique_ptr<ParsedExpression>> &&update, unique_ptr<ParsedExpression> condition = nullptr) override; diff --git a/src/include/duckdb/main/settings.hpp b/src/include/duckdb/main/settings.hpp index 5f90ad3d0371..de5d1a11326b 100644 --- a/src/include/duckdb/main/settings.hpp +++ b/src/include/duckdb/main/settings.hpp @@ -337,6 +337,15 @@ struct DebugSkipCheckpointOnCommitSetting { static constexpr SetScope DefaultScope = SetScope::GLOBAL; }; +struct DebugVerifyBlocksSetting { + using RETURN_TYPE = bool; + static constexpr const char *Name = "debug_verify_blocks"; + static constexpr const char *Description = "DEBUG SETTING: verify block metadata during checkpointing"; + static constexpr const char *InputType = "BOOLEAN"; + static constexpr const char *DefaultValue = "false"; + static constexpr SetScope DefaultScope = SetScope::GLOBAL; +}; + struct DebugVerifyVectorSetting { using RETURN_TYPE = DebugVectorVerification; static constexpr const char *Name = "debug_verify_vector"; diff --git a/src/include/duckdb/optimizer/filter_combiner.hpp b/src/include/duckdb/optimizer/filter_combiner.hpp index 890b90970322..36cdbf59c78c 100644 --- a/src/include/duckdb/optimizer/filter_combiner.hpp +++ b/src/include/duckdb/optimizer/filter_combiner.hpp @@ -50,6 +50,7 @@ class FilterCombiner { //!
If this returns true - this sorts "in_list" as a side-effect static bool IsDenseRange(vector<Value> &in_list); static bool ContainsNull(vector<Value> &in_list); + static bool FindNextLegalUTF8(string &prefix_string); void GenerateFilters(const std::function<void(unique_ptr<TableFilter> filter)> &callback); bool HasFilters(); diff --git a/src/include/duckdb/optimizer/filter_pullup.hpp b/src/include/duckdb/optimizer/filter_pullup.hpp index a35fbaab9d4d..b6cb1e704fb2 100644 --- a/src/include/duckdb/optimizer/filter_pullup.hpp +++ b/src/include/duckdb/optimizer/filter_pullup.hpp @@ -30,7 +30,7 @@ class FilterPullup { // only pull up filters when there is a fork bool can_pullup = false; - // identifiy case the branch is a set operation (INTERSECT or EXCEPT) + // identify case the branch is a set operation (INTERSECT or EXCEPT) bool can_add_column = false; private: @@ -40,30 +40,26 @@ class FilterPullup { //! Pull up a LogicalFilter op unique_ptr<LogicalOperator> PullupFilter(unique_ptr<LogicalOperator> op); - //! Pull up filter in a LogicalProjection op unique_ptr<LogicalOperator> PullupProjection(unique_ptr<LogicalOperator> op); - //! Pull up filter in a LogicalCrossProduct op unique_ptr<LogicalOperator> PullupCrossProduct(unique_ptr<LogicalOperator> op); - + //! Pullup a filter in a LogicalJoin unique_ptr<LogicalOperator> PullupJoin(unique_ptr<LogicalOperator> op); - - // PPullup filter in a left join + //! Pullup filter in a left join unique_ptr<LogicalOperator> PullupFromLeft(unique_ptr<LogicalOperator> op); - - // Pullup filter in a inner join + //! Pullup filter in an inner join unique_ptr<LogicalOperator> PullupInnerJoin(unique_ptr<LogicalOperator> op); - - // Pullup filter in LogicalIntersect or LogicalExcept op + //! Pullup filter through a distinct + unique_ptr<LogicalOperator> PullupDistinct(unique_ptr<LogicalOperator> op); + //! Pullup filter in LogicalIntersect or LogicalExcept op unique_ptr<LogicalOperator> PullupSetOperation(unique_ptr<LogicalOperator> op); - + //! Pullup filter in both sides of a join unique_ptr<LogicalOperator> PullupBothSide(unique_ptr<LogicalOperator> op); - // Finish pull up at this operator + //! Finish pull up at this operator unique_ptr<LogicalOperator> FinishPullup(unique_ptr<LogicalOperator> op); - - // special treatment for SetOperations and projections + //! special treatment for SetOperations and projections void ProjectSetOperation(LogicalProjection &proj); }; // end FilterPullup diff --git a/src/include/duckdb/optimizer/join_order/relation_manager.hpp b/src/include/duckdb/optimizer/join_order/relation_manager.hpp index 3b8fda1c67f6..13f037a69bbd 100644 --- a/src/include/duckdb/optimizer/join_order/relation_manager.hpp +++ b/src/include/duckdb/optimizer/join_order/relation_manager.hpp @@ -56,7 +56,11 @@ class RelationManager { //! Extract the set of relations referred to inside an expression bool ExtractBindings(Expression &expression, unordered_set<idx_t> &bindings); void AddRelation(LogicalOperator &op, optional_ptr<LogicalOperator> parent, const RelationStats &stats); - //!
Add an unnest relation which can come from a logical unnest or a logical get which has an unnest function + void AddRelationWithChildren(JoinOrderOptimizer &optimizer, LogicalOperator &op, LogicalOperator &input_op, + optional_ptr<LogicalOperator> parent, RelationStats &child_stats, + optional_ptr<LogicalOperator> limit_op, + vector<reference<LogicalOperator>> &datasource_filters); void AddAggregateOrWindowRelation(LogicalOperator &op, optional_ptr<LogicalOperator> parent, const RelationStats &stats, LogicalOperatorType op_type); vector<unique_ptr<SingleJoinRelation>> GetRelations(); diff --git a/src/include/duckdb/optimizer/rule/ordered_aggregate_optimizer.hpp b/src/include/duckdb/optimizer/rule/ordered_aggregate_optimizer.hpp index 1b757e78cb15..6330b4144867 100644 --- a/src/include/duckdb/optimizer/rule/ordered_aggregate_optimizer.hpp +++ b/src/include/duckdb/optimizer/rule/ordered_aggregate_optimizer.hpp @@ -10,6 +10,7 @@ #include "duckdb/optimizer/rule.hpp" #include "duckdb/parser/expression_map.hpp" +#include "duckdb/parser/group_by_node.hpp" namespace duckdb { @@ -18,7 +19,8 @@ class OrderedAggregateOptimizer : public Rule { explicit OrderedAggregateOptimizer(ExpressionRewriter &rewriter); static unique_ptr<Expression> Apply(ClientContext &context, BoundAggregateExpression &aggr, - vector<unique_ptr<Expression>> &groups, bool &changes_made); + vector<unique_ptr<Expression>> &groups, + optional_ptr<vector<GroupingSet>> grouping_sets, bool &changes_made); unique_ptr<Expression> Apply(LogicalOperator &op, vector<reference<Expression>> &bindings, bool &changes_made, bool is_root) override; }; diff --git a/src/include/duckdb/parser/parsed_data/sample_options.hpp b/src/include/duckdb/parser/parsed_data/sample_options.hpp index dadbcfe922e2..766345f0e1d8 100644 --- a/src/include/duckdb/parser/parsed_data/sample_options.hpp +++ b/src/include/duckdb/parser/parsed_data/sample_options.hpp @@ -23,6 +23,9 @@ enum class SampleMethod : uint8_t { SYSTEM_SAMPLE = 0, BERNOULLI_SAMPLE = 1, RES string SampleMethodToString(SampleMethod method); class SampleOptions { +public: + // 1 billion rows should be enough. + static constexpr idx_t MAX_SAMPLE_ROWS = 1000000000; public: explicit SampleOptions(int64_t seed_ = -1); diff --git a/src/include/duckdb/planner/binder.hpp b/src/include/duckdb/planner/binder.hpp index e2493ec80583..0b778c7f7f19 100644 --- a/src/include/duckdb/planner/binder.hpp +++ b/src/include/duckdb/planner/binder.hpp @@ -407,7 +407,7 @@ class Binder : public enable_shared_from_this<Binder> { unique_ptr<QueryNode> BindTableMacro(FunctionExpression &function, TableMacroCatalogEntry &macro_func, idx_t depth); - unique_ptr<CTENode> BindMaterializedCTE(CommonTableExpressionMap &cte_map); + unique_ptr<CTENode> BindMaterializedCTE(CommonTableExpressionMap &cte_map, unique_ptr<CTENode> &cte_root); unique_ptr<BoundQueryNode> BindCTE(CTENode &statement); unique_ptr<BoundQueryNode> BindNode(SelectNode &node); diff --git a/src/include/duckdb/planner/bound_result_modifier.hpp b/src/include/duckdb/planner/bound_result_modifier.hpp index 853384e0b221..5f26dd8f7a4c 100644 --- a/src/include/duckdb/planner/bound_result_modifier.hpp +++ b/src/include/duckdb/planner/bound_result_modifier.hpp @@ -9,6 +9,7 @@ #pragma once #include "duckdb/common/limits.hpp" +#include "duckdb/parser/group_by_node.hpp" #include "duckdb/parser/result_modifier.hpp" #include "duckdb/planner/bound_statement.hpp" #include "duckdb/planner/expression.hpp" @@ -155,8 +156,9 @@ class BoundOrderModifier : public BoundResultModifier { //! Remove unneeded/duplicate order elements. //! Returns true of orders is not empty.
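A note on the ordered-aggregate hunks above: BoundOrderModifier::Simplify and OrderedAggregateOptimizer::Apply now receive the aggregate's grouping sets, because an ORDER BY inside an aggregate is only redundant for a column that is grouped in every grouping set. A minimal sketch of the observable behavior follows; it is not taken from the patch, and the table name and data are illustrative:

#include "duckdb.hpp"

using namespace duckdb;

int main() {
	DuckDB db(nullptr);
	Connection con(db);
	con.Query("CREATE TABLE t AS SELECT range % 3 AS g, range AS v FROM range(9)");
	// In the (g) grouping set, g is constant within each group, so an
	// ORDER BY g inside an aggregate is removable. In the empty grouping
	// set (), g varies across the aggregated rows, so the ORDER BY must
	// stay; Simplify() can only decide this by consulting the grouping sets.
	auto result = con.Query("SELECT g, list(v ORDER BY g) FROM t GROUP BY GROUPING SETS ((g), ())");
	result->Print();
	return 0;
}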
- static bool Simplify(vector<BoundOrderByNode> &orders, const vector<unique_ptr<Expression>> &groups); - bool Simplify(const vector<unique_ptr<Expression>> &groups); + static bool Simplify(vector<BoundOrderByNode> &orders, const vector<unique_ptr<Expression>> &groups, + optional_ptr<vector<GroupingSet>> grouping_sets); + bool Simplify(const vector<unique_ptr<Expression>> &groups, optional_ptr<vector<GroupingSet>> grouping_sets); }; enum class DistinctType : uint8_t { DISTINCT = 0, DISTINCT_ON = 1 }; diff --git a/src/include/duckdb/planner/expression_binder.hpp b/src/include/duckdb/planner/expression_binder.hpp index b2712f3bfe20..5b4f74e6fd4c 100644 --- a/src/include/duckdb/planner/expression_binder.hpp +++ b/src/include/duckdb/planner/expression_binder.hpp @@ -10,9 +10,8 @@ #include "duckdb/common/exception.hpp" #include "duckdb/common/stack_checker.hpp" -#include "duckdb/common/exception/binder_exception.hpp" #include "duckdb/common/error_data.hpp" -#include "duckdb/common/unordered_map.hpp" +#include "duckdb/common/exception/binder_exception.hpp" #include "duckdb/parser/expression/bound_expression.hpp" #include "duckdb/parser/expression/lambdaref_expression.hpp" #include "duckdb/parser/parsed_expression.hpp" diff --git a/src/include/duckdb/planner/subquery/flatten_dependent_join.hpp b/src/include/duckdb/planner/subquery/flatten_dependent_join.hpp index 14ad4510c9a2..5fa37d4ac020 100644 --- a/src/include/duckdb/planner/subquery/flatten_dependent_join.hpp +++ b/src/include/duckdb/planner/subquery/flatten_dependent_join.hpp @@ -33,7 +33,7 @@ struct FlattenDependentJoins { bool parent_is_dependent_join = false); //! Mark entire subtree of Logical Operators as correlated by adding them to the has_correlated_expressions map. - bool MarkSubtreeCorrelated(LogicalOperator &op); + bool MarkSubtreeCorrelated(LogicalOperator &op, idx_t cte_index); //! Push the dependent join down a LogicalOperator unique_ptr<LogicalOperator> PushDownDependentJoin(unique_ptr<LogicalOperator> plan, diff --git a/src/include/duckdb/planner/subquery/rewrite_cte_scan.hpp b/src/include/duckdb/planner/subquery/rewrite_cte_scan.hpp index 72886f80e59c..323f3b9b4d7c 100644 --- a/src/include/duckdb/planner/subquery/rewrite_cte_scan.hpp +++ b/src/include/duckdb/planner/subquery/rewrite_cte_scan.hpp @@ -17,13 +17,15 @@ namespace duckdb { //!
Helper class to rewrite correlated cte scans within a single LogicalOperator class RewriteCTEScan : public LogicalOperatorVisitor { public: - RewriteCTEScan(idx_t table_index, const CorrelatedColumns &correlated_columns); + RewriteCTEScan(idx_t table_index, const CorrelatedColumns &correlated_columns, + bool rewrite_dependent_joins = false); void VisitOperator(LogicalOperator &op) override; private: idx_t table_index; const CorrelatedColumns &correlated_columns; + bool rewrite_dependent_joins = false; } // namespace duckdb diff --git a/src/include/duckdb/storage/block.hpp b/src/include/duckdb/storage/block.hpp index 3aa18a7bcfe8..12fd7f818739 100644 --- a/src/include/duckdb/storage/block.hpp +++ b/src/include/duckdb/storage/block.hpp @@ -61,6 +61,15 @@ struct MetaBlockPointer { block_id_t GetBlockId() const; uint32_t GetBlockIndex() const; + bool operator==(const MetaBlockPointer &rhs) const { + return block_pointer == rhs.block_pointer && offset == rhs.offset; + } + + friend std::ostream &operator<<(std::ostream &os, const MetaBlockPointer &obj) { + return os << "{block_id: " << obj.GetBlockId() << " index: " << obj.GetBlockIndex() << " offset: " << obj.offset + << "}"; + } + void Serialize(Serializer &serializer) const; static MetaBlockPointer Deserialize(Deserializer &source); }; diff --git a/src/include/duckdb/storage/index.hpp b/src/include/duckdb/storage/index.hpp index 2b624c2c1126..492f37e29128 100644 --- a/src/include/duckdb/storage/index.hpp +++ b/src/include/duckdb/storage/index.hpp @@ -31,9 +31,15 @@ class Index { protected: Index(const vector<column_t> &column_ids, TableIOManager &table_io_manager, AttachedDatabase &db); - //! The logical column ids of the indexed table + //! The physical column ids of the indexed columns. + //! For example, given a table with the following columns: + //! (a INT, gen AS (2 * a), b INT, c VARCHAR), an index on columns (a,c) would have physical + //! column_ids [0,2] (since the virtual column is skipped in the physical representation). + //! Also see comments in bound_index.hpp to see how these column IDs are used in the context of + //! bound/unbound expressions. + //! Note that these are the columns for this Index, not all Indexes on the table. vector<column_t> column_ids; - //! Unordered set of column_ids used by the index + //! Unordered set of column_ids used by the Index unordered_set<column_t> column_id_set; public: diff --git a/src/include/duckdb/storage/metadata/metadata_manager.hpp b/src/include/duckdb/storage/metadata/metadata_manager.hpp index cd63a96b89ea..8a29973ecbad 100644 --- a/src/include/duckdb/storage/metadata/metadata_manager.hpp +++ b/src/include/duckdb/storage/metadata/metadata_manager.hpp @@ -77,6 +77,8 @@ class MetadataManager { //!
Flush all blocks to disk void Flush(); + bool BlockHasBeenCleared(const MetaBlockPointer &ptr); + void MarkBlocksAsModified(); void ClearModifiedBlocks(const vector<MetaBlockPointer> &pointers); diff --git a/src/include/duckdb/storage/metadata/metadata_reader.hpp b/src/include/duckdb/storage/metadata/metadata_reader.hpp index 51894886a590..ce8d01b41402 100644 --- a/src/include/duckdb/storage/metadata/metadata_reader.hpp +++ b/src/include/duckdb/storage/metadata/metadata_reader.hpp @@ -52,7 +52,7 @@ class MetadataReader : public ReadStream { MetadataManager &manager; BlockReaderType type; MetadataHandle block; - MetadataPointer next_pointer; + MetaBlockPointer next_pointer; bool has_next_block; optional_ptr<vector<MetaBlockPointer>> read_pointers; idx_t index; diff --git a/src/include/duckdb/storage/storage_options.hpp b/src/include/duckdb/storage/storage_options.hpp index 786924b2cf6a..4cf1f539ba8a 100644 --- a/src/include/duckdb/storage/storage_options.hpp +++ b/src/include/duckdb/storage/storage_options.hpp @@ -40,11 +40,4 @@ struct StorageOptions { void Initialize(const unordered_map<string, Value> &options); }; -inline void ClearUserKey(shared_ptr<string> const &encryption_key) { - if (encryption_key && !encryption_key->empty()) { - memset(&(*encryption_key)[0], 0, encryption_key->size()); - encryption_key->clear(); - } -} - } // namespace duckdb diff --git a/src/include/duckdb/storage/table/chunk_info.hpp b/src/include/duckdb/storage/table/chunk_info.hpp index 44b92dd7437e..fb1267278c0c 100644 --- a/src/include/duckdb/storage/table/chunk_info.hpp +++ b/src/include/duckdb/storage/table/chunk_info.hpp @@ -45,7 +45,7 @@ class ChunkInfo { virtual bool Fetch(TransactionData transaction, row_t row) = 0; virtual void CommitAppend(transaction_t commit_id, idx_t start, idx_t end) = 0; virtual idx_t GetCommittedDeletedCount(idx_t max_count) = 0; - virtual bool Cleanup(transaction_t lowest_transaction, unique_ptr<ChunkInfo> &result) const; + virtual bool Cleanup(transaction_t lowest_transaction) const; virtual bool HasDeletes() const = 0; @@ -87,7 +87,7 @@ class ChunkConstantInfo : public ChunkInfo { bool Fetch(TransactionData transaction, row_t row) override; void CommitAppend(transaction_t commit_id, idx_t start, idx_t end) override; idx_t GetCommittedDeletedCount(idx_t max_count) override; - bool Cleanup(transaction_t lowest_transaction, unique_ptr<ChunkInfo> &result) const override; + bool Cleanup(transaction_t lowest_transaction) const override; bool HasDeletes() const override; @@ -124,7 +124,7 @@ class ChunkVectorInfo : public ChunkInfo { SelectionVector &sel_vector, idx_t max_count) override; bool Fetch(TransactionData transaction, row_t row) override; void CommitAppend(transaction_t commit_id, idx_t start, idx_t end) override; - bool Cleanup(transaction_t lowest_transaction, unique_ptr<ChunkInfo> &result) const override; + bool Cleanup(transaction_t lowest_transaction) const override; idx_t GetCommittedDeletedCount(idx_t max_count) override; void Append(idx_t start, idx_t end, transaction_t commit_id); diff --git a/src/include/duckdb/storage/table/row_group.hpp b/src/include/duckdb/storage/table/row_group.hpp index 88d15d668b95..b5ea0b11bb9c 100644 --- a/src/include/duckdb/storage/table/row_group.hpp +++ b/src/include/duckdb/storage/table/row_group.hpp @@ -65,7 +65,8 @@ struct RowGroupWriteInfo { struct RowGroupWriteData { vector<unique_ptr<ColumnCheckpointState>> states; vector<BaseStatistics> statistics; - vector<MetaBlockPointer> existing_pointers; + bool reuse_existing_metadata_blocks = false; + vector<MetaBlockPointer> existing_extra_metadata_blocks; }; class RowGroup : public SegmentBase<RowGroup> { @@ -94,11 +95,10 @@ class RowGroup : public SegmentBase<RowGroup> { return
collection.get(); } //! Returns the list of meta block pointers used by the columns - vector<MetaBlockPointer> GetColumnPointers(); - //! Returns the list of meta block pointers used by the deletes - const vector<MetaBlockPointer> &GetDeletesPointers() const { - return deletes_pointers; - } + vector<MetaBlockPointer> GetOrComputeExtraMetadataBlocks(bool force_compute = false); + + const vector<MetaBlockPointer> &GetColumnStartPointers() const; + BlockManager &GetBlockManager(); DataTableInfo &GetTableInfo(); @@ -194,6 +194,8 @@ static FilterPropagateResult CheckRowIdFilter(const TableFilter &filter, idx_t beg_row, idx_t end_row); + vector<MetaBlockPointer> CheckpointDeletes(MetadataManager &manager); + private: optional_ptr<RowVersionManager> GetVersionInfo(); shared_ptr<RowVersionManager> GetOrCreateVersionInfoPtr(); @@ -210,8 +212,6 @@ template <TableScanType TYPE> void TemplatedScan(TransactionData transaction, CollectionScanState &state, DataChunk &result); - vector<MetaBlockPointer> CheckpointDeletes(MetadataManager &manager); - bool HasUnloadedDeletes() const; private: diff --git a/src/include/duckdb/storage/table/row_version_manager.hpp b/src/include/duckdb/storage/table/row_version_manager.hpp index bb0d0056b9f0..cc7464bde176 100644 --- a/src/include/duckdb/storage/table/row_version_manager.hpp +++ b/src/include/duckdb/storage/table/row_version_manager.hpp @@ -46,11 +46,14 @@ class RowVersionManager { static shared_ptr<RowVersionManager> Deserialize(MetaBlockPointer delete_pointer, MetadataManager &manager, idx_t start); + bool HasUnserializedChanges(); + vector<MetaBlockPointer> GetStoragePointers(); + private: mutex version_lock; idx_t start; vector<unique_ptr<ChunkInfo>> vector_info; - bool has_changes; + bool has_unserialized_changes; vector<MetaBlockPointer> storage_pointers; private: diff --git a/src/include/duckdb/storage/table/validity_column_data.hpp b/src/include/duckdb/storage/table/validity_column_data.hpp index 286a5343b14f..25a016466bdd 100644 --- a/src/include/duckdb/storage/table/validity_column_data.hpp +++ b/src/include/duckdb/storage/table/validity_column_data.hpp @@ -23,6 +23,8 @@ class ValidityColumnData : public ColumnData { public: FilterPropagateResult CheckZonemap(ColumnScanState &state, TableFilter &filter) override; void AppendData(BaseStatistics &stats, ColumnAppendState &state, UnifiedVectorFormat &vdata, idx_t count) override; + void UpdateWithBase(TransactionData transaction, DataTable &data_table, idx_t column_index, Vector &update_vector, + row_t *row_ids, idx_t update_count, ColumnData &base); }; } // namespace duckdb diff --git a/src/logging/log_manager.cpp b/src/logging/log_manager.cpp index 2785386abbb6..07b31f7af892 100644 --- a/src/logging/log_manager.cpp +++ b/src/logging/log_manager.cpp @@ -205,7 +205,7 @@ void LogManager::SetEnableStructuredLoggers(vector<string> &enabled_logger_types throw InvalidInputException("Unknown log type: '%s'", enabled_logger_type); } - new_config.enabled_log_types.insert(enabled_logger_type); + new_config.enabled_log_types.insert(lookup->name); min_log_level = MinValue(min_log_level, lookup->level); } @@ -266,6 +266,7 @@ void LogManager::RegisterDefaultLogTypes() { RegisterLogType(make_uniq()); RegisterLogType(make_uniq()); RegisterLogType(make_uniq()); + RegisterLogType(make_uniq<MetricsLogType>()); } } // namespace duckdb diff --git a/src/logging/log_storage.cpp b/src/logging/log_storage.cpp index c6733d9685ff..e2596e0032e8 100644 --- a/src/logging/log_storage.cpp +++ b/src/logging/log_storage.cpp @@ -22,26 +22,19 @@ namespace duckdb { vector<LogicalType> LogStorage::GetSchema(LoggingTargetTable table) { switch (table) { - case LoggingTargetTable::ALL_LOGS: - return { - LogicalType::UBIGINT, // context_id -
LogicalType::VARCHAR, // scope - LogicalType::UBIGINT, // connection_id - LogicalType::UBIGINT, // transaction_id - LogicalType::UBIGINT, // query_id - LogicalType::UBIGINT, // thread - LogicalType::TIMESTAMP, // timestamp - LogicalType::VARCHAR, // log_type - LogicalType::VARCHAR, // level - LogicalType::VARCHAR, // message - }; + case LoggingTargetTable::ALL_LOGS: { + auto all_logs = GetSchema(LoggingTargetTable::LOG_CONTEXTS); + auto log_entries = GetSchema(LoggingTargetTable::LOG_ENTRIES); + all_logs.insert(all_logs.end(), log_entries.begin() + 1, log_entries.end()); + return all_logs; + } case LoggingTargetTable::LOG_ENTRIES: return { - LogicalType::UBIGINT, // context_id - LogicalType::TIMESTAMP, // timestamp - LogicalType::VARCHAR, // log_type - LogicalType::VARCHAR, // level - LogicalType::VARCHAR, // message + LogicalType::UBIGINT, // context_id + LogicalType::TIMESTAMP_TZ, // timestamp + LogicalType::VARCHAR, // log_type + LogicalType::VARCHAR, // level + LogicalType::VARCHAR, // message }; case LoggingTargetTable::LOG_CONTEXTS: return { @@ -59,11 +52,12 @@ vector<string> LogStorage::GetColumnNames(LoggingTargetTable table) { switch (table) { - case LoggingTargetTable::ALL_LOGS: - return { - "context_id", "scope", "connection_id", "transaction_id", "query_id", - "thread_id", "timestamp", "type", "log_level", "message", - }; + case LoggingTargetTable::ALL_LOGS: { + auto all_logs = GetColumnNames(LoggingTargetTable::LOG_CONTEXTS); + auto log_entries = GetColumnNames(LoggingTargetTable::LOG_ENTRIES); + all_logs.insert(all_logs.end(), log_entries.begin() + 1, log_entries.end()); + return all_logs; + } case LoggingTargetTable::LOG_ENTRIES: return {"context_id", "timestamp", "type", "log_level", "message"}; case LoggingTargetTable::LOG_CONTEXTS: diff --git a/src/logging/log_types.cpp b/src/logging/log_types.cpp index f78abae591b4..93b0bc78de5b 100644 --- a/src/logging/log_types.cpp +++ b/src/logging/log_types.cpp @@ -14,6 +14,7 @@ constexpr LogLevel FileSystemLogType::LEVEL; constexpr LogLevel QueryLogType::LEVEL; constexpr LogLevel HTTPLogType::LEVEL; constexpr LogLevel PhysicalOperatorLogType::LEVEL; +constexpr LogLevel MetricsLogType::LEVEL; constexpr LogLevel CheckpointLogType::LEVEL; //===--------------------------------------------------------------------===// @@ -58,6 +59,8 @@ LogicalType HTTPLogType::GetLogType() { child_list_t<LogicalType> request_child_list = { {"type", LogicalType::VARCHAR}, {"url", LogicalType::VARCHAR}, + {"start_time", LogicalType::TIMESTAMP_TZ}, + {"duration_ms", LogicalType::BIGINT}, {"headers", LogicalType::MAP(LogicalType::VARCHAR, LogicalType::VARCHAR)}, }; auto request_type = LogicalType::STRUCT(request_child_list); @@ -90,7 +93,10 @@ string HTTPLogType::ConstructLogMessage(BaseRequest &request, optional_ptr +LogicalType MetricsLogType::GetLogType() { + child_list_t<LogicalType> child_list = { + {"metric", LogicalType::VARCHAR}, + {"value", LogicalType::VARCHAR}, + }; + return LogicalType::STRUCT(child_list); +} + +string MetricsLogType::ConstructLogMessage(const MetricsType &metric, const Value &value) { + child_list_t<Value> child_list = { + {"metric", EnumUtil::ToString(metric)}, + {"value", value.ToString()}, + }; + return Value::STRUCT(std::move(child_list)).ToString(); +} + //===--------------------------------------------------------------------===// // CheckpointLogType //===--------------------------------------------------------------------===// diff --git a/src/main/attached_database.cpp b/src/main/attached_database.cpp index 0e4e8529c64b..b7f345f2ed4d 100644 ---
a/src/main/attached_database.cpp +++ b/src/main/attached_database.cpp @@ -272,10 +272,6 @@ void AttachedDatabase::Close() { catalog.reset(); storage.reset(); stored_database_path.reset(); - - if (Allocator::SupportsFlush()) { - Allocator::FlushAll(); - } } } // namespace duckdb diff --git a/src/main/capi/duckdb-c.cpp b/src/main/capi/duckdb-c.cpp index 344fa265df6f..3cfedbf61650 100644 --- a/src/main/capi/duckdb-c.cpp +++ b/src/main/capi/duckdb-c.cpp @@ -41,7 +41,7 @@ duckdb_state duckdb_open_internal(DBInstanceCacheWrapper *cache, const char *pat if (path) { path_str = path; } - wrapper->database = cache->instance_cache->GetOrCreateInstance(path_str, *db_config, true); + wrapper->database = cache->instance_cache->GetOrCreateInstance(path_str, *db_config); } else { wrapper->database = duckdb::make_shared_ptr<duckdb::DuckDB>(path, db_config); } diff --git a/src/main/config.cpp b/src/main/config.cpp index 57539ab2e9dc..a72d1329b36a 100644 --- a/src/main/config.cpp +++ b/src/main/config.cpp @@ -86,6 +86,7 @@ static const ConfigurationOption internal_options[] = { DUCKDB_LOCAL(DebugForceExternalSetting), DUCKDB_SETTING(DebugForceNoCrossProductSetting), DUCKDB_SETTING(DebugSkipCheckpointOnCommitSetting), + DUCKDB_SETTING(DebugVerifyBlocksSetting), DUCKDB_SETTING_CALLBACK(DebugVerifyVectorSetting), DUCKDB_SETTING_CALLBACK(DebugWindowModeSetting), DUCKDB_GLOBAL(DefaultBlockSizeSetting), @@ -177,12 +178,12 @@ static const ConfigurationOption internal_options[] = { DUCKDB_GLOBAL(ZstdMinStringLengthSetting), FINAL_SETTING}; -static const ConfigurationAlias setting_aliases[] = {DUCKDB_SETTING_ALIAS("memory_limit", 83), - DUCKDB_SETTING_ALIAS("null_order", 33), - DUCKDB_SETTING_ALIAS("profiling_output", 102), - DUCKDB_SETTING_ALIAS("user", 116), +static const ConfigurationAlias setting_aliases[] = {DUCKDB_SETTING_ALIAS("memory_limit", 84), + DUCKDB_SETTING_ALIAS("null_order", 34), + DUCKDB_SETTING_ALIAS("profiling_output", 103), + DUCKDB_SETTING_ALIAS("user", 117), DUCKDB_SETTING_ALIAS("wal_autocheckpoint", 20), - DUCKDB_SETTING_ALIAS("worker_threads", 115), + DUCKDB_SETTING_ALIAS("worker_threads", 116), FINAL_ALIAS}; vector<ConfigurationOption> DBConfig::GetOptions() { diff --git a/src/main/connection.cpp b/src/main/connection.cpp index af76cfd1739e..ccb2775bc56e 100644 --- a/src/main/connection.cpp +++ b/src/main/connection.cpp @@ -23,11 +23,6 @@ Connection::Connection(DatabaseInstance &database) auto &connection_manager = ConnectionManager::Get(database); connection_manager.AddConnection(*context); connection_manager.AssignConnectionId(*this); - -#ifdef DEBUG - EnableProfiling(); - context->config.emit_profiler_output = false; -#endif } Connection::Connection(DuckDB &database) : Connection(*database.instance) { diff --git a/src/main/database.cpp b/src/main/database.cpp index 419d3304ba3b..8d23656aab8f 100644 --- a/src/main/database.cpp +++ b/src/main/database.cpp @@ -285,7 +285,7 @@ void DatabaseInstance::Initialize(const char *database_path, DBConfig *user_conf buffer_manager = make_uniq<StandardBufferManager>(*this, config.options.temporary_directory); } - log_manager = make_shared_ptr<LogManager>(*this, LogConfig()); + log_manager = make_uniq<LogManager>(*this, LogConfig()); log_manager->Initialize(); external_file_cache = make_uniq<ExternalFileCache>(*this, config.options.enable_external_file_cache); @@ -507,12 +507,18 @@ SettingLookupResult DatabaseInstance::TryGetCurrentSetting(const string &key, Va return db_config.TryGetCurrentSetting(key, result); } -shared_ptr<EncryptionUtil> DatabaseInstance::GetEncryptionUtil() const { +shared_ptr<EncryptionUtil> DatabaseInstance::GetEncryptionUtil() { + if (!config.encryption_util ||
!config.encryption_util->SupportsEncryption()) { + ExtensionHelper::TryAutoLoadExtension(*this, "httpfs"); + } + if (config.encryption_util) { return config.encryption_util; } - return make_shared_ptr(); + auto result = make_shared_ptr(); + + return std::move(result); } ValidChecker &DatabaseInstance::GetValidChecker() { diff --git a/src/main/database_manager.cpp b/src/main/database_manager.cpp index 848f080fc6dc..d9366396ae32 100644 --- a/src/main/database_manager.cpp +++ b/src/main/database_manager.cpp @@ -83,7 +83,16 @@ shared_ptr<AttachedDatabase> DatabaseManager::GetDatabaseInternal(const lock_gua shared_ptr<AttachedDatabase> DatabaseManager::AttachDatabase(ClientContext &context, AttachInfo &info, AttachOptions &options) { - auto &config = DBConfig::GetConfig(context); + string extension = ""; + if (FileSystem::IsRemoteFile(info.path, extension)) { + if (options.access_mode == AccessMode::AUTOMATIC) { + // Attaching of remote files gets bumped to READ_ONLY + // This is due to the fact that on most (all?) remote files writes to DB are not available + // and having this raised later is not super helpful + options.access_mode = AccessMode::READ_ONLY; + } + } + if (options.db_type.empty() || StringUtil::CIEquals(options.db_type, "duckdb")) { while (InsertDatabasePath(info, options) == InsertDatabasePathResult::ALREADY_EXISTS) { // database with this name and path already exists @@ -99,6 +108,7 @@ shared_ptr<AttachedDatabase> DatabaseManager::AttachDatabase(ClientContext &cont } } } + auto &config = DBConfig::GetConfig(context); GetDatabaseType(context, info, config, options); if (!options.db_type.empty()) { // we only need to prevent duplicate opening of DuckDB files @@ -108,18 +118,11 @@ shared_ptr<AttachedDatabase> DatabaseManager::AttachDatabase(ClientContext &cont if (AttachedDatabase::NameIsReserved(info.name)) { throw BinderException("Attached database name \"%s\" cannot be used because it is a reserved name", info.name); } - string extension = ""; - if (FileSystem::IsRemoteFile(info.path, extension)) { + if (!extension.empty()) { if (!ExtensionHelper::TryAutoLoadExtension(context, extension)) { throw MissingExtensionException("Attaching path '%s' requires extension '%s' to be loaded", info.path, extension); } - if (options.access_mode == AccessMode::AUTOMATIC) { - // Attaching of remote files gets bumped to READ_ONLY - // This is due to the fact that on most (all?) remote files writes to DB are not available - // and having this raised later is not super helpful - options.access_mode = AccessMode::READ_ONLY; - } } // now create the attached database diff --git a/src/main/db_instance_cache.cpp b/src/main/db_instance_cache.cpp index 57f4ee457fae..1960c5ee3b68 100644 --- a/src/main/db_instance_cache.cpp +++ b/src/main/db_instance_cache.cpp @@ -137,9 +137,23 @@ shared_ptr<DuckDB> DBInstanceCache::CreateInstance(const string &database, DBCon shared_ptr<DuckDB> DBInstanceCache::GetOrCreateInstance(const string &database, DBConfig &config_dict, bool cache_instance, const std::function<void(DuckDB &)> &on_create) { + auto cache_behavior = cache_instance ?
CacheBehavior::ALWAYS_CACHE : CacheBehavior::NEVER_CACHE; + return GetOrCreateInstance(database, config_dict, cache_behavior, on_create); +} + +shared_ptr<DuckDB> DBInstanceCache::GetOrCreateInstance(const string &database, DBConfig &config_dict, + CacheBehavior cache_behavior, + const std::function<void(DuckDB &)> &on_create) { unique_lock<mutex> lock(cache_lock, std::defer_lock); + bool cache_instance = cache_behavior == CacheBehavior::ALWAYS_CACHE; + if (cache_behavior == CacheBehavior::AUTOMATIC) { + // cache everything except unnamed in-memory databases + cache_instance = true; + if (database == IN_MEMORY_PATH || database.empty()) { + cache_instance = false; + } + } if (cache_instance) { - // While we do not own the lock, we cannot definitively say that the database instance does not exist. while (!lock.owns_lock()) { // The problem is, that we have to unlock the mutex in GetInstanceInternal, so we can non-blockingly wait diff --git a/src/main/extension/extension_alias.cpp b/src/main/extension/extension_alias.cpp index 81d3c1e1b7ae..4a1ae7146e22 100644 --- a/src/main/extension/extension_alias.cpp +++ b/src/main/extension/extension_alias.cpp @@ -10,6 +10,7 @@ static const ExtensionAlias internal_aliases[] = {{"http", "httpfs"}, // httpfs {"postgres", "postgres_scanner"}, // postgres {"sqlite", "sqlite_scanner"}, // sqlite {"sqlite3", "sqlite_scanner"}, + {"uc_catalog", "unity_catalog"}, // old name for compatibility {nullptr, nullptr}}; idx_t ExtensionHelper::ExtensionAliasCount() { diff --git a/src/main/http/http_util.cpp b/src/main/http/http_util.cpp index 554346489d59..fb5a9491fcb4 100644 --- a/src/main/http/http_util.cpp +++ b/src/main/http/http_util.cpp @@ -130,9 +130,12 @@ BaseRequest::BaseRequest(RequestType type, const string &url, const HTTPHeaders class HTTPLibClient : public HTTPClient { public: HTTPLibClient(HTTPParams &http_params, const string &proto_host_port) { + client = make_uniq<duckdb_httplib::Client>(proto_host_port); + Initialize(http_params); + } + void Initialize(HTTPParams &http_params) override { auto sec = static_cast<time_t>(http_params.timeout); auto usec = static_cast<time_t>(http_params.timeout_usec); - client = make_uniq<duckdb_httplib::Client>(proto_host_port); client->set_follow_location(http_params.follow_location); client->set_keep_alive(http_params.keep_alive); client->set_write_timeout(sec, usec); @@ -228,12 +231,27 @@ unique_ptr<HTTPResponse> HTTPUtil::SendRequest(BaseRequest &request, unique_ptr< std::function<unique_ptr<HTTPResponse>(void)> on_request([&]() { unique_ptr<HTTPResponse> response; + + // When logging is enabled, we collect request timings + if (request.params.logger) { + request.have_request_timing = request.params.logger->ShouldLog(HTTPLogType::NAME, HTTPLogType::LEVEL); + } + try { + if (request.have_request_timing) { + request.request_start = Timestamp::GetCurrentTimestamp(); + } response = client->Request(request); } catch (...) { + if (request.have_request_timing) { + request.request_end = Timestamp::GetCurrentTimestamp(); + } LogRequest(request, nullptr); throw; } + if (request.have_request_timing) { + request.request_end = Timestamp::GetCurrentTimestamp(); + } LogRequest(request, response ?
response.get() : nullptr); return response; }); diff --git a/src/main/profiling_info.cpp b/src/main/profiling_info.cpp index 8f744d51b457..ad76de81d147 100644 --- a/src/main/profiling_info.cpp +++ b/src/main/profiling_info.cpp @@ -2,6 +2,7 @@ #include "duckdb/common/enum_util.hpp" #include "duckdb/main/query_profiler.hpp" +#include "duckdb/logging/log_manager.hpp" #include "yyjson.hpp" @@ -169,6 +170,16 @@ string ProfilingInfo::GetMetricAsString(const MetricsType metric) const { return metrics.at(metric).ToString(); } +void ProfilingInfo::WriteMetricsToLog(ClientContext &context) { + auto &logger = Logger::Get(context); + if (logger.ShouldLog(MetricsLogType::NAME, MetricsLogType::LEVEL)) { + for (auto &metric : settings) { + logger.WriteLog(MetricsLogType::NAME, MetricsLogType::LEVEL, + MetricsLogType::ConstructLogMessage(metric, metrics[metric])); + } + } +} + void ProfilingInfo::WriteMetricsToJSON(yyjson_mut_doc *doc, yyjson_mut_val *dest) { for (auto &metric : settings) { auto metric_str = StringUtil::Lower(EnumUtil::ToString(metric)); diff --git a/src/main/query_profiler.cpp b/src/main/query_profiler.cpp index 4c9c9328a3b1..bbbc62a7f284 100644 --- a/src/main/query_profiler.cpp +++ b/src/main/query_profiler.cpp @@ -297,6 +297,9 @@ void QueryProfiler::EndQuery() { guard.unlock(); + // Calling ToLog is inexpensive; whether anything is emitted depends on whether logging is enabled + ToLog(); + if (emit_output) { string tree = ToString(); auto save_location = GetSaveLocation(); @@ -797,6 +800,19 @@ static string StringifyAndFree(yyjson_mut_doc *doc, yyjson_mut_val *object) { return result; } +void QueryProfiler::ToLog() const { + lock_guard<mutex> guard(lock); + + if (!root) { + // No root, not much to do + return; + } + + auto &settings = root->GetProfilingInfo(); + + settings.WriteMetricsToLog(context); +} + string QueryProfiler::ToJSON() const { lock_guard<mutex> guard(lock); auto doc = yyjson_mut_doc_new(nullptr); diff --git a/src/main/relation.cpp b/src/main/relation.cpp index b9e4d50ff49e..9bd687b82732 100644 --- a/src/main/relation.cpp +++ b/src/main/relation.cpp @@ -241,7 +241,12 @@ BoundStatement Relation::Bind(Binder &binder) { } shared_ptr<Relation> Relation::InsertRel(const string &schema_name, const string &table_name) { - return make_shared_ptr<InsertRelation>(shared_from_this(), schema_name, table_name); + return InsertRel(INVALID_CATALOG, schema_name, table_name); +} + +shared_ptr<Relation> Relation::InsertRel(const string &catalog_name, const string &schema_name, + const string &table_name) { + return make_shared_ptr<InsertRelation>(shared_from_this(), catalog_name, schema_name, table_name); } void Relation::Insert(const string &table_name) { @@ -249,7 +254,11 @@ void Relation::Insert(const string &table_name) { } void Relation::Insert(const string &schema_name, const string &table_name) { - auto insert = InsertRel(schema_name, table_name); + Insert(INVALID_CATALOG, schema_name, table_name); +} + +void Relation::Insert(const string &catalog_name, const string &schema_name, const string &table_name) { + auto insert = InsertRel(catalog_name, schema_name, table_name); auto res = insert->Execute(); if (res->HasError()) { const string prepended_message = "Failed to insert into table '" + table_name + "': "; @@ -258,30 +267,37 @@ void Relation::Insert(const string &schema_name, const string &table_name) { } void Relation::Insert(const vector<vector<Value>> &values) { - vector<string> column_names; - auto rel = make_shared_ptr<ValueRelation>(context->GetContext(), values, std::move(column_names), "values"); - rel->Insert(GetAlias()); + throw InvalidInputException("INSERT with values can only be
used on base tables!"); } void Relation::Insert(vector<vector<unique_ptr<ParsedExpression>>> &&expressions) { - vector<string> column_names; - auto rel = make_shared_ptr<ValueRelation>(context->GetContext(), std::move(expressions), std::move(column_names), - "values"); - rel->Insert(GetAlias()); + (void)std::move(expressions); + throw InvalidInputException("INSERT with expressions can only be used on base tables!"); } shared_ptr<Relation> Relation::CreateRel(const string &schema_name, const string &table_name, bool temporary, OnCreateConflict on_conflict) { - return make_shared_ptr<CreateTableRelation>(shared_from_this(), schema_name, table_name, temporary, on_conflict); + return CreateRel(INVALID_CATALOG, schema_name, table_name, temporary, on_conflict); +} + +shared_ptr<Relation> Relation::CreateRel(const string &catalog_name, const string &schema_name, + const string &table_name, bool temporary, OnCreateConflict on_conflict) { + return make_shared_ptr<CreateTableRelation>(shared_from_this(), catalog_name, schema_name, table_name, temporary, + on_conflict); } void Relation::Create(const string &table_name, bool temporary, OnCreateConflict on_conflict) { - Create(INVALID_SCHEMA, table_name, temporary, on_conflict); + Create(INVALID_CATALOG, INVALID_SCHEMA, table_name, temporary, on_conflict); } void Relation::Create(const string &schema_name, const string &table_name, bool temporary, OnCreateConflict on_conflict) { - auto create = CreateRel(schema_name, table_name, temporary, on_conflict); + Create(INVALID_CATALOG, schema_name, table_name, temporary, on_conflict); +} + +void Relation::Create(const string &catalog_name, const string &schema_name, const string &table_name, bool temporary, + OnCreateConflict on_conflict) { + auto create = CreateRel(catalog_name, schema_name, table_name, temporary, on_conflict); auto res = create->Execute(); if (res->HasError()) { const string prepended_message = "Failed to create table '" + table_name + "': "; diff --git a/src/main/relation/create_table_relation.cpp b/src/main/relation/create_table_relation.cpp index 39aa65e3694a..2a08194c0cd6 100644 --- a/src/main/relation/create_table_relation.cpp +++ b/src/main/relation/create_table_relation.cpp @@ -14,12 +14,21 @@ CreateTableRelation::CreateTableRelation(shared_ptr<Relation> child_p, string sc TryBindRelation(columns); } +CreateTableRelation::CreateTableRelation(shared_ptr<Relation> child_p, string catalog_name, string schema_name, + string table_name, bool temporary_p, OnCreateConflict on_conflict) + : Relation(child_p->context, RelationType::CREATE_TABLE_RELATION), child(std::move(child_p)), + catalog_name(std::move(catalog_name)), schema_name(std::move(schema_name)), table_name(std::move(table_name)), + temporary(temporary_p), on_conflict(on_conflict) { + TryBindRelation(columns); +} + BoundStatement CreateTableRelation::Bind(Binder &binder) { auto select = make_uniq<SelectStatement>(); select->node = child->GetQueryNode(); CreateStatement stmt; auto info = make_uniq<CreateTableInfo>(); + info->catalog = catalog_name; info->schema = schema_name; info->table = table_name; info->query = std::move(select); diff --git a/src/main/relation/insert_relation.cpp b/src/main/relation/insert_relation.cpp index 84ef16ec6e47..461133255c63 100644 --- a/src/main/relation/insert_relation.cpp +++ b/src/main/relation/insert_relation.cpp @@ -13,11 +13,18 @@ InsertRelation::InsertRelation(shared_ptr<Relation> child_p, string schema_name, TryBindRelation(columns); } +InsertRelation::InsertRelation(shared_ptr<Relation> child_p, string catalog_name, string schema_name, string table_name) + : Relation(child_p->context, RelationType::INSERT_RELATION), child(std::move(child_p)), + catalog_name(std::move(catalog_name)),
schema_name(std::move(schema_name)), table_name(std::move(table_name)) { + TryBindRelation(columns); +} + BoundStatement InsertRelation::Bind(Binder &binder) { InsertStatement stmt; auto select = make_uniq<SelectStatement>(); select->node = child->GetQueryNode(); + stmt.catalog = catalog_name; stmt.schema = schema_name; stmt.table = table_name; stmt.select_statement = std::move(select); diff --git a/src/main/relation/table_relation.cpp b/src/main/relation/table_relation.cpp index c82ace698bd0..78d5aaaa4e09 100644 --- a/src/main/relation/table_relation.cpp +++ b/src/main/relation/table_relation.cpp @@ -3,6 +3,7 @@ #include "duckdb/parser/query_node/select_node.hpp" #include "duckdb/parser/expression/star_expression.hpp" #include "duckdb/main/relation/delete_relation.hpp" +#include "duckdb/main/relation/value_relation.hpp" #include "duckdb/main/relation/update_relation.hpp" #include "duckdb/parser/parser.hpp" #include "duckdb/main/client_context.hpp" @@ -87,4 +88,17 @@ void TableRelation::Delete(const string &condition) { del->Execute(); } +void TableRelation::Insert(const vector<vector<Value>> &values) { + vector<string> column_names; + auto rel = make_shared_ptr<ValueRelation>(context->GetContext(), values, std::move(column_names), "values"); + rel->Insert(description->database, description->schema, description->table); +} + +void TableRelation::Insert(vector<vector<unique_ptr<ParsedExpression>>> &&expressions) { + vector<string> column_names; + auto rel = make_shared_ptr<ValueRelation>(context->GetContext(), std::move(expressions), std::move(column_names), + "values"); + rel->Insert(description->database, description->schema, description->table); +} + } // namespace duckdb diff --git a/src/main/settings/custom_settings.cpp b/src/main/settings/custom_settings.cpp index 4162d5e6f59f..f1594fdec572 100644 --- a/src/main/settings/custom_settings.cpp +++ b/src/main/settings/custom_settings.cpp @@ -961,9 +961,15 @@ void ForceCompressionSetting::SetGlobal(DatabaseInstance *db, DBConfig &config, } else { auto compression_type = CompressionTypeFromString(compression); //! FIXME: do we want to try to retrieve the AttachedDatabase here to get the StorageManager ??
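A quick usage sketch for the relation-API hunks above: Relation::Insert(values) is now virtual and raises an error for non-table relations, while TableRelation::Insert builds a ValueRelation and inserts it into the table's fully qualified catalog.schema.table instead of re-resolving the relation's alias by name. The table name and rows below are illustrative, not from the patch:

#include "duckdb.hpp"

using namespace duckdb;

int main() {
	DuckDB db(nullptr);
	Connection con(db);
	con.Query("CREATE TABLE people (id INTEGER, name VARCHAR)");
	auto people = con.Table("people");
	// Dispatches to TableRelation::Insert, which targets the underlying
	// table's fully qualified name.
	people->Insert({{Value::INTEGER(1), Value("Ada")}, {Value::INTEGER(2), Value("Grace")}});
	people->Execute()->Print();
	return 0;
}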
- if (CompressionTypeIsDeprecated(compression_type)) { - throw ParserException("Attempted to force a deprecated compression type (%s)", - CompressionTypeToString(compression_type)); + auto compression_availability_result = CompressionTypeIsAvailable(compression_type); + if (!compression_availability_result.IsAvailable()) { + if (compression_availability_result.IsDeprecated()) { + throw ParserException("Attempted to force a deprecated compression type (%s)", + CompressionTypeToString(compression_type)); + } else { + throw ParserException("Attempted to force a compression type that isn't available yet (%s)", + CompressionTypeToString(compression_type)); + } } if (compression_type == CompressionType::COMPRESSION_AUTO) { auto compression_types = StringUtil::Join(ListCompressionTypes(), ", "); diff --git a/src/optimizer/filter_combiner.cpp b/src/optimizer/filter_combiner.cpp index ddbe82ab0dfb..f7099c9a15e1 100644 --- a/src/optimizer/filter_combiner.cpp +++ b/src/optimizer/filter_combiner.cpp @@ -2,6 +2,7 @@ #include "duckdb/common/enums/expression_type.hpp" #include "duckdb/execution/expression_executor.hpp" +#include "duckdb/function/scalar/string_common.hpp" #include "duckdb/optimizer/optimizer.hpp" #include "duckdb/planner/expression.hpp" #include "duckdb/planner/expression/bound_between_expression.hpp" @@ -24,6 +25,7 @@ #include "duckdb/optimizer/column_lifetime_analyzer.hpp" #include "duckdb/planner/expression_iterator.hpp" #include "duckdb/planner/operator/logical_get.hpp" +#include "utf8proc_wrapper.hpp" namespace duckdb { @@ -282,6 +284,35 @@ static bool SupportedFilterComparison(ExpressionType expression_type) { } } +bool FilterCombiner::FindNextLegalUTF8(string &prefix_string) { + // find the start of the last codepoint + idx_t last_codepoint_start; + for (last_codepoint_start = prefix_string.size(); last_codepoint_start > 0; last_codepoint_start--) { + if (IsCharacter(prefix_string[last_codepoint_start - 1])) { + break; + } + } + if (last_codepoint_start == 0) { + throw InvalidInputException("Invalid UTF8 found in string \"%s\"", prefix_string); + } + last_codepoint_start--; + int codepoint_size; + auto codepoint = Utf8Proc::UTF8ToCodepoint(prefix_string.c_str() + last_codepoint_start, codepoint_size) + 1; + if (codepoint >= 0xD800 && codepoint <= 0xDFFF) { + // next codepoint falls within the surrogate range - increment to the next valid character + codepoint = 0xE000; + } + char next_codepoint_text[4]; + int next_codepoint_size; + if (!Utf8Proc::CodepointToUtf8(codepoint, next_codepoint_size, next_codepoint_text)) { + // invalid codepoint + return false; + } + auto s = static_cast<idx_t>(next_codepoint_size); + prefix_string = prefix_string.substr(0, last_codepoint_start) + string(next_codepoint_text, s); + return true; +} + bool TypeSupportsConstantFilter(const LogicalType &type) { if (TypeIsNumeric(type.InternalType())) { return true; } @@ -397,11 +428,14 @@ FilterPushdownResult FilterCombiner::TryPushdownPrefixFilter(TableFilterSet &tab auto &column_index = column_ids[column_ref.binding.column_index]; //!
Replace prefix with a set of comparisons auto lower_bound = make_uniq<ConstantFilter>(ExpressionType::COMPARE_GREATERTHANOREQUALTO, Value(prefix_string)); - prefix_string[prefix_string.size() - 1]++; - auto upper_bound = make_uniq<ConstantFilter>(ExpressionType::COMPARE_LESSTHAN, Value(prefix_string)); table_filters.PushFilter(column_index, std::move(lower_bound)); - table_filters.PushFilter(column_index, std::move(upper_bound)); - return FilterPushdownResult::PUSHED_DOWN_FULLY; + if (FilterCombiner::FindNextLegalUTF8(prefix_string)) { + auto upper_bound = make_uniq<ConstantFilter>(ExpressionType::COMPARE_LESSTHAN, Value(prefix_string)); + table_filters.PushFilter(column_index, std::move(upper_bound)); + return FilterPushdownResult::PUSHED_DOWN_FULLY; + } + // could not find next legal utf8 string - skip upper bound + return FilterPushdownResult::NO_PUSHDOWN; } FilterPushdownResult FilterCombiner::TryPushdownLikeFilter(TableFilterSet &table_filters, diff --git a/src/optimizer/filter_pullup.cpp b/src/optimizer/filter_pullup.cpp index 21961138727b..f9ebb63c3ccc 100644 --- a/src/optimizer/filter_pullup.cpp +++ b/src/optimizer/filter_pullup.cpp @@ -6,6 +6,7 @@ #include "duckdb/planner/operator/logical_comparison_join.hpp" #include "duckdb/planner/operator/logical_cross_product.hpp" #include "duckdb/planner/operator/logical_join.hpp" +#include "duckdb/planner/operator/logical_distinct.hpp" namespace duckdb { @@ -26,6 +27,7 @@ unique_ptr<LogicalOperator> FilterPullup::Rewrite(unique_ptr<LogicalOperator> op case LogicalOperatorType::LOGICAL_EXCEPT: return PullupSetOperation(std::move(op)); case LogicalOperatorType::LOGICAL_DISTINCT: + return PullupDistinct(std::move(op)); case LogicalOperatorType::LOGICAL_ORDER_BY: { // we can just pull directly through these operations without any rewriting op->children[0] = Rewrite(std::move(op->children[0])); @@ -115,6 +117,18 @@ unique_ptr<LogicalOperator> FilterPullup::PullupCrossProduct(unique_ptr +unique_ptr<LogicalOperator> FilterPullup::PullupDistinct(unique_ptr<LogicalOperator> op) { + const auto &distinct = op->Cast<LogicalDistinct>(); + if (distinct.distinct_type == DistinctType::DISTINCT) { + // Can pull up through a DISTINCT + op->children[0] = Rewrite(std::move(op->children[0])); + return op; + } + // Cannot pull up through a DISTINCT ON (see #19327) + D_ASSERT(distinct.distinct_type == DistinctType::DISTINCT_ON); + return FinishPullup(std::move(op)); +} + unique_ptr<LogicalOperator> FilterPullup::GeneratePullupFilter(unique_ptr<LogicalOperator> child, vector<unique_ptr<Expression>> &expressions) { unique_ptr<LogicalFilter> filter = make_uniq<LogicalFilter>(); diff --git a/src/optimizer/join_order/relation_manager.cpp b/src/optimizer/join_order/relation_manager.cpp index 4916f662e6a8..60f470224cf1 100644 --- a/src/optimizer/join_order/relation_manager.cpp +++ b/src/optimizer/join_order/relation_manager.cpp @@ -54,6 +54,10 @@ void RelationManager::AddRelation(LogicalOperator &op, optional_ptr limit_op, RelationS } } +void RelationManager::AddRelationWithChildren(JoinOrderOptimizer &optimizer, LogicalOperator &op, + LogicalOperator &input_op, optional_ptr<LogicalOperator> parent, + RelationStats &child_stats, optional_ptr<LogicalOperator> limit_op, + vector<reference<LogicalOperator>> &datasource_filters) { + D_ASSERT(!op.children.empty()); + auto child_optimizer = optimizer.CreateChildOptimizer(); + op.children[0] = child_optimizer.Optimize(std::move(op.children[0]), &child_stats); + if (!datasource_filters.empty()) { + child_stats.cardinality = LossyNumericCast<idx_t>(static_cast<double>(child_stats.cardinality) * + RelationStatisticsHelper::DEFAULT_SELECTIVITY); + } + ModifyStatsIfLimit(limit_op.get(), child_stats); + AddRelation(input_op, parent, child_stats); +} + bool RelationManager::ExtractJoinRelations(JoinOrderOptimizer &optimizer, LogicalOperator &input_op,
vector<reference<LogicalOperator>> &filter_operators, optional_ptr<LogicalOperator> parent) { @@ -279,15 +298,7 @@ bool RelationManager::ExtractJoinRelations(JoinOrderOptimizer &optimizer, Logica case LogicalOperatorType::LOGICAL_UNNEST: { // optimize children of unnest RelationStats child_stats; - auto child_optimizer = optimizer.CreateChildOptimizer(); - op->children[0] = child_optimizer.Optimize(std::move(op->children[0]), &child_stats); - // the extracted cardinality should be set for window - if (!datasource_filters.empty()) { - child_stats.cardinality = LossyNumericCast<idx_t>(static_cast<double>(child_stats.cardinality) * - RelationStatisticsHelper::DEFAULT_SELECTIVITY); - } - ModifyStatsIfLimit(limit_op.get(), child_stats); - AddRelation(input_op, parent, child_stats); + AddRelationWithChildren(optimizer, *op, input_op, parent, child_stats, limit_op, datasource_filters); return true; } case LogicalOperatorType::LOGICAL_COMPARISON_JOIN: { @@ -345,6 +356,14 @@ bool RelationManager::ExtractJoinRelations(JoinOrderOptimizer &optimizer, Logica case LogicalOperatorType::LOGICAL_GET: { // TODO: Get stats from a logical GET auto &get = op->Cast<LogicalGet>(); + // this is a get that *most likely* has a function (like unnest or json_each). + // there are new bindings for output of the function, but child bindings also exist, and can + // be used in joins + if (!op->children.empty()) { + RelationStats child_stats; + AddRelationWithChildren(optimizer, *op, input_op, parent, child_stats, limit_op, datasource_filters); + return true; + } auto stats = RelationStatisticsHelper::ExtractGetStats(get, context); // if there is another logical filter that could not be pushed down into the // table scan, apply another selectivity. diff --git a/src/optimizer/late_materialization.cpp b/src/optimizer/late_materialization.cpp index a144df188b82..689c87a9a12b 100644 --- a/src/optimizer/late_materialization.cpp +++ b/src/optimizer/late_materialization.cpp @@ -432,6 +432,11 @@ bool LateMaterialization::OptimizeLargeLimit(LogicalLimit &limit, idx_t limit_va } current_op = *current_op.get().children[0]; } + // if there are any filters we shouldn't do large limit optimization + auto &get = current_op.get().Cast<LogicalGet>(); + if (!get.table_filters.filters.empty()) { + return false; + } return true; } diff --git a/src/optimizer/rule/ordered_aggregate_optimizer.cpp b/src/optimizer/rule/ordered_aggregate_optimizer.cpp index 8f6435e1f61f..5bbbc2c487c0 100644 --- a/src/optimizer/rule/ordered_aggregate_optimizer.cpp +++ b/src/optimizer/rule/ordered_aggregate_optimizer.cpp @@ -17,7 +17,9 @@ OrderedAggregateOptimizer::OrderedAggregateOptimizer(ExpressionRewriter &rewrite } unique_ptr<Expression> OrderedAggregateOptimizer::Apply(ClientContext &context, BoundAggregateExpression &aggr, - vector<unique_ptr<Expression>> &groups, bool &changes_made) { + vector<unique_ptr<Expression>> &groups, + optional_ptr<vector<GroupingSet>> grouping_sets, + bool &changes_made) { if (!aggr.order_bys) { // no ORDER BYs defined return nullptr; } @@ -30,7 +32,7 @@ unique_ptr<Expression> OrderedAggregateOptimizer::Apply(ClientContext &context, } // Remove unnecessary ORDER BY clauses and return if nothing remains - if (aggr.order_bys->Simplify(groups)) { + if (aggr.order_bys->Simplify(groups, grouping_sets)) { aggr.order_bys.reset(); changes_made = true; return nullptr; } @@ -90,7 +92,8 @@ unique_ptr<Expression> OrderedAggregateOptimizer::Apply(ClientContext &context, unique_ptr<Expression> OrderedAggregateOptimizer::Apply(LogicalOperator &op, vector<reference<Expression>> &bindings, bool &changes_made, bool is_root) { auto &aggr = bindings[0].get().Cast<BoundAggregateExpression>(); - return Apply(rewriter.context, aggr, op.Cast<LogicalAggregate>().groups, changes_made); + return Apply(rewriter.context,
aggr, op.Cast<LogicalAggregate>().groups, op.Cast<LogicalAggregate>().grouping_sets, + changes_made); } } // namespace duckdb diff --git a/src/parser/transform/expression/transform_cast.cpp b/src/parser/transform/expression/transform_cast.cpp index a4b1dde59bbe..0412a3c96961 100644 --- a/src/parser/transform/expression/transform_cast.cpp +++ b/src/parser/transform/expression/transform_cast.cpp @@ -21,7 +21,9 @@ unique_ptr<ParsedExpression> Transformer::TransformTypeCast(duckdb_libpgquery::P parameters.query_location = NumericCast<idx_t>(root.location); } auto blob_data = Blob::ToBlob(string(c->val.val.str), parameters); - return make_uniq<ConstantExpression>(Value::BLOB_RAW(blob_data)); + auto result = make_uniq<ConstantExpression>(Value::BLOB_RAW(blob_data)); + SetQueryLocation(*result, root.location); + return std::move(result); } } // transform the expression node diff --git a/src/parser/transform/helpers/transform_sample.cpp b/src/parser/transform/helpers/transform_sample.cpp index bd1cc75a252f..30d4748dda76 100644 --- a/src/parser/transform/helpers/transform_sample.cpp +++ b/src/parser/transform/helpers/transform_sample.cpp @@ -44,8 +44,9 @@ unique_ptr<SampleOptions> Transformer::TransformSampleOptions(optional_ptr auto rows = sample_value.GetValue<int64_t>(); - if (rows < 0) { - throw ParserException("Sample rows %lld out of range, must be bigger than or equal to 0", rows); + if (rows < 0 || sample_value.GetValue<int64_t>() > SampleOptions::MAX_SAMPLE_ROWS) { + throw ParserException("Sample rows %lld out of range, must be between 0 and %lld", rows, + SampleOptions::MAX_SAMPLE_ROWS); } result->sample_size = Value::BIGINT(rows); result->method = SampleMethod::RESERVOIR_SAMPLE; diff --git a/src/planner/binder.cpp b/src/planner/binder.cpp index 01895705809a..9628e463ae1e 100644 --- a/src/planner/binder.cpp +++ b/src/planner/binder.cpp @@ -70,7 +70,7 @@ Binder::Binder(ClientContext &context, shared_ptr<Binder> parent_p, BinderType b } } -unique_ptr<CTENode> Binder::BindMaterializedCTE(CommonTableExpressionMap &cte_map) { +unique_ptr<CTENode> Binder::BindMaterializedCTE(CommonTableExpressionMap &cte_map, unique_ptr<CTENode> &cte_root) { // Extract materialized CTEs from cte_map vector<unique_ptr<CTENode>> materialized_ctes; for (auto &cte : cte_map.map) { @@ -87,7 +87,6 @@ unique_ptr<CTENode> Binder::BindMaterializedCTE(CommonTableExpressionMap &c return nullptr; } - unique_ptr<CTENode> cte_root = nullptr; while (!materialized_ctes.empty()) { unique_ptr<CTENode> node_result; node_result = std::move(materialized_ctes.back()); @@ -110,7 +109,8 @@ unique_ptr<CTENode> Binder::BindMaterializedCTE(CommonTableExpressionMap &c template <class T> BoundStatement Binder::BindWithCTE(T &statement) { BoundStatement bound_statement; - auto bound_cte = BindMaterializedCTE(statement.template Cast<T>().cte_map); + unique_ptr<CTENode> cte_root; + auto bound_cte = BindMaterializedCTE(statement.template Cast<T>().cte_map, cte_root); if (bound_cte) { reference<CTENode> tail_ref = *bound_cte; diff --git a/src/planner/binder/expression/bind_star_expression.cpp b/src/planner/binder/expression/bind_star_expression.cpp index f48fc14e6710..84333d333d02 100644 --- a/src/planner/binder/expression/bind_star_expression.cpp +++ b/src/planner/binder/expression/bind_star_expression.cpp @@ -216,7 +216,7 @@ void TryTransformStarLike(unique_ptr<ParsedExpression> &root) { child_expr = std::move(list_filter); } - auto columns_expr = make_uniq<StarExpression>(); + auto columns_expr = make_uniq<StarExpression>(star.relation_name); columns_expr->columns = true; columns_expr->expr = std::move(child_expr); columns_expr->SetAlias(std::move(original_alias)); diff --git a/src/planner/binder/query_node/plan_select_node.cpp b/src/planner/binder/query_node/plan_select_node.cpp --- a/src/planner/binder/query_node/plan_select_node.cpp
+++ b/src/planner/binder/query_node/plan_select_node.cpp @@ -30,7 +30,7 @@ unique_ptr<LogicalOperator> Binder::CreatePlan(BoundSelectNode &statement) { root = PlanFilter(std::move(statement.where_clause), std::move(root)); } - if (!statement.aggregates.empty() || !statement.groups.group_expressions.empty()) { + if (!statement.aggregates.empty() || !statement.groups.group_expressions.empty() || statement.having) { if (!statement.groups.group_expressions.empty()) { // visit the groups for (auto &group : statement.groups.group_expressions) { diff --git a/src/planner/binder/statement/bind_copy.cpp b/src/planner/binder/statement/bind_copy.cpp index b7881a0a1d67..10bfbdc7b372 100644 --- a/src/planner/binder/statement/bind_copy.cpp +++ b/src/planner/binder/statement/bind_copy.cpp @@ -423,7 +423,10 @@ vector<Value> BindCopyOption(ClientContext &context, TableFunctionBinder &option } } auto bound_expr = option_binder.Bind(expr); - auto val = ExpressionExecutor::EvaluateScalar(context, *bound_expr); + if (bound_expr->HasParameter()) { + throw ParameterNotResolvedException(); + } + auto val = ExpressionExecutor::EvaluateScalar(context, *bound_expr, true); if (val.IsNull()) { throw BinderException("NULL is not supported as a valid option for COPY option \"" + name + "\""); } diff --git a/src/planner/binder/statement/bind_create_table.cpp b/src/planner/binder/statement/bind_create_table.cpp index ad70fe14abc0..2739dcb97f94 100644 --- a/src/planner/binder/statement/bind_create_table.cpp +++ b/src/planner/binder/statement/bind_create_table.cpp @@ -40,10 +40,18 @@ static void VerifyCompressionType(ClientContext &context, optional_ptrCast(); for (auto &col : base.columns.Logical()) { auto compression_type = col.CompressionType(); - if (CompressionTypeIsDeprecated(compression_type, storage_manager)) { - throw BinderException("Can't compress using user-provided compression type '%s', that type is deprecated " - "and only has decompress support", - CompressionTypeToString(compression_type)); + auto compression_availability_result = CompressionTypeIsAvailable(compression_type, storage_manager); + if (!compression_availability_result.IsAvailable()) { + if (compression_availability_result.IsDeprecated()) { + throw BinderException( + "Can't compress using user-provided compression type '%s', that type is deprecated " + "and only has decompress support", + CompressionTypeToString(compression_type)); + } else { + throw BinderException( + "Can't compress using user-provided compression type '%s', that type is not available yet", + CompressionTypeToString(compression_type)); + } } auto logical_type = col.GetType(); if (logical_type.id() == LogicalTypeId::USER && logical_type.HasAlias()) { diff --git a/src/planner/binder/statement/bind_insert.cpp b/src/planner/binder/statement/bind_insert.cpp index 8a9009562ee3..96222f0c51e1 100644 --- a/src/planner/binder/statement/bind_insert.cpp +++ b/src/planner/binder/statement/bind_insert.cpp @@ -465,7 +465,11 @@ unique_ptr Binder::GenerateMergeInto(InsertStatement &stmt, if (on_conflict_info.action_type == OnConflictAction::REPLACE) { D_ASSERT(!on_conflict_info.set_info); - on_conflict_info.set_info = CreateSetInfoForReplace(table, stmt, storage_info); + // For BY POSITION, create explicit SET information + // For BY NAME, leave it empty and let bind_merge_into handle it automatically + if (stmt.column_order != InsertColumnOrder::INSERT_BY_NAME) { + on_conflict_info.set_info = CreateSetInfoForReplace(table, stmt, storage_info); + } on_conflict_info.action_type = OnConflictAction::UPDATE; } // now set
up the merge actions @@ -484,16 +488,19 @@ unique_ptr Binder::GenerateMergeInto(InsertStatement &stmt, // when doing UPDATE set up the when matched action auto update_action = make_uniq(); update_action->action_type = MergeActionType::MERGE_UPDATE; - for (auto &col : on_conflict_info.set_info->expressions) { - vector> lambda_params; - DoUpdateSetQualify(col, table_name, lambda_params); - } - if (on_conflict_info.set_info->condition) { - vector> lambda_params; - DoUpdateSetQualify(on_conflict_info.set_info->condition, table_name, lambda_params); - update_action->condition = std::move(on_conflict_info.set_info->condition); + update_action->column_order = stmt.column_order; + if (on_conflict_info.set_info) { + for (auto &col : on_conflict_info.set_info->expressions) { + vector> lambda_params; + DoUpdateSetQualify(col, table_name, lambda_params); + } + if (on_conflict_info.set_info->condition) { + vector> lambda_params; + DoUpdateSetQualify(on_conflict_info.set_info->condition, table_name, lambda_params); + update_action->condition = std::move(on_conflict_info.set_info->condition); + } + update_action->update_info = std::move(on_conflict_info.set_info); } - update_action->update_info = std::move(on_conflict_info.set_info); merge_into->actions[MergeActionCondition::WHEN_MATCHED].push_back(std::move(update_action)); } diff --git a/src/planner/binder/statement/bind_merge_into.cpp b/src/planner/binder/statement/bind_merge_into.cpp index b52a04cf2368..436f7d3e5c42 100644 --- a/src/planner/binder/statement/bind_merge_into.cpp +++ b/src/planner/binder/statement/bind_merge_into.cpp @@ -41,10 +41,20 @@ unique_ptr Binder::BindMergeAction(LogicalMergeInto &merge auto result = make_uniq(); result->action_type = action.action_type; if (action.condition) { - ProjectionBinder proj_binder(*this, context, proj_index, expressions, "WHERE clause"); - proj_binder.target_type = LogicalType::BOOLEAN; - auto cond = proj_binder.Bind(action.condition); - result->condition = std::move(cond); + if (action.condition->HasSubquery()) { + // if we have a subquery we need to execute the condition outside of the MERGE INTO statement + WhereBinder where_binder(*this, context); + auto cond = where_binder.Bind(action.condition); + PlanSubqueries(cond, root); + result->condition = + make_uniq(cond->return_type, ColumnBinding(proj_index, expressions.size())); + expressions.push_back(std::move(cond)); + } else { + ProjectionBinder proj_binder(*this, context, proj_index, expressions, "WHERE clause"); + proj_binder.target_type = LogicalType::BOOLEAN; + auto cond = proj_binder.Bind(action.condition); + result->condition = std::move(cond); + } } switch (action.action_type) { case MergeActionType::MERGE_UPDATE: { @@ -173,6 +183,32 @@ void RewriteMergeBindings(LogicalOperator &op, const vector &sour op, [&](unique_ptr *child) { RewriteMergeBindings(*child, source_bindings, new_table_index); }); } +LogicalGet &ExtractLogicalGet(LogicalOperator &op) { + reference current_op(op); + while (current_op.get().type == LogicalOperatorType::LOGICAL_FILTER) { + current_op = *current_op.get().children[0]; + } + if (current_op.get().type != LogicalOperatorType::LOGICAL_GET) { + throw InvalidInputException("BindMerge - expected to find an operator of type LOGICAL_GET but got %s", + op.ToString()); + } + return current_op.get().Cast(); +} + +void CheckMergeAction(MergeActionCondition condition, MergeActionType action_type) { + if (condition == MergeActionCondition::WHEN_NOT_MATCHED_BY_TARGET) { + switch (action_type) { + case 
MergeActionType::MERGE_UPDATE: + case MergeActionType::MERGE_DELETE: + throw ParserException("WHEN NOT MATCHED (BY TARGET) cannot be combined with UPDATE or DELETE actions - as " + "there is no corresponding row in the target to update or delete.\nDid you mean to " + "use WHEN MATCHED or WHEN NOT MATCHED BY SOURCE?"); + default: + break; + } + } +} + BoundStatement Binder::Bind(MergeIntoStatement &stmt) { // bind the target table auto target_binder = Binder::CreateBinder(context, this); @@ -243,7 +279,7 @@ BoundStatement Binder::Bind(MergeIntoStatement &stmt) { // kind of hacky, CreatePlan turns a RIGHT join into a LEFT join so the children get reversed from what we need bool inverted = join.type == JoinType::RIGHT; auto &source = join_ref.get().children[inverted ? 1 : 0]; - auto &get = join_ref.get().children[inverted ? 0 : 1]->Cast(); + auto &get = ExtractLogicalGet(*join_ref.get().children[inverted ? 0 : 1]); auto merge_into = make_uniq(table); merge_into->table_index = GenerateTableIndex(); @@ -265,6 +301,7 @@ BoundStatement Binder::Bind(MergeIntoStatement &stmt) { for (auto &entry : stmt.actions) { vector> bound_actions; for (auto &action : entry.second) { + CheckMergeAction(entry.first, action->action_type); bound_actions.push_back(BindMergeAction(*merge_into, table, get, proj_index, projection_expressions, root, *action, source_aliases, source_names)); } diff --git a/src/planner/binder/tableref/bind_table_function.cpp b/src/planner/binder/tableref/bind_table_function.cpp index 91bd5775d44e..f06e9e1ad668 100644 --- a/src/planner/binder/tableref/bind_table_function.cpp +++ b/src/planner/binder/tableref/bind_table_function.cpp @@ -147,7 +147,7 @@ bool Binder::BindTableFunctionParameters(TableFunctionCatalogEntry &table_functi MoveCorrelatedExpressions(*subquery->binder); seen_subquery = true; arguments.emplace_back(LogicalTypeId::TABLE); - parameters.emplace_back(Value()); + parameters.emplace_back(); continue; } diff --git a/src/planner/bound_result_modifier.cpp b/src/planner/bound_result_modifier.cpp index edf49c4b13b9..4b7710bce59a 100644 --- a/src/planner/bound_result_modifier.cpp +++ b/src/planner/bound_result_modifier.cpp @@ -101,14 +101,17 @@ bool BoundOrderModifier::Equals(const unique_ptr &left, return BoundOrderModifier::Equals(*left, *right); } -bool BoundOrderModifier::Simplify(vector &orders, const vector> &groups) { +bool BoundOrderModifier::Simplify(vector &orders, const vector> &groups, + optional_ptr> grouping_sets) { // for each ORDER BY - check if it is actually necessary // expressions that are in the groups do not need to be ORDERED BY // `ORDER BY` on a group has no effect, because for each aggregate, the group is unique // similarly, we only need to ORDER BY each aggregate once + expression_map_t group_expressions; expression_set_t seen_expressions; + idx_t i = 0; for (auto &target : groups) { - seen_expressions.insert(*target); + group_expressions.insert({*target, i++}); } vector new_order_nodes; for (auto &order_node : orders) { @@ -116,16 +119,30 @@ bool BoundOrderModifier::Simplify(vector &orders, const vector // we do not need to order by this node continue; } + auto it = group_expressions.find(*order_node.expression); + bool add_to_new_order = it == group_expressions.end(); + if (!add_to_new_order && grouping_sets) { + idx_t group_idx = it->second; + for (auto &grouping_set : *grouping_sets) { + if (grouping_set.find(group_idx) == grouping_set.end()) { + add_to_new_order = true; + break; + } + } + } seen_expressions.insert(*order_node.expression); - 
new_order_nodes.push_back(std::move(order_node)); + if (add_to_new_order) { + new_order_nodes.push_back(std::move(order_node)); + } } orders.swap(new_order_nodes); return orders.empty(); // NOLINT } -bool BoundOrderModifier::Simplify(const vector> &groups) { - return Simplify(orders, groups); +bool BoundOrderModifier::Simplify(const vector> &groups, + optional_ptr> grouping_sets) { + return Simplify(orders, groups, grouping_sets); } BoundLimitNode::BoundLimitNode(LimitNodeType type, idx_t constant_integer, double constant_percentage, diff --git a/src/planner/expression/bound_function_expression.cpp b/src/planner/expression/bound_function_expression.cpp index 5556dec21e02..44678762cc46 100644 --- a/src/planner/expression/bound_function_expression.cpp +++ b/src/planner/expression/bound_function_expression.cpp @@ -39,7 +39,10 @@ bool BoundFunctionExpression::IsFoldable() const { } } } - return function.stability == FunctionStability::VOLATILE ? false : Expression::IsFoldable(); + if (function.stability == FunctionStability::VOLATILE) { + return false; + } + return Expression::IsFoldable(); } bool BoundFunctionExpression::CanThrow() const { diff --git a/src/planner/expression_binder.cpp b/src/planner/expression_binder.cpp index 2207147337fa..eeeaeb97cb47 100644 --- a/src/planner/expression_binder.cpp +++ b/src/planner/expression_binder.cpp @@ -1,6 +1,5 @@ #include "duckdb/planner/expression_binder.hpp" -#include "duckdb/catalog/catalog_entry/scalar_function_catalog_entry.hpp" #include "duckdb/parser/expression/list.hpp" #include "duckdb/parser/parsed_expression_iterator.hpp" #include "duckdb/planner/binder.hpp" @@ -166,7 +165,7 @@ static bool CombineMissingColumns(ErrorData ¤t, ErrorData new_error) { } auto score = StringUtil::SimilarityRating(candidate_column, column_name); candidates.insert(candidate); - scores.emplace_back(make_pair(std::move(candidate), score)); + scores.emplace_back(std::move(candidate), score); } // get a new top-n auto top_candidates = StringUtil::TopNStrings(scores); diff --git a/src/planner/expression_binder/constant_binder.cpp b/src/planner/expression_binder/constant_binder.cpp index 97a65ba311d8..01f4ab11d2fb 100644 --- a/src/planner/expression_binder/constant_binder.cpp +++ b/src/planner/expression_binder/constant_binder.cpp @@ -19,7 +19,7 @@ BindResult ConstantBinder::BindExpression(unique_ptr &expr_ptr return BindExpression(expr_ptr, depth, root_expression); } } - return BindUnsupportedExpression(expr, depth, clause + " cannot contain column names"); + throw BinderException::Unsupported(expr, clause + " cannot contain column names"); } case ExpressionClass::SUBQUERY: throw BinderException(clause + " cannot contain subqueries"); diff --git a/src/planner/expression_binder/having_binder.cpp b/src/planner/expression_binder/having_binder.cpp index 902add5e28cc..48ee194ff2ad 100644 --- a/src/planner/expression_binder/having_binder.cpp +++ b/src/planner/expression_binder/having_binder.cpp @@ -3,7 +3,6 @@ #include "duckdb/parser/expression/columnref_expression.hpp" #include "duckdb/parser/expression/window_expression.hpp" #include "duckdb/planner/binder.hpp" -#include "duckdb/planner/expression_binder/aggregate_binder.hpp" #include "duckdb/common/string_util.hpp" #include "duckdb/planner/query_node/bound_select_node.hpp" @@ -91,7 +90,7 @@ BindResult HavingBinder::BindColumnRef(unique_ptr &expr_ptr, i } BindResult HavingBinder::BindWindow(WindowExpression &expr, idx_t depth) { - return BindResult(BinderException::Unsupported(expr, "HAVING clause cannot contain window 
functions!")); + throw BinderException::Unsupported(expr, "HAVING clause cannot contain window functions!"); } } // namespace duckdb diff --git a/src/planner/subquery/flatten_dependent_join.cpp b/src/planner/subquery/flatten_dependent_join.cpp index a4a00f185457..a9169787dd56 100644 --- a/src/planner/subquery/flatten_dependent_join.cpp +++ b/src/planner/subquery/flatten_dependent_join.cpp @@ -236,6 +236,16 @@ bool FlattenDependentJoins::DetectCorrelatedExpressions(LogicalOperator &op, boo if (DetectCorrelatedExpressions(*child, lateral, new_lateral_depth, condition)) { has_correlation = true; } + + if (op.type == LogicalOperatorType::LOGICAL_MATERIALIZED_CTE && child_idx == 0) { + auto &setop = op.Cast(); + binder.recursive_ctes[setop.table_index] = &setop; + has_correlated_expressions[op] = has_correlation; + if (has_correlation) { + setop.correlated_columns = correlated_columns; + } + } + child_idx++; } @@ -261,6 +271,7 @@ bool FlattenDependentJoins::DetectCorrelatedExpressions(LogicalOperator &op, boo return true; } // Found a materialized CTE, subtree correlation depends on the CTE node + has_correlated_expressions[op] = has_correlated_expressions[*cte_node]; return has_correlated_expressions[*cte_node]; } // No CTE found: subtree is correlated @@ -279,47 +290,32 @@ bool FlattenDependentJoins::DetectCorrelatedExpressions(LogicalOperator &op, boo binder.recursive_ctes[setop.table_index] = &setop; if (has_correlation) { setop.correlated_columns = correlated_columns; - MarkSubtreeCorrelated(*op.children[1].get()); - } - } - - if (op.type == LogicalOperatorType::LOGICAL_MATERIALIZED_CTE) { - auto &setop = op.Cast(); - binder.recursive_ctes[setop.table_index] = &setop; - // only mark the entire subtree as correlated if the materializing side is correlated - auto entry = has_correlated_expressions.find(*op.children[0]); - if (entry != has_correlated_expressions.end()) { - if (has_correlation && entry->second) { - setop.correlated_columns = correlated_columns; - MarkSubtreeCorrelated(*op.children[1].get()); - } + MarkSubtreeCorrelated(*op.children[1].get(), setop.table_index); } } return has_correlation; } -bool FlattenDependentJoins::MarkSubtreeCorrelated(LogicalOperator &op) { +bool FlattenDependentJoins::MarkSubtreeCorrelated(LogicalOperator &op, idx_t cte_index) { // Do not mark base table scans as correlated auto entry = has_correlated_expressions.find(op); D_ASSERT(entry != has_correlated_expressions.end()); bool has_correlation = entry->second; for (auto &child : op.children) { - has_correlation |= MarkSubtreeCorrelated(*child.get()); + has_correlation |= MarkSubtreeCorrelated(*child.get(), cte_index); } if (op.type != LogicalOperatorType::LOGICAL_GET || op.children.size() == 1) { if (op.type == LogicalOperatorType::LOGICAL_CTE_REF) { // There may be multiple recursive CTEs. Only mark CTE_REFs as correlated, // IFF the CTE that we are reading from is correlated. 
auto &cteref = op.Cast(); - auto cte = binder.recursive_ctes.find(cteref.cte_index); - bool has_correlation = false; - if (cte != binder.recursive_ctes.end()) { - auto &rec_cte = cte->second->Cast(); - has_correlation = !rec_cte.correlated_columns.empty(); + if (cteref.cte_index != cte_index) { + has_correlated_expressions[op] = has_correlation; + return has_correlation; } - has_correlated_expressions[op] = has_correlation; - return has_correlation; + has_correlated_expressions[op] = true; + return true; } else { has_correlated_expressions[op] = has_correlation; } @@ -695,6 +691,42 @@ unique_ptr FlattenDependentJoins::PushDownDependentJoinInternal return plan; } } else if (join.join_type == JoinType::MARK) { + if (!left_has_correlation && right_has_correlation) { + // found a MARK join where the left side has no correlation + + ColumnBinding right_binding; + + // there may still be correlation on the right side that we have to deal with + // push into the right side if necessary or decorrelate it independently otherwise + plan->children[1] = PushDownDependentJoinInternal(std::move(plan->children[1]), + parent_propagate_null_values, lateral_depth); + right_binding = this->base_binding; + + // now push into the left side of the MARK join even though it has no correlation + // this is necessary to add the correlated columns to the column bindings and allow + // the join condition to be rewritten correctly + plan->children[0] = PushDownDependentJoinInternal(std::move(plan->children[0]), + parent_propagate_null_values, lateral_depth); + + auto left_binding = this->base_binding; + + // add the correlated columns to the join conditions + for (idx_t i = 0; i < correlated_columns.size(); i++) { + JoinCondition cond; + cond.left = make_uniq( + correlated_columns[i].type, + ColumnBinding(left_binding.table_index, left_binding.column_index + i)); + cond.right = make_uniq( + correlated_columns[i].type, + ColumnBinding(right_binding.table_index, right_binding.column_index + i)); + cond.comparison = ExpressionType::COMPARE_NOT_DISTINCT_FROM; + + auto &comparison_join = join.Cast(); + comparison_join.conditions.push_back(std::move(cond)); + } + return plan; + } + // push the child into the LHS plan->children[0] = PushDownDependentJoinInternal(std::move(plan->children[0]), parent_propagate_null_values, lateral_depth); @@ -1031,7 +1063,8 @@ unique_ptr FlattenDependentJoins::PushDownDependentJoinInternal } } - RewriteCTEScan cte_rewriter(table_index, correlated_columns); + RewriteCTEScan cte_rewriter(table_index, correlated_columns, + plan->type == LogicalOperatorType::LOGICAL_RECURSIVE_CTE); cte_rewriter.VisitOperator(*plan->children[1]); parent_propagate_null_values = false; diff --git a/src/planner/subquery/rewrite_cte_scan.cpp b/src/planner/subquery/rewrite_cte_scan.cpp index f846d9b3638c..7df4f13a8fee 100644 --- a/src/planner/subquery/rewrite_cte_scan.cpp +++ b/src/planner/subquery/rewrite_cte_scan.cpp @@ -14,8 +14,10 @@ namespace duckdb { -RewriteCTEScan::RewriteCTEScan(idx_t table_index, const CorrelatedColumns &correlated_columns) - : table_index(table_index), correlated_columns(correlated_columns) { +RewriteCTEScan::RewriteCTEScan(idx_t table_index, const CorrelatedColumns &correlated_columns, + bool rewrite_dependent_joins) + : table_index(table_index), correlated_columns(correlated_columns), + rewrite_dependent_joins(rewrite_dependent_joins) { } void RewriteCTEScan::VisitOperator(LogicalOperator &op) { @@ -29,7 +31,7 @@ void RewriteCTEScan::VisitOperator(LogicalOperator &op) { } 
cteref.correlated_columns += correlated_columns.size(); } - } else if (op.type == LogicalOperatorType::LOGICAL_DEPENDENT_JOIN) { + } else if (op.type == LogicalOperatorType::LOGICAL_DEPENDENT_JOIN && rewrite_dependent_joins) { // There is another DependentJoin below the correlated recursive CTE. // We have to add the correlated columns of the recursive CTE to the // set of columns of this operator. diff --git a/src/storage/checkpoint/table_data_writer.cpp b/src/storage/checkpoint/table_data_writer.cpp index 342ea1ff587d..4bbdff138d42 100644 --- a/src/storage/checkpoint/table_data_writer.cpp +++ b/src/storage/checkpoint/table_data_writer.cpp @@ -4,6 +4,7 @@ #include "duckdb/catalog/catalog_entry/table_catalog_entry.hpp" #include "duckdb/common/serializer/binary_serializer.hpp" #include "duckdb/main/database.hpp" +#include "duckdb/main/settings.hpp" #include "duckdb/parallel/task_scheduler.hpp" #include "duckdb/storage/table/column_checkpoint_state.hpp" #include "duckdb/storage/table/table_statistics.hpp" @@ -117,17 +118,27 @@ void SingleFileTableDataWriter::FinalizeTable(const TableStatistics &global_stat if (!v1_0_0_storage) { options.emplace("v1_0_0_storage", v1_0_0_storage); } + + // If there is a context available, bind indexes before serialization. + // This is necessary so that buffered index operations are replayed before we checkpoint, otherwise + // we would lose them if there was a restart after this. + if (context && context->transaction.HasActiveTransaction()) { + info.BindIndexes(*context); + } + // FIXME: If we do not have a context, however, the unbound indexes have to be serialized to disk. + auto index_storage_infos = info.GetIndexes().SerializeToDisk(context, options); -#ifdef DUCKDB_BLOCK_VERIFICATION - for (auto &entry : index_storage_infos) { - for (auto &allocator : entry.allocator_infos) { - for (auto &block : allocator.block_pointers) { - checkpoint_manager.verify_block_usage_count[block.block_id]++; + auto debug_verify_blocks = DBConfig::GetSetting(GetDatabase()); + if (debug_verify_blocks) { + for (auto &entry : index_storage_infos) { + for (auto &allocator : entry.allocator_infos) { + for (auto &block : allocator.block_pointers) { + checkpoint_manager.verify_block_usage_count[block.block_id]++; + } } } } -#endif // write empty block pointers for forwards compatibility vector compat_block_pointers; diff --git a/src/storage/checkpoint_manager.cpp b/src/storage/checkpoint_manager.cpp index 8618b38eba96..0996b6bf8523 100644 --- a/src/storage/checkpoint_manager.cpp +++ b/src/storage/checkpoint_manager.cpp @@ -214,33 +214,35 @@ void SingleFileCheckpointWriter::CreateCheckpoint() { header.vector_size = STANDARD_VECTOR_SIZE; block_manager.WriteHeader(context, header); -#ifdef DUCKDB_BLOCK_VERIFICATION - // extend verify_block_usage_count - auto metadata_info = storage_manager.GetMetadataInfo(); - for (auto &info : metadata_info) { - verify_block_usage_count[info.block_id]++; - } - for (auto &entry_ref : catalog_entries) { - auto &entry = entry_ref.get(); - if (entry.type == CatalogType::TABLE_ENTRY) { - auto &table = entry.Cast(); - auto &storage = table.GetStorage(); - auto segment_info = storage.GetColumnSegmentInfo(); - for (auto &segment : segment_info) { - verify_block_usage_count[segment.block_id]++; - if (StringUtil::Contains(segment.segment_info, "Overflow String Block Ids: ")) { - auto overflow_blocks = StringUtil::Replace(segment.segment_info, "Overflow String Block Ids: ", ""); - auto splits = StringUtil::Split(overflow_blocks, ", "); - for (auto &split : 
splits) { - auto overflow_block_id = std::stoll(split); - verify_block_usage_count[overflow_block_id]++; + auto debug_verify_blocks = DBConfig::GetSetting(db.GetDatabase()); + if (debug_verify_blocks) { + // extend verify_block_usage_count + auto metadata_info = storage_manager.GetMetadataInfo(); + for (auto &info : metadata_info) { + verify_block_usage_count[info.block_id]++; + } + for (auto &entry_ref : catalog_entries) { + auto &entry = entry_ref.get(); + if (entry.type == CatalogType::TABLE_ENTRY) { + auto &table = entry.Cast(); + auto &storage = table.GetStorage(); + auto segment_info = storage.GetColumnSegmentInfo(); + for (auto &segment : segment_info) { + verify_block_usage_count[segment.block_id]++; + if (StringUtil::Contains(segment.segment_info, "Overflow String Block Ids: ")) { + auto overflow_blocks = + StringUtil::Replace(segment.segment_info, "Overflow String Block Ids: ", ""); + auto splits = StringUtil::Split(overflow_blocks, ", "); + for (auto &split : splits) { + auto overflow_block_id = std::stoll(split); + verify_block_usage_count[overflow_block_id]++; + } } } } } + block_manager.VerifyBlocks(verify_block_usage_count); } - block_manager.VerifyBlocks(verify_block_usage_count); -#endif if (debug_checkpoint_abort == CheckpointAbort::DEBUG_ABORT_BEFORE_TRUNCATE) { throw FatalException("Checkpoint aborted before truncate because of PRAGMA checkpoint_abort flag"); diff --git a/src/storage/compression/validity_uncompressed.cpp b/src/storage/compression/validity_uncompressed.cpp index 5a71b897469f..4fa65f32e7a5 100644 --- a/src/storage/compression/validity_uncompressed.cpp +++ b/src/storage/compression/validity_uncompressed.cpp @@ -287,6 +287,13 @@ void ValidityUncompressed::UnalignedScan(data_ptr_t input, idx_t input_size, idx // otherwise the subsequent bitwise & will modify values outside of the range of values we want to alter input_mask |= ValidityUncompressed::UPPER_MASKS[shift_amount]; + if (pos == 0) { + // We also need to set the lower bits, which are to the left of the relevant bits (x), to 1 + // These are the bits that are "behind" this scan window, and should not affect this scan + auto non_relevant_mask = ValidityUncompressed::LOWER_MASKS[result_idx]; + input_mask |= non_relevant_mask; + } + // after this, we move to the next input_entry offset = ValidityMask::BITS_PER_VALUE - input_idx; input_entry++; diff --git a/src/storage/compression/zstd.cpp b/src/storage/compression/zstd.cpp index 40885528450c..1ccb489a64f2 100644 --- a/src/storage/compression/zstd.cpp +++ b/src/storage/compression/zstd.cpp @@ -142,6 +142,11 @@ struct ZSTDAnalyzeState : public AnalyzeState { unique_ptr ZSTDStorage::StringInitAnalyze(ColumnData &col_data, PhysicalType type) { // check if the storage version we are writing to supports sztd auto &storage = col_data.GetStorageManager(); + auto &block_manager = col_data.GetBlockManager(); + if (block_manager.InMemory()) { + //! Can't use ZSTD in in-memory environment + return nullptr; + } if (storage.GetStorageVersion() < 4) { // compatibility mode with old versions - disable zstd return nullptr; @@ -249,6 +254,7 @@ class ZSTDCompressionState : public CompressionState { public: void ResetOutBuffer() { + D_ASSERT(GetCurrentOffset() <= GetWritableSpace(info)); out_buffer.dst = current_buffer_ptr; out_buffer.pos = 0; @@ -347,6 +353,7 @@ class ZSTDCompressionState : public CompressionState { void InitializeVector() { D_ASSERT(!in_vector); if (vector_count + 1 >= total_vector_count) { + //! 
Last vector vector_size = analyze_state->count - (ZSTD_VECTOR_SIZE * vector_count); } else { vector_size = ZSTD_VECTOR_SIZE; @@ -355,6 +362,7 @@ class ZSTDCompressionState : public CompressionState { current_offset = UnsafeNumericCast( AlignValue(UnsafeNumericCast(current_offset))); current_buffer_ptr = current_buffer->Ptr() + current_offset; + D_ASSERT(GetCurrentOffset() <= GetWritableSpace(info)); compressed_size = 0; uncompressed_size = 0; @@ -413,15 +421,11 @@ class ZSTDCompressionState : public CompressionState { throw InvalidInputException("ZSTD Compression failed: %s", duckdb_zstd::ZSTD_getErrorName(compress_result)); } + D_ASSERT(GetCurrentOffset() <= GetWritableSpace(info)); if (compress_result == 0) { // Finished break; } - if (out_buffer.pos != out_buffer.size) { - throw InternalException("Expected ZSTD_compressStream2 to fully utilize the current buffer, but pos is " - "%d, while size is %d", - out_buffer.pos, out_buffer.size); - } NewPage(); } } @@ -691,7 +695,7 @@ struct ZSTDScanState : public SegmentScanState { explicit ZSTDScanState(ColumnSegment &segment) : state(segment.GetSegmentState()->Cast()), block_manager(segment.GetBlockManager()), buffer_manager(BufferManager::GetBufferManager(segment.db)), - segment_block_offset(segment.GetBlockOffset()) { + segment_block_offset(segment.GetBlockOffset()), segment(segment) { decompression_context = duckdb_zstd::ZSTD_createDCtx(); segment_handle = buffer_manager.Pin(segment.block); @@ -791,14 +795,23 @@ struct ZSTDScanState : public SegmentScanState { auto vector_size = metadata.count; + auto string_lengths_size = (sizeof(string_length_t) * vector_size); scan_state.string_lengths = reinterpret_cast(scan_state.current_buffer_ptr); - scan_state.current_buffer_ptr += (sizeof(string_length_t) * vector_size); + scan_state.current_buffer_ptr += string_lengths_size; // Update the in_buffer to point to the start of the compressed data frame idx_t current_offset = UnsafeNumericCast(scan_state.current_buffer_ptr - handle_start); scan_state.in_buffer.src = scan_state.current_buffer_ptr; scan_state.in_buffer.pos = 0; - scan_state.in_buffer.size = block_manager.GetBlockSize() - sizeof(block_id_t) - current_offset; + if (scan_state.metadata.block_offset + string_lengths_size + scan_state.metadata.compressed_size > + (segment.SegmentSize() - sizeof(block_id_t))) { + //! 
We know that the compressed size is too big to fit on the current page + scan_state.in_buffer.size = + MinValue(metadata.compressed_size, block_manager.GetBlockSize() - sizeof(block_id_t) - current_offset); + } else { + scan_state.in_buffer.size = + MinValue(metadata.compressed_size, block_manager.GetBlockSize() - current_offset); + } // Initialize the context for streaming decompression duckdb_zstd::ZSTD_DCtx_reset(decompression_context, duckdb_zstd::ZSTD_reset_session_only); @@ -832,7 +845,7 @@ struct ZSTDScanState : public SegmentScanState { scan_state.in_buffer.src = ptr; scan_state.in_buffer.pos = 0; - idx_t page_size = block_manager.GetBlockSize() - sizeof(block_id_t); + idx_t page_size = segment.SegmentSize() - sizeof(block_id_t); idx_t remaining_compressed_data = scan_state.metadata.compressed_size - scan_state.compressed_scan_count; scan_state.in_buffer.size = MinValue(page_size, remaining_compressed_data); } @@ -842,6 +855,7 @@ struct ZSTDScanState : public SegmentScanState { return; } + auto &in_buffer = scan_state.in_buffer; duckdb_zstd::ZSTD_outBuffer out_buffer; out_buffer.dst = destination; @@ -849,18 +863,25 @@ struct ZSTDScanState : public SegmentScanState { out_buffer.size = uncompressed_length; while (true) { - idx_t old_pos = scan_state.in_buffer.pos; + idx_t old_pos = in_buffer.pos; size_t res = duckdb_zstd::ZSTD_decompressStream( /* zds = */ decompression_context, /* output =*/&out_buffer, - /* input =*/&scan_state.in_buffer); - scan_state.compressed_scan_count += scan_state.in_buffer.pos - old_pos; + /* input =*/&in_buffer); + scan_state.compressed_scan_count += in_buffer.pos - old_pos; if (duckdb_zstd::ZSTD_isError(res)) { throw InvalidInputException("ZSTD Decompression failed: %s", duckdb_zstd::ZSTD_getErrorName(res)); } if (out_buffer.pos == out_buffer.size) { + //! Done decompressing the relevant portion + break; + } + if (!res) { + D_ASSERT(out_buffer.pos == out_buffer.size); + D_ASSERT(in_buffer.pos == in_buffer.size); break; } + D_ASSERT(in_buffer.pos == in_buffer.size); // Did not fully decompress, it needs a new page to read from LoadNextPageForVector(scan_state); } @@ -956,6 +977,7 @@ struct ZSTDScanState : public SegmentScanState { idx_t segment_count; //! The amount of tuples consumed idx_t scanned_count = 0; + ColumnSegment &segment; //! Buffer for skipping data AllocatedData skip_buffer; diff --git a/src/storage/data_table.cpp b/src/storage/data_table.cpp index 9cacae9e4ec5..6c23d9d27bc8 100644 --- a/src/storage/data_table.cpp +++ b/src/storage/data_table.cpp @@ -1195,7 +1195,7 @@ ErrorData DataTable::AppendToIndexes(TableIndexList &indexes, optional_ptr(); - unbound_index.BufferChunk(index_chunk, row_ids, mapped_column_ids); + unbound_index.BufferChunk(index_chunk, row_ids, mapped_column_ids, BufferedIndexReplay::INSERT_ENTRY); return false; } diff --git a/src/storage/local_storage.cpp b/src/storage/local_storage.cpp index eecb9137fc88..ca224b7fcca1 100644 --- a/src/storage/local_storage.cpp +++ b/src/storage/local_storage.cpp @@ -154,12 +154,25 @@ void LocalTableStorage::FlushBlocks() { ErrorData LocalTableStorage::AppendToIndexes(DuckTransaction &transaction, RowGroupCollection &source, TableIndexList &index_list, const vector &table_types, row_t &start_row) { - // In this function, we only care about scanning the indexed columns of a table. + // mapped_column_ids contains the physical column indices of each Indexed column in the table. 
+ // This mapping is used to retrieve the physical column index for the corresponding vector of an index chunk scan.
+ // For example, if we are processing data for index_chunk.data[i], we can retrieve the physical column index
+ // by getting the value at mapped_column_ids[i].
+ // Note that the index_chunk orderings are created in accordance with this mapping, not the other
+ // way around (see the scan code below, where mapped_column_ids is passed as a parameter to the scan;
+ // the index_chunk inside that lambda is ordered according to this mapping).
+
+ // mapped_column_ids is used in two places:
+ // 1) To create the physical table chunk in this function.
+ // 2) If we are in an unbound state (i.e., WAL replay is happening right now), this mapping and the index_chunk
+ //    are buffered in unbound_index. However, there can also be buffered deletes happening, so it is important
+ //    to maintain a canonical representation of the mapping, which we obtain by simply sorting it.
auto indexed_columns = index_list.GetRequiredColumns();
vector mapped_column_ids;
for (auto &col : indexed_columns) {
mapped_column_ids.emplace_back(col);
}
+ std::sort(mapped_column_ids.begin(), mapped_column_ids.end());
// However, because the bound expressions of the indexes (and their bound
// column references) are in relation to ALL table columns, we create an
@@ -168,6 +181,7 @@ ErrorData LocalTableStorage::AppendToIndexes(DuckTransaction &transaction, RowGr
DataChunk table_chunk;
table_chunk.InitializeEmpty(table_types);
+ // index_chunk scans are created here in the mapped_column_ids ordering (see note above).
ErrorData error;
source.Scan(transaction, mapped_column_ids, [&](DataChunk &index_chunk) -> bool {
D_ASSERT(index_chunk.ColumnCount() == mapped_column_ids.size());
@@ -195,7 +209,6 @@ void LocalTableStorage::AppendToIndexes(DuckTransaction &transaction, TableAppen
bool append_to_table) {
// In this function, we might scan all table columns,
// as we might also append to the table itself (append_to_table).
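
The canonical sorted mapping described in the comments above can be reproduced in isolation. A minimal, self-contained sketch, where plain uint64_t values stand in for DuckDB's PhysicalIndex and the column ids are made up:

#include <algorithm>
#include <cstdint>
#include <vector>

int main() {
	// Indexed columns as collected from the index set - arbitrary order.
	std::vector<uint64_t> mapped_column_ids = {7, 2, 5};
	// Canonical form: sort the mapping, so that buffered inserts and buffered
	// deletes replayed from the WAL agree on one column ordering.
	std::sort(mapped_column_ids.begin(), mapped_column_ids.end()); // now {2, 5, 7}
	// Vector i of an index chunk then corresponds to physical column mapped_column_ids[i].
	return 0;
}
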
- auto &table = table_ref.get(); if (append_to_table) { table.InitializeAppend(transaction, append_state); diff --git a/src/storage/metadata/metadata_manager.cpp b/src/storage/metadata/metadata_manager.cpp index 3417324228a3..0c67bb9cae56 100644 --- a/src/storage/metadata/metadata_manager.cpp +++ b/src/storage/metadata/metadata_manager.cpp @@ -104,7 +104,11 @@ MetadataHandle MetadataManager::Pin(QueryContext context, const MetadataPointer shared_ptr block_handle; { lock_guard guard(block_lock); - auto &block = blocks[UnsafeNumericCast(pointer.block_index)]; + auto entry = blocks.find(UnsafeNumericCast(pointer.block_index)); + if (entry == blocks.end()) { + throw InternalException("Trying to pin block %llu - but the block did not exist", pointer.block_index); + } + auto &block = entry->second; #ifdef DEBUG for (auto &free_block : block.free_blocks) { if (free_block == pointer.index) { @@ -369,6 +373,7 @@ void MetadataBlock::FreeBlocksFromInteger(idx_t free_list) { } void MetadataManager::MarkBlocksAsModified() { + unique_lock guard(block_lock); // for any blocks that were modified in the last checkpoint - set them to free blocks currently for (auto &kv : modified_blocks) { auto block_id = kv.first; @@ -382,7 +387,10 @@ void MetadataManager::MarkBlocksAsModified() { if (new_free_blocks == NumericLimits::Maximum()) { // if new free_blocks is all blocks - mark entire block as modified blocks.erase(entry); + + guard.unlock(); block_manager.MarkBlockAsModified(block_id); + guard.lock(); } else { // set the new set of free blocks block.FreeBlocksFromInteger(new_free_blocks); @@ -417,6 +425,18 @@ void MetadataManager::ClearModifiedBlocks(const vector &pointe } } +bool MetadataManager::BlockHasBeenCleared(const MetaBlockPointer &pointer) { + unique_lock guard(block_lock); + auto block_id = pointer.GetBlockId(); + auto block_index = pointer.GetBlockIndex(); + auto entry = modified_blocks.find(block_id); + if (entry == modified_blocks.end()) { + throw InternalException("BlockHasBeenCleared - Block id %llu not found in modified_blocks", block_id); + } + auto &modified_list = entry->second; + return (modified_list & (1ULL << block_index)) == 0ULL; +} + vector MetadataManager::GetMetadataInfo() const { vector result; unique_lock guard(block_lock); diff --git a/src/storage/metadata/metadata_reader.cpp b/src/storage/metadata/metadata_reader.cpp index 06c2b1c1b43f..342833448dea 100644 --- a/src/storage/metadata/metadata_reader.cpp +++ b/src/storage/metadata/metadata_reader.cpp @@ -4,11 +4,8 @@ namespace duckdb { MetadataReader::MetadataReader(MetadataManager &manager, MetaBlockPointer pointer, optional_ptr> read_pointers_p, BlockReaderType type) - : manager(manager), type(type), next_pointer(FromDiskPointer(pointer)), has_next_block(true), - read_pointers(read_pointers_p), index(0), offset(0), next_offset(pointer.offset), capacity(0) { - if (read_pointers) { - read_pointers->push_back(pointer); - } + : manager(manager), type(type), next_pointer(pointer), has_next_block(true), read_pointers(read_pointers_p), + index(0), offset(0), next_offset(pointer.offset), capacity(0) { } MetadataReader::MetadataReader(MetadataManager &manager, BlockPointer pointer) @@ -59,11 +56,10 @@ MetaBlockPointer MetadataReader::GetMetaBlockPointer() { vector MetadataReader::GetRemainingBlocks(MetaBlockPointer last_block) { vector result; while (has_next_block) { - auto next_block_pointer = manager.GetDiskPointer(next_pointer, UnsafeNumericCast(next_offset)); - if (last_block.IsValid() && next_block_pointer.block_pointer == 
last_block.block_pointer) { + if (last_block.IsValid() && next_pointer.block_pointer == last_block.block_pointer) { break; } - result.push_back(next_block_pointer); + result.push_back(next_pointer); ReadNextBlock(); } return result; @@ -77,18 +73,18 @@ void MetadataReader::ReadNextBlock(QueryContext context) { if (!has_next_block) { throw IOException("No more data remaining in MetadataReader"); } - block = manager.Pin(context, next_pointer); - index = next_pointer.index; + if (read_pointers) { + read_pointers->push_back(next_pointer); + } + auto next_disk_pointer = FromDiskPointer(next_pointer); + block = manager.Pin(context, next_disk_pointer); + index = next_disk_pointer.index; idx_t next_block = Load(BasePtr()); if (next_block == idx_t(-1)) { has_next_block = false; } else { - next_pointer = FromDiskPointer(MetaBlockPointer(next_block, 0)); - MetaBlockPointer next_block_pointer(next_block, 0); - if (read_pointers) { - read_pointers->push_back(next_block_pointer); - } + next_pointer = MetaBlockPointer(next_block, 0); } if (next_offset < sizeof(block_id_t)) { next_offset = sizeof(block_id_t); diff --git a/src/storage/metadata/metadata_writer.cpp b/src/storage/metadata/metadata_writer.cpp index 69d8ea87e87c..8e7138b7d1a0 100644 --- a/src/storage/metadata/metadata_writer.cpp +++ b/src/storage/metadata/metadata_writer.cpp @@ -32,7 +32,7 @@ MetaBlockPointer MetadataWriter::GetMetaBlockPointer() { void MetadataWriter::SetWrittenPointers(optional_ptr> written_pointers_p) { written_pointers = written_pointers_p; - if (written_pointers && capacity > 0) { + if (written_pointers && capacity > 0 && offset < capacity) { written_pointers->push_back(manager.GetDiskPointer(current_pointer)); } } diff --git a/src/storage/single_file_block_manager.cpp b/src/storage/single_file_block_manager.cpp index 42ebe449164d..dc9c598cc469 100644 --- a/src/storage/single_file_block_manager.cpp +++ b/src/storage/single_file_block_manager.cpp @@ -66,8 +66,8 @@ void DeserializeEncryptionData(ReadStream &stream, data_t *dest, idx_t size) { void GenerateDBIdentifier(uint8_t *db_identifier) { memset(db_identifier, 0, MainHeader::DB_IDENTIFIER_LEN); - duckdb_mbedtls::MbedTlsWrapper::AESStateMBEDTLS::GenerateRandomDataStatic(db_identifier, - MainHeader::DB_IDENTIFIER_LEN); + RandomEngine engine; + engine.RandomData(db_identifier, MainHeader::DB_IDENTIFIER_LEN); } void EncryptCanary(MainHeader &main_header, const shared_ptr &encryption_state, @@ -362,6 +362,15 @@ void SingleFileBlockManager::CheckAndAddEncryptionKey(MainHeader &main_header) { void SingleFileBlockManager::CreateNewDatabase(QueryContext context) { auto flags = GetFileFlags(true); + auto encryption_enabled = options.encryption_options.encryption_enabled; + if (encryption_enabled) { + if (!db.GetDatabase().GetEncryptionUtil()->SupportsEncryption() && !options.read_only) { + throw InvalidConfigurationException( + "The database was opened with encryption enabled, but DuckDB currently has a read-only crypto module " + "loaded. Please re-open using READONLY, or ensure httpfs is loaded using `LOAD httpfs`."); + } + } + // open the RDBMS handle auto &fs = FileSystem::Get(db); handle = fs.OpenFile(path, flags); @@ -376,7 +385,6 @@ void SingleFileBlockManager::CreateNewDatabase(QueryContext context) { // Derive the encryption key and add it to the cache. // Not used for plain databases. 
data_t derived_key[MainHeader::DEFAULT_ENCRYPTION_KEY_LENGTH];
- auto encryption_enabled = options.encryption_options.encryption_enabled;
// We need the unique database identifier, if the storage version is new enough.
// If encryption is enabled, we also use it as the salt.
@@ -487,6 +495,15 @@ void SingleFileBlockManager::LoadExistingDatabase(QueryContext context) {
if (main_header.IsEncrypted()) {
if (options.encryption_options.encryption_enabled) {
//! Encryption is set
+
+ //! Check if our encryption module can write, if not, we should throw here
+ if (!db.GetDatabase().GetEncryptionUtil()->SupportsEncryption() && !options.read_only) {
+ throw InvalidConfigurationException(
+ "The database is encrypted, but DuckDB currently has a read-only crypto module loaded. Either "
+ "re-open the database using `ATTACH '..' (READONLY)`, or ensure httpfs is loaded using `LOAD "
+ "httpfs`.");
+ }
+
//! Check if the given key upon attach is correct
// Derive the encryption key and add it to cache
CheckAndAddEncryptionKey(main_header);
@@ -506,6 +523,19 @@
path, EncryptionTypes::CipherToString(config_cipher), EncryptionTypes::CipherToString(stored_cipher));
}
+
+ // This prevents the cipher from being downgraded by an attacker. FIXME: we likely want to have a proper validation
+ // of the cipher used instead of this trick to avoid downgrades
+ if (stored_cipher != EncryptionTypes::GCM) {
+ if (config_cipher == EncryptionTypes::INVALID) {
+ throw CatalogException(
+ "Cannot open encrypted database \"%s\" without explicitly specifying the "
+ "encryption cipher for security reasons. Please make sure you understand the security implications "
+ "and re-attach the database specifying the desired cipher.",
+ path);
+ }
+ }
+
// this is ugly, but the storage manager does not know the cipher type beforehand
db.GetStorageManager().SetCipher(stored_cipher);
}
diff --git a/src/storage/storage_info.cpp b/src/storage/storage_info.cpp
index b4a7422e95e0..5b5f7572c821 100644
--- a/src/storage/storage_info.cpp
+++ b/src/storage/storage_info.cpp
@@ -83,6 +83,9 @@ static const StorageVersionInfo storage_version_info[] = {
{"v1.3.1", 66},
{"v1.3.2", 66},
{"v1.4.0", 67},
+ {"v1.4.1", 67},
+ {"v1.4.2", 67},
+ {"v1.4.3", 67},
{nullptr, 0}
};
// END OF STORAGE VERSION INFO
@@ -108,6 +111,9 @@ static const SerializationVersionInfo serialization_version_info[] = {
{"v1.3.1", 5},
{"v1.3.2", 5},
{"v1.4.0", 6},
+ {"v1.4.1", 6},
+ {"v1.4.2", 6},
+ {"v1.4.3", 6},
{"latest", 6},
{nullptr, 0}
};
diff --git a/src/storage/storage_manager.cpp b/src/storage/storage_manager.cpp
index ef90d55354eb..f0ce909c553c 100644
--- a/src/storage/storage_manager.cpp
+++ b/src/storage/storage_manager.cpp
@@ -142,6 +142,14 @@ bool StorageManager::InMemory() const {
return path == IN_MEMORY_PATH;
}
+inline void ClearUserKey(shared_ptr const &encryption_key) {
+ if (encryption_key && !encryption_key->empty()) {
+ duckdb_mbedtls::MbedTlsWrapper::AESStateMBEDTLS::SecureClearData(data_ptr_cast(&(*encryption_key)[0]),
+ encryption_key->size());
+ encryption_key->clear();
+ }
+}
+
void StorageManager::Initialize(QueryContext context) {
bool in_memory = InMemory();
if (in_memory && read_only) {
diff --git a/src/storage/table/array_column_data.cpp b/src/storage/table/array_column_data.cpp
index 08fc04bd30c7..b28152e42f07 100644
--- a/src/storage/table/array_column_data.cpp
+++ b/src/storage/table/array_column_data.cpp
@@ -120,7 +120,7 @@ void ArrayColumnData::Select(TransactionData
transaction, idx_t vector_index, Co // not consecutive - break break; } - end_idx = next_idx; + end_idx = next_idx + 1; } consecutive_ranges++; } diff --git a/src/storage/table/chunk_info.cpp b/src/storage/table/chunk_info.cpp index 3b7b11d7be84..de6db28a019f 100644 --- a/src/storage/table/chunk_info.cpp +++ b/src/storage/table/chunk_info.cpp @@ -32,7 +32,7 @@ static bool UseVersion(TransactionData transaction, transaction_t id) { return TransactionVersionOperator::UseInsertedVersion(transaction.start_time, transaction.transaction_id, id); } -bool ChunkInfo::Cleanup(transaction_t lowest_transaction, unique_ptr &result) const { +bool ChunkInfo::Cleanup(transaction_t lowest_transaction) const { return false; } @@ -99,7 +99,7 @@ idx_t ChunkConstantInfo::GetCommittedDeletedCount(idx_t max_count) { return delete_id < TRANSACTION_ID_START ? max_count : 0; } -bool ChunkConstantInfo::Cleanup(transaction_t lowest_transaction, unique_ptr &result) const { +bool ChunkConstantInfo::Cleanup(transaction_t lowest_transaction) const { if (delete_id != NOT_DELETED_ID) { // the chunk info is labeled as deleted - we need to keep it around return false; @@ -253,7 +253,7 @@ void ChunkVectorInfo::CommitAppend(transaction_t commit_id, idx_t start, idx_t e } } -bool ChunkVectorInfo::Cleanup(transaction_t lowest_transaction, unique_ptr &result) const { +bool ChunkVectorInfo::Cleanup(transaction_t lowest_transaction) const { if (any_deleted) { // if any rows are deleted we can't clean-up return false; diff --git a/src/storage/table/column_data.cpp b/src/storage/table/column_data.cpp index a842f082ea0e..cf664549b1b7 100644 --- a/src/storage/table/column_data.cpp +++ b/src/storage/table/column_data.cpp @@ -495,10 +495,10 @@ void ColumnData::InitializeAppend(ColumnAppendState &state) { void ColumnData::AppendData(BaseStatistics &append_stats, ColumnAppendState &state, UnifiedVectorFormat &vdata, idx_t append_count) { idx_t offset = 0; - this->count += append_count; while (true) { // append the data from the vector idx_t copied_elements = state.current->Append(state, vdata, offset, append_count); + this->count += copied_elements; append_stats.Merge(state.current->stats.statistics); if (copied_elements == append_count) { // finished copying everything @@ -536,6 +536,11 @@ void ColumnData::RevertAppend(row_t start_row_p) { if (segment->start == start_row) { // we are truncating exactly this segment - erase it entirely data.EraseSegments(l, segment_index); + if (segment_index > 0) { + // if we have a previous segment, we need to update the next pointer + auto previous_segment = data.GetSegmentByIndex(l, UnsafeNumericCast(segment_index - 1)); + previous_segment->next = nullptr; + } } else { // we need to truncate within the segment // remove any segments AFTER this segment: they should be deleted entirely @@ -583,7 +588,6 @@ void ColumnData::Update(TransactionData transaction, DataTable &data_table, idx_ Vector base_vector(type); ColumnScanState state; FetchUpdateData(state, row_ids, base_vector); - UpdateInternal(transaction, data_table, column_index, update_vector, row_ids, update_count, base_vector); } @@ -868,7 +872,8 @@ bool PersistentCollectionData::HasUpdates() const { } PersistentColumnData ColumnData::Serialize() { - PersistentColumnData result(type.InternalType(), GetDataPointers()); + auto result = count ? 
PersistentColumnData(type.InternalType(), GetDataPointers()) + : PersistentColumnData(type.InternalType()); result.has_updates = HasUpdates(); return result; } diff --git a/src/storage/table/column_data_checkpointer.cpp b/src/storage/table/column_data_checkpointer.cpp index 68c35f8427cb..198a7e249d60 100644 --- a/src/storage/table/column_data_checkpointer.cpp +++ b/src/storage/table/column_data_checkpointer.cpp @@ -109,9 +109,10 @@ CompressionType ForceCompression(StorageManager &storage_manager, CompressionType compression_type) { // One of the force_compression flags has been set // check if this compression method is available - // if (CompressionTypeIsDeprecated(compression_type, storage_manager)) { + // auto compression_availability_result = CompressionTypeIsAvailable(compression_type, storage_manager); + // if (!compression_availability_result.IsAvailable()) { // throw InvalidInputException("The forced compression method (%s) is not available in the current storage - // version", CompressionTypeToString(compression_type)); + // version", CompressionTypeToString(compression_type)); //} bool found = false; @@ -362,7 +363,7 @@ void ColumnDataCheckpointer::WriteToDisk() { } bool ColumnDataCheckpointer::HasChanges(ColumnData &col_data) { - return col_data.HasChanges(); + return col_data.HasAnyChanges(); } void ColumnDataCheckpointer::WritePersistentSegments(ColumnCheckpointState &state) { diff --git a/src/storage/table/column_segment.cpp b/src/storage/table/column_segment.cpp index 347463fbe5e5..f7fcc25b3bf7 100644 --- a/src/storage/table/column_segment.cpp +++ b/src/storage/table/column_segment.cpp @@ -242,7 +242,9 @@ void ColumnSegment::ConvertToPersistent(QueryContext context, optional_ptr(start, count), collection(collection_p), version_info(nullptr), allocation_size(0), - row_id_is_loaded(false), has_changes(false) { + : SegmentBase(start, count), collection(collection_p), version_info(nullptr), deletes_is_loaded(false), + allocation_size(0), row_id_is_loaded(false), has_changes(false) { Verify(); } RowGroup::RowGroup(RowGroupCollection &collection_p, RowGroupPointer pointer) : SegmentBase(pointer.row_start, pointer.tuple_count), collection(collection_p), version_info(nullptr), - allocation_size(0), row_id_is_loaded(false), has_changes(false) { + deletes_is_loaded(false), allocation_size(0), row_id_is_loaded(false), has_changes(false) { // deserialize the columns if (pointer.data_pointers.size() != collection_p.GetTypes().size()) { throw IOException("Row group column count is unaligned with table column count. 
Corrupt file?"); @@ -45,7 +45,6 @@ RowGroup::RowGroup(RowGroupCollection &collection_p, RowGroupPointer pointer) this->is_loaded[c] = false; } this->deletes_pointers = std::move(pointer.deletes_pointers); - this->deletes_is_loaded = false; this->has_metadata_blocks = pointer.has_metadata_blocks; this->extra_metadata_blocks = std::move(pointer.extra_metadata_blocks); @@ -54,7 +53,7 @@ RowGroup::RowGroup(RowGroupCollection &collection_p, RowGroupPointer pointer) RowGroup::RowGroup(RowGroupCollection &collection_p, PersistentRowGroupData &data) : SegmentBase(data.start, data.count), collection(collection_p), version_info(nullptr), - allocation_size(0), row_id_is_loaded(false), has_changes(false) { + deletes_is_loaded(false), allocation_size(0), row_id_is_loaded(false), has_changes(false) { auto &block_manager = GetBlockManager(); auto &info = GetTableInfo(); auto &types = collection.get().GetTypes(); @@ -974,21 +973,15 @@ bool RowGroup::HasUnloadedDeletes() const { return !deletes_is_loaded; } -vector RowGroup::GetColumnPointers() { - if (has_metadata_blocks) { - // we have the column metadata from the file itself - no need to deserialize metadata to fetch it - // read if from "column_pointers" and "extra_metadata_blocks" - auto result = column_pointers; - for (auto &block_pointer : extra_metadata_blocks) { - result.emplace_back(block_pointer, 0); - } - return result; +vector RowGroup::GetOrComputeExtraMetadataBlocks(bool force_compute) { + if (has_metadata_blocks && !force_compute) { + return extra_metadata_blocks; } - vector result; if (column_pointers.empty()) { // no pointers - return result; + return {}; } + vector read_pointers; // column_pointers stores the beginning of each column // if columns are big - they may span multiple metadata blocks // we need to figure out all blocks that this row group points to @@ -999,13 +992,25 @@ vector RowGroup::GetColumnPointers() { // for all but the last column pointer - we can just follow the linked list until we reach the last column MetadataReader reader(metadata_manager, column_pointers[0]); auto last_pointer = column_pointers[last_idx]; - result = reader.GetRemainingBlocks(last_pointer); + read_pointers = reader.GetRemainingBlocks(last_pointer); } // for the last column we need to deserialize the column - because we don't know where it stops auto &types = GetCollection().GetTypes(); - MetadataReader reader(metadata_manager, column_pointers[last_idx], &result); + MetadataReader reader(metadata_manager, column_pointers[last_idx], &read_pointers); ColumnData::Deserialize(GetBlockManager(), GetTableInfo(), last_idx, start, reader, types[last_idx]); - return result; + + unordered_set result_as_set; + for (auto &ptr : read_pointers) { + result_as_set.emplace(ptr.block_pointer); + } + for (auto &ptr : column_pointers) { + result_as_set.erase(ptr.block_pointer); + } + return {result_as_set.begin(), result_as_set.end()}; +} + +const vector &RowGroup::GetColumnStartPointers() const { + return column_pointers; } RowGroupWriteData RowGroup::WriteToDisk(RowGroupWriter &writer) { @@ -1014,7 +1019,8 @@ RowGroupWriteData RowGroup::WriteToDisk(RowGroupWriter &writer) { // we have existing metadata and the row group has not been changed // re-use previous metadata RowGroupWriteData result; - result.existing_pointers = GetColumnPointers(); + result.reuse_existing_metadata_blocks = true; + result.existing_extra_metadata_blocks = GetOrComputeExtraMetadataBlocks(); return result; } auto &compression_types = writer.GetCompressionTypes(); @@ -1042,14 +1048,22 @@ 
RowGroupPointer RowGroup::Checkpoint(RowGroupWriteData write_data, RowGroupWrite // construct the row group pointer and write the column meta data to disk row_group_pointer.row_start = start; row_group_pointer.tuple_count = count; - if (!write_data.existing_pointers.empty()) { + if (write_data.reuse_existing_metadata_blocks) { // we are re-using the previous metadata row_group_pointer.data_pointers = column_pointers; - row_group_pointer.has_metadata_blocks = has_metadata_blocks; - row_group_pointer.extra_metadata_blocks = extra_metadata_blocks; - row_group_pointer.deletes_pointers = deletes_pointers; - metadata_manager->ClearModifiedBlocks(write_data.existing_pointers); - metadata_manager->ClearModifiedBlocks(deletes_pointers); + row_group_pointer.has_metadata_blocks = true; + row_group_pointer.extra_metadata_blocks = write_data.existing_extra_metadata_blocks; + row_group_pointer.deletes_pointers = CheckpointDeletes(*metadata_manager); + vector extra_metadata_block_pointers; + extra_metadata_block_pointers.reserve(write_data.existing_extra_metadata_blocks.size()); + for (auto &block_pointer : write_data.existing_extra_metadata_blocks) { + extra_metadata_block_pointers.emplace_back(block_pointer, 0); + } + metadata_manager->ClearModifiedBlocks(column_pointers); + metadata_manager->ClearModifiedBlocks(extra_metadata_block_pointers); + // remember metadata_blocks to avoid loading them on future checkpoints + has_metadata_blocks = true; + extra_metadata_blocks = row_group_pointer.extra_metadata_blocks; return row_group_pointer; } D_ASSERT(write_data.states.size() == columns.size()); @@ -1092,15 +1106,15 @@ RowGroupPointer RowGroup::Checkpoint(RowGroupWriteData write_data, RowGroupWrite } // this metadata block is not stored - add it to the extra metadata blocks row_group_pointer.extra_metadata_blocks.push_back(column_pointer.block_pointer); + metadata_blocks.insert(column_pointer.block_pointer); + } + if (metadata_manager) { + row_group_pointer.deletes_pointers = CheckpointDeletes(*metadata_manager); } // set up the pointers correctly within this row group for future operations column_pointers = row_group_pointer.data_pointers; has_metadata_blocks = true; extra_metadata_blocks = row_group_pointer.extra_metadata_blocks; - - if (metadata_manager) { - row_group_pointer.deletes_pointers = CheckpointDeletes(*metadata_manager); - } Verify(); return row_group_pointer; } @@ -1109,7 +1123,8 @@ bool RowGroup::HasChanges() const { if (has_changes) { return true; } - if (version_info.load()) { + auto version_info_loaded = version_info.load(); + if (version_info_loaded && version_info_loaded->HasUnserializedChanges()) { // we have deletes return true; } diff --git a/src/storage/table/row_group_collection.cpp b/src/storage/table/row_group_collection.cpp index d906afeb78f4..fce6605fc8c6 100644 --- a/src/storage/table/row_group_collection.cpp +++ b/src/storage/table/row_group_collection.cpp @@ -14,6 +14,7 @@ #include "duckdb/storage/table/column_checkpoint_state.hpp" #include "duckdb/storage/table/persistent_table_data.hpp" #include "duckdb/storage/table/row_group_segment_tree.hpp" +#include "duckdb/storage/table/row_version_manager.hpp" #include "duckdb/storage/table/scan_state.hpp" #include "duckdb/storage/table_storage_info.hpp" #include "duckdb/main/settings.hpp" @@ -505,6 +506,11 @@ void RowGroupCollection::RevertAppendInternal(idx_t start_row) { if (segment.start == start_row) { // we are truncating exactly this row group - erase it entirely row_groups->EraseSegments(l, segment_index); + if 
(segment_index > 0) { + // if we have a previous segment, we need to update the next pointer + auto previous_segment = row_groups->GetSegmentByIndex(l, UnsafeNumericCast(segment_index - 1)); + previous_segment->next = nullptr; + } } else { // we need to truncate within a row group // remove any segments AFTER this segment: they should be deleted entirely @@ -665,14 +671,16 @@ void RowGroupCollection::Update(TransactionData transaction, DataTable &data_tab void RowGroupCollection::RemoveFromIndexes(TableIndexList &indexes, Vector &row_identifiers, idx_t count) { auto row_ids = FlatVector::GetData(row_identifiers); - // Collect all indexed columns. + // Collect all Indexed columns on the table. unordered_set indexed_column_id_set; indexes.Scan([&](Index &index) { - D_ASSERT(index.IsBound()); auto &set = index.GetColumnIdSet(); indexed_column_id_set.insert(set.begin(), set.end()); return false; }); + + // If we are in WAL replay, delete data will be buffered, and so we sort the column_ids + // since the sorted form will be the mapping used to get back physical IDs from the buffered index chunk. vector column_ids; for (auto &col : indexed_column_id_set) { column_ids.emplace_back(col); @@ -686,10 +694,10 @@ void RowGroupCollection::RemoveFromIndexes(TableIndexList &indexes, Vector &row_ // Initialize the fetch state. Only use indexed columns. TableScanState state; - state.Initialize(std::move(column_ids)); + auto column_ids_copy = column_ids; + state.Initialize(std::move(column_ids_copy)); state.table_state.max_row = row_start + total_rows; - // Used for scanning data. Only contains the indexed columns. DataChunk fetch_chunk; fetch_chunk.Initialize(GetAllocator(), column_types); @@ -749,17 +757,24 @@ void RowGroupCollection::RemoveFromIndexes(TableIndexList &indexes, Vector &row_ result_chunk.SetCardinality(fetch_chunk); // Slice the vector with all rows that are present in this vector. - // Then, erase all values from the indexes. + // If the index is bound, delete the data. If unbound, buffer into unbound_index. result_chunk.Slice(sel, sel_count); indexes.Scan([&](Index &index) { if (index.IsBound()) { index.Cast().Delete(result_chunk, row_identifiers); return false; } - throw MissingExtensionException( - "Cannot delete from index '%s', unknown index type '%s'. You need to load the " - "extension that provides this index type before table '%s' can be modified.", - index.GetIndexName(), index.GetIndexType(), info->GetTableName()); + // Buffering takes only the indexed columns in ordering of the column_ids mapping. 
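+ // Illustrative sketch (hypothetical column layout, not part of this patch): for a table
+ // with columns (c0, c1, c2, c3) and indexes covering {c3, c1}, the sorted mapping is
+ // column_ids = [1, 3], so the buffered chunk stores data[0] = c1 and data[1] = c3,
+ // and replay resolves the physical column ids through the same sorted mapping.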
+ DataChunk index_column_chunk; + index_column_chunk.InitializeEmpty(column_types); + for (idx_t i = 0; i < column_types.size(); i++) { + auto col_id = column_ids[i].GetPrimaryIndex(); + index_column_chunk.data[i].Reference(result_chunk.data[col_id]); + } + index_column_chunk.SetCardinality(result_chunk.size()); + auto &unbound_index = index.Cast<UnboundIndex>(); + unbound_index.BufferChunk(index_column_chunk, row_identifiers, column_ids, BufferedIndexReplay::DEL_ENTRY); + return false; }); } }
@@ -1136,7 +1151,7 @@ void RowGroupCollection::Checkpoint(TableDataWriter &writer, TableStatistics &gl break; } auto &write_state = checkpoint_state.write_data[segment_idx]; - if (write_state.existing_pointers.empty()) { + if (!write_state.reuse_existing_metadata_blocks) { table_has_changes = true; break; } @@ -1150,8 +1165,15 @@ void RowGroupCollection::Checkpoint(TableDataWriter &writer, TableStatistics &gl auto &entry = segments[segment_idx]; auto &row_group = *entry.node; auto &write_state = checkpoint_state.write_data[segment_idx]; - metadata_manager.ClearModifiedBlocks(write_state.existing_pointers); - metadata_manager.ClearModifiedBlocks(row_group.GetDeletesPointers()); + metadata_manager.ClearModifiedBlocks(row_group.GetColumnStartPointers()); + D_ASSERT(write_state.reuse_existing_metadata_blocks); + vector<MetaBlockPointer> extra_metadata_block_pointers; + extra_metadata_block_pointers.reserve(write_state.existing_extra_metadata_blocks.size()); + for (auto &block_pointer : write_state.existing_extra_metadata_blocks) { + extra_metadata_block_pointers.emplace_back(block_pointer, 0); + } + metadata_manager.ClearModifiedBlocks(extra_metadata_block_pointers); + row_group.CheckpointDeletes(metadata_manager); row_groups->AppendSegment(l, std::move(entry.node)); } writer.WriteUnchangedTable(metadata_pointer, total_rows.load()); @@ -1178,11 +1200,132 @@ void RowGroupCollection::Checkpoint(TableDataWriter &writer, TableStatistics &gl if (!row_group_writer) { throw InternalException("Missing row group writer for index %llu", segment_idx); } + bool metadata_reuse = checkpoint_state.write_data[segment_idx].reuse_existing_metadata_blocks; auto pointer = row_group.Checkpoint(std::move(checkpoint_state.write_data[segment_idx]), *row_group_writer, global_stats); + + auto debug_verify_blocks = DBConfig::GetSetting(GetAttached().GetDatabase()) && + dynamic_cast(&checkpoint_state.writer) != nullptr; + RowGroupPointer pointer_copy; + if (debug_verify_blocks) { + pointer_copy = pointer; + } writer.AddRowGroup(std::move(pointer), std::move(row_group_writer)); row_groups->AppendSegment(l, std::move(entry.node)); new_total_rows += row_group.count; + + if (debug_verify_blocks) { + if (!pointer_copy.has_metadata_blocks) { + throw InternalException("Checkpointing should always remember metadata blocks"); + } + if (metadata_reuse && pointer_copy.data_pointers != row_group.GetColumnStartPointers()) { + throw InternalException("Column start pointers changed during metadata reuse"); + }
+ + // Capture blocks that have been written + vector<MetaBlockPointer> all_written_blocks = pointer_copy.data_pointers; + vector<MetaBlockPointer> all_metadata_blocks; + for (auto &block : pointer_copy.extra_metadata_blocks) { + all_written_blocks.emplace_back(block, 0); + all_metadata_blocks.emplace_back(block, 0); + }
+ + // Verify that we can load the metadata correctly again + vector<MetaBlockPointer> all_quick_read_blocks; + for (auto &ptr : row_group.GetColumnStartPointers()) { + all_quick_read_blocks.emplace_back(ptr); + if (metadata_reuse && !block_manager.GetMetadataManager().BlockHasBeenCleared(ptr)) { + throw InternalException("Found column start block that was not cleared"); + } + } + auto extra_metadata_blocks = row_group.GetOrComputeExtraMetadataBlocks(/* force_compute: */ true); + for (auto &ptr : extra_metadata_blocks) { + auto block_pointer = MetaBlockPointer(ptr, 0); + all_quick_read_blocks.emplace_back(block_pointer); + if (metadata_reuse && !block_manager.GetMetadataManager().BlockHasBeenCleared(block_pointer)) { + throw InternalException("Found extra metadata block that was not cleared"); + } + }
+ + // Deserialize all columns to check if the quick read via GetOrComputeExtraMetadataBlocks was correct + vector<MetaBlockPointer> all_full_read_blocks; + auto column_start_pointers = row_group.GetColumnStartPointers(); + auto &types = row_group.GetCollection().GetTypes(); + auto &metadata_manager = row_group.GetCollection().GetMetadataManager(); + for (idx_t i = 0; i < column_start_pointers.size(); i++) { + MetadataReader reader(metadata_manager, column_start_pointers[i], &all_full_read_blocks); + ColumnData::Deserialize(GetBlockManager(), GetTableInfo(), i, row_group.start, reader, types[i]); + }
+ + // Derive sets of blocks to compare + set<idx_t> all_written_block_ids; + for (auto &ptr : all_written_blocks) { + all_written_block_ids.insert(ptr.block_pointer); + } + set<idx_t> all_quick_read_block_ids; + for (auto &ptr : all_quick_read_blocks) { + all_quick_read_block_ids.insert(ptr.block_pointer); + } + set<idx_t> all_full_read_block_ids; + for (auto &ptr : all_full_read_blocks) { + all_full_read_block_ids.insert(ptr.block_pointer); + } + if (all_written_block_ids != all_quick_read_block_ids || + all_quick_read_block_ids != all_full_read_block_ids) { + std::stringstream oss; + oss << "Written: "; + for (auto &block : all_written_blocks) { + oss << block << ", "; + } + oss << "\n"; + oss << "Quick read: "; + for (auto &block : all_quick_read_blocks) { + oss << block << ", "; + } + oss << "\n"; + oss << "Full read: "; + for (auto &block : all_full_read_blocks) { + oss << block << ", "; + } + oss << "\n"; + + throw InternalException("Reloading blocks just written does not yield same blocks: " + oss.str()); + }
+ + vector<MetaBlockPointer> read_deletes_pointers; + if (!pointer_copy.deletes_pointers.empty()) { + auto root_delete = pointer_copy.deletes_pointers[0]; + auto vm = RowVersionManager::Deserialize(root_delete, GetBlockManager().GetMetadataManager(), + row_group.start); + read_deletes_pointers = vm->GetStoragePointers(); + }
+ + set<idx_t> all_written_deletes_block_ids; + for (auto &ptr : pointer_copy.deletes_pointers) { + all_written_deletes_block_ids.insert(ptr.block_pointer); + } + set<idx_t> all_read_deletes_block_ids; + for (auto &ptr : read_deletes_pointers) { + all_read_deletes_block_ids.insert(ptr.block_pointer); + }
+ + if (all_written_deletes_block_ids != all_read_deletes_block_ids) { + std::stringstream oss; + oss << "Written: "; + for (auto &block : all_written_deletes_block_ids) { + oss << block << ", "; + } + oss << "\n"; + oss << "Read: "; + for (auto &block : all_read_deletes_block_ids) { + oss << block << ", "; + } + oss << "\n"; + + throw InternalException("Reloading deletes blocks just written does not yield same blocks: " + + oss.str()); + } + } } total_rows = new_total_rows; l.Release();
diff --git a/src/storage/table/row_version_manager.cpp b/src/storage/table/row_version_manager.cpp index df4e463da2d1..7df22474f6e1 100644 --- a/src/storage/table/row_version_manager.cpp +++ b/src/storage/table/row_version_manager.cpp @@ -7,7 +7,7 @@ namespace duckdb { -RowVersionManager::RowVersionManager(idx_t start) noexcept : start(start), has_changes(false) { +RowVersionManager::RowVersionManager(idx_t start) noexcept : start(start), has_unserialized_changes(false) { } void RowVersionManager::SetStart(idx_t new_start) { @@ -88,7 +88,7 @@ void RowVersionManager::FillVectorInfo(idx_t vector_idx) { void RowVersionManager::AppendVersionInfo(TransactionData transaction, idx_t count, idx_t row_group_start, idx_t row_group_end) { lock_guard<mutex> lock(version_lock); - has_changes = true; + has_unserialized_changes = true; idx_t start_vector_idx = row_group_start / STANDARD_VECTOR_SIZE; idx_t end_vector_idx = (row_group_end - 1) / STANDARD_VECTOR_SIZE; @@ -141,6 +141,7 @@ void RowVersionManager::CommitAppend(transaction_t commit_id, idx_t row_group_st idx_t vend = vector_idx == end_vector_idx ? row_group_end - end_vector_idx * STANDARD_VECTOR_SIZE : STANDARD_VECTOR_SIZE; auto &info = *vector_info[vector_idx]; + D_ASSERT(has_unserialized_changes); info.CommitAppend(commit_id, vstart, vend); } } @@ -167,10 +168,12 @@ void RowVersionManager::CleanupAppend(transaction_t lowest_active_transaction, i } auto &info = *vector_info[vector_idx]; // if we wrote the entire chunk info try to compress it - unique_ptr<ChunkInfo> new_info; - auto cleanup = info.Cleanup(lowest_active_transaction, new_info); + auto cleanup = info.Cleanup(lowest_active_transaction); if (cleanup) { - vector_info[vector_idx] = std::move(new_info); + if (info.HasDeletes()) { + has_unserialized_changes = true; + } + vector_info[vector_idx].reset(); } } } @@ -179,6 +182,7 @@ void RowVersionManager::RevertAppend(idx_t start_row) { lock_guard<mutex> lock(version_lock); idx_t start_vector_idx = (start_row + (STANDARD_VECTOR_SIZE - 1)) / STANDARD_VECTOR_SIZE; for (idx_t vector_idx = start_vector_idx; vector_idx < vector_info.size(); vector_idx++) { + D_ASSERT(has_unserialized_changes); vector_info[vector_idx].reset(); } } @@ -205,19 +209,19 @@ ChunkVectorInfo &RowVersionManager::GetVectorInfo(idx_t vector_idx) { idx_t RowVersionManager::DeleteRows(idx_t vector_idx, transaction_t transaction_id, row_t rows[], idx_t count) { lock_guard<mutex> lock(version_lock); - has_changes = true; + has_unserialized_changes = true; return GetVectorInfo(vector_idx).Delete(transaction_id, rows, count); } void RowVersionManager::CommitDelete(idx_t vector_idx, transaction_t commit_id, const DeleteInfo &info) { lock_guard<mutex> lock(version_lock); - has_changes = true; + has_unserialized_changes = true; GetVectorInfo(vector_idx).CommitDelete(commit_id, info); } vector<MetaBlockPointer> RowVersionManager::Checkpoint(MetadataManager &manager) { - if (!has_changes && !storage_pointers.empty()) { - // the row version manager already exists on disk and no changes were made + lock_guard<mutex> lock(version_lock); + if (!has_unserialized_changes) { // we can write the current pointer as-is // ensure the blocks we are pointing to are not marked as free manager.ClearModifiedBlocks(storage_pointers); @@ -236,24 +240,23 @@ vector<MetaBlockPointer> RowVersionManager::Checkpoint(MetadataManager &manager) } to_serialize.emplace_back(vector_idx, *chunk_info); } - if (to_serialize.empty()) { - return vector<MetaBlockPointer>(); - } storage_pointers.clear(); - MetadataWriter writer(manager, &storage_pointers); - // now serialize the actual version information - writer.Write<idx_t>(to_serialize.size()); - for (auto &entry : to_serialize) { - auto &vector_idx = entry.first; - auto &chunk_info = entry.second.get(); - writer.Write<idx_t>(vector_idx); - chunk_info.Write(writer); + if (!to_serialize.empty()) { + MetadataWriter writer(manager, &storage_pointers); + // now serialize the actual version information
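+ // Layout sketch as implied by the writes below (inferred, not an authoritative
+ // format description): a single idx_t count N, followed by N entries of
+ // [vector_idx, serialized ChunkInfo payload], all through one MetadataWriter.
+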
writer.Write<idx_t>(to_serialize.size()); + for (auto &entry : to_serialize) { + auto &vector_idx = entry.first; + auto &chunk_info = entry.second.get(); + writer.Write<idx_t>(vector_idx); + chunk_info.Write(writer); + } + writer.Flush(); } - writer.Flush(); - has_changes = false; + has_unserialized_changes = false; return storage_pointers; } @@ -277,8 +280,19 @@ shared_ptr<RowVersionManager> RowVersionManager::Deserialize(MetaBlockPointer de version_info->FillVectorInfo(vector_index); version_info->vector_info[vector_index] = ChunkInfo::Read(source); } - version_info->has_changes = false; + version_info->has_unserialized_changes = false; return version_info; }
+bool RowVersionManager::HasUnserializedChanges() { + lock_guard<mutex> lock(version_lock); + return has_unserialized_changes; +} + +vector<MetaBlockPointer> RowVersionManager::GetStoragePointers() { + lock_guard<mutex> lock(version_lock); + D_ASSERT(!has_unserialized_changes); + return storage_pointers; +} + } // namespace duckdb
diff --git a/src/storage/table/standard_column_data.cpp b/src/storage/table/standard_column_data.cpp index 6c6cdf3a3787..42fe46cf2d33 100644 --- a/src/storage/table/standard_column_data.cpp +++ b/src/storage/table/standard_column_data.cpp @@ -170,12 +170,12 @@ void StandardColumnData::UpdateColumn(TransactionData transaction, DataTable &da const vector<column_t> &column_path, Vector &update_vector, row_t *row_ids, idx_t update_count, idx_t depth) { if (depth >= column_path.size()) { - // update this column + // Update the column. ColumnData::Update(transaction, data_table, column_path[0], update_vector, row_ids, update_count); - } else { - // update the child column (i.e. the validity column) - validity.UpdateColumn(transaction, data_table, column_path, update_vector, row_ids, update_count, depth + 1); + return; } + // Update the child column, which is the validity column.
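+ // (The base column is passed through so that, when the validity is not stored
+ // separately, it can be recovered from the base data itself -- a fallback sketched
+ // in ValidityColumnData::UpdateWithBase further down in this patch.)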
+ validity.UpdateWithBase(transaction, data_table, column_path[0], update_vector, row_ids, update_count, *this); } unique_ptr<BaseStatistics> StandardColumnData::GetUpdateStatistics() { @@ -200,8 +200,8 @@ void StandardColumnData::FetchRow(TransactionData transaction, ColumnFetchState auto child_state = make_uniq<ColumnFetchState>(); state.child_states.push_back(std::move(child_state)); } - validity.FetchRow(transaction, *state.child_states[0], row_id, result, result_idx); ColumnData::FetchRow(transaction, state, row_id, result, result_idx); + validity.FetchRow(transaction, *state.child_states[0], row_id, result, result_idx); } void StandardColumnData::CommitDropColumn() {
diff --git a/src/storage/table/validity_column_data.cpp b/src/storage/table/validity_column_data.cpp index fc8a9e1ea5b1..6f6e3257648e 100644 --- a/src/storage/table/validity_column_data.cpp +++ b/src/storage/table/validity_column_data.cpp @@ -1,6 +1,7 @@ #include "duckdb/storage/table/validity_column_data.hpp" #include "duckdb/storage/table/scan_state.hpp" #include "duckdb/storage/table/update_segment.hpp" +#include "duckdb/storage/table/standard_column_data.hpp" namespace duckdb { @@ -13,6 +14,22 @@ FilterPropagateResult ValidityColumnData::CheckZonemap(ColumnScanState &state, T return FilterPropagateResult::NO_PRUNING_POSSIBLE; }
+void ValidityColumnData::UpdateWithBase(TransactionData transaction, DataTable &data_table, idx_t column_index, + Vector &update_vector, row_t *row_ids, idx_t update_count, ColumnData &base) { + Vector base_vector(base.type); + ColumnScanState validity_scan_state; + FetchUpdateData(validity_scan_state, row_ids, base_vector); + + if (validity_scan_state.current->GetCompressionFunction().type == CompressionType::COMPRESSION_EMPTY) { + // The validity is actually covered by the data, so we read it to get the validity for UpdateInternal. + ColumnScanState data_scan_state; + auto fetch_count = base.Fetch(data_scan_state, row_ids[0], base_vector); + base_vector.Flatten(fetch_count); + } + + UpdateInternal(transaction, data_table, column_index, update_vector, row_ids, update_count, base_vector); +} + void ValidityColumnData::AppendData(BaseStatistics &stats, ColumnAppendState &state, UnifiedVectorFormat &vdata, idx_t count) { lock_guard<mutex> l(stats_lock);
diff --git a/src/storage/table_index_list.cpp b/src/storage/table_index_list.cpp index ade84cdc8691..77f1f6581520 100644 --- a/src/storage/table_index_list.cpp +++ b/src/storage/table_index_list.cpp @@ -147,11 +147,17 @@ void TableIndexList::Bind(ClientContext &context, DataTableInfo &table_info, con // Create an IndexBinder to bind the index IndexBinder idx_binder(*binder, context); - // Apply any outstanding appends and replace the unbound index with a bound index. + // Apply any outstanding buffered replays and replace the unbound index with a bound index. auto &unbound_index = index_entry->index->Cast<UnboundIndex>(); auto bound_idx = idx_binder.BindIndex(unbound_index); - if (unbound_index.HasBufferedAppends()) { - bound_idx->ApplyBufferedAppends(column_types, unbound_index.GetBufferedAppends(), + if (unbound_index.HasBufferedReplays()) { + // For replaying buffered index operations, we only want the physical column types (skip over + // generated column types).
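+ // (Generated columns are virtual and have no physical storage, so the replay mapping
+ // is built against physical columns only; as a hypothetical example, for columns
+ // (a, b GENERATED, c) the list below would hold the types of [a, c].)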
+ vector<LogicalType> physical_column_types; + for (auto &col : table.GetColumns().Physical()) { + physical_column_types.push_back(col.Type()); + } + bound_idx->ApplyBufferedReplays(physical_column_types, unbound_index.GetBufferedReplays(), unbound_index.GetMappedColumnIds()); } @@ -255,11 +261,18 @@ void TableIndexList::InitializeIndexChunk(DataChunk &index_chunk, const vector index_types; + // Store the mapped_column_ids and index_types in sorted canonical form, needed for + // buffering WAL index operations during replay (see notes in unbound_index.hpp). + // First sort mapped_column_ids, then populate index_types according to the sorted order. for (auto &col : indexed_columns) { - index_types.push_back(table_types[col]); mapped_column_ids.emplace_back(col); } + std::sort(mapped_column_ids.begin(), mapped_column_ids.end()); + + vector<LogicalType> index_types; + for (auto &col : mapped_column_ids) { + index_types.push_back(table_types[col.GetPrimaryIndex()]); + } index_chunk.InitializeEmpty(index_types); }
diff --git a/src/storage/version_map.json b/src/storage/version_map.json index 74a266dbeee0..ce61dc0d3ad0 100644 --- a/src/storage/version_map.json +++ b/src/storage/version_map.json @@ -55,7 +55,10 @@ "v1.3.0": 66, "v1.3.1": 66, "v1.3.2": 66, - "v1.4.0": 67 + "v1.4.0": 67, + "v1.4.1": 67, + "v1.4.2": 67, + "v1.4.3": 67 }, "default": 64 }, @@ -77,6 +80,9 @@ "v1.3.1": 5, "v1.3.2": 5, "v1.4.0": 6, + "v1.4.1": 6, + "v1.4.2": 6, + "v1.4.3": 6, "latest": 6 }, "default": 1
diff --git a/src/transaction/cleanup_state.cpp b/src/transaction/cleanup_state.cpp index f9a17f2651bf..6a96490323bc 100644 --- a/src/transaction/cleanup_state.cpp +++ b/src/transaction/cleanup_state.cpp @@ -95,10 +95,15 @@ void CleanupState::Flush() { // set up the row identifiers vector Vector row_identifiers(LogicalType::ROW_TYPE, data_ptr_cast(row_numbers)); - // delete the tuples from all the indexes + // Delete the tuples from all the indexes. + // If anything goes wrong during removal, a FatalException must be thrown: the data may be + // corrupted, so the transaction can no longer be guaranteed. try { current_table->RemoveFromIndexes(row_identifiers, count); - } catch (...) { // NOLINT: ignore errors here + } catch (std::exception &ex) { + throw FatalException(ErrorData(ex).Message()); + } catch (...) { + throw FatalException("unknown failure in CleanupState::Flush"); } count = 0;
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 3be41b3cc9cb..2e7087410de1 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -34,7 +34,9 @@ if(NOT WIN32 AND NOT SUN) if(${BUILD_TPCE}) add_subdirectory(tpce) endif() - add_subdirectory(persistence) + if(${ENABLE_UNITTEST_CPP_TESTS}) + add_subdirectory(persistence) + endif() endif() set(UNITTEST_ROOT_DIRECTORY
diff --git a/test/README.md b/test/README.md index 8819cf4b3c0b..7d7c265ce7cb 100644 --- a/test/README.md +++ b/test/README.md @@ -1,12 +1,27 @@ This is the SQL and C++ DuckDB Unittests -## Environment Variable Flags +## Test Contract -Some of the tests require a special environment flag to be set so they can properly run. +Within SQL tests, the test runner (unittest) guarantees that these environment variables will be set. Many can be overridden when needed. + +| Env Var | Default Value | Overrides | Notes | +|------------------|----------------------------|-------------------------------------------------------------|---------------------------------------------------| +| `TEST_NAME` | e.g. `test/path/filename.test` | N/A | | +| `TEST_NAME_NO_SLASH` | e.g.
`test_path_filename.test` | N/A | is always `TEST_NAME s@/@_@g` | +| `TEST_UUID` | random UUID (as string) | N/A | defined per invocation | +| `WORKING_DIR` | e.g., `/Users/me/src/duckdb` | `unittest --test-dir <dir>` | default: wherever duckdb was sourced and built<br/>AKA: `__WORKING_DIRECTORY__` | +| `BUILD_DIR` | `{WORKING_DIR}/build/release` | N/A | read-only; derived from unittest path<br/>AKA: `__BUILD_DIRECTORY__` | +| `DATA_DIR` | `{WORKING_DIR}/data` | `DATA_DIR=mydata unittest ...`<br/>also test configs, `test_env` specification | Should provide a copy of `duckdb/data/**`, use to test AWS, Azure, other VFS reads | +| `TEMP_BASE` | `{WORKING_DIR}/duckdb_unittest_tempdir` | via `unittest --test-temp-dir` (retains dir after test) | use to test VFS writes<br/>AKA: `__TEST_DIR__` | +| `TEMP_DIR` | `{TEMP_BASE}/<pid>` | via `unittest --test-temp-dir` (retains dir after test) | use to test VFS writes<br/>
AKA: `__TEST_DIR__` | + +Some tests require particular environment variables to be set before they can run; this is usually indicated by a `require-env VAR` line in the test. ### Parallel CSV Reader + The tests located in `test/parallel_csv/test_parallel_csv.cpp` run the parallel CSV reader over multiple configurations of threads and buffer sizes. ### ADBC + The ADBC tests are in `test/api/adbc/test_adbc.cpp`. To run them, the DuckDB library path must be provided in the `DUCKDB_INSTALL_LIB` variable.
diff --git a/test/api/capi/test_capi_appender.cpp b/test/api/capi/test_capi_appender.cpp index e17084b720a3..2de1853526a9 100644 --- a/test/api/capi/test_capi_appender.cpp +++ b/test/api/capi/test_capi_appender.cpp @@ -1,5 +1,9 @@ #include "capi_tester.hpp" #include "duckdb.h" +#include "duckdb/function/table/system_functions.hpp" + +#include <atomic> +#include <thread> using namespace duckdb; using namespace std; @@ -1252,3 +1256,112 @@ TEST_CASE("Test upserting using the C API", "[capi]") { tester.Cleanup(); }
+ +bool HasError(const duckdb_state state, atomic<bool> &success, const string &message) { + if (state == DuckDBError) { + success = false; + Printer::Print(message); + return true; + } + return false; +} + +TEST_CASE("Test the appender with parallel appends and multiple data types in the C API", "[capi]") { + auto test_types = TestAllTypesFun::GetTestTypes(false, false); + + string query = "CREATE TABLE IF NOT EXISTS test ("; + for (auto &type : test_types) { + if (type.name == "union") { + type.name = "union_col"; + } + query += type.name + " " + type.type.ToString() + ", "; + } + query += ")"; + + char *err_msg; + + // Open DB. + auto test_dir = TestDirectoryPath(); + auto path = test_dir + "/test.db"; + duckdb_database db; + REQUIRE(duckdb_open_ext(path.c_str(), &db, nullptr, &err_msg) == DuckDBSuccess); + + // Connect. + duckdb_connection conn; + REQUIRE(duckdb_connect(db, &conn) == DuckDBSuccess); + + // Create the table. + duckdb_result ret; + REQUIRE(duckdb_query(conn, query.c_str(), &ret) == DuckDBSuccess); + duckdb_destroy_result(&ret); + + atomic<bool> success {true}; + duckdb::vector<std::thread> threads; + + idx_t worker_count = 5; + for (idx_t worker_id = 0; worker_id < worker_count; worker_id++) { + threads.emplace_back([db, &success, test_types]() { + // Create thread-local connection. + duckdb_connection t_conn; + if (HasError(duckdb_connect(db, &t_conn), success, "failed to create connection")) { + return; + } + + // Create appender. + duckdb_appender t_app; + if (HasError(duckdb_appender_create(t_conn, "main", "test", &t_app), success, + "failed to create appender")) { + return; + } + + // Start a transaction. + duckdb_result t_ret; + if (HasError(duckdb_query(t_conn, "BEGIN TRANSACTION", &t_ret), success, "failed to begin transaction")) { + return; + } + duckdb_destroy_result(&t_ret); + + for (int j = 0; j < STANDARD_VECTOR_SIZE + 10; j++) { + if (!success) { + return; + } + + // Begin row. + if (HasError(duckdb_appender_begin_row(t_app), success, "failed to begin append to row")) { + return; + } + + // Append all values. + for (const auto &type : test_types) { + auto value = type.min_value; + duckdb_value val_ptr = reinterpret_cast<duckdb_value>(&value); + if (HasError(duckdb_append_value(t_app, val_ptr), success, "failed to append value to row")) { + return; + }; + } + + // End row. + if (HasError(duckdb_appender_end_row(t_app), success, "failed to append end row")) { + return; + } + } + + // COMMIT and clean up.
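+ // A hypothetical expectation, for illustration: each worker appends
+ // STANDARD_VECTOR_SIZE + 10 rows in its own transaction, so a fully successful run
+ // leaves worker_count * (STANDARD_VECTOR_SIZE + 10) rows in the table.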
+ if (HasError(duckdb_query(t_conn, "COMMIT", &t_ret), success, "failed to commit transaction")) { + return; + } + duckdb_destroy_result(&t_ret); + if (HasError(duckdb_appender_destroy(&t_app), success, "failed to destroy appender")) { + return; + } + duckdb_disconnect(&t_conn); + }); + } + + for (auto &t : threads) { + t.join(); + } + + duckdb_disconnect(&conn); + duckdb_close(&db); +}
diff --git a/test/api/test_instance_cache.cpp b/test/api/test_instance_cache.cpp index 7abe20e55406..93d52b4ddda5 100644 --- a/test/api/test_instance_cache.cpp +++ b/test/api/test_instance_cache.cpp @@ -205,3 +205,27 @@ TEST_CASE("Test attaching the same database path from different databases in rea REQUIRE_FAIL(con1.Query(read_only_attach)); } } + +TEST_CASE("Test automatic DB instance caching", "[api][.]") { + DBInstanceCache instance_cache; + DBConfig config; + + SECTION("Unnamed in-memory connections are not shared") { + auto db1 = instance_cache.GetOrCreateInstance(":memory:", config); + auto db2 = instance_cache.GetOrCreateInstance(":memory:", config); + + Connection con(*db1); + Connection con2(*db2); + REQUIRE_NO_FAIL(con.Query("CREATE TABLE t(i INT)")); + REQUIRE_NO_FAIL(con2.Query("CREATE TABLE t(i INT)")); + } + SECTION("Named in-memory connections are shared") { + auto db1 = instance_cache.GetOrCreateInstance(":memory:abc", config); + auto db2 = instance_cache.GetOrCreateInstance(":memory:abc", config); + + Connection con(*db1); + Connection con2(*db2); + REQUIRE_NO_FAIL(con.Query("CREATE TABLE t(i INT)")); + REQUIRE_NO_FAIL(con2.Query("SELECT * FROM t")); + } +}
diff --git a/test/api/test_relation_api.cpp b/test/api/test_relation_api.cpp index d04357fc40d1..b19f2f007e95 100644 --- a/test/api/test_relation_api.cpp +++ b/test/api/test_relation_api.cpp @@ -506,6 +506,16 @@ TEST_CASE("Test table creations using the relation API", "[relation_api]") { result = con.Query("SELECT * FROM new_values ORDER BY k"); REQUIRE(CHECK_COLUMN(result, 0, {4, 5})); REQUIRE(CHECK_COLUMN(result, 1, {"hello", "hello"})); + + // create a table in an attached db and insert values + auto test_dir = TestDirectoryPath(); + string db_path = test_dir + "/my_db.db"; + REQUIRE_NO_FAIL(con.Query("ATTACH '" + db_path + "' AS my_db;")); + REQUIRE_NOTHROW(values = con.Values({{1, 10}, {2, 5}, {3, 4}}, {"i", "j"})); + REQUIRE_NOTHROW(values->Create(std::string("my_db"), std::string(), std::string("integers"))); + result = con.Query("SELECT * FROM my_db.integers ORDER BY i"); + REQUIRE(CHECK_COLUMN(result, 0, {1, 2, 3})); + REQUIRE(CHECK_COLUMN(result, 1, {10, 5, 4})); } TEST_CASE("Test table creations with on_create_conflict using the relation API", "[relation_api]") {
diff --git a/test/configs/block_verification.json b/test/configs/block_verification.json new file mode 100644 index 000000000000..dc2dccaaf882 --- /dev/null +++ b/test/configs/block_verification.json @@ -0,0 +1,7 @@ +{ + "description": "Run with block verification on persistent databases as storage.", + "initial_db": "{TEST_DIR}/{BASE_TEST_NAME}__test__config__block_verification.db", + "on_init": "SET debug_verify_blocks = true;", + "skip_compiled": "true", + "inherit_skip_tests": "test/configs/force_storage.json" +}
diff --git a/test/configs/enable_verification_for_debug.json b/test/configs/enable_verification_for_debug.json index 2f808b0a1acf..be80681bc014 100644 --- a/test/configs/enable_verification_for_debug.json +++ b/test/configs/enable_verification_for_debug.json @@ -2,161 +2,8 @@ "description": "Run with verification enabled (suitable for debug
builds).", "on_init": "PRAGMA enable_verification;", "skip_compiled": "true", + "inherit_skip_tests": "test/configs/enable_verification.json", "skip_tests": [ - { - "reason": "Contains random() or gen_random_uuid().", - "paths": [ - "test/fuzzer/pedro/nan_as_seed.test", - "test/optimizer/pushdown/issue_16104.test", - "test/sql/function/numeric/test_random.test", - "test/sql/function/uuid/test_uuid.test", - "test/sql/window/test_volatile_independence.test" - ] - }, - { - "reason": "Contains SEQUENCE.", - "paths": [ - "test/fuzzer/pedro/having_query_wrong_result.test", - "test/fuzzer/pedro/temp_sequence_durability.test", - "test/issues/fuzz/sequence_overflow.test", - "test/sql/aggregate/aggregates/test_avg.test", - "test/sql/aggregate/aggregates/test_bit_and.test", - "test/sql/aggregate/aggregates/test_bit_or.test", - "test/sql/aggregate/aggregates/test_bit_xor.test", - "test/sql/attach/attach_sequence.test", - "test/sql/attach/reattach_schema.test", - "test/sql/catalog/comment_on_wal.test", - "test/sql/catalog/dependencies/test_alter_dependency_ownership.test", - "test/sql/catalog/function/test_sequence_macro.test", - "test/sql/catalog/sequence/sequence_cycle.test", - "test/sql/catalog/sequence/sequence_offset_increment.test", - "test/sql/catalog/sequence/sequence_overflow.test", - "test/sql/catalog/sequence/test_duckdb_sequences.test", - "test/sql/catalog/sequence/test_sequence.test", - "test/sql/catalog/test_temporary.test", - "test/sql/copy_database/copy_database_different_types.test", - "test/sql/export/export_database.test", - "test/sql/function/list/aggregates/avg.test", - "test/sql/function/list/aggregates/bit_and.test", - "test/sql/function/list/aggregates/bit_or.test", - "test/sql/function/list/aggregates/bit_xor.test", - "test/sql/storage/catalog/test_sequence_uncommitted_transaction.test", - "test/sql/storage/catalog/test_store_default_sequence.test", - "test/sql/storage/catalog/test_store_sequences.test", - "test/sql/storage/wal/wal_sequence_uncommitted_transaction.test", - "test/sql/storage/wal/wal_store_default_sequence.test", - "test/sql/storage/wal/wal_store_sequences.test" - ] - }, - { - "reason": "Contains SAMPLE (non-deterministic).", - "paths": [ - "test/fuzzer/pedro/sample_limit_overflow.test", - "test/sql/function/numeric/set_seed_for_sample.test" - ] - }, - { - "reason": "Contains current time function.", - "paths": [ - "test/sql/function/timestamp/current_time.test", - "test/sql/parser/test_value_functions.test", - "test/sql/timezone/test_icu_timezone.test" - ] - }, - { - "reason": "Contains FIRST (non-deterministic).", - "paths": [ - "test/sql/parallelism/intraquery/test_parallel_nested_aggregates.test" - ] - }, - { - "reason": "Non-deterministic query (subqueries return multiple rows).", - "paths": [ - "test/sql/subquery/scalar/test_issue_6136.test" - ] - }, - { - "reason": "Running verification creates extra output.", - "paths": [ - "test/sql/copy/csv/rejects/csv_incorrect_columns_amount_rejects.test", - "test/sql/copy/csv/rejects/csv_rejects_auto.test", - "test/sql/copy/csv/rejects/csv_rejects_flush_cast.test", - "test/sql/copy/csv/rejects/csv_rejects_flush_message.test", - "test/sql/copy/csv/rejects/csv_rejects_maximum_line.test", - "test/sql/copy/csv/rejects/csv_rejects_read.test", - "test/sql/copy/csv/rejects/csv_rejects_two_tables.test", - "test/sql/copy/csv/rejects/csv_unquoted_rejects.test", - "test/sql/copy/csv/rejects/test_invalid_utf_rejects.test", - "test/sql/copy/csv/rejects/test_mixed.test", - 
"test/sql/copy/csv/rejects/test_multiple_errors_same_line.test", - "test/sql/copy/csv/test_non_unicode_header.test", - "test/sql/logging/file_system_logging.test", - "test/sql/logging/logging.test", - "test/sql/logging/logging_buffer_size.test", - "test/sql/logging/logging_csv.test", - "test/sql/logging/logging_types.test", - "test/sql/logging/test_logging_function.test", - "test/sql/pragma/test_query_log.test" - ] - }, - { - "reason": "Unoptimized statement differs from original result (cross product, conversion, overflow, statistics).", - "paths": [ - "test/fuzzer/duckfuzz/semi_join_has_correct_left_right_relations.test", - "test/fuzzer/pedro/force_no_cross_product.test", - "test/fuzzer/pedro/strptime_null_argument.test", - "test/fuzzer/sqlsmith/bitstring_agg_overflow.test", - "test/issues/rigger/overflow_filter_pushdown.test", - "test/sql/aggregate/aggregates/test_bitstring_agg.test", - "test/sql/catalog/function/test_table_macro_complex.test", - "test/sql/copy/parquet/parquet_filename_filter.test", - "test/sql/copy/parquet/parquet_hive.test", - "test/sql/copy/parquet/union_by_name_hive_partitioning.test", - "test/sql/optimizer/test_in_rewrite_rule.test", - "test/sql/storage/compression/rle/rle_constant.test" - ] - }, - { - "reason": "Prepared statement differs from original result (statistics, pg_prepared_statements).", - "paths": [ - "test/fuzzer/pedro/vacuum_table_with_generated_column.test", - "test/optimizer/statistics/statistics_numeric.test", - "test/parquet/parquet_stats_function.test", - "test/sql/alter/add_col/test_add_col_stats.test", - "test/sql/function/generic/test_stats.test", - "test/sql/pg_catalog/pg_prepared_statements.test", - "test/sql/storage/distinct_statistics_storage.test", - "test/sql/table_function/duckdb_prepared_statements.test", - "test/sql/types/list/list_stats.test", - "test/sql/types/nested/array/array_statistics.test", - "test/sql/types/struct/struct_stats.test", - "test/sql/vacuum/test_analyze.test" - ] - }, - { - "reason": "Deserialized statement differs from original result.", - "paths": [ - "test/sql/function/list/lambdas/arrow/test_deprecated_lambda.test" - ] - }, - { - "reason": "Round-trip of literals changes type, e.g., DOUBLE -> DECIMAL.", - "paths": [ - "test/sql/types/decimal/large_decimal_constants.test", - "test/sql/types/hugeint/test_hugeint_conversion.test", - "test/sql/types/uhugeint/test_uhugeint_conversion.test" - ] - }, - { - "reason": "Time-out without optimizer.", - "paths": [ - "test/optimizer/join_dependent_filter.test", - "test/optimizer/joins/no_duplicate_elimination_join.test", - "test/optimizer/joins/tpcds_nofail.test", - "test/sql/limit/test_limit0.test", - "test/sql/optimizer/plan/test_filter_pushdown_large.test" - ] - }, { "reason": "Runs too long in debug mode.", "paths": [ @@ -679,52 +526,10 @@ "test/sqlserver/sqlserver_cte.test" ] }, - { - "reason": "FIXME: Unexpected Parser Error", - "paths": [ - "test/issues/general/test_16524.test", - "test/sql/alter/test_alter_if_exists.test", - "test/sql/catalog/comment_on.test", - "test/sql/catalog/comment_on_column.test", - "test/sql/catalog/comment_on_dependencies.test", - "test/sql/catalog/comment_on_extended.test", - "test/sql/catalog/comment_on_pg_description.test", - "test/sql/collate/test_collate_between.test", - "test/sql/index/create_index_options.test", - "test/sql/pragma/test_show_tables.test" - ] - }, - { - "reason": "FIXME: Unexpected catalog duplicate/missing entry error.", - "paths": [ - "test/sql/catalog/function/test_macro_issue_13104.test", - 
"test/sql/catalog/function/test_macro_relpersistence_conflict.test", - "test/sql/catalog/function/test_recursive_macro.test", - "test/sql/catalog/function/test_recursive_macro_no_dependency.test", - "test/sql/catalog/test_set_schema.test", - "test/sql/catalog/test_set_search_path.test", - "test/sql/function/list/lambdas/arrow/lambdas_and_functions_deprecated.test", - "test/sql/function/list/lambdas/lambdas_and_functions.test" - ] - }, { "reason": "FIXME: Misc. unexpected failures (including internal exception).", "paths": [ - "test/fuzzer/duckfuzz/null_arguments.test", - "test/issues/internal/test_5457.test", - "test/parquet/variant/variant_nanos_tz.test", - "test/parquet/variant/variant_nested_with_nulls.test", - "test/sql/aggregate/aggregates/test_state_export.test", - "test/sql/copy/file_size_bytes.test", - "test/sql/copy/parquet/bloom_filters.test", - "test/sql/copy/parquet/corrupt_stats.test", - "test/sql/copy/parquet/parquet_1618_struct_strings.test", - "test/sql/create/create_table_compression.test", - "test/sql/logging/logging_file_bind_replace.test", - "test/sql/optimizer/test_rowid_pushdown_plan.test", - "test/sql/pg_catalog/system_functions.test", - "test/sql/storage/compression/test_using_compression.test", - "test/sql/error/error_position.test" + "test/sql/copy/file_size_bytes.test" ] } ] diff --git a/test/configs/encryption.json b/test/configs/encryption.json index 2fbc92ce0a9d..47660285cb30 100644 --- a/test/configs/encryption.json +++ b/test/configs/encryption.json @@ -4,6 +4,10 @@ "on_new_connection": "USE __test__config__crypto;", "on_load": "skip", "skip_compiled": "true", + "statically_loaded_extensions": [ + "core_functions", + "httpfs" + ], "skip_tests": [ { "reason": "TODO", @@ -43,6 +47,14 @@ "paths": [ "test/fuzzer/sqlsmith/current_schemas_null.test" ] + }, + { + "reason": "Expects httpfs to not be present", + "paths" : [ + "test/sql/secrets/create_secret_hffs_autoload.test", + "test/sql/secrets/secret_autoloading_errors.test", + "test/sql/partitioning/hive_partitioning_autodetect.test" + ] } ] } diff --git a/test/configs/force_storage_restart.json b/test/configs/force_storage_restart.json index 83e7aa972eba..f70aa3a9588b 100644 --- a/test/configs/force_storage_restart.json +++ b/test/configs/force_storage_restart.json @@ -3,26 +3,11 @@ "initial_db": "{TEST_DIR}/{BASE_TEST_NAME}__test__config__force_storage_restart.db", "force_restart": "true", "skip_compiled": "true", + "inherit_skip_tests": "test/configs/force_storage.json", "skip_tests": [ { "reason": "Contains explicit use of the memory catalog.", "paths": [ - "test/sql/show_select/test_describe_all.test", - "test/sql/catalog/function/attached_macro.test", - "test/sql/catalog/test_temporary.test", - "test/sql/pragma/test_show_tables_temp_views.test", - "test/sql/pg_catalog/system_functions.test", - "test/sql/pg_catalog/sqlalchemy.test", - "test/sql/attach/attach_table_info.test", - "test/sql/attach/attach_defaults.test", - "test/sql/attach/attach_did_you_mean.test", - "test/sql/attach/attach_default_table.test", - "test/sql/attach/attach_show_all_tables.test", - "test/sql/attach/attach_issue7711.test", - "test/sql/attach/attach_issue_7660.test", - "test/sql/attach/show_databases.test", - "test/sql/attach/attach_views.test", - "test/sql/copy_database/copy_table_with_sequence.test", "test/sql/attach/attach_use_rollback.test" ] } diff --git a/test/configs/storage_compatibility.json b/test/configs/storage_compatibility.json new file mode 100644 index 000000000000..81afab90574b --- /dev/null +++ 
b/test/configs/storage_compatibility.json @@ -0,0 +1,54 @@ +{ + "description": "Storage compatibility test.", + "initial_db": "bwc_storage_test.db", + "settings": [ + {"name": "storage_compatibility_version", "value": "v1.0.0"} + ], + "skip_compiled": "true", + "skip_tests": [ + { + "reason": "Contains explicit use of the memory catalog.", + "paths": [ + "test/sql/show_select/test_describe_all.test", + "test/sql/catalog/function/attached_macro.test", + "test/sql/catalog/test_temporary.test", + "test/sql/pragma/test_show_tables_temp_views.test", + "test/sql/pg_catalog/system_functions.test", + "test/sql/pg_catalog/sqlalchemy.test", + "test/sql/attach/attach_table_info.test", + "test/sql/attach/attach_defaults.test", + "test/sql/attach/attach_did_you_mean.test", + "test/sql/attach/attach_default_table.test", + "test/sql/attach/attach_show_all_tables.test", + "test/sql/attach/attach_issue7711.test", + "test/sql/attach/attach_issue_7660.test", + "test/sql/attach/show_databases.test", + "test/sql/attach/attach_views.test", + "test/sql/copy_database/copy_table_with_sequence.test" + ] + }, + { + "reason": "Stringification too slow", + "paths": [ + "test/sql/types/bignum/test_bignum_sum.test" + ] + }, + { + "reason": "Time (NS) not supported (new type).", + "paths": [ + "test/parquet/timens_parquet.test", + "test/sql/types/time/test_time_ns.test" + ] + }, + { + "reason": "Expected forwards compatibility failure.", + "paths": [ + "test/fuzzer/pedro/view_not_rebound_error_no_view_dependencies.test", + "test/issues/rigger/assertion_scale.test", + "test/issues/general/test_16662.test", + "test/sql/copy/csv/test_null_padding_projection.test" + + ] + } + ] +} diff --git a/test/configs/verify_fetch_row.json b/test/configs/verify_fetch_row.json index 657302354459..73e30f229285 100644 --- a/test/configs/verify_fetch_row.json +++ b/test/configs/verify_fetch_row.json @@ -1,82 +1,16 @@ { "description": "Run on persistent databases as storage with row verification enabled.", "initial_db": "{TEST_DIR}/{BASE_TEST_NAME}__test__config__verify_fetch_row.db", - "on_init": "PRAGMA verify_fetch_row;", + "on_init": "PRAGMA enable_verification;PRAGMA verify_fetch_row;", "skip_compiled": "true", + "inherit_skip_tests": "test/configs/enable_verification.json", "skip_tests": [ { - "reason": "Contains random() or gen_random_uuid().", - "paths": [ - "test/optimizer/pushdown/issue_16104.test", - "test/fuzzer/pedro/nan_as_seed.test", - "test/sql/function/numeric/test_random.test", - "test/sql/function/uuid/test_uuid.test", - "test/sql/window/test_volatile_independence.test" - ] - }, - { - "reason": "Contains SEQUENCE.", - "paths": [ - "test/fuzzer/pedro/having_query_wrong_result.test", - "test/fuzzer/pedro/temp_sequence_durability.test", - "test/issues/fuzz/sequence_overflow.test", - "test/sql/aggregate/aggregates/test_bit_xor.test", - "test/sql/aggregate/aggregates/test_bit_and.test", - "test/sql/aggregate/aggregates/test_bit_or.test", - "test/sql/aggregate/aggregates/test_avg.test", - "test/sql/catalog/comment_on_wal.test", - "test/sql/catalog/dependencies/test_alter_dependency_ownership.test", - "test/sql/catalog/function/test_sequence_macro.test", - "test/sql/catalog/sequence/sequence_offset_increment.test", - "test/sql/catalog/sequence/sequence_cycle.test", - "test/sql/catalog/sequence/test_sequence.test", - "test/sql/catalog/sequence/sequence_overflow.test", - "test/sql/function/list/aggregates/avg.test", - "test/sql/function/list/aggregates/bit_and.test", - "test/sql/function/list/aggregates/bit_xor.test", - 
"test/sql/function/list/aggregates/bit_or.test", - "test/sql/storage/wal/wal_store_sequences.test", - "test/sql/storage/wal/wal_store_default_sequence.test", - "test/sql/storage/wal/wal_sequence_uncommitted_transaction.test", - "test/sql/storage/catalog/test_sequence_uncommitted_transaction.test", - "test/sql/storage/catalog/test_store_default_sequence.test", - "test/sql/storage/catalog/test_store_sequences.test", - "test/sql/attach/reattach_schema.test", - "test/sql/attach/attach_sequence.test", - "test/sql/export/export_database.test", - "test/sql/copy_database/copy_database_different_types.test" - ] - }, - { - "reason": "Contains SAMPLE (non-deterministic).", - "paths": [ - "test/fuzzer/pedro/sample_limit_overflow.test", - "test/sql/function/numeric/set_seed_for_sample.test" - ] - }, - { - "reason": "Contains current time function.", - "paths": [ - "test/sql/parser/test_value_functions.test", - "test/sql/function/timestamp/current_time.test", - "test/sql/timezone/test_icu_timezone.test" - ] - }, - { - "reason": "Contains FIRST (non-deterministic).", - "paths": [ - "test/sql/parallelism/intraquery/test_parallel_nested_aggregates.test" - ] - }, - { - "reason": "Contains explicit use of the memory catalog.", + "reason": "Wrong result", "paths": [ "test/sql/show_select/test_describe_all.test", "test/sql/catalog/function/attached_macro.test", - "test/sql/catalog/test_temporary.test", "test/sql/pragma/test_show_tables_temp_views.test", - "test/sql/pg_catalog/system_functions.test", - "test/sql/pg_catalog/sqlalchemy.test", "test/sql/attach/attach_table_info.test", "test/sql/attach/attach_defaults.test", "test/sql/attach/attach_did_you_mean.test", @@ -86,82 +20,24 @@ "test/sql/attach/attach_issue_7660.test", "test/sql/attach/show_databases.test", "test/sql/attach/attach_views.test", - "test/sql/copy_database/copy_table_with_sequence.test" - ] - }, - { - "reason": "Non-deterministic query (subqueries return multiple rows).", - "paths": [ - "test/sql/subquery/scalar/test_issue_6136.test" - ] - }, - { - "reason": "Running verification creates extra output.", - "paths": [ - "test/sql/pragma/test_query_log.test", - "test/sql/copy/csv/rejects/csv_rejects_auto.test", - "test/sql/copy/csv/rejects/csv_rejects_flush_cast.test", - "test/sql/copy/csv/rejects/csv_unquoted_rejects.test", - "test/sql/copy/csv/rejects/csv_rejects_read.test", - "test/sql/copy/csv/rejects/csv_rejects_maximum_line.test", - "test/sql/copy/csv/rejects/test_invalid_utf_rejects.test", - "test/sql/copy/csv/rejects/csv_rejects_flush_message.test", - "test/sql/copy/csv/rejects/csv_rejects_two_tables.test", - "test/sql/copy/csv/rejects/test_mixed.test", - "test/sql/copy/csv/rejects/test_multiple_errors_same_line.test", - "test/sql/copy/csv/rejects/csv_incorrect_columns_amount_rejects.test", - "test/sql/copy/csv/test_non_unicode_header.test" - ] - }, - { - "reason": "Emits different vector type (FLAT).", - "paths": [ - "test/sql/storage/compression/rle/rle_constant.test" - ] - }, - { - "reason": "Round-trip of literals changes type, e.g., DOUBLE -> DECIMAL.", - "paths": [ - "test/sql/types/decimal/large_decimal_constants.test", - "test/sql/types/uhugeint/test_uhugeint_conversion.test", - "test/sql/types/hugeint/test_hugeint_conversion.test" - ] - }, - { - "reason": "FIXME: Unexpected Parser Error", - "paths": [ - "test/issues/general/test_16524.test", - "test/sql/collate/test_collate_between.test", - "test/sql/catalog/comment_on_dependencies.test", - "test/sql/catalog/comment_on_column.test", - "test/sql/catalog/comment_on.test", - 
"test/sql/catalog/comment_on_extended.test", - "test/sql/catalog/comment_on_pg_description.test", - "test/sql/pragma/test_show_tables.test", - "test/sql/alter/test_alter_if_exists.test", - "test/sql/index/create_index_options.test" - ] - }, - { - "reason": "FIXME: Unexpected catalog duplicate/missing entry error.", - "paths": [ - "test/sql/catalog/test_set_search_path.test", - "test/sql/catalog/function/test_macro_issue_13104.test", - "test/sql/catalog/function/test_macro_relpersistence_conflict.test", - "test/sql/catalog/function/test_recursive_macro.test", - "test/sql/catalog/function/test_recursive_macro_no_dependency.test", - "test/sql/catalog/test_set_schema.test", - "test/sql/function/list/lambdas/arrow/lambdas_and_functions_deprecated.test", - "test/sql/function/list/lambdas/lambdas_and_functions.test" + "test/sql/copy_database/copy_table_with_sequence.test", + "test/sql/pg_catalog/sqlalchemy.test" ] }, { - "reason": "FIXME: Misc. unexpected failures (query succeeds, wrong result).", + "reason": "FIXME: crash/error in test", "paths": [ - "test/sql/storage/compression/test_using_compression.test", - "test/sql/create/create_table_compression.test", - "test/sql/create/create_table_compression.test", - "test/sql/aggregate/aggregates/test_state_export.test" + "test/sql/storage/compression/roaring/roaring_bool_run_simple_w_null.test", + "test/sql/storage/compression/roaring/roaring_bool_inverted_run_simple.test", + "test/sql/storage/compression/roaring/roaring_bool_smaller_than_vector.test", + "test/sql/storage/compression/roaring/roaring_bool_array_simple.test", + "test/sql/storage/compression/roaring/roaring_bool_bitset_simple_w_null.test", + "test/sql/storage/compression/roaring/roaring_bool_run_simple.test", + "test/sql/storage/compression/roaring/roaring_bool_inverted_array_simple.test", + "test/sql/storage/compression/roaring/roaring_bool_first_is_null.test", + "test/sql/storage/compression/roaring/roaring_bool_fetch_row.test", + "test/sql/storage/compression/roaring/roaring_bool_bitset_simple.test", + "test/sql/storage/compression/roaring/roaring_bool_array_simple_w_null.test" ] } ] diff --git a/test/configs/wal_verification.json b/test/configs/wal_verification.json index 173bb6c06619..0d8dbb9fe5d4 100644 --- a/test/configs/wal_verification.json +++ b/test/configs/wal_verification.json @@ -5,26 +5,11 @@ "checkpoint_wal_size": "1000000000", "checkpoint_on_shutdown": "false", "skip_compiled": "true", + "inherit_skip_tests": "test/configs/force_storage.json", "skip_tests": [ { "reason": "Contains explicit use of the memory catalog.", "paths": [ - "test/sql/attach/attach_default_table.test", - "test/sql/attach/attach_defaults.test", - "test/sql/attach/attach_did_you_mean.test", - "test/sql/attach/attach_issue7711.test", - "test/sql/attach/attach_issue_7660.test", - "test/sql/attach/attach_show_all_tables.test", - "test/sql/attach/attach_table_info.test", - "test/sql/attach/attach_views.test", - "test/sql/attach/show_databases.test", - "test/sql/catalog/function/attached_macro.test", - "test/sql/catalog/test_temporary.test", - "test/sql/copy_database/copy_table_with_sequence.test", - "test/sql/pg_catalog/sqlalchemy.test", - "test/sql/pg_catalog/system_functions.test", - "test/sql/pragma/test_show_tables_temp_views.test", - "test/sql/show_select/test_describe_all.test", "test/sql/attach/attach_use_rollback.test" ] }, diff --git a/test/extension/CMakeLists.txt b/test/extension/CMakeLists.txt index 0fbf0fb826c0..84a176ad1cd1 100644 --- a/test/extension/CMakeLists.txt +++ 
b/test/extension/CMakeLists.txt @@ -21,7 +21,7 @@ if(NOT WIN32 AND NOT SUN) ${PARAMETERS} ../extension/loadable_extension_optimizer_demo.cpp) - if(${ENABLE_UNITTEST_CPP_TESTS}) + if(${ENABLE_UNITTEST_CPP_TESTS} AND NOT (${ENABLE_THREAD_SANITIZER})) set(TEST_EXT_OBJECTS test_remote_optimizer.cpp) add_library_unity(test_extensions OBJECT ${TEST_EXT_OBJECTS}) diff --git a/test/extension/autoloading_encodings.test b/test/extension/autoloading_encodings.test index b1b99c354e21..26ef24d500eb 100644 --- a/test/extension/autoloading_encodings.test +++ b/test/extension/autoloading_encodings.test @@ -13,7 +13,7 @@ statement ok set autoinstall_known_extensions=false statement error -FROM read_csv('data/csv/test/test.csv', encoding = 'shift_jis') +FROM read_csv('{DATA_DIR}/csv/test/test.csv', encoding = 'shift_jis') ---- :.*Invalid Input Error.*You can try "INSTALL encodings; LOAD encodings".* @@ -27,4 +27,4 @@ statement ok set autoinstall_extension_repository='${LOCAL_EXTENSION_REPO}'; statement ok -FROM read_csv('data/csv/test/test.csv', encoding = 'shift_jis') \ No newline at end of file +FROM read_csv('{DATA_DIR}/csv/test/test.csv', encoding = 'shift_jis') \ No newline at end of file diff --git a/test/fuzzer/duckfuzz/having_window_bind.test b/test/fuzzer/duckfuzz/having_window_bind.test new file mode 100644 index 000000000000..17134d584175 --- /dev/null +++ b/test/fuzzer/duckfuzz/having_window_bind.test @@ -0,0 +1,47 @@ +# name: test/fuzzer/duckfuzz/having_window_bind.test +# description: Throw window having errors immediately +# group: [duckfuzz] + +require tpch + +statement ok +call dbgen(sf=0); + +statement error +SELECT + DISTINCT c7, + (c3 < NULL), + c9 +FROM + ( + SELECT + DISTINCT c8, + c2, + rank() OVER ( + ORDER BY + c6 RANGE BETWEEN CURRENT ROW + AND c2 FOLLOWING + ) + FROM + nation AS t5(c1, c2, c3, c4) ASOF + INNER JOIN ( + SELECT + DISTINCT c1 + FROM + nation AS t10(c6, c7, c8, c9) + HAVING + nth_value( + c8, + #8) OVER ( + RANGE BETWEEN '24:00:00-15:59:59'::TIME WITH TIME ZONE PRECEDING + AND 'c7607984-ed9f-4f50-a6e3-62b94b3a6e9c' PRECEDING) + USING SAMPLE 19% (System) + ORDER BY * DESC NULLS FIRST + ) AS t11 ON (8252) + WHERE 'enum_299' + HAVING c2 + ORDER BY * + ) AS t12 +ORDER BY * ASC; +---- +HAVING clause cannot contain window functions diff --git a/test/fuzzer/duckfuzz/logger_null.test b/test/fuzzer/duckfuzz/logger_null.test new file mode 100644 index 000000000000..a21177255d71 --- /dev/null +++ b/test/fuzzer/duckfuzz/logger_null.test @@ -0,0 +1,9 @@ +# name: test/fuzzer/duckfuzz/logger_null.test +# description: Test table function with named parameter that is NULL +# group: [duckfuzz] + +# https://github.com/duckdb/duckdb-fuzzer/issues/4206 +statement error +FROM duckdb_logs(denormalized_table := NULL) +---- +Invalid Input Error: denormalized_table cannot be NULL \ No newline at end of file diff --git a/test/fuzzer/pedro/intersect_correlated_subquery.test b/test/fuzzer/pedro/intersect_correlated_subquery.test index f95a374afeb6..d44dd022ef49 100644 --- a/test/fuzzer/pedro/intersect_correlated_subquery.test +++ b/test/fuzzer/pedro/intersect_correlated_subquery.test @@ -12,6 +12,10 @@ query I SELECT (SELECT 1 INTERSECT SELECT 1 HAVING true) FROM t0; ---- +query I +SELECT (SELECT 1 INTERSECT SELECT 1 HAVING false) FROM t0; +---- + query I SELECT (SELECT 1 INTERSECT SELECT 1 HAVING t0.rowid) FROM t0; ---- diff --git a/test/helpers/CMakeLists.txt b/test/helpers/CMakeLists.txt index 03edf7adc34c..7db67978efad 100644 --- a/test/helpers/CMakeLists.txt +++ b/test/helpers/CMakeLists.txt 
@@ -1,3 +1,6 @@ +add_definitions(-DDUCKDB_ROOT_DIRECTORY="${PROJECT_SOURCE_DIR}" + -DDUCKDB_BUILD_DIRECTORY="${PROJECT_BINARY_DIR}") + set(DUCKDB_TEST_HELPERS_UNITS test_helpers.cpp capi_tester.cpp pid.cpp test_config.cpp) diff --git a/test/helpers/test_config.cpp b/test/helpers/test_config.cpp index 2216a2f49749..ea2b2bf112eb 100644 --- a/test/helpers/test_config.cpp +++ b/test/helpers/test_config.cpp @@ -43,6 +43,7 @@ static const TestConfigOption test_config_options[] = { {"test_env", "The test variables", LogicalType::LIST(LogicalType::STRUCT({{"env_name", LogicalType::VARCHAR}, {"env_value", LogicalType::VARCHAR}})), nullptr}, + {"inherit_skip_tests", "Path of config to inherit 'skip_tests' from", LogicalType::VARCHAR}, {"skip_tests", "Tests to be skipped", LogicalType::LIST( LogicalType::STRUCT({{"reason", LogicalType::VARCHAR}, {"paths", LogicalType::LIST(LogicalType::VARCHAR)}})), @@ -54,8 +55,6 @@ static const TestConfigOption test_config_options[] = { {"statically_loaded_extensions", "Extensions to be loaded (from the statically available one)", LogicalType::LIST(LogicalType::VARCHAR), nullptr}, {"storage_version", "Database storage version to use by default", LogicalType::VARCHAR, nullptr}, - {"data_location", "Directory where static test files are read (defaults to `data/`)", LogicalType::VARCHAR, - nullptr}, {"select_tag", "Select tests which match named tag (as singleton set; multiple sets are OR'd)", LogicalType::VARCHAR, TestConfiguration::AppendSelectTagSet}, {"select_tag_set", "Select tests which match _all_ named tags (multiple sets are OR'd)", @@ -106,6 +105,42 @@ void TestConfiguration::Initialize() { ParseOption("summarize_failures", Value(true)); } } + + working_dir = FileSystem::GetWorkingDirectory(); + test_uuid = UUID::ToString(UUID::GenerateRandomUUID()); + UpdateEnvironment(); +} + +void TestConfiguration::UpdateEnvironment() { + // Setup standard vars + + // XXX: UUID used by ducklake to avoid collisions, is there a better way? 
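+ // Worked illustration (hypothetical paths, not fixed by this code): with
+ // working_dir = /Users/me/duckdb, the assignments below yield
+ // DATA_DIR = /Users/me/duckdb/data, TEMP_DIR = duckdb_unittest_tempdir/<pid>,
+ // and CATALOG_DIR = <TEMP_DIR>/<uuid>, which is not created automatically.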
+ test_env["TEST_UUID"] = test_uuid; + test_env["BUILD_DIR"] = string(DUCKDB_BUILD_DIRECTORY); + test_env["WORKING_DIR"] = working_dir; // can be overridden per runner + test_env["DATA_DIR"] = working_dir + "/data"; // default: data/ + + string temp_dir = TestDirectoryPath(); + test_env["TEMP_DIR"] = temp_dir; // default: duckdb_unittest_tempdir/$PID + test_env["CATALOG_DIR"] = temp_dir + "/" + test_uuid; // _not_ guaranteed to exist +} + +string TestConfiguration::GetWorkingDirectory() { + return working_dir; +} + +bool TestConfiguration::ChangeWorkingDirectory(const string &dir) { + bool rv = false; + // set CWD first, then get it -- this gets us normalized absolute path for free + // making the comparison below meaningful + FileSystem::SetWorkingDirectory(dir); + const auto &normalized = FileSystem::GetWorkingDirectory(); + if (working_dir != normalized) { + rv = true; + working_dir = normalized; + UpdateEnvironment(); + } + return rv; } bool TestConfiguration::ParseArgument(const string &arg, idx_t argc, char **argv, idx_t &i) { @@ -210,15 +245,6 @@ bool TestConfiguration::ShouldSkipTest(const string &test_name) { return tests_to_be_skipped.count(test_name); } -string TestConfiguration::DataLocation() { - string res = GetOptionOrDefault("data_location", string("data/")); - // Force DataLocation to end with a '/' - if (res.back() != '/') { - res += "/"; - } - return res; -} - string TestConfiguration::OnInitCommand() { return GetOptionOrDefault("on_init", string()); } @@ -329,6 +355,20 @@ void TestConfiguration::LoadConfig(const string &config_path) { ParseOption(entry.first, Value(entry.second)); } + auto inherit_entry = options.find("inherit_skip_tests"); + if (inherit_entry != options.end()) { + auto path_value = inherit_entry->second; + D_ASSERT(path_value.type().id() == LogicalTypeId::VARCHAR); + D_ASSERT(!path_value.IsNull()); + auto cwd = TestGetCurrentDirectory(); + auto path = TestJoinPath(cwd, path_value.ToString()); + TestConfiguration inherit_config; + inherit_config.LoadConfig(path); + + tests_to_be_skipped.insert(inherit_config.tests_to_be_skipped.begin(), + inherit_config.tests_to_be_skipped.end()); + } + // Convert to unordered_set the list of tests to be skipped auto entry = options.find("skip_tests"); if (entry != options.end()) { diff --git a/test/include/test_config.hpp b/test/include/test_config.hpp index ff9281da433a..aa4e8246f5bc 100644 --- a/test/include/test_config.hpp +++ b/test/include/test_config.hpp @@ -46,6 +46,10 @@ class TestConfiguration { void ParseOption(const string &name, const Value &value); void LoadConfig(const string &config_path); + void UpdateEnvironment(); + string GetWorkingDirectory(); + bool ChangeWorkingDirectory(const string &dir); // true -> changed + void ProcessPath(string &path, const string &test_name); string GetDescription(); @@ -93,6 +97,11 @@ class TestConfiguration { private: case_insensitive_map_t options; unordered_set tests_to_be_skipped; + + // explicitly take ownership of working_dir here, giving runners an API to chdir, + // and get env updates to match + string working_dir; + string test_uuid; unordered_map test_env; vector> select_tag_sets; diff --git a/test/issues/general/test_15416.test b/test/issues/general/test_15416.test index a511d6107692..c2c5b832b0d3 100644 --- a/test/issues/general/test_15416.test +++ b/test/issues/general/test_15416.test @@ -15,4 +15,4 @@ FROM (SELECT 1) _(x), LATERAL (SELECT * FROM cte) b(x) ---- -Referenced column "x" not found in FROM clause! 
\ No newline at end of file +Referenced column "x" was not found because the FROM clause is missing \ No newline at end of file diff --git a/test/issues/general/test_17891.test_slow b/test/issues/general/test_17891.test_slow index 0ede121157d2..048038df3cbb 100644 --- a/test/issues/general/test_17891.test_slow +++ b/test/issues/general/test_17891.test_slow @@ -5,10 +5,10 @@ require parquet statement ok -create view seqs_table_a as from 'data/parquet-testing/seqs_table.parquet'; +create view seqs_table_a as from '{DATA_DIR}/parquet-testing/seqs_table.parquet'; statement ok -create view seqs_table_b as from 'data/parquet-testing/seqs_table.parquet'; +create view seqs_table_b as from '{DATA_DIR}/parquet-testing/seqs_table.parquet'; query I WITH orig AS ( diff --git a/test/issues/general/test_19327.test b/test/issues/general/test_19327.test new file mode 100644 index 000000000000..6e1ee333c82c --- /dev/null +++ b/test/issues/general/test_19327.test @@ -0,0 +1,63 @@ +# name: test/issues/general/test_19327.test +# description: Issue 19327 - Wrong result for DISTINCT and LEFT JOIN +# group: [general] + +require icu + +statement ok +create or replace table data_source as select *, (target_time AT TIME ZONE 'UTC')::timestamp::time as time_of_day from + (VALUES + ('2025-09-22T15:00:00Z'::TIMESTAMPTZ, '2025-09-22T08:00:00Z'::TIMESTAMPTZ, 1), + ('2025-09-23T15:00:00Z'::TIMESTAMPTZ, '2025-09-23T12:15:00Z'::TIMESTAMPTZ, 2), + ('2025-09-23T15:00:00Z'::TIMESTAMPTZ, '2025-09-23T14:25:00Z'::TIMESTAMPTZ, 3)) t(target_time, update_time, current_value) + ; + +query IIIII nosort q0 +WITH t_current_data AS ( +SELECT DISTINCT ON (target_time) + target_time AS today_target_time, + time_of_day, + current_value +FROM data_source +WHERE target_time BETWEEN '2025-09-23T12:15:00Z'::TIMESTAMPTZ::DATE AND ('2025-09-23T12:15:00Z'::TIMESTAMPTZ + INTERVAL '1 DAYS')::DATE AND + update_time <= '2025-09-23T12:15:00Z'::TIMESTAMPTZ +ORDER BY target_time, update_time DESC), + t_past_data AS ( +SELECT + target_time, + time_of_day, + current_value AS past_value +FROM data_source) +SELECT today_target_time, + time_of_day, + current_value, + past_value, + abs(past_value - current_value) +FROM t_current_data left outer join t_past_data USING (time_of_day) +ORDER BY time_of_day +---- + +query IIIII nosort q0 +WITH t_current_data AS ( +SELECT DISTINCT ON (target_time) + target_time AS today_target_time, + time_of_day, + current_value +FROM data_source +WHERE target_time BETWEEN '2025-09-23T12:15:00Z'::TIMESTAMPTZ::DATE AND ('2025-09-23T12:15:00Z'::TIMESTAMPTZ + INTERVAL '1 DAYS')::DATE AND + update_time <= '2025-09-23T12:15:00Z'::TIMESTAMPTZ +ORDER BY target_time, update_time DESC), + t_past_data AS ( +SELECT + target_time, + time_of_day, + current_value AS past_value +FROM data_source) +SELECT today_target_time, + time_of_day, + current_value, + past_value, + abs(past_value - current_value) +FROM t_current_data full outer join t_past_data USING (time_of_day) +ORDER BY time_of_day +---- diff --git a/test/issues/general/test_19504.test b/test/issues/general/test_19504.test new file mode 100644 index 000000000000..7fc564c1c8ee --- /dev/null +++ b/test/issues/general/test_19504.test @@ -0,0 +1,17 @@ +# name: test/issues/general/test_19504.test +# description: Issue 19504 - Regression due to optimization in 1.4.0 +# group: [general] + +query II +SELECT query, + (WITH t AS MATERIALIZED (SELECT query) + SELECT * + FROM (VALUES ('cat')) AS _(x) + WHERE x IN (SELECT * FROM t) + ) AS broken +FROM (VALUES ('cat'), ('dog'), ('duck')) AS 
queries(query) +ORDER BY query; +---- +cat cat +dog NULL +duck NULL \ No newline at end of file diff --git a/test/issues/general/test_19575.test b/test/issues/general/test_19575.test new file mode 100644 index 000000000000..70d21d133cec --- /dev/null +++ b/test/issues/general/test_19575.test @@ -0,0 +1,46 @@ +# name: test/issues/general/test_19575.test +# description: Issue 19575 - [1.4.1] Interesting error with like expression on some character. (Invalid unicode) +# group: [general] + +statement ok +create or replace table aaa(name varchar); + +statement ok +insert into aaa values('绿色'),(chr('0x10FFFF')),(chr('0xD7FF')); + +query I +select count(*) from aaa; +---- +3 + +query I +select * from aaa where name like '绿%'; +---- +绿色 + +query I +select count(*) from aaa where name like '红%'; +---- +0 + +# (0xD7FF + 1) go into unicode surrogate range +query II +explain select * from aaa where name like concat(chr('0xD7FF'),'%'); +---- +physical_plan :.*Filters.*prefix.* + +query I +select count(*) from aaa where name like concat(chr('0xD7FF'),'%'); +---- +1 + +# (0x10FFFF + 1) would be illegal utf8 +query II +explain select * from aaa where name like concat(chr('0x10FFFF'),'%'); +---- +physical_plan :.*Filters.*prefix.* + +query I +select count(*) from aaa where name like concat(chr('0x10FFFF'),'%'); +---- +1 \ No newline at end of file diff --git a/test/issues/general/test_20016.test b/test/issues/general/test_20016.test new file mode 100644 index 000000000000..d00567fcbaae --- /dev/null +++ b/test/issues/general/test_20016.test @@ -0,0 +1,22 @@ +# name: test/issues/general/test_20016.test +# description: Issue 20016 - "INTERNAL Error: Failed to bind column reference" with CTE and generate_series +# group: [general] + +query I +select ( + with cte_1 as ( + select sum(1) + from generate_series(1, col_outer) + ), cte_2 as ( + select ( + select sum(1) + from generate_series(1, col_inner) + ) + from (select 1 as col_inner) as inner_gen + ) + select 1 + from cte_1, cte_2 +) as middle +from (select 1 as col_outer) as outer_gen; +---- +1 \ No newline at end of file diff --git a/test/issues/internal/test_5994.test b/test/issues/internal/test_5994.test index 6e0265f0b1ed..619add894097 100644 --- a/test/issues/internal/test_5994.test +++ b/test/issues/internal/test_5994.test @@ -5,6 +5,6 @@ require parquet query I -SELECT COUNT(*) FROM 'data/parquet-testing/internal_5994.parquet' WHERE eventName != 'ListObjects'; +SELECT COUNT(*) FROM '{DATA_DIR}/parquet-testing/internal_5994.parquet' WHERE eventName != 'ListObjects'; ---- 118 diff --git a/test/optimizer/csv_pushdown.test b/test/optimizer/csv_pushdown.test index 3bff2011497e..fd68e64d6f26 100644 --- a/test/optimizer/csv_pushdown.test +++ b/test/optimizer/csv_pushdown.test @@ -4,7 +4,7 @@ # read a single column from a file query I -SELECT l_returnflag FROM read_csv('data/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'DECIMAL(15,2)','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'VARCHAR','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}); +SELECT l_returnflag FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 
'DECIMAL(15,2)','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'VARCHAR','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}); ---- N N @@ -19,19 +19,19 @@ A # verify the projection pushdown is correctly displayed query II -explain SELECT l_returnflag FROM read_csv('data/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'DECIMAL(15,2)','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'VARCHAR','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}); +explain SELECT l_returnflag FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'DECIMAL(15,2)','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'VARCHAR','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}); ---- physical_plan :.*READ_CSV.*l_returnflag.* # read a column as the incorrect type (l_shipinstruct is not a date) statement error -SELECT l_shipinstruct FROM read_csv('data/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'DECIMAL(15,2)','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}); +SELECT l_shipinstruct FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'DECIMAL(15,2)','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}); ---- Column at position: 13 Set type: DATE Sniffed type: VARCHAR # conversion is skipped if we don't read the value - so even with the incorrect type specified this still works query I -SELECT l_returnflag FROM read_csv('data/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'DECIMAL(15,2)','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}, header = 0); +SELECT l_returnflag FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'DECIMAL(15,2)','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 
'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}, header = 0); ---- N N @@ -46,17 +46,17 @@ A # ignore errors query I -SELECT l_shipinstruct FROM read_csv('data/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'DECIMAL(15,2)','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}, ignore_errors=true); +SELECT l_shipinstruct FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'DECIMAL(15,2)','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}, ignore_errors=true); ---- # ignore errors partially statement error -SELECT l_orderkey, l_partkey, l_extendedprice FROM read_csv('data/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'SMALLINT','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}); +SELECT l_orderkey, l_partkey, l_extendedprice FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'SMALLINT','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}); ---- l_extendedprice query III -SELECT l_orderkey, l_partkey, l_extendedprice FROM read_csv('data/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'SMALLINT','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}, ignore_errors=true, header = 0); +SELECT l_orderkey, l_partkey, l_extendedprice FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'SMALLINT','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}, ignore_errors=true, header = 0); ---- 1 15519 24387 1 6370 10211 @@ 
-64,12 +64,12 @@ SELECT l_orderkey, l_partkey, l_extendedprice FROM read_csv('data/csv/real/linei 1 2403 31330 statement error -SELECT l_orderkey, l_partkey, l_extendedprice FROM read_csv('data/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'USMALLINT','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}); +SELECT l_orderkey, l_partkey, l_extendedprice FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'USMALLINT','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}); ---- l_extendedprice query III -SELECT l_orderkey, l_partkey, l_extendedprice FROM read_csv('data/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'USMALLINT','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}, ignore_errors=true, header = 0); +SELECT l_orderkey, l_partkey, l_extendedprice FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', columns={'l_orderkey': 'INTEGER','l_partkey': 'INTEGER','l_suppkey': 'INTEGER','l_linenumber': 'INTEGER','l_quantity': 'INTEGER','l_extendedprice': 'USMALLINT','l_discount': 'DECIMAL(15,2)','l_tax': 'DECIMAL(15,2)','l_returnflag': 'VARCHAR','l_linestatus': 'VARCHAR','l_shipdate': 'DATE','l_commitdate': 'DATE','l_receiptdate': 'DATE','l_shipinstruct': 'DATE','l_shipmode': 'VARCHAR','l_comment': 'VARCHAR'}, ignore_errors=true, header = 0); ---- 1 15519 24387 1 6731 58958 diff --git a/test/optimizer/expression_rewriter/ordered_aggregate_incorrectly_remove_order.test b/test/optimizer/expression_rewriter/ordered_aggregate_incorrectly_remove_order.test new file mode 100644 index 000000000000..88c137d28857 --- /dev/null +++ b/test/optimizer/expression_rewriter/ordered_aggregate_incorrectly_remove_order.test @@ -0,0 +1,19 @@ +# name: test/optimizer/expression_rewriter/ordered_aggregate_incorrectly_remove_order.test +# description: When there are multiple GroupingSets in aggregates, optimizer may incorrectly remove the ORDER BY clause from aggregates. 
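+# (Expected output explained: GROUP BY ROLLUP (col1) yields the grouping sets {col1} and {}; the per-value groups produce '1' and '2', while the grand-total group must keep GROUP_CONCAT's internal ORDER BY to produce '1,2'.)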
+# group: [expression_rewriter] + +statement ok +CREATE TABLE t1(col1 INT, col2 INT); + +statement ok +INSERT INTO t1 VALUES (2, 1), (1, 2); + +query I +SELECT GROUP_CONCAT(col1 ORDER BY col1) AS c1 +FROM t1 +GROUP BY ROLLUP (col1) +ORDER BY c1; +---- +1 +1,2 +2 diff --git a/test/optimizer/joins/issues/internal_6248.test b/test/optimizer/joins/issues/internal_6248.test new file mode 100644 index 000000000000..3e433ece5998 --- /dev/null +++ b/test/optimizer/joins/issues/internal_6248.test @@ -0,0 +1,38 @@ +# name: test/optimizer/joins/issues/internal_6248.test +# description: Extract all child bindings of logical gets if they have children. Usually the case with table functions +# group: [issues] + +require json + +statement ok +CREATE OR REPLACE TABLE foo("field1" VARCHAR, "field2" INTEGER, "company-id" VARCHAR, "field3" VARCHAR); + +statement ok +CREATE OR REPLACE TABLE bar("company-id" VARCHAR, field4 VARCHAR); + +statement ok +select + ( + select + json_group_array(x.value) as some_field + from + json_each (c.field4) as x, + json_each (some_field_json) as y + where + x.value is not null + and json_contains(x.value, y.value) + ) as field4, +from + ( + select + cc."company-id" as "company-id", + array_agg(distinct {"field1": cc."field1", "field2": cc."field2"}) as some_field_json + from + "foo" as "cc" + where + cc."field3" = 'three' + group by + cc."field3", + cc."company-id" + ) as n + join "bar" c on c."company-id" = n."company-id"; \ No newline at end of file diff --git a/test/optimizer/joins/test_logical_get_with_unnest.test b/test/optimizer/joins/test_logical_get_with_unnest.test new file mode 100644 index 000000000000..d69938792ef9 --- /dev/null +++ b/test/optimizer/joins/test_logical_get_with_unnest.test @@ -0,0 +1,30 @@ +# name: test/optimizer/joins/test_logical_get_with_unnest.test +# description: Test unnest as logical Get. 
Make sure relation manager has all column bindings +# group: [joins] + +require json + +statement ok +CREATE TABLE foo AS +SELECT + ['1', '2'] AS "number", + '{}' AS "my_json", + 42 AS "forty_two", + 'foo' AS "bar", + 'bar' AS "baz" + FROM +range(50); + +statement ok +SELECT + * +FROM +foo AS cc, +UNNEST(cc.number) AS t2 +CROSS JOIN LATERAL +( +SELECT + x.value AS whizz +FROM + json_each(cc.my_json) AS x +) AS col3; \ No newline at end of file diff --git a/test/optimizer/pushdown/filters_on_limit_optimizer.test b/test/optimizer/pushdown/filters_on_limit_optimizer.test new file mode 100644 index 000000000000..6d2524d644fd --- /dev/null +++ b/test/optimizer/pushdown/filters_on_limit_optimizer.test @@ -0,0 +1,15 @@ +# name: test/optimizer/pushdown/filters_on_limit_optimizer.test +# description: Test disabling large limit optimizations when filters are applied +# group: [pushdown] + +statement ok +PRAGMA explain_output = OPTIMIZED_ONLY; + +statement ok +create table t as select range a from range(2_000_000); + +# Checks if the limit is applied after the filter is pushed down +query II +explain select * from T where a < 50 limit 100; +---- +logical_opt :.*LIMIT.*SEQ_SCAN.* \ No newline at end of file diff --git a/test/optimizer/pushdown/parquet_or_pushdown.test b/test/optimizer/pushdown/parquet_or_pushdown.test index 6e9d1b06cade..b1c0df16e1d3 100644 --- a/test/optimizer/pushdown/parquet_or_pushdown.test +++ b/test/optimizer/pushdown/parquet_or_pushdown.test @@ -9,27 +9,27 @@ mode skip # Multiple column in the root OR node, don't push down query II -EXPLAIN SELECT tbl.a, tbl.b FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl(a, b) WHERE a=1 OR b=false +EXPLAIN SELECT tbl.a, tbl.b FROM "{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet" tbl(a, b) WHERE a=1 OR b=false ---- physical_plan :.*PARQUET_SCAN.*Filters:.* # Single column in the root OR node query II -EXPLAIN SELECT tbl.a FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a=1 OR a=2 +EXPLAIN SELECT tbl.a FROM "{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a=1 OR a=2 ---- physical_plan :.*PARQUET_SCAN.*Filters: a=1 OR a=2.* # Single column + root OR node with AND query II -EXPLAIN SELECT tbl.a FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a=1 OR (a>3 AND a<5) +EXPLAIN SELECT tbl.a FROM "{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a=1 OR (a>3 AND a<5) ---- physical_plan :.*PARQUET_SCAN.*Filters: a=1 OR a>3 AND a<5|.* # Single column multiple ORs query II -EXPLAIN SELECT tbl.a FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a=1 OR a>3 OR a<5 +EXPLAIN SELECT tbl.a FROM "{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a=1 OR a>3 OR a<5 ---- physical_plan :.*PARQUET_SCAN.*Filters: a=1 OR a>3 OR a<5|.* @@ -37,14 +37,14 @@ physical_plan :.*PARQUET_SCAN.*Filters: a=1 OR a>3 OR a<5|.* # Testing not equal query II -EXPLAIN SELECT tbl.a FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a!=1 OR a>3 OR a<2 +EXPLAIN SELECT tbl.a FROM "{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a!=1 OR a>3 OR a<2 ---- physical_plan :.*PARQUET_SCAN.*Filters: a!=1 OR a>3 OR a<2|.* # Multiple OR filters connected with ANDs query II -EXPLAIN SELECT tbl.a, tbl.b, tbl.c FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl(a,b,c) WHERE (a<2 OR a>3) AND (a=1 OR a=4) AND (b=false OR c=1); +EXPLAIN SELECT tbl.a, tbl.b, tbl.c FROM 
"{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet" tbl(a,b,c) WHERE (a<2 OR a>3) AND (a=1 OR a=4) AND (b=false OR c=1); ---- physical_plan :.*PARQUET_SCAN.*Filters: a<2 OR a>3 AND a=1.*OR a=4.* @@ -55,12 +55,12 @@ PRAGMA enable_profiling # should return 2 rows: 0 and 7 query II -EXPLAIN ANALYZE SELECT tbl.a FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a<1 OR a>6; +EXPLAIN ANALYZE SELECT tbl.a FROM "{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a<1 OR a>6; ---- analyzed_plan :.*PARQUET_SCAN.*Filters: a<1 OR a>6.*2[ \t].* # should return 1 row: 0 query II -EXPLAIN ANALYZE SELECT tbl.a FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a<1 OR a>8; +EXPLAIN ANALYZE SELECT tbl.a FROM "{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) WHERE a<1 OR a>8; ---- analyzed_plan :.*PARQUET_SCAN.*Filters: a<1 OR a>8.*1[ \t].* diff --git a/test/optimizer/unnest_rewriter.test_slow b/test/optimizer/unnest_rewriter.test_slow index 01e8894e9a45..25daf9e13df9 100644 --- a/test/optimizer/unnest_rewriter.test_slow +++ b/test/optimizer/unnest_rewriter.test_slow @@ -56,7 +56,7 @@ query III SELECT hits_0.access.page."pageTitle" as "pageTitle", COUNT(DISTINCT CONCAT(ga_sessions."__distinct_key", 'x', hits_0.__row_id)) as "hits_count", COUNT(DISTINCT CASE WHEN product_0.access."productQuantity">0 THEN CONCAT(ga_sessions."__distinct_key", 'x', hits_0."__row_id") END) as "sold_count" -FROM (SELECT GEN_RANDOM_UUID() as __distinct_key, * FROM 'data/parquet-testing/test_unnest_rewriter.parquet' as x) as ga_sessions, +FROM (SELECT GEN_RANDOM_UUID() as __distinct_key, * FROM '{DATA_DIR}/parquet-testing/test_unnest_rewriter.parquet' as x) as ga_sessions, (SELECT GEN_RANDOM_UUID() as __row_id, x.access FROM (SELECT UNNEST(ga_sessions.hits)) as x(access)) as hits_0, (SELECT GEN_RANDOM_UUID() as __row_id, x.access FROM (SELECT UNNEST(hits_0.access.product)) as x(access)) as product_0 GROUP BY 1 ORDER BY 1, 2, 3 LIMIT 2; @@ -90,7 +90,7 @@ query II EXPLAIN SELECT hits_0.access.page."pageTitle" as "pageTitle", COUNT(DISTINCT CONCAT(ga_sessions."__distinct_key", 'x', hits_0.__row_id)) as "hits_count", COUNT(DISTINCT CASE WHEN product_0.access."productQuantity">0 THEN CONCAT(ga_sessions."__distinct_key", 'x', hits_0."__row_id") END) as "sold_count" -FROM (SELECT GEN_RANDOM_UUID() as __distinct_key, * FROM 'data/parquet-testing/test_unnest_rewriter.parquet' as x) as ga_sessions, +FROM (SELECT GEN_RANDOM_UUID() as __distinct_key, * FROM '{DATA_DIR}/parquet-testing/test_unnest_rewriter.parquet' as x) as ga_sessions, (SELECT GEN_RANDOM_UUID() as __row_id, x.access FROM (SELECT UNNEST(ga_sessions.hits)) as x(access)) as hits_0, (SELECT GEN_RANDOM_UUID() as __row_id, x.access FROM (SELECT UNNEST(hits_0.access.product)) as x(access)) as product_0 GROUP BY 1 LIMIT 2; diff --git a/test/parquet/concatenated_gzip_members.test b/test/parquet/concatenated_gzip_members.test index 21252c845b82..c9fbc7e8d577 100644 --- a/test/parquet/concatenated_gzip_members.test +++ b/test/parquet/concatenated_gzip_members.test @@ -5,6 +5,6 @@ require parquet query I -from 'data/parquet-testing/concatenated_gzip_members.parquet' offset 512; +from '{DATA_DIR}/parquet-testing/concatenated_gzip_members.parquet' offset 512; ---- 513 diff --git a/test/parquet/dbp_small_decimal.test b/test/parquet/dbp_small_decimal.test index 16491c8e7db4..fe767ef6b6f9 100644 --- a/test/parquet/dbp_small_decimal.test +++ b/test/parquet/dbp_small_decimal.test @@ -5,7 +5,7 @@ require parquet query 
III -select * from 'data/parquet-testing/dbp_small_decimal.parquet' ; +select * from '{DATA_DIR}/parquet-testing/dbp_small_decimal.parquet' ; ---- 1 10.0 diez 2 20.0 vente diff --git a/test/parquet/encrypted_parquet.test b/test/parquet/encrypted_parquet.test index e2996386673b..73dd24cb06f3 100644 --- a/test/parquet/encrypted_parquet.test +++ b/test/parquet/encrypted_parquet.test @@ -1,5 +1,5 @@ # name: test/parquet/encrypted_parquet.test -# description: Test Parquet reader on data/parquet-testing/encryption +# description: Test Parquet reader on $TEST_DATA_LOC/parquet-testing/encryption # group: [parquet] # TODO: re-enable these tests once we encrypt the full Parquet Encryption spec @@ -9,11 +9,11 @@ mode skip require parquet statement error -SELECT * FROM parquet_scan('data/parquet-testing/encryption/encrypted_footer.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/encryption/encrypted_footer.parquet') limit 50; ---- -Invalid Input Error: Encrypted Parquet files are not supported for file 'data/parquet-testing/encryption/encrypted_footer.parquet' +Invalid Input Error: Encrypted Parquet files are not supported for file '{DATA_DIR}/parquet-testing/encryption/encrypted_footer.parquet' statement error -SELECT * FROM parquet_scan('data/parquet-testing/encryption/encrypted_column.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/encryption/encrypted_column.parquet') limit 50; ---- -Invalid Error: Failed to read Parquet file "data/parquet-testing/encryption/encrypted_column.parquet": Encrypted Parquet files are not supported +Invalid Error: Failed to read Parquet file "{DATA_DIR}/parquet-testing/encryption/encrypted_column.parquet": Encrypted Parquet files are not supported diff --git a/test/parquet/invalid_parquet.test b/test/parquet/invalid_parquet.test index 5edb34af47fc..16e3f83fea85 100644 --- a/test/parquet/invalid_parquet.test +++ b/test/parquet/invalid_parquet.test @@ -1,11 +1,11 @@ # name: test/parquet/invalid_parquet.test -# description: Test Parquet Reader on data/parquet-testing/invalid.parquet +# description: Test Parquet Reader on $TEST_DATA_LOC/parquet-testing/invalid.parquet # group: [parquet] require parquet statement error -SELECT * FROM parquet_scan('data/parquet-testing/invalid.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/invalid.parquet') limit 50; ---- Invalid Input Error: Invalid string encoding found in Parquet file: value "TREL\xC3" is not valid UTF8! @@ -13,6 +13,6 @@ statement ok pragma disable_optimizer statement error -SELECT * FROM parquet_scan('data/parquet-testing/invalid.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/invalid.parquet') limit 50; ---- Invalid Input Error: Invalid string encoding found in Parquet file: value "TREL\xC3" is not valid UTF8! diff --git a/test/parquet/prefetching.test b/test/parquet/prefetching.test index ae1a9a0c2a2b..a58570e8de5f 100644 --- a/test/parquet/prefetching.test +++ b/test/parquet/prefetching.test @@ -10,16 +10,16 @@ set prefetch_all_parquet_files=true; # With default settings, this query will fail: the incorrectly set index page offsets mess with duckdb's prefetching mechanism statement error -FROM 'data/parquet-testing/incorrect_index_page_offsets.parquet' +FROM '{DATA_DIR}/parquet-testing/incorrect_index_page_offsets.parquet' ---- -IO Error: The parquet file 'data/parquet-testing/incorrect_index_page_offsets.parquet' seems to have incorrectly set page offsets. 
This interferes with DuckDB's prefetching optimization. DuckDB may still be able to scan this file by manually disabling the prefetching mechanism using: 'SET disable_parquet_prefetching=true'. +IO Error: The parquet file '{DATA_DIR}/parquet-testing/incorrect_index_page_offsets.parquet' seems to have incorrectly set page offsets. This interferes with DuckDB's prefetching optimization. DuckDB may still be able to scan this file by manually disabling the prefetching mechanism using: 'SET disable_parquet_prefetching=true'. # Now we disable prefetching statement ok set disable_parquet_prefetching=true; query IIIIIIIIIII -FROM 'data/parquet-testing/incorrect_index_page_offsets.parquet' +FROM '{DATA_DIR}/parquet-testing/incorrect_index_page_offsets.parquet' ---- 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43 0 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31 1 diff --git a/test/parquet/test_filename_column.test b/test/parquet/test_filename_column.test index 854857725294..bf6700ed1a07 100644 --- a/test/parquet/test_filename_column.test +++ b/test/parquet/test_filename_column.test @@ -6,57 +6,57 @@ require parquet # anything non-VARCHAR will be cast to boolean, and interpreted as such query I -SELECT pq.filename FROM read_parquet('data/parquet-testing/enum.parquet', filename=true) pq LIMIT 1 +SELECT pq.filename FROM read_parquet('{DATA_DIR}/parquet-testing/enum.parquet', filename=true) pq LIMIT 1 ---- -data/parquet-testing/enum.parquet +{DATA_DIR}/parquet-testing/enum.parquet query I -SELECT pq.filename FROM read_parquet('data/parquet-testing/enum.parquet', filename=1) pq LIMIT 1 +SELECT pq.filename FROM read_parquet('{DATA_DIR}/parquet-testing/enum.parquet', filename=1) pq LIMIT 1 ---- -data/parquet-testing/enum.parquet +{DATA_DIR}/parquet-testing/enum.parquet # the string TRUE can be a column name query I -SELECT "TRUE" FROM read_parquet('data/parquet-testing/enum.parquet', filename='TRUE') pq LIMIT 1 +SELECT "TRUE" FROM read_parquet('{DATA_DIR}/parquet-testing/enum.parquet', filename='TRUE') pq LIMIT 1 ---- -data/parquet-testing/enum.parquet +{DATA_DIR}/parquet-testing/enum.parquet # FALSR too query I -SELECT "FALSE" FROM read_parquet('data/parquet-testing/enum.parquet', filename='FALSE') pq LIMIT 1 +SELECT "FALSE" FROM read_parquet('{DATA_DIR}/parquet-testing/enum.parquet', filename='FALSE') pq LIMIT 1 ---- -data/parquet-testing/enum.parquet +{DATA_DIR}/parquet-testing/enum.parquet # this is the output without an additional filename column query IIIIIII nosort q0 -SELECT * FROM read_parquet('data/parquet-testing/enum.parquet') +SELECT * FROM read_parquet('{DATA_DIR}/parquet-testing/enum.parquet') ---- # this shouldn't somehow add a column with the name false/0/FALSE query IIIIIII nosort q0 -SELECT * FROM read_parquet('data/parquet-testing/enum.parquet', filename=false) +SELECT * FROM read_parquet('{DATA_DIR}/parquet-testing/enum.parquet', filename=false) ---- query IIIIIII nosort q0 -SELECT * FROM read_parquet('data/parquet-testing/enum.parquet', filename=0) +SELECT * FROM read_parquet('{DATA_DIR}/parquet-testing/enum.parquet', filename=0) ---- # cool names work too query I -SELECT my_cool_filename FROM read_parquet('data/parquet-testing/enum.parquet', filename='my_cool_filename') LIMIT 1 +SELECT my_cool_filename FROM read_parquet('{DATA_DIR}/parquet-testing/enum.parquet', filename='my_cool_filename') LIMIT 1 ---- -data/parquet-testing/enum.parquet +{DATA_DIR}/parquet-testing/enum.parquet query I -SELECT my_cool_filename FROM read_parquet('data/parquet-testing/enum.parquet', 
filename=my_cool_filename) LIMIT 1 +SELECT my_cool_filename FROM read_parquet('{DATA_DIR}/parquet-testing/enum.parquet', filename=my_cool_filename) LIMIT 1 ---- -data/parquet-testing/enum.parquet +{DATA_DIR}/parquet-testing/enum.parquet query III -select file_name[22:], row_group_id, bloom_filter_excludes from parquet_bloom_probe('data/parquet-testing/multi_bloom_*.parquet', 'a', 1) +SELECT parse_filename(file_name), row_group_id, bloom_filter_excludes FROM parquet_bloom_probe('{DATA_DIR}/parquet-testing/multi_bloom_*.parquet', 'a', 1) ---- multi_bloom_a.parquet 0 false multi_bloom_b.parquet 0 true -multi_bloom_c.parquet 0 true \ No newline at end of file +multi_bloom_c.parquet 0 true diff --git a/test/parquet/test_legacy_empty_pandas_parquet.test b/test/parquet/test_legacy_empty_pandas_parquet.test index 5c0df8ca1b63..4cf277001bcc 100644 --- a/test/parquet/test_legacy_empty_pandas_parquet.test +++ b/test/parquet/test_legacy_empty_pandas_parquet.test @@ -5,5 +5,6 @@ require parquet # This file includes the unsupported NULL (24) ConvertedType # Which is not supported by the spec, but written by some ancient versions of Pandas (pre-2020) + statement ok -select * from 'data/parquet-testing/empty.parquet' +select * from '{DATA_DIR}/parquet-testing/empty.parquet' diff --git a/test/parquet/test_parquet_reader.test_slow b/test/parquet/test_parquet_reader.test_slow index 2ae1518446ff..6dd9354a83c3 100644 --- a/test/parquet/test_parquet_reader.test_slow +++ b/test/parquet/test_parquet_reader.test_slow @@ -1,5 +1,5 @@ # name: test/parquet/test_parquet_reader.test_slow -# description: Test Parquet Reader with files on data/parquet-testing +# description: Test Parquet Reader with files on $TEST_DATA_LOC/parquet-testing # group: [parquet] require parquet @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification query I -SELECT * FROM parquet_scan('data/parquet-testing/manyrowgroups.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/manyrowgroups.parquet') limit 50; ---- 42 42 @@ -62,7 +62,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/manyrowgroups.parquet') limit 5 90 query I -SELECT * FROM parquet_scan('data/parquet-testing/map.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/map.parquet') limit 50; ---- {Content-Encoding=gzip, X-Frame-Options=SAMEORIGIN, Connection=keep-alive, Via='1.1 ip-10-1-1-216.ec2.internal (squid/4.10-20200322-r358ad2fdf)', X-Xss-Protection='1; mode=block', Content-Type='text/html;charset=utf-8', Date='Sat, 30 Jan 2021 16:19:57 GMT', X-Cache=MISS from ip-10-1-1-216.ec2.internal, Vary=Accept-Encoding, Server=nginx/1.10.3, X-Cache-Lookup='HIT from ip-10-1-1-216.ec2.internal:3128', X-Content-Type-Options=nosniff, Content-Length=921} {Content-Encoding=gzip, X-Frame-Options=SAMEORIGIN, Connection=keep-alive, Via='1.1 ip-10-1-1-216.ec2.internal (squid/4.10-20200322-r358ad2fdf)', X-Xss-Protection='1; mode=block', Content-Type='text/html;charset=utf-8', Date='Sat, 30 Jan 2021 16:19:59 GMT', X-Cache=MISS from ip-10-1-1-216.ec2.internal, Vary=Accept-Encoding, Server=nginx/1.10.3, X-Cache-Lookup='HIT from ip-10-1-1-216.ec2.internal:3128', X-Content-Type-Options=nosniff, Content-Length=922} @@ -116,7 +116,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/map.parquet') limit 50; {Content-Encoding=gzip, X-Frame-Options=SAMEORIGIN, Connection=keep-alive, Via='1.1 ip-10-1-1-216.ec2.internal (squid/4.10-20200322-r358ad2fdf)', X-Xss-Protection='1; mode=block', Content-Type='text/html;charset=utf-8', Date='Sat, 30 Jan 2021 16:20:53 
GMT', X-Cache=MISS from ip-10-1-1-216.ec2.internal, Vary=Accept-Encoding, Server=nginx/1.10.3, X-Cache-Lookup='HIT from ip-10-1-1-216.ec2.internal:3128', X-Content-Type-Options=nosniff, Content-Length=891} query I -SELECT * FROM parquet_scan('data/parquet-testing/arrow/int32_decimal.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/int32_decimal.parquet') limit 50; ---- 1.00 2.00 @@ -144,12 +144,12 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/int32_decimal.parquet') l 24.00 query IIIIII -SELECT * FROM parquet_scan('data/parquet-testing/arrow/nonnullable.impala.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/nonnullable.impala.parquet') limit 50; ---- 8 [-1] [[-1, -2], []] {k1=-1} [{}, {k1=1}, {}, {}] {'a': -1, 'B': [-1], 'c': {'D': [[{'e': -1, 'f': nonnullable}]]}, 'G': {}} query IIIIIIIIII -SELECT * FROM parquet_scan('data/parquet-testing/bug687_nulls.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/bug687_nulls.parquet') limit 50; ---- NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 95 42 39 49 16 34 82 7 40 82 @@ -203,7 +203,7 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 4 94 21 52 43 32 43 51 56 60 query IIII -SELECT * FROM parquet_scan('data/parquet-testing/bug1554.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1554.parquet') limit 50; ---- 1584883:SSuWRbZnFmIqCUBQYxk9+48fdIwywjfQUyfcKP+pbJhaqWS+UZh0Sua8VNJKlQpIlRzyWr57xyrqTh2ZgIQnxQ== False NULL 200 1584883:VduFa/R/CL7CbbEUmdFKysh80R38hXdrfuDlFhsa5mU3G3vfUDiQdTR0H0LzJzWojUDGgUr+hKp55VRRXMxaaQ== False NULL 200 @@ -257,19 +257,19 @@ SELECT * FROM parquet_scan('data/parquet-testing/bug1554.parquet') limit 50; 1584883:sV6uqASHK17GJVEXh2mxbbRIk08qivvqS561cy09Zn+SCUMHZL7J/BLRsx0/kYi1Uzkh52SsocpbQzuYeRT+lQ== False NULL 200 query IIIII -SELECT * FROM parquet_scan('data/parquet-testing/apkwan.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/apkwan.parquet') limit 50; ---- 250 values hashing to 559e365557478feb96ab11fdf474e74d query II -SELECT * FROM parquet_scan('data/parquet-testing/arrow/nested_lists.snappy.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/nested_lists.snappy.parquet') limit 50; ---- [[[a, b], [c]], [NULL, [d]]] 1 [[[a, b], [c, d]], [NULL, [e]]] 1 [[[a, b], [c, d], [e]], [NULL, [f]]] 1 query I -SELECT * FROM parquet_scan('data/parquet-testing/arrow/nulls.snappy.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/nulls.snappy.parquet') limit 50; ---- {'b_c_int': NULL} {'b_c_int': NULL} @@ -281,14 +281,14 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/nulls.snappy.parquet') li {'b_c_int': NULL} query III -SELECT * FROM parquet_scan('data/parquet-testing/nan-float.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/nan-float.parquet') limit 50; ---- -1.0 foo True inf bar False 2.5 baz True query I -SELECT * FROM parquet_scan('data/parquet-testing/manyrowgroups2.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/manyrowgroups2.parquet') limit 50; ---- 42 42 @@ -342,13 +342,13 @@ SELECT * FROM parquet_scan('data/parquet-testing/manyrowgroups2.parquet') limit 90 query I -SELECT * FROM parquet_scan('data/parquet-testing/struct.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/struct.parquet') limit 50; ---- {'str_field': hello, 'f64_field': NULL} {'str_field': NULL, 'f64_field': 1.23} query I -SELECT * FROM 
parquet_scan('data/parquet-testing/arrow/byte_array_decimal.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/byte_array_decimal.parquet') limit 50; ---- 1.00 2.00 @@ -376,25 +376,25 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/byte_array_decimal.parque 24.00 query II -SELECT * FROM parquet_scan('data/parquet-testing/arrow/list_columns.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/list_columns.parquet') limit 50; ---- [1, 2, 3] [abc, efg, hij] [NULL, 1] NULL [4] [efg, NULL, hij, xyz] query I -SELECT * FROM parquet_scan('data/parquet-testing/timestamp-ms.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/timestamp-ms.parquet') limit 50; ---- 2020-10-05 17:21:49 query IIIIIIIIIII -SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_dictionary.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_dictionary.parquet') limit 50; ---- 0 True 0 0 0 0 0.0 0.0 01/01/09 0 2009-01-01 00:00:00 1 False 1 1 1 10 1.100000023841858 10.1 01/01/09 1 2009-01-01 00:01:00 query I -SELECT * FROM parquet_scan('data/parquet-testing/arrow/binary.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/binary.parquet') limit 50; ---- \x00 \x01 @@ -410,7 +410,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/binary.parquet') limit 50 \x0B query IIII -SELECT * FROM parquet_scan('data/parquet-testing/arrow/nation.dict-malformed.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/nation.dict-malformed.parquet') limit 50; ---- 0 ALGERIA 0 haggle. carefully final deposits detect slyly agai 1 ARGENTINA 1 al foxes promise slyly according to the regular accounts. bold requests alon @@ -439,7 +439,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/nation.dict-malformed.par 24 UNITED STATES 1 y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be query IIIIIIIIIIIIIIII -SELECT * FROM parquet_scan('data/parquet-testing/lineitem-top10000.gzip.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet') limit 50; ---- 1 155190 7706 1 17 21168.23 0.04 0.02 N O 1996-03-13 1996-02-12 1996-03-22 DELIVER IN PERSON TRUCK egular courts above the 1 67310 7311 2 36 45983.16 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL ly final dependencies: slyly bold @@ -493,7 +493,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/lineitem-top10000.gzip.parquet' 39 2320 9821 1 44 53782.08 0.09 0.06 N O 1996-11-14 1996-12-15 1996-12-12 COLLECT COD RAIL eodolites. 
careful query III -SELECT * FROM parquet_scan('data/parquet-testing/arrow/nested_maps.snappy.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/nested_maps.snappy.parquet') limit 50; ---- {a={1=true, 2=false}} 1 1.000000 {b={1=true}} 1 1.000000 @@ -503,7 +503,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/nested_maps.snappy.parque {f={3=true, 4=false, 5=true}} 1 1.000000 query I -SELECT * FROM parquet_scan('data/parquet-testing/arrow/dict-page-offset-zero.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/dict-page-offset-zero.parquet') limit 50; ---- 1552 1552 @@ -546,32 +546,32 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/dict-page-offset-zero.par 1552 query III -SELECT * FROM parquet_scan('data/parquet-testing/silly-names.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/silly-names.parquet') limit 50; ---- 1 foo True 2 bar False 3 baz True query III -SELECT * FROM parquet_scan('data/parquet-testing/zstd.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/zstd.parquet') limit 50; ---- -1.0 foo True NULL bar False 2.5 baz True query I -SELECT * FROM parquet_scan('data/parquet-testing/bug1618_struct_strings.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1618_struct_strings.parquet') limit 50; ---- {'str_field': hello, 'f64_field': NULL} {'str_field': NULL, 'f64_field': 1.23} query I -SELECT * FROM parquet_scan('data/parquet-testing/arrow/single_nan.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/single_nan.parquet') limit 50; ---- NULL query I -SELECT * FROM parquet_scan('data/parquet-testing/arrow/int64_decimal.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/int64_decimal.parquet') limit 50; ---- 1.00 2.00 @@ -599,7 +599,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/int64_decimal.parquet') l 24.00 query IIIIIIIIIIIIIIIIIII -SELECT * FROM parquet_scan('data/parquet-testing/filter_bug1391.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/filter_bug1391.parquet') limit 50; ---- 98 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 XYZ 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 275 DDU Emergency & General Surgery 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 7 Ward/Unit 3 Department 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 CODE 13 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 XYZ 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 275 DDU Emergency & General Surgery 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 7 Ward/Unit 3 Department 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 CODE @@ -653,7 +653,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/filter_bug1391.parquet') limit 257 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 XYZ 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 141 Therapies CBU 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 7 Ward/Unit NULL NULL 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 1900-01-01 00:00:00.000 9999-12-31 00:00:00.000 CODE query I -SELECT * FROM parquet_scan('data/parquet-testing/arrow/fixed_length_decimal_legacy.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/fixed_length_decimal_legacy.parquet') limit 50; ---- 1.00 2.00 @@ -681,12 +681,12 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/fixed_length_decimal_lega 24.00 
query I -SELECT * FROM parquet_scan('data/parquet-testing/timestamp.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/timestamp.parquet') limit 50; ---- 2020-10-05 17:21:49.48844 query I -SELECT * FROM parquet_scan('data/parquet-testing/arrow/fixed_length_decimal.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/fixed_length_decimal.parquet') limit 50; ---- 1.00 2.00 @@ -714,7 +714,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/fixed_length_decimal.parq 24.00 query IIIIIIIIIIIIIIIIIIII -SELECT * FROM parquet_scan('data/parquet-testing/leftdate3_192_loop_1.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/leftdate3_192_loop_1.parquet') limit 50; ---- 00080010 10006 22156 0.0 1.0 3743.0 1925-12-31 109.0 109.75 109.0 400.0 NULL 109.375 109.5 600.0 7.412625 7.26 109.75 NULL NULL 00299090 10022 22158 0.0 1.0 3420.0 1925-12-31 55.0 56.0 56.0 3400.0 NULL 56.0 56.25 200.0 9.365437 9.365437 55.125 NULL NULL @@ -768,14 +768,14 @@ SELECT * FROM parquet_scan('data/parquet-testing/leftdate3_192_loop_1.parquet') 04557310 10559 22202 0.0 1.0 5311.0 1925-12-31 53.0 54.125 53.0 3800.0 NULL 52.75 53.0 599.0 18.0 18.0 53.5 NULL NULL query III -SELECT * FROM parquet_scan('data/parquet-testing/blob.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/blob.parquet') limit 50; ---- 1 \x04\x00 str1 2 \x04\x00\x80 str2 3 \x03\xFF\x00\xFF str3 query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII -SELECT * FROM parquet_scan('data/parquet-testing/bug1588.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1588.parquet') limit 50; ---- 1621259:e1WMOfPKh7EnuBJ+dG3V8mksk2NSFL8m7vLi1NPB3xk6NzQI4Dfqs3Ok2GmTRXqMqo3oc7T3ckM0/uTs/e4nVg== 1621259 e1WMOfPKh7EnuBJ+dG3V8mksk2NSFL8m7vLi1NPB3xk6NzQI4Dfqs3Ok2GmTRXqMqo3oc7T3ckM0/uTs/e4nVg== a YiX2OkkxZvSMMT5TmbyZjlE8gCQSSmvxUrNBtLw1rWrs5cmxQNdTwPJgzgXNB3nF+1vaazrHwH32rnq67T7cHg== http://crawler-test.com/urls/page_url_length_n 0.66 200 Page URL Length wl8lmqkOqcTS8gbpoGaBf0ZxvleJXOCIrIDuQui6k7nhUPa5Xu/tupkmw0xYxSz8ByUNjMGgY0i0egvh6WNBBw== http://crawler-test.com/urls/page_url_length/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 0.0 200 Page URL Length (400 characters a Page URL Length (400 characters) False False True False NULL NULL http://crawler-test.com/urls/page_url_length_n YiX2OkkxZvSMMT5TmbyZjlE8gCQSSmvxUrNBtLw1rWrs5cmxQNdTwPJgzgXNB3nF+1vaazrHwH32rnq67T7cHg== http://crawler-test.com/urls/page_url_length/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx wl8lmqkOqcTS8gbpoGaBf0ZxvleJXOCIrIDuQui6k7nhUPa5Xu/tupkmw0xYxSz8ByUNjMGgY0i0egvh6WNBBw== False False False 2 2 False False False 1 doc 0 1621259:fCt7H+I6nGxXkRaRSYXL5kW5lkuXmaXAXiZdcm5dejcpwITcdVYNeYY0JQAJbDAOX2mjEOQX0Z/vvq9OAQDFUQ== 1621259 
fCt7H+I6nGxXkRaRSYXL5kW5lkuXmaXAXiZdcm5dejcpwITcdVYNeYY0JQAJbDAOX2mjEOQX0Z/vvq9OAQDFUQ== a XnZecm8bhAZFA327isPnIwA+v6xejmC5P8/iQ0ax8ZMBhM/mCxes6Ugj8WsvwK6qKJteGGM5pLvdyBDhcaisyw== https://crawler-test.com/content/page_html_size_n 1.89 200 Page HTML Size hzYV7GhCYN6hhz7hXVTbyIct7YXRsyHzsCriciWYnQeMCzu9UKIsFAKdKf3yecmiDIVgYA/N8lLETj4AyprBmA== https://crawler-test.com/content/page_html_size/5 0.27 200 Page html Size (~5 KB) a Page HTML Size (~5 KB) False False True False NULL NULL https://crawler-test.com/content/page_html_size_n XnZecm8bhAZFA327isPnIwA+v6xejmC5P8/iQ0ax8ZMBhM/mCxes6Ugj8WsvwK6qKJteGGM5pLvdyBDhcaisyw== https://crawler-test.com/content/page_html_size/5 hzYV7GhCYN6hhz7hXVTbyIct7YXRsyHzsCriciWYnQeMCzu9UKIsFAKdKf3yecmiDIVgYA/N8lLETj4AyprBmA== True True False 2 2 False False False 1 doc 0 @@ -829,7 +829,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/bug1588.parquet') limit 50; 1621259:QQ/wWoUYYMJETokJJxVZSlqTHUMifVE7YGZcGNvLQkWH4CpFqgXBlt/ABSfSJs3zBJL9gitT6ABEmV5MWFwOUQ== 1621259 QQ/wWoUYYMJETokJJxVZSlqTHUMifVE7YGZcGNvLQkWH4CpFqgXBlt/ABSfSJs3zBJL9gitT6ABEmV5MWFwOUQ== a 0FgjuY6ilewP5/Tv2RHwt9A66vuidBx3jYV1+sEb5Rcld/ErHsWinoZyRnlrirqQiLdBjxv9zCe/9i9wj+FPkg== https://crawler-test.com/titles/page_title_length_n 1.89 200 Page Title Length /yh69joQhtpoZX9pPVfv7cbB3acvTzB22nfHIpCNP1cgfcZC3BnkcflsLTVeRY5xZ1qDpYf5fGlQMu72PhPOxQ== https://crawler-test.com/titles/page_title_length/2 0.27 200 xx a Page Title Length (2 characters) False False True False NULL NULL https://crawler-test.com/titles/page_title_length_n 0FgjuY6ilewP5/Tv2RHwt9A66vuidBx3jYV1+sEb5Rcld/ErHsWinoZyRnlrirqQiLdBjxv9zCe/9i9wj+FPkg== https://crawler-test.com/titles/page_title_length/2 /yh69joQhtpoZX9pPVfv7cbB3acvTzB22nfHIpCNP1cgfcZC3BnkcflsLTVeRY5xZ1qDpYf5fGlQMu72PhPOxQ== True True False 2 2 False False False 1 doc 0 query II -SELECT * FROM parquet_scan('data/parquet-testing/bug1589.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1589.parquet') limit 50; ---- 200 NULL 300 NULL @@ -885,7 +885,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/bug1589.parquet') limit 50; mode skip query I -SELECT * FROM parquet_scan('data/parquet-testing/arrow/hadoop_lz4_compressed_larger.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/hadoop_lz4_compressed_larger.parquet') limit 50; ---- c7ce6bef-d5b0-4863-b199-8ea8c7fb117b e8fb9197-cb9f-4118-b67f-fbfa65f61843 @@ -941,7 +941,7 @@ c50e8ade-6051-436f-a26e-acc9c0594be5 mode skip query III -SELECT * FROM parquet_scan('data/parquet-testing/arrow/non_hadoop_lz4_compressed.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/non_hadoop_lz4_compressed.parquet') limit 50; ---- 1593604800 abc 42.0 1593604800 def 7.7 @@ -951,7 +951,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/non_hadoop_lz4_compressed mode unskip query IIIIIIIIIII -SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_plain.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet') limit 50; ---- 4 True 0 0 0 0 0.0 0.0 03/01/09 0 2009-03-01 00:00:00 5 False 1 1 1 10 1.100000023841858 10.1 03/01/09 1 2009-03-01 00:01:00 @@ -963,7 +963,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_plain.parquet') 1 False 1 1 1 10 1.100000023841858 10.1 01/01/09 1 2009-01-01 00:01:00 query II -SELECT * FROM parquet_scan('data/parquet-testing/arrow/repeated_no_annotation.parquet') limit 50; +SELECT * FROM 
parquet_scan('{DATA_DIR}/parquet-testing/arrow/repeated_no_annotation.parquet') limit 50; ---- 1 NULL 2 NULL @@ -973,7 +973,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/repeated_no_annotation.pa 6 {'phone': [{'number': 1111111111, 'kind': home}, {'number': 2222222222, 'kind': NULL}, {'number': 3333333333, 'kind': mobile}]} query IIIIIIIIIIII -SELECT * FROM parquet_scan('data/parquet-testing/data-types.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/data-types.parquet') limit 50; ---- NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 42 43 44 45 4.599999904632568 4.7 4.80 49 50 True 2019-11-26 20:11:42.501 2020-01-10 @@ -982,7 +982,7 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL query IIII -SELECT * FROM parquet_scan('data/parquet-testing/unsigned.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/unsigned.parquet') limit 50; ---- 1 1 1 1 2 2 2 2 @@ -992,13 +992,13 @@ SELECT * FROM parquet_scan('data/parquet-testing/unsigned.parquet') limit 50; 255 65535 4294967295 18446744073709551615 query I -SELECT * FROM parquet_scan('data/parquet-testing/pandas-date.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/pandas-date.parquet') limit 50; ---- 2021-01-12 1921-12-24 query I -SELECT * FROM parquet_scan('data/parquet-testing/date.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') limit 50; ---- 1970-01-01 1971-01-01 @@ -1038,7 +1038,7 @@ NULL 1997-01-01 query IIIIII -SELECT * FROM parquet_scan('data/parquet-testing/arrow/nullable.impala.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/nullable.impala.parquet') limit 50; ---- 1 [1, 2, 3] [[1, 2], [3, 4]] {k1=1, k2=100} [{k1=1}] {'A': 1, 'b': [1], 'C': {'d': [[{'E': 10, 'F': aaa}, {'E': -10, 'F': bbb}], [{'E': 11, 'F': c}]]}, 'g': {foo={'H': {'i': [1.100000]}}}} 2 [NULL, 1, 2, NULL, 3, NULL] [[NULL, 1, 2, NULL], [3, NULL, 4], [], NULL] {k1=2, k2=NULL} [{k3=NULL, k1=1}, NULL, {}] {'A': NULL, 'b': [NULL], 'C': {'d': [[{'E': NULL, 'F': NULL}, {'E': 10, 'F': aaa}, {'E': NULL, 'F': NULL}, {'E': -10, 'F': bbb}, {'E': NULL, 'F': NULL}], [{'E': 11, 'F': c}, NULL], [], NULL]}, 'g': {g1={'H': {'i': [2.200000, NULL]}}, g2={'H': {'i': []}}, g3=NULL, g4={'H': {'i': NULL}}, g5={'H': NULL}}} @@ -1051,7 +1051,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/nullable.impala.parquet') mode skip query III -SELECT * FROM parquet_scan('data/parquet-testing/arrow/hadoop_lz4_compressed.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/hadoop_lz4_compressed.parquet') limit 50; ---- 1593604800 abc 42.0 1593604800 def 7.7 @@ -1061,18 +1061,18 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/hadoop_lz4_compressed.par mode unskip query I -SELECT * FROM parquet_scan('data/parquet-testing/fixed.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/fixed.parquet') limit 50; ---- \x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F query IIIIIIIIIII -SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_plain.snappy.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_plain.snappy.parquet') limit 50; ---- 6 True 0 0 0 0 0.0 0.0 04/01/09 0 2009-04-01 00:00:00 7 False 1 1 1 10 1.100000023841858 10.1 04/01/09 1 2009-04-01 00:01:00 query I -SELECT * FROM 
parquet_scan('data/parquet-testing/decimal/int32_decimal.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/decimal/int32_decimal.parquet') limit 50; ---- 1.00 2.00 @@ -1100,7 +1100,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/decimal/int32_decimal.parquet') 24.00 query IIIII -SELECT * FROM parquet_scan('data/parquet-testing/decimal/pandas_decimal.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/decimal/pandas_decimal.parquet') limit 50; ---- 1234.0 12.34 12345.6789 123456789.98765433 922337203685477580700.92230685477500000 -1234.0 -12.34 -9765.4321 -987654321.12345680 -922337236854775807.92233720306854775 @@ -1110,7 +1110,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/decimal/pandas_decimal.parquet' NULL NULL NULL NULL 0E-17 query I -SELECT * FROM parquet_scan('data/parquet-testing/decimal/arrow/byte_array_decimal.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/decimal/arrow/byte_array_decimal.parquet') limit 50; ---- 1.00 2.00 @@ -1138,7 +1138,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/decimal/arrow/byte_array_decima 24.00 query I -SELECT * FROM parquet_scan('data/parquet-testing/decimal/decimal_dc.parquet') limit 50 +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/decimal/decimal_dc.parquet') limit 50 ---- NULL NULL @@ -1192,7 +1192,7 @@ NULL NULL query I -SELECT * FROM parquet_scan('data/parquet-testing/decimal/int64_decimal.parquet') limit 50 +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/decimal/int64_decimal.parquet') limit 50 ---- 1.00 2.00 @@ -1220,7 +1220,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/decimal/int64_decimal.parquet') 24.00 query I -SELECT * FROM parquet_scan('data/parquet-testing/decimal/fixed_length_decimal_legacy.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/decimal/fixed_length_decimal_legacy.parquet') limit 50; ---- 1.00 2.00 @@ -1248,7 +1248,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/decimal/fixed_length_decimal_le 24.00 query I -SELECT * FROM parquet_scan('data/parquet-testing/decimal/fixed_length_decimal.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/decimal/fixed_length_decimal.parquet') limit 50; ---- 1.00 2.00 @@ -1276,34 +1276,34 @@ SELECT * FROM parquet_scan('data/parquet-testing/decimal/fixed_length_decimal.pa 24.00 query II -SELECT * FROM parquet_scan('data/parquet-testing/glob2/t1.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/glob2/t1.parquet') limit 50; ---- 1 hello query II -SELECT * FROM parquet_scan('data/parquet-testing/cache/cache1.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/cache/cache1.parquet') limit 50; ---- 1 hello query II -SELECT * FROM parquet_scan('data/parquet-testing/cache/cache2.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/cache/cache2.parquet') limit 50; ---- 0 10 1 20 2 30 query II -SELECT * FROM parquet_scan('data/parquet-testing/glob/t2.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/glob/t2.parquet') limit 50; ---- 1 hello query II -SELECT * FROM parquet_scan('data/parquet-testing/glob/t1.parquet') limit 50; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/glob/t1.parquet') limit 50; ---- 1 hello query III -SELECT * FROM parquet_scan('data/parquet-testing/bug2557.parquet') limit 10 +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/bug2557.parquet') limit 10 ---- [adipiscing, elit] [267] [] 
[adipiscing, elit] [58, 146] [3105.735731, 7332.144961, 2693.459659, 2058.830347] @@ -1318,4 +1318,4 @@ SELECT * FROM parquet_scan('data/parquet-testing/bug2557.parquet') limit 10 statement ok -from 'data/parquet-testing/bug14120-dict-nulls-only.parquet'; \ No newline at end of file +from '{DATA_DIR}/parquet-testing/bug14120-dict-nulls-only.parquet'; diff --git a/test/parquet/test_parquet_reader_compression.test b/test/parquet/test_parquet_reader_compression.test index c50e804f2bdd..c9582fe26719 100644 --- a/test/parquet/test_parquet_reader_compression.test +++ b/test/parquet/test_parquet_reader_compression.test @@ -1,5 +1,5 @@ # name: test/parquet/test_parquet_reader_compression.test -# description: Test Parquet Reader with files on data/parquet-testing/compression +# description: Test Parquet Reader with files on $TEST_DATA_LOC/parquet-testing/compression # group: [parquet] require parquet @@ -7,7 +7,7 @@ require parquet foreach codec NONE SNAPPY GZIP ZSTD LZ4 BROTLI query IIII -SELECT * FROM parquet_scan('data/parquet-testing/compression/generated/data_page=1_${codec}.parquet', hive_partitioning=0) limit 50 +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/compression/generated/data_page=1_${codec}.parquet', hive_partitioning=0) limit 50 ---- 0 20 {'string': foo, 'int': 22} [] 1 6 {'string': baz, 'int': 10} NULL @@ -41,7 +41,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/compression/generated/data_page 29 13 {'string': bar, 'int': 8} [40, 32, 9, 2, 2, 40, 7, 0, 32, 31, 11, 14, 4, 14, 40, 20, 29, 17, 41] query IIII -SELECT * FROM parquet_scan('data/parquet-testing/compression/generated/data_page=2_${codec}.parquet', hive_partitioning=0) limit 50 +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/compression/generated/data_page=2_${codec}.parquet', hive_partitioning=0) limit 50 ---- 0 20 {'string': foo, 'int': 22} [] 1 6 {'string': baz, 'int': 10} NULL @@ -75,8 +75,8 @@ SELECT * FROM parquet_scan('data/parquet-testing/compression/generated/data_page 29 13 {'string': bar, 'int': 8} [40, 32, 9, 2, 2, 40, 7, 0, 32, 31, 11, 14, 4, 14, 40, 20, 29, 17, 41] query I -SELECT * FROM parquet_scan('data/parquet-testing/compression/empty_datapage_v2.snappy.parquet', hive_partitioning=0) limit 50 +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/compression/empty_datapage_v2.snappy.parquet', hive_partitioning=0) limit 50 ---- NULL -endloop \ No newline at end of file +endloop diff --git a/test/parquet/test_parquet_schema.test b/test/parquet/test_parquet_schema.test index 76ba4cacf368..3ed4b849fee5 100644 --- a/test/parquet/test_parquet_schema.test +++ b/test/parquet/test_parquet_schema.test @@ -5,18 +5,18 @@ require parquet statement ok -COPY (SELECT 42::INTEGER i) TO '__TEST_DIR__/integers.parquet' (FIELD_IDS {i: 0}) +COPY (SELECT 42::INTEGER i) TO '{TEMP_DIR}/integers.parquet' (FIELD_IDS {i: 0}) statement error SELECT * -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map{}) +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map{}) ---- Invalid Input Error: 'schema' expects a STRUCT as the value type of the map # can't combine with union_by_name statement error SELECT * -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 0: {name: 'renamed_i', type: 'BIGINT', default_value: NULL}, 1: {name: 'new_column', type: 'UTINYINT', default_value: 43} }, union_by_name=true) @@ -26,7 +26,7 @@ Binder Error: Parquet schema cannot be combined with union_by_name=true or hive_ # can't combine with 
hive_partitioning statement error SELECT * -FROM read_parquet('__TEST_DIR__/*.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/*.parquet', schema=map { 0: {name: 'renamed_i', type: 'BIGINT', default_value: NULL}, 1: {name: 'new_column', type: 'UTINYINT', default_value: 43} }, hive_partitioning=true) @@ -37,13 +37,13 @@ statement ok COPY ( SELECT 1 i1, 3 i3, 4 i4, 5 i5 UNION ALL SELECT 2 i1, 3 i3, 4 i4, 5 i5 -) TO '__TEST_DIR__/partitioned' (FIELD_IDS {i1: 5, i3: 3, i4: 2, i5: 1}, PARTITION_BY i1, FORMAT parquet, WRITE_PARTITION_COLUMNS) +) TO '{TEMP_DIR}/partitioned' (FIELD_IDS {i1: 5, i3: 3, i4: 2, i5: 1}, PARTITION_BY i1, FORMAT parquet, WRITE_PARTITION_COLUMNS) # auto-detection of hive partitioning is enabled by default, # but automatically disabled when a schema is supplied, so this should succeed query IIII SELECT * -FROM read_parquet('__TEST_DIR__/partitioned/*/*.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/partitioned/*/*.parquet', schema=map { 1: {name: 'i1', type: 'BIGINT', default_value: NULL}, 3: {name: 'i3', type: 'BIGINT', default_value: NULL}, 4: {name: 'i4', type: 'BIGINT', default_value: 2}, @@ -58,14 +58,14 @@ statement error COPY ( SELECT 1 i1, 3 i3, 4 i4, 5 i5 UNION ALL SELECT 2 i1, 3 i3, 4 i4, 5 i5 -) TO '__TEST_DIR__/partitioned2' (FIELD_IDS {i1: 5, i3: 3, i4: 2, i5: 1}, PARTITION_BY i1, FORMAT parquet) +) TO '{TEMP_DIR}/partitioned2' (FIELD_IDS {i1: 5, i3: 3, i4: 2, i5: 1}, PARTITION_BY i1, FORMAT parquet) ---- Binder Error: Column name "i1" specified in FIELD_IDS not found. Consider using WRITE_PARTITION_COLUMNS if this column is a partition column. Available column names: # cannot duplicate field_ids statement error SELECT * -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 0: {name: 'renamed_i', type: 'BIGINT', default_value: NULL}, 0: {name: 'new_column', type: 'UTINYINT', default_value: 43} }) @@ -75,7 +75,7 @@ Map keys must be unique # cannot duplicate column names statement error SELECT * -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 0: {name: 'cool_column', type: 'BIGINT', default_value: NULL}, 1: {name: 'cool_column', type: 'UTINYINT', default_value: 43} }) pq @@ -85,7 +85,7 @@ Binder Error: table "pq" has duplicate column name "cool_column" # the supplied default value must be castable to the given type for that column statement error SELECT * -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 0: {name: 'cool_column', type: 'BIGINT', default_value: NULL}, 1: {name: 'cool_column', type: 'UTINYINT', default_value: 'bla'} }) pq @@ -94,7 +94,7 @@ Binder Error: Unable to cast Parquet schema default_value "bla" to UTINYINT query IIIIII DESCRIBE SELECT * -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 0: {name: 'renamed_i', type: 'BIGINT', default_value: NULL}, 1: {name: 'new_column', type: 'UTINYINT', default_value: 43} }) @@ -104,7 +104,7 @@ new_column UTINYINT YES NULL NULL NULL query IIIIII DESCRIBE SELECT * -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 0: {name: 'renamed_i', type: 'BIGINT', default_value: NULL}, 1: {name: 'new_column', type: 'UTINYINT', default_value: 43} }, filename=true) @@ -113,19 +113,19 @@ renamed_i BIGINT YES NULL NULL NULL 
new_column UTINYINT YES NULL NULL NULL filename VARCHAR YES NULL NULL NULL -# we'll test if filename works on a persistent file otherwise __TEST_DIR__ will be different every time +# we'll test if filename works on a persistent file otherwise {TEMP_DIR} will be different every time query II SELECT * -FROM read_parquet('data/parquet-testing/enum.parquet', schema=map { +FROM read_parquet('{DATA_DIR}/parquet-testing/enum.parquet', schema=map { 1: {name: 'cool_column', type: 'VARCHAR', default_value: NULL} }, filename=true) LIMIT 1 ---- -1 data/parquet-testing/enum.parquet +1 {DATA_DIR}/parquet-testing/enum.parquet query II SELECT * -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 0: {name: 'renamed_i', type: 'BIGINT', default_value: NULL}, 1: {name: 'new_column', type: 'UTINYINT', default_value: 43} }) @@ -135,7 +135,7 @@ FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { # we just get a cast error when we can't cast to the supplied type statement error SELECT * -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 0: {name: 'renamed_i', type: 'DATE', default_value: NULL} }) ---- @@ -143,11 +143,11 @@ Conversion Error # if we don't supply a field id, we can't refer to it using the schema parameter statement ok -COPY (SELECT 42::INTEGER i) TO '__TEST_DIR__/integers.parquet' +COPY (SELECT 42::INTEGER i) TO '{TEMP_DIR}/integers.parquet' query II SELECT * -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 0: {name: 'renamed_i', type: 'BIGINT', default_value: NULL}, 1: {name: 'new_column', type: 'UTINYINT', default_value: 43} }) @@ -158,7 +158,7 @@ NULL 43 statement ok COPY ( SELECT 1 i1, 3 i3, 4 i4, 5 i5 -) TO '__TEST_DIR__/integers.parquet' (FIELD_IDS {i1: 5, i3: 3, i4: 2, i5: 1}) +) TO '{TEMP_DIR}/integers.parquet' (FIELD_IDS {i1: 5, i3: 3, i4: 2, i5: 1}) # this is purposely a bit confusing but we're: # 1. deleting field id 2 @@ -168,7 +168,7 @@ COPY ( # 5. 
upcasting them query IIII SELECT * -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 1: {name: 'i1', type: 'BIGINT', default_value: NULL}, 3: {name: 'i3', type: 'BIGINT', default_value: NULL}, 4: {name: 'i4', type: 'BIGINT', default_value: 2}, @@ -180,7 +180,7 @@ FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { # projection still ok query I SELECT i1 -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 1: {name: 'i1', type: 'BIGINT', default_value: NULL}, 3: {name: 'i3', type: 'BIGINT', default_value: NULL}, 4: {name: 'i4', type: 'BIGINT', default_value: 2}, @@ -192,7 +192,7 @@ FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { # we can still select virtual columns as well query III SELECT file_row_number, filename[-16:], i4 -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 1: {name: 'i1', type: 'BIGINT', default_value: NULL}, 3: {name: 'i3', type: 'BIGINT', default_value: NULL}, 4: {name: 'i4', type: 'BIGINT', default_value: 2}, @@ -204,7 +204,7 @@ FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { # projection still, even with different generated columns query III SELECT file_row_number, filename[-16:], i4 -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 1: {name: 'i1', type: 'BIGINT', default_value: NULL}, 3: {name: 'i3', type: 'BIGINT', default_value: NULL}, 4: {name: 'i4', type: 'BIGINT', default_value: 2}, @@ -216,7 +216,7 @@ FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { # count(*) still ok query I SELECT count(*) -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 1: {name: 'i1', type: 'BIGINT', default_value: NULL}, 3: {name: 'i3', type: 'BIGINT', default_value: NULL}, 4: {name: 'i4', type: 'BIGINT', default_value: 2}, @@ -228,7 +228,7 @@ FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { # combine with constant column query II SELECT i1, filename[-16:] -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 1: {name: 'i1', type: 'BIGINT', default_value: NULL}, 3: {name: 'i3', type: 'BIGINT', default_value: NULL}, 4: {name: 'i4', type: 'BIGINT', default_value: 2}, @@ -240,12 +240,12 @@ FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { statement ok COPY ( SELECT range % 4 g, range i FROM range(1000) -) TO '__TEST_DIR__/integers.parquet' (FIELD_IDS {g: 33, i: 42}) +) TO '{TEMP_DIR}/integers.parquet' (FIELD_IDS {g: 33, i: 42}) # let's also do a query with a filter and a downcast query II SELECT my_cool_group, sum(my_cool_value) -FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 33: {name: 'my_cool_group', type: 'UINTEGER', default_value: NULL}, 42: {name: 'my_cool_value', type: 'UINTEGER', default_value: NULL} }) @@ -268,7 +268,7 @@ COPY ( 3 i3, 4 i4, 5 i5 -) TO '__TEST_DIR__/multifile1.parquet' (FIELD_IDS { +) TO '{TEMP_DIR}/multifile1.parquet' (FIELD_IDS { i1: 5, i3: 3, i4: 2, @@ -288,7 +288,7 @@ COPY ( 3 j3, 4 j4, 5 j5 -) TO '__TEST_DIR__/multifile2.parquet' (FIELD_IDS { +) TO '{TEMP_DIR}/multifile2.parquet' (FIELD_IDS { j1: 1, j3: 2, j4: 3, @@ -297,7 +297,7 @@ COPY ( 
query IIIII SELECT i1, i3, i4, i5, filename[-18:] -FROM read_parquet('__TEST_DIR__/multifile*.parquet', schema=map { +FROM read_parquet('{TEMP_DIR}/multifile*.parquet', schema=map { 1: {name: 'i1', type: 'BIGINT', default_value: NULL}, 3: {name: 'i3', type: 'BIGINT', default_value: NULL}, 4: {name: 'i4', type: 'BIGINT', default_value: 2}, @@ -309,7 +309,7 @@ ORDER BY filename 1 4 5 NULL multifile2.parquet statement error -select * FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +select * FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { True: {name: 'my_cool_group', type: 'UINTEGER', default_value: NULL}, False: {name: 'my_cool_value', type: 'UINTEGER', default_value: NULL} }); @@ -317,7 +317,7 @@ select * FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { Invalid Input Error: 'schema' expects the value type of the map to be either INTEGER or VARCHAR, not BOOLEAN query II -SELECT alias(COLUMNS(*)) FROM read_parquet('__TEST_DIR__/integers.parquet', schema=map { +SELECT alias(COLUMNS(*)) FROM read_parquet('{TEMP_DIR}/integers.parquet', schema=map { 'i': {name: 'renamed_i', type: 'BIGINT', default_value: NULL}, 'j': {name: 'new_column', type: 'UTINYINT', default_value: 43} }) limit 1; @@ -326,10 +326,10 @@ renamed_i new_column # issue 15504 statement ok -COPY (select 1 as id, list_value('a', 'b', 'c') as arr, { key: 1, v1: 'a', v2: 'b' } as s) TO '__TEST_DIR__/15504.parquet' (field_ids { 'id': 0, 'arr': 1, 's': 2 }); +COPY (select 1 as id, list_value('a', 'b', 'c') as arr, { key: 1, v1: 'a', v2: 'b' } as s) TO '{TEMP_DIR}/15504.parquet' (field_ids { 'id': 0, 'arr': 1, 's': 2 }); query III -SELECT * FROM read_parquet('__TEST_DIR__/15504.parquet', schema=map { 0: { name: 'id', type: 'int32', default_value: NULL }, 1: { name: 'arr', type: 'varchar[]', default_value: NULL }, 2: { name: 's', type: 'STRUCT(key INT, v1 TEXT, v2 TEXT)', default_value: NULL } }); +SELECT * FROM read_parquet('{TEMP_DIR}/15504.parquet', schema=map { 0: { name: 'id', type: 'int32', default_value: NULL }, 1: { name: 'arr', type: 'varchar[]', default_value: NULL }, 2: { name: 's', type: 'STRUCT(key INT, v1 TEXT, v2 TEXT)', default_value: NULL } }); ---- 1 [a, b, c] {'key': 1, 'v1': a, 'v2': b} @@ -339,7 +339,7 @@ copy ( select x from generate_series(1,100) as g(x) -) to '__TEST_DIR__/16094.parquet' +) to '{TEMP_DIR}/16094.parquet' with ( field_ids {x: 1} ); @@ -349,7 +349,7 @@ select x, filename from read_parquet( - '__TEST_DIR__/16094.parquet', + '{TEMP_DIR}/16094.parquet', schema=map { 1: {name: 'x', type: 'int', default_value: NULL} }, diff --git a/test/parquet/timetz_parquet.test b/test/parquet/timetz_parquet.test index 84290efda703..00d279c65314 100644 --- a/test/parquet/timetz_parquet.test +++ b/test/parquet/timetz_parquet.test @@ -5,20 +5,20 @@ require parquet query I -select * from 'data/parquet-testing/timetz.parquet' ; +select * from '{DATA_DIR}/parquet-testing/timetz.parquet' ; ---- 14:30:00+00 11:35:00+00 01:59:00+00 query I -select COL_TIME from 'data/parquet-testing/date-with-timezone-int64.parquet' ; +select COL_TIME from '{DATA_DIR}/parquet-testing/date-with-timezone-int64.parquet' ; ---- 12:00:00+00 query II select pruefbahn_id, arbeits_beginn -from 'data/parquet-testing/timetz-nanos.parquet' +from '{DATA_DIR}/parquet-testing/timetz-nanos.parquet' where pruefbahn_id = '58981'; ---- 58981 07:20:00+00 @@ -39,7 +39,7 @@ where pruefbahn_id = '58981'; query I select col33 -from 'data/parquet-testing/negative-timetz.parquet'; +from 
'{DATA_DIR}/parquet-testing/negative-timetz.parquet'; ---- 20:08:21+00 09:01:00+00 diff --git a/test/parquet/variant/variant_basic.test b/test/parquet/variant/variant_basic.test index 668bccbeb4d4..329f58252ca3 100644 --- a/test/parquet/variant/variant_basic.test +++ b/test/parquet/variant/variant_basic.test @@ -5,223 +5,223 @@ require parquet # Array query II -from 'data/parquet-testing/variant_array_array_string_and_integer.parquet'; +from '{DATA_DIR}/parquet-testing/variant_array_array_string_and_integer.parquet'; ---- 1 [["string","iceberg",34],[34,null],[],["string","iceberg"],34] # String query II -from 'data/parquet-testing/variant_string.parquet'; +from '{DATA_DIR}/parquet-testing/variant_string.parquet'; ---- 1 "iceberg" # BOOL TRUE query II -from 'data/parquet-testing/variant_bool_true.parquet'; +from '{DATA_DIR}/parquet-testing/variant_bool_true.parquet'; ---- 1 true # Decimal4 query II -from 'data/parquet-testing/variant_decimal4_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_decimal4_positive.parquet'; ---- 1 "123456.789" # UUID query II -from 'data/parquet-testing/variant_uuid.parquet'; +from '{DATA_DIR}/parquet-testing/variant_uuid.parquet'; ---- 1 "f24f9b64-81fa-49d1-b74e-8c09a6e31c56" # Empty array query II -from 'data/parquet-testing/variant_array_empty.parquet'; +from '{DATA_DIR}/parquet-testing/variant_array_empty.parquet'; ---- 1 [] query II -from 'data/parquet-testing/variant_int16.parquet'; +from '{DATA_DIR}/parquet-testing/variant_int16.parquet'; ---- 1 -1234 query II -from 'data/parquet-testing/variant_int32.parquet'; +from '{DATA_DIR}/parquet-testing/variant_int32.parquet'; ---- 1 -12345 # Binary query II -from 'data/parquet-testing/variant_binary.parquet'; +from '{DATA_DIR}/parquet-testing/variant_binary.parquet'; ---- 1 "CgsMDQ==" # Decimal16 query II -from 'data/parquet-testing/variant_decimal16.parquet'; +from '{DATA_DIR}/parquet-testing/variant_decimal16.parquet'; ---- 1 "9876543210.123456789" query II -from 'data/parquet-testing/variant_int64.parquet'; +from '{DATA_DIR}/parquet-testing/variant_int64.parquet'; ---- 1 -9876543210 # TIMESTAMP_NANOS_NTZ query II -from 'data/parquet-testing/variant_timestamp_nanos_ntz.parquet'; +from '{DATA_DIR}/parquet-testing/variant_timestamp_nanos_ntz.parquet'; ---- 1 "1957-11-07 12:33:54.123456789" # Array of strings (2-dimensional) query II -from 'data/parquet-testing/variant_array_array_string.parquet'; +from '{DATA_DIR}/parquet-testing/variant_array_array_string.parquet'; ---- 1 [["string","iceberg"],["apple","banana"]] # TIMESTAMP_MICROS query II -from 'data/parquet-testing/variant_timestamp_micros.parquet'; +from '{DATA_DIR}/parquet-testing/variant_timestamp_micros.parquet'; ---- 1 "1957-11-07 12:33:54.123456+00" # Object {'a': .., 'c': ...} query II -from 'data/parquet-testing/variant_object_primitives.parquet'; +from '{DATA_DIR}/parquet-testing/variant_object_primitives.parquet'; ---- 1 {"a":123456789,"c":"string"} query II -from 'data/parquet-testing/variant_timestamp_micros_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_timestamp_micros_positive.parquet'; ---- 1 "2024-11-07 12:33:54.123456+00" query II -from 'data/parquet-testing/variant_int16_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_int16_positive.parquet'; ---- 1 1234 query II -from 'data/parquet-testing/variant_time_ntz.parquet'; +from '{DATA_DIR}/parquet-testing/variant_time_ntz.parquet'; ---- 1 "12:33:54.123456" query II -from 'data/parquet-testing/variant_decimal16_negative.parquet'; +from 
'{DATA_DIR}/parquet-testing/variant_decimal16_negative.parquet'; ---- 1 "-9876543210.123456789" query II -from 'data/parquet-testing/variant_timestamp_nanos1.parquet'; +from '{DATA_DIR}/parquet-testing/variant_timestamp_nanos1.parquet'; ---- 1 "1957-11-07 12:33:54.123457+00" query II -from 'data/parquet-testing/variant_decimal8_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_decimal8_negative.parquet'; ---- 1 "-123456789.987654321" query II -from 'data/parquet-testing/variant_timestamp_micros_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_timestamp_micros_negative.parquet'; ---- 1 "1957-11-07 12:33:54.123456" query II -from 'data/parquet-testing/variant_int8_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_int8_positive.parquet'; ---- 1 34 query II -from 'data/parquet-testing/variant_timestamp_nanos2.parquet'; +from '{DATA_DIR}/parquet-testing/variant_timestamp_nanos2.parquet'; ---- 1 "2024-11-07 12:33:54.123456+00" query II -from 'data/parquet-testing/variant_int8_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_int8_negative.parquet'; ---- 1 -34 query II -from 'data/parquet-testing/variant_array_string.parquet'; +from '{DATA_DIR}/parquet-testing/variant_array_string.parquet'; ---- 1 ["iceberg","string"] query II -from 'data/parquet-testing/variant_date_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_date_negative.parquet'; ---- 1 "1957-11-07" query II -from 'data/parquet-testing/variant_int64_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_int64_positive.parquet'; ---- 1 9876543210 query II -from 'data/parquet-testing/variant_array_object_string_and_integer.parquet'; +from '{DATA_DIR}/parquet-testing/variant_array_object_string_and_integer.parquet'; ---- 1 [{"a":123456789,"c":"string"},{"a":123456789,"c":"string"},"iceberg",34] query II -from 'data/parquet-testing/variant_int32_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_int32_positive.parquet'; ---- 1 12345 query II -from 'data/parquet-testing/variant_double_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_double_negative.parquet'; ---- 1 -14.3 query II -from 'data/parquet-testing/variant_object_empty.parquet'; +from '{DATA_DIR}/parquet-testing/variant_object_empty.parquet'; ---- 1 {} query II -from 'data/parquet-testing/variant_null.parquet'; +from '{DATA_DIR}/parquet-testing/variant_null.parquet'; ---- 1 NULL # -10.11 in the test that it was generated from query II -from 'data/parquet-testing/variant_float_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_float_negative.parquet'; ---- 1 -10.109999656677246 query II -from 'data/parquet-testing/variant_object_string_and_array.parquet'; +from '{DATA_DIR}/parquet-testing/variant_object_string_and_array.parquet'; ---- 1 {"a":123456789,"c":["string","iceberg"]} query II -from 'data/parquet-testing/variant_object_null_and_string.parquet'; +from '{DATA_DIR}/parquet-testing/variant_object_null_and_string.parquet'; ---- 1 {"a":null,"d":"iceberg"} query II -from 'data/parquet-testing/variant_date_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_date_positive.parquet'; ---- 1 "2024-11-07" query II -from 'data/parquet-testing/variant_bool_false.parquet'; +from '{DATA_DIR}/parquet-testing/variant_bool_false.parquet'; ---- 1 false query II -from 'data/parquet-testing/variant_array_object_string.parquet'; +from '{DATA_DIR}/parquet-testing/variant_array_object_string.parquet'; ---- 1 [{"a":123456789,"c":"string"},{"a":123456789,"c":"string"}] query II -from 
'data/parquet-testing/variant_decimal4_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_decimal4_negative.parquet'; ---- 1 "-123456.789" query II -from 'data/parquet-testing/variant_double_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_double_positive.parquet'; ---- 1 14.3 query II -from 'data/parquet-testing/variant_timestamp_micros_ntz_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_timestamp_micros_ntz_positive.parquet'; ---- 1 "2024-11-07 12:33:54.123456" diff --git a/test/parquet/variant/variant_nanos_tz.test b/test/parquet/variant/variant_nanos_tz.test index f63581958a81..b4a47c1a1f7e 100644 --- a/test/parquet/variant/variant_nanos_tz.test +++ b/test/parquet/variant/variant_nanos_tz.test @@ -8,24 +8,24 @@ set variant_legacy_encoding=true; # Timestamp NS - negative (with timezone) (shredded) query II -from 'data/parquet-testing/variant_shredded_timestamp_nanos_tz_negative_no_logical_type.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_timestamp_nanos_tz_negative_no_logical_type.parquet'; ---- 1 "1957-11-07 12:33:54.123457+00" # Timestamp NS - positive (with timezone) (shredded) query II -from 'data/parquet-testing/variant_shredded_timestamp_nanos_tz_positive_no_logical_type.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_timestamp_nanos_tz_positive_no_logical_type.parquet'; ---- 1 "2024-11-07 12:33:54.123456+00" # Timestamp NS - positive (with timezone) (unshredded) query II -from 'data/parquet-testing/variant_timestamp_nanos_tz_positive_no_logical_type.parquet'; +from '{DATA_DIR}/parquet-testing/variant_timestamp_nanos_tz_positive_no_logical_type.parquet'; ---- 1 "2024-11-07 12:33:54.123456+00" # Timestamp NS - negative (with timezone) (unshredded) query II -from 'data/parquet-testing/variant_timestamp_nanos_tz_negative_no_logical_type.parquet'; +from '{DATA_DIR}/parquet-testing/variant_timestamp_nanos_tz_negative_no_logical_type.parquet'; ---- 1 "1957-11-07 12:33:54.123457+00" diff --git a/test/parquet/variant/variant_nested_with_nulls.test b/test/parquet/variant/variant_nested_with_nulls.test index 91aa2e3fcfd1..6d42f27e3fe2 100644 --- a/test/parquet/variant/variant_nested_with_nulls.test +++ b/test/parquet/variant/variant_nested_with_nulls.test @@ -4,7 +4,7 @@ require parquet query IIIIII -describe from parquet_scan('data/parquet-testing/variant_unshredded_nested_nulls.parquet') +describe from parquet_scan('{DATA_DIR}/parquet-testing/variant_unshredded_nested_nulls.parquet') ---- id BIGINT YES NULL NULL NULL v STRUCT("value" BLOB, metadata BLOB) YES NULL NULL NULL @@ -19,7 +19,7 @@ set variant_legacy_encoding=true; # Now the variant column gets emitted as JSON query IIIIII -describe from parquet_scan('data/parquet-testing/variant_unshredded_nested_nulls.parquet') +describe from parquet_scan('{DATA_DIR}/parquet-testing/variant_unshredded_nested_nulls.parquet') ---- id BIGINT YES NULL NULL NULL v JSON YES NULL NULL NULL @@ -30,7 +30,7 @@ array_of_struct_of_variants STRUCT(v JSON)[] YES NULL NULL NULL struct_of_array_of_variants STRUCT(v JSON[]) YES NULL NULL NULL query IIIIIII -select * from parquet_scan('data/parquet-testing/variant_unshredded_nested_nulls.parquet') order by id limit 10; +select * from parquet_scan('{DATA_DIR}/parquet-testing/variant_unshredded_nested_nulls.parquet') order by id limit 10; ---- 0 {"key":0} ['{"key":0}', NULL, '{"key":0}', NULL, '{"key":0}'] {'v': '{"key":0}'} {0='{"key":0}', nullKey=NULL} [{'v': '{"key":0}'}, {'v': NULL}, NULL, {'v': '{"key":0}'}, NULL, {'v': '{"key":0}'}] {'v': [NULL, 
'{"key":0}']} 0 {"key":0} ['{"key":0}', NULL, '{"key":0}', NULL, '{"key":0}'] {'v': '{"key":0}'} {0='{"key":0}', nullKey=NULL} [{'v': '{"key":0}'}, {'v': NULL}, NULL, {'v': '{"key":0}'}, NULL, {'v': '{"key":0}'}] {'v': [NULL, '{"key":0}']} diff --git a/test/parquet/variant/variant_partially_shredded.test b/test/parquet/variant/variant_partially_shredded.test index 1e7ee7e68c2e..f7d6537e6dc4 100644 --- a/test/parquet/variant/variant_partially_shredded.test +++ b/test/parquet/variant/variant_partially_shredded.test @@ -4,186 +4,186 @@ require parquet query II nosort result -from 'data/parquet-testing/variant_partial_shredded0.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded0.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded1.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded1.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded2.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded2.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded3.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded3.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded4.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded4.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded5.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded5.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded6.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded6.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded7.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded7.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded8.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded8.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded9.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded9.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded10.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded10.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded11.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded11.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded12.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded12.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded13.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded13.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded14.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded14.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded15.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded15.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded16.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded16.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded17.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded17.parquet'; ---- query II nosort result -from 
'data/parquet-testing/variant_partial_shredded18.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded18.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded19.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded19.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded20.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded20.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded21.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded21.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded22.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded22.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded23.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded23.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded24.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded24.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded25.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded25.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded26.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded26.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded27.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded27.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded28.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded28.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded29.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded29.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded30.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded30.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded31.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded31.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded32.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded32.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded33.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded33.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded34.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded34.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded35.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded35.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded36.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded36.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded37.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded37.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded38.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded38.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded39.parquet'; +from 
'{DATA_DIR}/parquet-testing/variant_partial_shredded39.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded40.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded40.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded41.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded41.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded42.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded42.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded43.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded43.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded44.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded44.parquet'; ---- query II nosort result -from 'data/parquet-testing/variant_partial_shredded45.parquet'; +from '{DATA_DIR}/parquet-testing/variant_partial_shredded45.parquet'; ---- diff --git a/test/parquet/variant/variant_shredded.test b/test/parquet/variant/variant_shredded.test index 46c4225f2565..69810f6a4f8e 100644 --- a/test/parquet/variant/variant_shredded.test +++ b/test/parquet/variant/variant_shredded.test @@ -5,206 +5,206 @@ require parquet # Timestamp NS - positive (no timezone) query II -from 'data/parquet-testing/variant_shredded_timestamp_nanos_ntz_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_timestamp_nanos_ntz_positive.parquet'; ---- 1 "2024-11-07 12:33:54.123456789" # Float - negative query II -from 'data/parquet-testing/variant_shredded_float_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_float_negative.parquet'; ---- 1 -10.109999656677246 # Int64 - negative query II -from 'data/parquet-testing/variant_shredded_int64_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_int64_negative.parquet'; ---- 1 -9876543210 # Decimal16 - negative query II -from 'data/parquet-testing/variant_shredded_decimal16_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_decimal16_negative.parquet'; ---- 1 "-9876543210.123456789" # UUID query II -from 'data/parquet-testing/variant_shredded_uuid.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_uuid.parquet'; ---- 1 "f24f9b64-81fa-49d1-b74e-8c09a6e31c56" # Decimal4 - negative query II -from 'data/parquet-testing/variant_shredded_decimal4_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_decimal4_negative.parquet'; ---- 1 "-123456.789" # Decimal4 - positive query II -from 'data/parquet-testing/variant_shredded_decimal4_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_decimal4_positive.parquet'; ---- 1 "123456.789" # Timestamp Micros - negative (no timezone) query II -from 'data/parquet-testing/variant_shredded_timestamp_micros_ntz_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_timestamp_micros_ntz_negative.parquet'; ---- 1 "1957-11-07 12:33:54.123456" # Date - negative query II -from 'data/parquet-testing/variant_shredded_date_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_date_negative.parquet'; ---- 1 "1957-11-07" # int8 - positive query II -from 'data/parquet-testing/variant_shredded_int8_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_int8_positive.parquet'; ---- 1 34 # int16 - positive query II -from 'data/parquet-testing/variant_shredded_int16_positive.parquet'; +from 
'{DATA_DIR}/parquet-testing/variant_shredded_int16_positive.parquet'; ---- 1 1234 # decimal8 - negative query II -from 'data/parquet-testing/variant_shredded_decimal8_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_decimal8_negative.parquet'; ---- 1 "-123456789.987654321" # string query II -from 'data/parquet-testing/variant_shredded_string.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_string.parquet'; ---- 1 "iceberg" # FIXME: this is actually a Timestamp Nanos - positive (with timezone) # Timestamp Micros - positive (with timezone) query II -from 'data/parquet-testing/variant_shredded_timestamp_micros_tz_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_timestamp_micros_tz_positive.parquet'; ---- 1 "2024-11-07 12:33:54.123456+00" # binary query II -from 'data/parquet-testing/variant_shredded_binary.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_binary.parquet'; ---- 1 "CgsMDQ==" # float - positive query II -from 'data/parquet-testing/variant_shredded_float_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_float_positive.parquet'; ---- 1 10.109999656677246 # double - positive query II -from 'data/parquet-testing/variant_shredded_double_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_double_positive.parquet'; ---- 1 14.3 # decimal16 - positive query II -from 'data/parquet-testing/variant_shredded_decimal16_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_decimal16_positive.parquet'; ---- 1 "9876543210.123456789" # Timestamp Micros - positive (no timezone) query II -from 'data/parquet-testing/variant_shredded_timestamp_micros_ntz_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_timestamp_micros_ntz_positive.parquet'; ---- 1 "2024-11-07 12:33:54.123456" # int16 - negative query II -from 'data/parquet-testing/variant_shredded_int16_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_int16_negative.parquet'; ---- 1 -1234 # Timestamp Micros - positive (with timezone) query II -from 'data/parquet-testing/variant_shredded_timestamp_micros_tz_positive2.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_timestamp_micros_tz_positive2.parquet'; ---- 1 "2024-11-07 12:33:54.123456+00" # Timestamp Micros - negative (with timezone) query II -from 'data/parquet-testing/variant_shredded_timestamp_micros_tz_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_timestamp_micros_tz_negative.parquet'; ---- 1 "1957-11-07 12:33:54.123456+00" # decimal8 - positive query II -from 'data/parquet-testing/variant_shredded_decimal8_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_decimal8_positive.parquet'; ---- 1 "123456789.987654321" # Timestamp Nanos - negative (no timezone) query II -from 'data/parquet-testing/variant_shredded_timestamp_nanos_ntz_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_timestamp_nanos_ntz_negative.parquet'; ---- 1 "1957-11-07 12:33:54.123456789" # int32 - positive query II -from 'data/parquet-testing/variant_shredded_int32_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_int32_positive.parquet'; ---- 1 12345 # int32 - negative query II -from 'data/parquet-testing/variant_shredded_int32_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_int32_negative.parquet'; ---- 1 -12345 # FIXME: this is actually a Timestamp Nanos - negative (with timezone) # Timestamp Micros - negative (with timezone) query II -from 
'data/parquet-testing/variant_shredded_timestamp_micros_tz_negative2.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_timestamp_micros_tz_negative2.parquet'; ---- 1 "1957-11-07 12:33:54.123457+00" # int8 - negative query II -from 'data/parquet-testing/variant_shredded_int8_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_int8_negative.parquet'; ---- 1 -34 # Time Micros (no timezone) query II -from 'data/parquet-testing/variant_shredded_time_micros_ntz.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_time_micros_ntz.parquet'; ---- 1 "12:33:54.123456" # Date - positive query II -from 'data/parquet-testing/variant_shredded_date_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_date_positive.parquet'; ---- 1 "2024-11-07" # bool - true query II -from 'data/parquet-testing/variant_shredded_bool_true.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_bool_true.parquet'; ---- 1 true # int64 - positive query II -from 'data/parquet-testing/variant_shredded_int64_positive.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_int64_positive.parquet'; ---- 1 9876543210 # double - negative query II -from 'data/parquet-testing/variant_shredded_double_negative.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_double_negative.parquet'; ---- 1 -14.3 # bool - false query II -from 'data/parquet-testing/variant_shredded_bool_false.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_bool_false.parquet'; ---- 1 false diff --git a/test/parquet/variant/variant_shredded_nested.test b/test/parquet/variant/variant_shredded_nested.test index 22735cc3f057..31c1d42db40f 100644 --- a/test/parquet/variant/variant_shredded_nested.test +++ b/test/parquet/variant/variant_shredded_nested.test @@ -5,36 +5,36 @@ require parquet # Array query II -from 'data/parquet-testing/variant_shredded_array1.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_array1.parquet'; ---- 1 [["string","iceberg"],["apple","banana"]] # Array query II -from 'data/parquet-testing/variant_shredded_array2.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_array2.parquet'; ---- 1 [{"a":123456789,"c":"string"},{"a":123456789,"c":"string"}] # Array query II -from 'data/parquet-testing/variant_shredded_array3.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_array3.parquet'; ---- 1 ["iceberg","string"] # Object query II -from 'data/parquet-testing/variant_shredded_object1.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_object1.parquet'; ---- 1 {"a":123456789,"c":"string"} # Object query II -from 'data/parquet-testing/variant_shredded_object2.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_object2.parquet'; ---- 1 {"a":null,"d":"iceberg"} # Object query II -from 'data/parquet-testing/variant_shredded_object3.parquet'; +from '{DATA_DIR}/parquet-testing/variant_shredded_object3.parquet'; ---- 1 {"a":123456789,"c":["string","iceberg"]} diff --git a/test/sql/aggregate/aggregates/test_approx_quantile.test b/test/sql/aggregate/aggregates/test_approx_quantile.test index b2bf96cbeaa2..451a6ed2f606 100644 --- a/test/sql/aggregate/aggregates/test_approx_quantile.test +++ b/test/sql/aggregate/aggregates/test_approx_quantile.test @@ -115,6 +115,19 @@ SELECT approx_quantile('1:02:03.000000+05:30'::TIMETZ, 0.5); ---- 01:02:42+05:30:39 +# TIME test, fixes issue 19874 +query I +SELECT + APPROX_QUANTILE(a, [0.43::float, 0.64::float, 0.7::float]) AS call_0_result +FROM ( + VALUES ( + (CAST('12:13:14' AS TIME)), + 
(CAST('12:13:14' AS TIME)), + (CAST('12:13:14' AS TIME)) + )) AS t(a); +---- +['12:13:14', '12:13:14', '12:13:14'] + # List versions query I SELECT [ diff --git a/test/sql/aggregate/aggregates/test_list_aggregate_function.test_slow b/test/sql/aggregate/aggregates/test_list_aggregate_function.test_slow index a40a337acc22..32ec8eba7418 100644 --- a/test/sql/aggregate/aggregates/test_list_aggregate_function.test_slow +++ b/test/sql/aggregate/aggregates/test_list_aggregate_function.test_slow @@ -161,7 +161,7 @@ SELECT LIST(s) FROM structs_strings GROUP BY g ORDER BY g require parquet statement ok -SELECT tconst, list(principals) as principals FROM parquet_scan('data/parquet-testing/bug3734.parquet') GROUP BY 1 limit 10; +SELECT tconst, list(principals) as principals FROM parquet_scan('{DATA_DIR}/parquet-testing/bug3734.parquet') GROUP BY 1 limit 10; # test all data types diff --git a/test/sql/aggregate/having/having_without_groupby.test b/test/sql/aggregate/having/having_without_groupby.test new file mode 100644 index 000000000000..c7f0a5e73301 --- /dev/null +++ b/test/sql/aggregate/having/having_without_groupby.test @@ -0,0 +1,42 @@ +# name: test/sql/aggregate/having/having_without_groupby.test +# group: [having] + +query I +SELECT 1 AS one FROM ( + values + (1,2), + (3,2) +) t(a, b) +HAVING 1 < 2; +---- +1 + +query I +SELECT 1 AS one FROM ( + values + (1,2), + (3,2) +) t(a, b) +HAVING false; +---- + +statement error +select a FROM ( + values + (1,2), + (3,2) +) t(a, b) +HAVING true +---- +column "a" must appear in the GROUP BY clause + +query I +select sum(a) FROM ( + values + (1,2), + (3,2) +) t(a, b) +HAVING true +---- +4 + diff --git a/test/sql/append/test_big_append_slow.test b/test/sql/append/test_big_append.test_slow similarity index 96% rename from test/sql/append/test_big_append_slow.test rename to test/sql/append/test_big_append.test_slow index b2bac1c3522d..c7d504c7e92f 100644 --- a/test/sql/append/test_big_append_slow.test +++ b/test/sql/append/test_big_append.test_slow @@ -1,4 +1,4 @@ -# name: test/sql/append/test_big_append_slow.test +# name: test/sql/append/test_big_append.test_slow # description: Test big append # group: [append] diff --git a/test/sql/attach/attach_copy.test b/test/sql/attach/attach_copy.test index 35b5e48f26a4..cb0d5b61cc77 100644 --- a/test/sql/attach/attach_copy.test +++ b/test/sql/attach/attach_copy.test @@ -9,22 +9,22 @@ statement ok CREATE TABLE db1.test(a INTEGER, b INTEGER, c VARCHAR(10)); query I -COPY db1.test FROM 'data/csv/test/test.csv'; +COPY db1.test FROM '{DATA_DIR}/csv/test/test.csv'; ---- 5000 query I -COPY db1.main.test FROM 'data/csv/test/test.csv'; +COPY db1.main.test FROM '{DATA_DIR}/csv/test/test.csv'; ---- 5000 statement ok -COPY db1.main.test TO '__TEST_DIR__/test.csv'; +COPY db1.main.test TO '{TEMP_DIR}/test.csv'; statement ok USE db1 query I -COPY test FROM 'data/csv/test/test.csv'; +COPY test FROM '{DATA_DIR}/csv/test/test.csv'; ---- 5000 diff --git a/test/sql/attach/attach_encrypted_db_key_test.test b/test/sql/attach/attach_encrypted_db_key_test.test index 951dc1f332c4..c6322b94c110 100644 --- a/test/sql/attach/attach_encrypted_db_key_test.test +++ b/test/sql/attach/attach_encrypted_db_key_test.test @@ -4,6 +4,9 @@ # workaround - alternative verify always forces the latest storage require no_alternative_verify +# We need httpfs to do encrypted writes +require httpfs + statement error ATTACH '__TEST_DIR__/encrypted.duckdb' AS encrypted (ENCRYPTION_KEY); ---- diff --git a/test/sql/attach/attach_encryption_block_header.test 
b/test/sql/attach/attach_encryption_block_header.test index 3843b9263a33..ffd55f3a63e0 100644 --- a/test/sql/attach/attach_encryption_block_header.test +++ b/test/sql/attach/attach_encryption_block_header.test @@ -4,6 +4,9 @@ # workaround - alternative verify always forces the latest storage require no_alternative_verify +# We need httpfs to do encrypted writes +require httpfs + statement error ATTACH '__TEST_DIR__/encrypted.duckdb' AS encrypted (ENCRYPTION_KEY ''); ---- @@ -14,6 +17,9 @@ ATTACH '__TEST_DIR__/encrypted.duckdb' AS encrypted (ENCRYPTION_KEY 42); ---- Binder Error: "42" is not a valid key. A key must be of type VARCHAR +# We need httpfs to write encrypted database files +require httpfs + statement ok ATTACH '__TEST_DIR__/encrypted.duckdb' AS encrypted (ENCRYPTION_KEY 'asdf'); diff --git a/test/sql/attach/attach_encryption_downgrade_prevention.test b/test/sql/attach/attach_encryption_downgrade_prevention.test new file mode 100644 index 000000000000..fd91169b64aa --- /dev/null +++ b/test/sql/attach/attach_encryption_downgrade_prevention.test @@ -0,0 +1,22 @@ +# name: test/sql/attach/attach_encryption_downgrade_prevention.test +# description: Ensure the crypto cipher cannot be downgraded to strip integrity checks +# group: [attach] + +load __TEST_DIR__/tmp.db + +require httpfs + +# This is unsafe: an attacker could manipulate the stored cipher to silently downgrade it +statement error +ATTACH 'data/attach_test/encrypted_ctr_key=abcde.db' as enc (ENCRYPTION_KEY 'abcde'); +---- +Catalog Error: Cannot open encrypted database "data/attach_test/encrypted_ctr_key=abcde.db" without explicitly specifying the encryption cipher for security reasons. Please make sure you understand the security implications and re-attach the database specifying the desired cipher. + +# For CTR we need to specify the cipher to ensure we don't accidentally downgrade the cipher +statement ok +ATTACH 'data/attach_test/encrypted_ctr_key=abcde.db' as enc1 (ENCRYPTION_KEY 'abcde', ENCRYPTION_CIPHER 'CTR'); + +# For GCM this is no problem +statement ok +ATTACH 'data/attach_test/encrypted_gcm_key=abcde.db' as enc2 (ENCRYPTION_KEY 'abcde'); + diff --git a/test/sql/attach/attach_encryption_fallback_readonly.test b/test/sql/attach/attach_encryption_fallback_readonly.test new file mode 100644 index 000000000000..fad7a4a9619e --- /dev/null +++ b/test/sql/attach/attach_encryption_fallback_readonly.test @@ -0,0 +1,71 @@ +# name: test/sql/attach/attach_encryption_fallback_readonly.test +# description: Ensure the fallback crypto implementation is read-only +# group: [attach] + +require vector_size 2048 + +load __TEST_DIR__/tmp.db + +require no_extension_autoloading "EXPECTED: This tests what happens when autoloading is disabled" + +# For the test, we disable auto-loading +statement ok +set autoinstall_known_extensions=false + +# First we try to read an encrypted database +statement error +ATTACH 'data/attach_test/encrypted_gcm_key=abcde.db' as enc (ENCRYPTION_KEY 'abcde', ENCRYPTION_CIPHER 'GCM'); +---- +Invalid Configuration Error: The database is encrypted, but DuckDB currently has a read-only crypto module loaded. Either re-open the database using `ATTACH '..' (READONLY)`, or ensure httpfs is loaded using `LOAD httpfs`.
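+ +# In other words: with only the built-in read-only crypto module, an encrypted +# database can be decrypted (read) but never encrypted (written), which is why +# the READ_ONLY option below is required until httpfs is loaded.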
+ +# It works again by setting READONLY +statement ok +ATTACH 'data/attach_test/encrypted_gcm_key=abcde.db' as enc (ENCRYPTION_KEY 'abcde', ENCRYPTION_CIPHER 'GCM', READ_ONLY); + +query I +FROM enc.test ORDER BY value +---- +0 +1 +2 +3 +4 + +statement ok +DETACH enc + +# Creating a new table will also fail +statement error +ATTACH '__TEST_DIR__/test_write_only.db' as enc (ENCRYPTION_KEY 'abcde', ENCRYPTION_CIPHER 'GCM'); +---- +Invalid Configuration Error: The database was opened with encryption enabled, but DuckDB currently has a read-only crypto module loaded. Please re-open using READONLY, or ensure httpfs is loaded using `LOAD httpfs`. + +# Loading httpfs will solve all problems + +require httpfs + +statement ok +ATTACH 'data/attach_test/encrypted_gcm_key=abcde.db' as enc (ENCRYPTION_KEY 'abcde', ENCRYPTION_CIPHER 'GCM'); + +query I +FROM enc.test ORDER BY value +---- +0 +1 +2 +3 +4 + +statement ok +DETACH enc + +statement ok +ATTACH '__TEST_DIR__/test_write_only.db' as enc (ENCRYPTION_KEY 'abcde', ENCRYPTION_CIPHER 'GCM'); + +statement ok +CREATE TABLE enc.test AS SELECT 1 as a; + +query I +FROM enc.test +---- +1 diff --git a/test/sql/attach/remote_file_concurrently.test b/test/sql/attach/remote_file_concurrently.test new file mode 100644 index 000000000000..434ed23d5481 --- /dev/null +++ b/test/sql/attach/remote_file_concurrently.test @@ -0,0 +1,12 @@ +# name: test/sql/attach/remote_file_concurrently.test +# description: Concurrently attach the same read only database +# group: [attach] + +require httpfs + +statement ok con1 +ATTACH 'https://raw.githubusercontent.com/duckdb/duckdb/main/data/attach_test/attach.db' AS db; + +statement ok con2 +ATTACH 'https://raw.githubusercontent.com/duckdb/duckdb/main/data/attach_test/attach.db' AS db2; + diff --git a/test/sql/binder/function_chaining_19035.test b/test/sql/binder/function_chaining_19035.test index e0d2242b4299..3a2c1b1fb3b5 100644 --- a/test/sql/binder/function_chaining_19035.test +++ b/test/sql/binder/function_chaining_19035.test @@ -5,6 +5,35 @@ statement ok PRAGMA enable_verification +query I +with stage as ( + select + [1,2,3] as c1, + [4,5,6] as c2 +) +select + list_transform( + c1, + lambda x: list_bool_or( + list_transform( + c2, + lambda y: y == x + ) + ) + ) as c3 +from stage; +---- +[false, false, false] + +query I +select list_transform( + [1,2,3], + lambda x: list_max( + list_transform([4,5,6], lambda y: y + x) + )) as result; +---- +[7, 8, 9] + statement ok CREATE MACRO list_contains_macro(x, y) AS (SELECT list_contains(x, y)) diff --git a/test/sql/binder/old_implicit_cast_template.test b/test/sql/binder/old_implicit_cast_template.test new file mode 100644 index 000000000000..dc04b380e09f --- /dev/null +++ b/test/sql/binder/old_implicit_cast_template.test @@ -0,0 +1,22 @@ +# name: test/sql/binder/old_implicit_cast_template.test +# description: Test old_implicit_cast setting +# group: [binder] + +# Old-style casting should not select an overload by casting to string +# if there is a templated overload that fits better. 
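+# +# For example, string_split(v, '-') below yields a VARCHAR[], and list_extract +# should bind its templated list overload in both modes, rather than casting +# the split list to a string to match a VARCHAR overload.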
+ +statement ok +create table t1 as select * from values ('1-2', '3-4', 'a-z', NULL) as r(v); + +query I +SELECT list_extract(string_split(v, '-'), 1) FROM t1; +---- +1 + +statement ok +SET old_implicit_casting = true; + +query I +SELECT list_extract(string_split(v, '-'), 1) FROM t1; +---- +1 \ No newline at end of file diff --git a/test/sql/cast/test_boolean_cast.test b/test/sql/cast/test_boolean_cast.test index 73ac1d2bb61a..c530aa44892b 100644 --- a/test/sql/cast/test_boolean_cast.test +++ b/test/sql/cast/test_boolean_cast.test @@ -96,7 +96,7 @@ SELECT CAST(yes AS BOOLEAN) FROM tbl statement error SELECT CAST(yes AS BOOLEAN) ---- -Binder Error: Referenced column "yes" not found in FROM clause! +Binder Error: Referenced column "yes" was not found because the FROM clause is missing query T SELECT CAST(CAST('12345' AS INTEGER) AS BOOLEAN) diff --git a/test/sql/catalog/function/test_macro_overloads.test b/test/sql/catalog/function/test_macro_overloads.test index 62e8fc7b2a23..32866ac9beb6 100644 --- a/test/sql/catalog/function/test_macro_overloads.test +++ b/test/sql/catalog/function/test_macro_overloads.test @@ -94,4 +94,4 @@ CREATE MACRO error_in_definition (a) AS a, (a, b) AS a + y ---- -Referenced column "y" not found in FROM clause +Referenced column "y" was not found because the FROM clause is missing diff --git a/test/sql/catalog/function/test_simple_macro.test b/test/sql/catalog/function/test_simple_macro.test index 8f02feeb43b5..83f586f78fad 100644 --- a/test/sql/catalog/function/test_simple_macro.test +++ b/test/sql/catalog/function/test_simple_macro.test @@ -100,7 +100,7 @@ DROP FUNCTION two; statement error CREATE MACRO add_macro(a) AS a + b ---- -column "b" not found +column "b" was not found because the FROM clause is missing statement ok CREATE MACRO add_macro(a, b) AS a + b diff --git a/test/sql/catalog/test_extension_suggestion.test b/test/sql/catalog/test_extension_suggestion.test index 9d88900f2680..a096c21f18de 100644 --- a/test/sql/catalog/test_extension_suggestion.test +++ b/test/sql/catalog/test_extension_suggestion.test @@ -7,6 +7,6 @@ require skip_reload require no_extension_autoloading "EXPECTED: This tests what happens when extension is not there" statement error -SELECT from_json('data/json/array_of_empty_arrays.json'); +SELECT from_json('{DATA_DIR}/json/array_of_empty_arrays.json'); ---- Catalog Error: Scalar Function with name "from_json" is not in the catalog, but it exists in the json extension. \ No newline at end of file diff --git a/test/sql/constraints/foreignkey/fk_19469.test b/test/sql/constraints/foreignkey/fk_19469.test new file mode 100644 index 000000000000..9cfb1b311750 --- /dev/null +++ b/test/sql/constraints/foreignkey/fk_19469.test @@ -0,0 +1,79 @@ +# name: test/sql/constraints/foreignkey/fk_19469.test +# description: Issue #19469: Error in constraint violation message when checking foreign key constraints.
+# group: [foreignkey] + +statement ok +CREATE TABLE B (b1 INTEGER, +b2 INTEGER, +PRIMARY KEY(b1, b2)); + +statement ok +CREATE TABLE A (a1 VARCHAR(1), +a2 VARCHAR(1), +a3 VARCHAR(1), +a4 VARCHAR(1), +a5 INTEGER, +a6 INTEGER, +PRIMARY KEY(a1, a2), +UNIQUE(a3, a4), +FOREIGN KEY (a5, a6) REFERENCES B(b1, b2)); + +statement ok +INSERT INTO B (b1, b2) VALUES +(1, 2), +(2, 3), +(6, 7); + +statement error +INSERT INTO A (a1, a2, a3, a4, a5, a6) VALUES +('x', 'y', 'z', 'u', 1, 2), +('y', 'z', 'x', 'v', 1, 2), +('x', 'x', 'y', 'y', 2, 3), +('z', 'z', 'v', 'x', 4, 5); +---- +Constraint Error: Violates foreign key constraint because key "b1: 4, b2: 5" does not exist in the referenced table + +statement ok +CREATE TABLE C ( + c1 INTEGER, + c2 INTEGER, + c3 VARCHAR(1), + c4 VARCHAR(1), + PRIMARY KEY (c1, c2), + UNIQUE (c3, c4) +); + +statement ok +CREATE TABLE D ( + d1 INTEGER, + d2 INTEGER, + d3 VARCHAR(1), + d4 VARCHAR(1), + payload INTEGER, + FOREIGN KEY (d1, d2) REFERENCES C (c1, c2), + FOREIGN KEY (d3, d4) REFERENCES C (c3, c4) +); + +statement ok +INSERT INTO C VALUES +(0, 1, 'a', 'b'), +(1, 0, 'a', 'c'), +(2, 2, 'd', 'e'); + +statement ok +INSERT INTO D VALUES +(0, 1, 'a', 'b', 10), +(1, 0, 'a', 'c', 20), +(2, 2, 'd', 'e', 30); + +statement error +INSERT INTO D VALUES +(9, 9, 'a', 'b', 40); +---- +Constraint Error: Violates foreign key constraint because key "c1: 9, c2: 9" does not exist in the referenced table + +statement error +INSERT INTO D VALUES +(0, 1, 'x', 'y', 50); +---- +Constraint Error: Violates foreign key constraint because key "c3: x, c4: y" does not exist in the referenced table \ No newline at end of file diff --git a/test/sql/copy/csv/14512.test b/test/sql/copy/csv/14512.test index 4d008f809dc2..169a5965f445 100644 --- a/test/sql/copy/csv/14512.test +++ b/test/sql/copy/csv/14512.test @@ -6,23 +6,23 @@ statement ok PRAGMA enable_verification query II -FROM read_csv('data/csv/14512.csv', strict_mode=TRUE); +FROM read_csv('{DATA_DIR}/csv/14512.csv', strict_mode=TRUE); ---- onions , query I -select columns FROM sniff_csv('data/csv/14512.csv') +select columns FROM sniff_csv('{DATA_DIR}/csv/14512.csv') ---- [{'name': ingredients, 'type': VARCHAR}, {'name': item_tax_data, 'type': VARCHAR}] query IIIIIIIIIIIIIIIIIIIIIIIIII -FROM read_csv('data/csv/14512_og.csv', strict_mode = false, delim = ',', quote = '"', escape = '"'); +FROM read_csv('{DATA_DIR}/csv/14512_og.csv', strict_mode = false, delim = ',', quote = '"', escape = '"'); ---- 00000579000098 13.99 EA PINE RIDGE CHENIN VOIGNIER 750.0 ML 1 13 NULL 1 NULL NULL NULL NULL NULL NULL DEFAULT BRAND NULL NULL NULL NULL BEER & WINE NULL NULL 7.25 {"sales_tax":{ "tax_type": "rate_percent", "value" :0.0725}} 00000609082001 3.99 EA MADELAINE MINI MILK CHOCOLATE TURKEY 1.0 OZ 1 13 NULL NULL NULL NULL NULL NULL NULL NULL MADELEINE NULL NULL NULL NULL CANDY NULL NULL 7.25 {"sales_tax":{ "tax_type": "rate_percent", "value" :0.0725}} 00817566020096 9.99 EA COTSWOLD EW 5.3 OZ 1 13 NULL NULL NULL NULL NULL NULL NULL NULL LONG CLAWSON NULL NULL NULL NULL DELI INGREDIENTS: DOUBLE GLOUCESTER CHEESE (PASTEURIZED MILK SALT ENZYMES DAIRY CULTURES ANNATTO EXTRACT AS A COLOR) RECONSTITUTED MINCED ONIONS (2%) DRIED CHIVES. CONTAINS: MILK THIS PRODUCT WAS PRODUCED IN AN ENVIRONMENT THAT ALSO USES PEANUTS TREE NUTS EGGS MILK WHEAT SOY FISH SHELLFISH AND SESAME. 
NULL 2.0 {"sales_tax":{ "tax_type": "rate_percent", "value" :0.02}} query I -select columns FROM sniff_csv('data/csv/14512_og.csv', strict_mode = false, delim = ',', quote = '"', escape = '"') +select columns FROM sniff_csv('{DATA_DIR}/csv/14512_og.csv', strict_mode = false, delim = ',', quote = '"', escape = '"') ---- [{'name': lookup_code, 'type': VARCHAR}, {'name': price, 'type': DOUBLE}, {'name': cost_unit, 'type': VARCHAR}, {'name': item_name, 'type': VARCHAR}, {'name': size, 'type': DOUBLE}, {'name': size_uom, 'type': VARCHAR}, {'name': available, 'type': BIGINT}, {'name': store_code, 'type': BIGINT}, {'name': private_label_item, 'type': VARCHAR}, {'name': alcoholic, 'type': BIGINT}, {'name': alcohol_by_volume, 'type': VARCHAR}, {'name': alcohol_type, 'type': VARCHAR}, {'name': nutri_info, 'type': VARCHAR}, {'name': allergens, 'type': VARCHAR}, {'name': balance_on_hand, 'type': VARCHAR}, {'name': blackout_times, 'type': VARCHAR}, {'name': brand_name, 'type': VARCHAR}, {'name': ca_prop65_text, 'type': VARCHAR}, {'name': ca_prop65_codes, 'type': VARCHAR}, {'name': configurable_products, 'type': VARCHAR}, {'name': country_of_origin, 'type': VARCHAR}, {'name': department, 'type': VARCHAR}, {'name': ingredients, 'type': VARCHAR}, {'name': item_details, 'type': VARCHAR}, {'name': tax_rate, 'type': DOUBLE}, {'name': item_tax_data, 'type': VARCHAR}] diff --git a/test/sql/copy/csv/14874.test b/test/sql/copy/csv/14874.test index bd267a7d6e2b..c380e3e77982 100644 --- a/test/sql/copy/csv/14874.test +++ b/test/sql/copy/csv/14874.test @@ -8,12 +8,12 @@ statement ok PRAGMA enable_verification query I -SELECT count(*) FROM read_csv('data/csv/drug_exposure.csv'); +SELECT count(*) FROM read_csv('{DATA_DIR}/csv/drug_exposure.csv'); ---- 4113 query IIIIIIIIIIIIIIIIIIIIIII -SELECT * FROM read_csv('data/csv/drug_exposure.csv') ORDER BY ALL limit 5; +SELECT * FROM read_csv('{DATA_DIR}/csv/drug_exposure.csv') ORDER BY ALL limit 5; ---- -9223335764168194396 1532249960797525190 43613338 2166-08-24 2166-08-24 08:00:00 2166-08-24 2166-08-24 23:00:00 NULL 32838 NULL NULL 200.0 NULL NULL 4171047 NULL NULL -8938795529793370194 NULL SW 100ml Bag 2000011398 IV mL -9212518512714808847 1484542834460282651 19008723 2161-11-16 2161-11-16 08:00:00 2161-11-19 2161-11-19 10:00:00 NULL 32838 NULL NULL 0.1 NULL NULL 4262914 NULL NULL 2567137523204385703 NULL 45802011222 45085123 NU TUBE diff --git a/test/sql/copy/csv/17738.test b/test/sql/copy/csv/17738.test index 493947a6a8c2..1594798dcd41 100644 --- a/test/sql/copy/csv/17738.test +++ b/test/sql/copy/csv/17738.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query I -FROM read_csv('data/csv/17738_rn.csv',header=False,skip=3, delim = ';'); +FROM read_csv('{DATA_DIR}/csv/17738_rn.csv',header=False,skip=3, delim = ';'); ---- xyz lorem ipsum @@ -21,7 +21,7 @@ Stephen,Tyler,"7452 Terrace ""At the Plaza"" road",SomeTown,SD, 91234 query I -FROM read_csv('data/csv/17738.csv',header=False,skip=3); +FROM read_csv('{DATA_DIR}/csv/17738.csv',header=False,skip=3); ---- xyz lorem ipsum @@ -35,7 +35,7 @@ Stephen,Tyler,"7452 Terrace ""At the Plaza"" road",SomeTown,SD, 91234 "Joan ""the bone"", Anne",Jet,"9th, at Terrace plc",Desert City,CO,00123 query I -FROM read_csv('data/csv/17738.csv',header=False,skip=4); +FROM read_csv('{DATA_DIR}/csv/17738.csv',header=False,skip=4); ---- lorem ipsum NULL @@ -48,7 +48,7 @@ Stephen,Tyler,"7452 Terrace ""At the Plaza"" road",SomeTown,SD, 91234 "Joan ""the bone"", Anne",Jet,"9th, at Terrace plc",Desert City,CO,00123 query IIIIII -FROM 
read_csv('data/csv/17738.csv',header=False,skip=7); +FROM read_csv('{DATA_DIR}/csv/17738.csv',header=False,skip=7); ---- John Doe 120 jefferson st. Riverside NJ 08075 Jack McGinnis 220 hobo Av. Phila PA 09119 diff --git a/test/sql/copy/csv/18579.test b/test/sql/copy/csv/18579.test index e66a7d803836..68a6cbd6271b 100644 --- a/test/sql/copy/csv/18579.test +++ b/test/sql/copy/csv/18579.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query IIIIII -DESCRIBE FROM read_csv('data/csv/18579/*.csv', union_by_name = true); +DESCRIBE FROM read_csv('{DATA_DIR}/csv/18579/*.csv', union_by_name = true); ---- billingAccountName VARCHAR YES NULL NULL NULL partnerName VARCHAR YES NULL NULL NULL diff --git a/test/sql/copy/csv/19411.test_slow b/test/sql/copy/csv/19411.test_slow new file mode 100644 index 000000000000..70bf9e8fbeb3 --- /dev/null +++ b/test/sql/copy/csv/19411.test_slow @@ -0,0 +1,23 @@ +# name: test/sql/copy/csv/19411.test_slow +# description: Test for issue #19411 +# group: [csv] + +statement ok +PRAGMA enable_verification + +statement ok +COPY ( + FROM range(8000) + SELECT + rpad('', CAST(range AS INTEGER), '0') as test_string, + chr(CAST(range AS INTEGER)) as test_unicode, +) TO '__TEST_DIR__/fails.csv'; + + +query I nosort alltypes +FROM read_csv('__TEST_DIR__/fails.csv'); +---- + +query I nosort alltypes +FROM read_csv('__TEST_DIR__/fails.csv', strict_mode = false); +---- diff --git a/test/sql/copy/csv/19578.test b/test/sql/copy/csv/19578.test new file mode 100644 index 000000000000..6376c46f78b8 --- /dev/null +++ b/test/sql/copy/csv/19578.test @@ -0,0 +1,16 @@ +# name: test/sql/copy/csv/19578.test +# description: Test for issue #19578 +# group: [csv] + +statement ok +PRAGMA enable_verification + +query III +SELECT Delimiter, Quote, Escape FROM sniff_csv("data/19578.csv"); +---- +; " (empty) + +query III +SELECT Delimiter, Quote, Escape FROM sniff_csv("data/19578.csv", strict_mode=false); +---- +; " (empty) \ No newline at end of file diff --git a/test/sql/copy/csv/7702.test b/test/sql/copy/csv/7702.test index a3d3ad81ac33..60f014bfad63 100644 --- a/test/sql/copy/csv/7702.test +++ b/test/sql/copy/csv/7702.test @@ -6,11 +6,11 @@ statement ok PRAGMA enable_verification query I -SELECT count(*) FROM read_csv_auto( ['data/csv/error/mismatch/half1.csv', 'data/csv/error/mismatch/half2.csv'], ignore_errors=true, sample_size=1); +SELECT count(*) FROM read_csv_auto( ['{DATA_DIR}/csv/error/mismatch/half1.csv', '{DATA_DIR}/csv/error/mismatch/half2.csv'], ignore_errors=true, sample_size=1); ---- 9102 query I -SELECT count(*) FROM read_csv_auto( ['data/csv/error/mismatch/half2.csv', 'data/csv/error/mismatch/half1.csv'], ignore_errors=true, sample_size=1); +SELECT count(*) FROM read_csv_auto( ['{DATA_DIR}/csv/error/mismatch/half2.csv', '{DATA_DIR}/csv/error/mismatch/half1.csv'], ignore_errors=true, sample_size=1); ---- 9102 \ No newline at end of file diff --git a/test/sql/copy/csv/afl/fuzz_20250211_crash.test b/test/sql/copy/csv/afl/fuzz_20250211_crash.test index 7a10d16a002d..74a3ec81b111 100644 --- a/test/sql/copy/csv/afl/fuzz_20250211_crash.test +++ b/test/sql/copy/csv/afl/fuzz_20250211_crash.test @@ -6,5 +6,5 @@ statement ok PRAGMA enable_verification statement maybe -FROM read_csv('data/csv/afl/20250211_csv_fuzz_crash/case_53.csv', buffer_size=42); +FROM read_csv('{DATA_DIR}/csv/afl/20250211_csv_fuzz_crash/case_53.csv', buffer_size=42); ---- diff --git a/test/sql/copy/csv/afl/fuzz_20250226.test b/test/sql/copy/csv/afl/fuzz_20250226.test index 4fccccc59d73..8acaf796dc81 100644 ---
a/test/sql/copy/csv/afl/fuzz_20250226.test +++ b/test/sql/copy/csv/afl/fuzz_20250226.test @@ -13,5 +13,5 @@ select count(file) from glob('./data/csv/afl/20250226_csv_fuzz_error/*'); 1 statement maybe -FROM read_csv('data/csv/afl/20250226_csv_fuzz_error/case_1.csv', force_not_null=012%0, columns={'a':'JSON'}); +FROM read_csv('{DATA_DIR}/csv/afl/20250226_csv_fuzz_error/case_1.csv', force_not_null=012%0, columns={'a':'JSON'}); ---- \ No newline at end of file diff --git a/test/sql/copy/csv/afl/test_afl_ignore_errors.test b/test/sql/copy/csv/afl/test_afl_ignore_errors.test index 5c6aa9b15aea..777e11e9fd81 100644 --- a/test/sql/copy/csv/afl/test_afl_ignore_errors.test +++ b/test/sql/copy/csv/afl/test_afl_ignore_errors.test @@ -8,7 +8,7 @@ PRAGMA enable_verification loop i 1 56 statement maybe -FROM read_csv('data/csv/afl/ignore_errors/${i}.csv', ignore_errors = true) +FROM read_csv('{DATA_DIR}/csv/afl/ignore_errors/${i}.csv', ignore_errors = true) ---- endloop \ No newline at end of file diff --git a/test/sql/copy/csv/afl/test_afl_no_parameter.test b/test/sql/copy/csv/afl/test_afl_no_parameter.test index d8e87a74fa64..a424dde1ed4a 100644 --- a/test/sql/copy/csv/afl/test_afl_no_parameter.test +++ b/test/sql/copy/csv/afl/test_afl_no_parameter.test @@ -8,7 +8,7 @@ PRAGMA enable_verification loop i 1 25 statement maybe -FROM 'data/csv/afl/no_parameter/${i}.csv' +FROM '{DATA_DIR}/csv/afl/no_parameter/${i}.csv' ---- endloop \ No newline at end of file diff --git a/test/sql/copy/csv/afl/test_afl_null_padding.test b/test/sql/copy/csv/afl/test_afl_null_padding.test index 7b406aa7a3e7..b83f85941d1b 100644 --- a/test/sql/copy/csv/afl/test_afl_null_padding.test +++ b/test/sql/copy/csv/afl/test_afl_null_padding.test @@ -8,7 +8,7 @@ PRAGMA enable_verification loop i 1 46 statement maybe -FROM read_csv('data/csv/afl/null_padding/${i}.csv', null_padding=true) +FROM read_csv('{DATA_DIR}/csv/afl/null_padding/${i}.csv', null_padding=true) ---- endloop \ No newline at end of file diff --git a/test/sql/copy/csv/afl/test_afl_skip.test b/test/sql/copy/csv/afl/test_afl_skip.test index 1763cd1433a9..06a2d1db5e24 100644 --- a/test/sql/copy/csv/afl/test_afl_skip.test +++ b/test/sql/copy/csv/afl/test_afl_skip.test @@ -8,7 +8,7 @@ PRAGMA enable_verification loop i 1 3 statement maybe -FROM read_csv('data/csv/afl/skip/${i}.csv', skip=1) +FROM read_csv('{DATA_DIR}/csv/afl/skip/${i}.csv', skip=1) ---- endloop \ No newline at end of file diff --git a/test/sql/copy/csv/afl/test_fuzz_3977.test b/test/sql/copy/csv/afl/test_fuzz_3977.test index 243861384772..e7a913db9100 100644 --- a/test/sql/copy/csv/afl/test_fuzz_3977.test +++ b/test/sql/copy/csv/afl/test_fuzz_3977.test @@ -10,355 +10,355 @@ select count(file) from glob('./data/csv/afl/3977/*'); 88 statement maybe -FROM read_csv('data/csv/afl/3977/case_1.csv', rejects_scan=0, buffer_size=655371, all_varchar=false, rejects_scan=0, buffer_size=42); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_1.csv', rejects_scan=0, buffer_size=655371, all_varchar=false, rejects_scan=0, buffer_size=42); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_2.csv', names=['a','b','c','d'], store_rejects=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_2.csv', names=['a','b','c','d'], store_rejects=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_3.csv', names=['a','b','c','d'], store_rejects=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_3.csv', names=['a','b','c','d'], store_rejects=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_4.csv', 
names=['a','b','c','d'], store_rejects=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_4.csv', names=['a','b','c','d'], store_rejects=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_5.csv', auto_detect=false, columns={'a': 'VARCHAR'}, escape='"', header=false, quote='"', strict_mode=true, store_rejects=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_5.csv', auto_detect=false, columns={'a': 'VARCHAR'}, escape='"', header=false, quote='"', strict_mode=true, store_rejects=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_6.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_6.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_7.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_7.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_8.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_8.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_9.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=false); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_9.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=false); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_10.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_10.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_11.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_11.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_12.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_12.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_13.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM 
read_csv('{DATA_DIR}/csv/afl/3977/case_13.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_14.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_14.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_15.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_15.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_16.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_16.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_17.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_17.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_18.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_18.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_19.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_19.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_20.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_20.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_21.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_21.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_22.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', 
rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_22.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_23.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_23.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_24.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_24.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_25.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_25.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_26.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_26.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_27.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_27.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_28.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_28.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_29.csv', auto_detect=false, buffer_size=65536, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_29.csv', auto_detect=false, buffer_size=65536, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_30.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_30.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_31.csv', auto_detect=false, buffer_size=42, 
columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_31.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_32.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_32.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_33.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_33.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_34.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_34.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_35.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_35.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_36.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_36.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_37.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_37.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_38.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_38.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_39.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_39.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM 
read_csv('data/csv/afl/3977/case_40.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, comment=';', rejects_table='"', strict_mode=false); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_40.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, comment=';', rejects_table='"', strict_mode=false); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_41.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_41.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_42.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_42.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_43.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_43.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_44.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_44.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_45.csv', auto_detect=false, buffer_size=810, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_45.csv', auto_detect=false, buffer_size=810, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_46.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_46.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_47.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_47.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_48.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_48.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', 
strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_49.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_49.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_50.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_50.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_51.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', '|':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_51.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', '|':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_52.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_52.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_53.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_53.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_54.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAr'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_54.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAr'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_55.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_55.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_56.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_56.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_57.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_57.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 
'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_58.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_58.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_59.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','"':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_59.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','"':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_60.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_60.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_61.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_61.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_62.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_62.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_63.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_63.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_64.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_64.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_65.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_65.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_66.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_66.csv', auto_detect=false, buffer_size=42, 
columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_67.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_67.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_68.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_68.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_69.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_69.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_70.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_70.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_71.csv', auto_detect=false, buffer_size=16711722, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_71.csv', auto_detect=false, buffer_size=16711722, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_72.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','F':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_72.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','F':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_73.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_73.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_74.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_74.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_75.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM 
read_csv('{DATA_DIR}/csv/afl/3977/case_75.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_76.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_76.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_77.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_77.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_78.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_78.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_79.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_79.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_80.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_80.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_81.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_81.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_82.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_82.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_83.csv', auto_detect=false, parallel=false, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_83.csv', auto_detect=false, parallel=false, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_84.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', 
rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_84.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_85.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_85.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_86.csv', auto_detect=false, buffer_size=720938, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_86.csv', auto_detect=false, buffer_size=720938, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_87.csv', auto_detect=false, buffer_size=42, columns={'a2.0-22222222222222222.0222->>':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_87.csv', auto_detect=false, buffer_size=42, columns={'a2.0-22222222222222222.0222->>':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement maybe -FROM read_csv('data/csv/afl/3977/case_88.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); +FROM read_csv('{DATA_DIR}/csv/afl/3977/case_88.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', rejects_table='"', strict_mode=true); ---- statement ok diff --git a/test/sql/copy/csv/afl/test_fuzz_4086.test b/test/sql/copy/csv/afl/test_fuzz_4086.test index 23ed7869f1f0..9e3aa6415689 100644 --- a/test/sql/copy/csv/afl/test_fuzz_4086.test +++ b/test/sql/copy/csv/afl/test_fuzz_4086.test @@ -7,15 +7,15 @@ require json loop i 0 2 statement maybe -FROM read_csv('data/csv/afl/4086/case_1.csv', auto_detect=false, columns={'json': 'JSON'}, delim=NULL, buffer_size=42, store_rejects=true, rejects_limit=658694493994253607); +FROM read_csv('{DATA_DIR}/csv/afl/4086/case_1.csv', auto_detect=false, columns={'json': 'JSON'}, delim=NULL, buffer_size=42, store_rejects=true, rejects_limit=658694493994253607); ---- statement maybe -FROM read_csv('data/csv/afl/4086/case_2.csv', auto_detect=false, columns={'json': 'JSON'}, delim=NULL, buffer_size=42, store_rejects=true, rejects_limit=658694493994253607); +FROM read_csv('{DATA_DIR}/csv/afl/4086/case_2.csv', auto_detect=false, columns={'json': 'JSON'}, delim=NULL, buffer_size=42, store_rejects=true, rejects_limit=658694493994253607); ---- statement maybe -FROM read_csv('data/csv/afl/4086/case_3.csv', auto_detect=false, columns={'json': 'JSON'}, delim='\0', buffer_size=42, store_rejects=true, rejects_limit=658694493994253607); +FROM read_csv('{DATA_DIR}/csv/afl/4086/case_3.csv', auto_detect=false, columns={'json': 'JSON'}, delim='\0', buffer_size=42, store_rejects=true, rejects_limit=658694493994253607); ---- statement ok diff --git a/test/sql/copy/csv/afl/test_fuzz_4172.test b/test/sql/copy/csv/afl/test_fuzz_4172.test index e22e66604e57..3c7ddb003e13 100644 --- a/test/sql/copy/csv/afl/test_fuzz_4172.test +++ b/test/sql/copy/csv/afl/test_fuzz_4172.test @@ -6,5 +6,5 @@ statement ok PRAGMA 
enable_verification
 statement maybe
-FROM read_csv('data/csv/afl/4172/case_4.csv', ignore_errors=true, buffer_size=1, store_rejects=false);
+FROM read_csv('{DATA_DIR}/csv/afl/4172/case_4.csv', ignore_errors=true, buffer_size=1, store_rejects=false);
 ----
diff --git a/test/sql/copy/csv/afl/test_fuzz_4496.test b/test/sql/copy/csv/afl/test_fuzz_4496.test
index 58dd8b4e3e74..7df6d3193006 100644
--- a/test/sql/copy/csv/afl/test_fuzz_4496.test
+++ b/test/sql/copy/csv/afl/test_fuzz_4496.test
@@ -8,11 +8,11 @@ require json
 loop i 0 2
 statement maybe
-FROM read_csv('data/csv/afl/4496/crashes/case_0.csv', auto_detect=false, buffer_size=42, columns={'json': 'JSON'}, delim=NULL, rejects_limit=658694493994253607, store_rejects=true);
+FROM read_csv('{DATA_DIR}/csv/afl/4496/crashes/case_0.csv', auto_detect=false, buffer_size=42, columns={'json': 'JSON'}, delim=NULL, rejects_limit=658694493994253607, store_rejects=true);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/4496/crashes/case_1.csv', auto_detect=false, buffer_size=42, columns={'json': 'JSON'}, delim=NULL, rejects_limit=658694493994253607, store_rejects=true);
+FROM read_csv('{DATA_DIR}/csv/afl/4496/crashes/case_1.csv', auto_detect=false, buffer_size=42, columns={'json': 'JSON'}, delim=NULL, rejects_limit=658694493994253607, store_rejects=true);
 ----
 statement ok
diff --git a/test/sql/copy/csv/afl/test_fuzz_4793.test b/test/sql/copy/csv/afl/test_fuzz_4793.test
index 4b586ebf6cf8..d0bcff286d92 100644
--- a/test/sql/copy/csv/afl/test_fuzz_4793.test
+++ b/test/sql/copy/csv/afl/test_fuzz_4793.test
@@ -9,10 +9,10 @@ PRAGMA enable_verification
 statement maybe
-FROM read_csv('data/csv/afl/4793/crashes/case_0.csv', auto_detect=false, columns={'a':'varchar'}, delim='', encoding='latin-1', header=false, quote='');
+FROM read_csv('{DATA_DIR}/csv/afl/4793/crashes/case_0.csv', auto_detect=false, columns={'a':'varchar'}, delim='', encoding='latin-1', header=false, quote='');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/4793/crashes/case_1.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', escape='"', quote='"');
+FROM read_csv('{DATA_DIR}/csv/afl/4793/crashes/case_1.csv', auto_detect=false, buffer_size=42, columns={'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, delim=';', escape='"', quote='"');
 ----
diff --git a/test/sql/copy/csv/afl/test_fuzz_5194.test b/test/sql/copy/csv/afl/test_fuzz_5194.test
index 91f588d19a85..252f59bfb1ee 100644
--- a/test/sql/copy/csv/afl/test_fuzz_5194.test
+++ b/test/sql/copy/csv/afl/test_fuzz_5194.test
@@ -8,10 +8,10 @@ statement ok
 PRAGMA enable_verification
 statement maybe
-FROM read_csv('data/csv/afl/5194/crashes/case_0.csv', auto_detect=false, buffer_size=8, columns={'a': 'integer','b': 'integer','c': 'integer'}, header=true, maximum_line_size=0);
+FROM read_csv('{DATA_DIR}/csv/afl/5194/crashes/case_0.csv', auto_detect=false, buffer_size=8, columns={'a': 'integer','b': 'integer','c': 'integer'}, header=true, maximum_line_size=0);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/5194/crashes/case_4.csv', buffer_size=30, delim=';', union_by_name=false, header=false, null_padding=true);
+FROM read_csv('{DATA_DIR}/csv/afl/5194/crashes/case_4.csv', buffer_size=30, delim=';', union_by_name=false, header=false, null_padding=true);
 ----
diff --git a/test/sql/copy/csv/auto/test_14177.test b/test/sql/copy/csv/auto/test_14177.test
index 4e5354e44caf..5fb908796a9e 100644
--- a/test/sql/copy/csv/auto/test_14177.test
+++ b/test/sql/copy/csv/auto/test_14177.test
@@ -6,6 +6,6 @@ statement ok
 PRAGMA enable_verification
 query I
-select count(*) FROM (FROM read_csv('data/csv/auto/14177.csv', buffer_size=80, ignore_errors = true)) as t
+select count(*) FROM (FROM read_csv('{DATA_DIR}/csv/auto/14177.csv', buffer_size=80, ignore_errors = true)) as t
 ----
 5
\ No newline at end of file
diff --git a/test/sql/copy/csv/auto/test_auto_5250.test b/test/sql/copy/csv/auto/test_auto_5250.test
index 8ace6e0add37..8a281f9b52ac 100644
--- a/test/sql/copy/csv/auto/test_auto_5250.test
+++ b/test/sql/copy/csv/auto/test_auto_5250.test
@@ -9,6 +9,6 @@ statement ok
 PRAGMA verify_parallelism
 query I
-select count(*) from read_csv_auto('data/csv/page_namespacepage_title_sample.csv', SAMPLE_SIZE = -1)
+select count(*) from read_csv_auto('{DATA_DIR}/csv/page_namespacepage_title_sample.csv', SAMPLE_SIZE = -1)
 ----
 3993
diff --git a/test/sql/copy/csv/auto/test_auto_5378.test b/test/sql/copy/csv/auto/test_auto_5378.test
index 4b2030beb660..33bb48f96412 100644
--- a/test/sql/copy/csv/auto/test_auto_5378.test
+++ b/test/sql/copy/csv/auto/test_auto_5378.test
@@ -9,6 +9,6 @@ statement ok
 PRAGMA verify_parallelism
 query I
-SELECT count(*) FROM read_csv_auto ('data/csv/auto/titlebasicsdebug.tsv', nullstr='\N', sample_size = -1);
+SELECT count(*) FROM read_csv_auto ('{DATA_DIR}/csv/auto/titlebasicsdebug.tsv', nullstr='\N', sample_size = -1);
 ----
 3002
diff --git a/test/sql/copy/csv/auto/test_auto_8231.test b/test/sql/copy/csv/auto/test_auto_8231.test
index d781745cf232..86878d31ca45 100644
--- a/test/sql/copy/csv/auto/test_auto_8231.test
+++ b/test/sql/copy/csv/auto/test_auto_8231.test
@@ -6,7 +6,7 @@ statement ok
 PRAGMA enable_verification
 statement ok
-create view locations_header_trailing_comma as SELECT * from read_csv_auto('data/csv/locations_row_trailing_comma.csv', null_padding=True)
+create view locations_header_trailing_comma as SELECT * from read_csv_auto('{DATA_DIR}/csv/locations_row_trailing_comma.csv', null_padding=True)
 query IIIII
 SELECT * from locations_header_trailing_comma
diff --git a/test/sql/copy/csv/auto/test_auto_8573.test b/test/sql/copy/csv/auto/test_auto_8573.test
index a49337271f67..aa931c6af620 100644
--- a/test/sql/copy/csv/auto/test_auto_8573.test
+++ b/test/sql/copy/csv/auto/test_auto_8573.test
@@ -10,11 +10,11 @@ statement ok
 PRAGMA verify_parallelism
 query II
-SELECT typeof(bignumber), typeof(bignumber::DECIMAL(25,3)) FROM read_csv('data/csv/big_number.csv', COLUMNS={'bignumber': 'DECIMAL(25,3)'}, QUOTE='"', DELIM=',');
+SELECT typeof(bignumber), typeof(bignumber::DECIMAL(25,3)) FROM read_csv('{DATA_DIR}/csv/big_number.csv', COLUMNS={'bignumber': 'DECIMAL(25,3)'}, QUOTE='"', DELIM=',');
 ----
 DECIMAL(25,3) DECIMAL(25,3)
 query II
-SELECT typeof(bignumber), typeof(bignumber::DECIMAL(25,3)) FROM read_csv_auto('data/csv/big_number.csv', COLUMNS={'bignumber': 'DECIMAL(25,3)'}, QUOTE='"', DELIM=',');
+SELECT typeof(bignumber), typeof(bignumber::DECIMAL(25,3)) FROM read_csv_auto('{DATA_DIR}/csv/big_number.csv', COLUMNS={'bignumber': 'DECIMAL(25,3)'}, QUOTE='"', DELIM=',');
 ----
 DECIMAL(25,3) DECIMAL(25,3)
diff --git a/test/sql/copy/csv/auto/test_auto_8649.test b/test/sql/copy/csv/auto/test_auto_8649.test
index 48f4383395e5..68dd23e3ffcd 100644
--- a/test/sql/copy/csv/auto/test_auto_8649.test
+++ b/test/sql/copy/csv/auto/test_auto_8649.test
@@ -11,7 +11,7 @@ PRAGMA verify_parallelism
 # Sample
 query I
-SELECT * FROM read_csv_auto("data/csv/dim0.csv") ;
+SELECT * FROM read_csv_auto("{DATA_DIR}/csv/dim0.csv") ;
 ----
 T 0
diff --git a/test/sql/copy/csv/auto/test_auto_8860.test b/test/sql/copy/csv/auto/test_auto_8860.test
index 7a1d421ee84a..35bb75161876 100644
--- a/test/sql/copy/csv/auto/test_auto_8860.test
+++ b/test/sql/copy/csv/auto/test_auto_8860.test
@@ -9,6 +9,6 @@ statement ok
 PRAGMA verify_parallelism
 query I
-SELECT count(*) FROM read_csv_auto("data/csv/auto/product_codes_HS17_V202301.csv.gz", quote = '"', comment='', delim = ',') ;
+SELECT count(*) FROM read_csv_auto("{DATA_DIR}/csv/auto/product_codes_HS17_V202301.csv.gz", quote = '"', comment='', delim = ',') ;
 ----
 5384
diff --git a/test/sql/copy/csv/auto/test_auto_column_type_opt.test b/test/sql/copy/csv/auto/test_auto_column_type_opt.test
index 0db3ae9f1852..6736b0160794 100644
--- a/test/sql/copy/csv/auto/test_auto_column_type_opt.test
+++ b/test/sql/copy/csv/auto/test_auto_column_type_opt.test
@@ -8,67 +8,67 @@ PRAGMA enable_verification
 # Test read_csv wout auto_detect throws
 statement error
-select * from read_csv('data/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(a := 'INTEGER'))
+select * from read_csv('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(a := 'INTEGER'))
 ----
 Columns with names: "a" do not exist in the CSV File
 # Test non-struct throws
 statement error
-select * from read_csv_auto('data/csv/test/multi_column_string.csv', COLUMN_TYPES=1)
+select * from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMN_TYPES=1)
 ----
 COLUMN_TYPES requires a struct or list as input
 # Test empty throws
 statement error
-select * from read_csv_auto('data/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK())
+select * from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK())
 ----
 Can't pack nothing into a struct
 # Test funky type throws
 statement error
-select * from read_csv_auto('data/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(a := 'BLA'))
+select * from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(a := 'BLA'))
 ----
 Type with name BLA does not exist!
 # Test funky name throws
 statement error
-select * from read_csv_auto('data/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(bla := 'INTEGER'))
+select * from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(bla := 'INTEGER'))
 ----
 Columns with names: "bla" do not exist in the CSV File
 # Test wrong type throws
 statement error
-select * from read_csv_auto('data/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(column3 := 'INTEGER'))
+select * from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(column3 := 'INTEGER'))
 ----
 This type was either manually set or derived from an existing table. Select a different type to correctly parse this column.
 # Test 1st column defined
 query I
-SELECT typeof(#1) from read_csv_auto('data/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(column0 := 'DOUBLE')) LIMIT 1
+SELECT typeof(#1) from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(column0 := 'DOUBLE')) LIMIT 1
 ----
 DOUBLE
 query I
-SELECT typeof(#1) from read_csv_auto('data/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(column0 := 'INTEGER')) LIMIT 1
+SELECT typeof(#1) from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(column0 := 'INTEGER')) LIMIT 1
 ----
 INTEGER
 # Test 3rd column defined
 query I
-SELECT typeof(#3) from read_csv_auto('data/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK( column2 := 'HUGEINT')) LIMIT 1
+SELECT typeof(#3) from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK( column2 := 'HUGEINT')) LIMIT 1
 ----
 HUGEINT
 # Test 1st and 3rd column defined
 query II
-SELECT typeof(#1),typeof(#3) from read_csv_auto('data/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(column0 := 'BIGINT', column2 := 'HUGEINT')) LIMIT 1
+SELECT typeof(#1),typeof(#3) from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(column0 := 'BIGINT', column2 := 'HUGEINT')) LIMIT 1
 ----
 BIGINT HUGEINT
 query IIII
-SELECT * from read_csv_auto('data/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(column0 := 'BIGINT', column2 := 'HUGEINT'))
+SELECT * from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMN_TYPES=STRUCT_PACK(column0 := 'BIGINT', column2 := 'HUGEINT'))
 ----
 1 6370 371 p1
 10 214 465 p2
diff --git a/test/sql/copy/csv/auto/test_auto_cranlogs.test b/test/sql/copy/csv/auto/test_auto_cranlogs.test
index 942b55368d3b..494d4fdef7f9 100644
--- a/test/sql/copy/csv/auto/test_auto_cranlogs.test
+++ b/test/sql/copy/csv/auto/test_auto_cranlogs.test
@@ -7,7 +7,7 @@ PRAGMA enable_verification
 statement ok
-CREATE TABLE cranlogs AS SELECT * FROM read_csv_auto ('data/csv/real/tmp2013-06-15.csv.gz');
+CREATE TABLE cranlogs AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/real/tmp2013-06-15.csv.gz');
 query I
 SELECT COUNT(*) FROM cranlogs;
@@ -28,7 +28,7 @@ statement ok
 PRAGMA verify_parallelism
 statement ok
-CREATE TABLE cranlogs2 AS SELECT * FROM read_csv_auto ('data/csv/real/tmp2013-06-15.csv.gz');
+CREATE TABLE cranlogs2 AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/real/tmp2013-06-15.csv.gz');
 query IIIIIIIIII
 (SELECT * FROM cranlogs EXCEPT SELECT * FROM cranlogs2)
diff --git a/test/sql/copy/csv/auto/test_auto_greek_ncvoter.test b/test/sql/copy/csv/auto/test_auto_greek_ncvoter.test
index ff8e0dd078f9..ad274bd19eb6 100644
--- a/test/sql/copy/csv/auto/test_auto_greek_ncvoter.test
+++ b/test/sql/copy/csv/auto/test_auto_greek_ncvoter.test
@@ -10,7 +10,7 @@ statement ok
 CREATE TABLE IF NOT EXISTS ncvoters(county_id INTEGER, county_desc STRING, voter_reg_num STRING,status_cd STRING, voter_status_desc STRING, reason_cd STRING, voter_status_reason_desc STRING, absent_ind STRING, name_prefx_cd STRING,last_name STRING, first_name STRING, midl_name STRING, name_sufx_cd STRING, full_name_rep STRING,full_name_mail STRING, house_num STRING, half_code STRING, street_dir STRING, street_name STRING, street_type_cd STRING, street_sufx_cd STRING, unit_designator STRING, unit_num STRING, res_city_desc STRING,state_cd STRING, zip_code STRING, res_street_address STRING, res_city_state_zip STRING, mail_addr1 STRING, mail_addr2 STRING, mail_addr3 STRING, mail_addr4 STRING, mail_city STRING, mail_state STRING, mail_zipcode STRING, mail_city_state_zip STRING, area_cd STRING, phone_num STRING, full_phone_number STRING, drivers_lic STRING, race_code STRING, race_desc STRING, ethnic_code STRING, ethnic_desc STRING, party_cd STRING, party_desc STRING, sex_code STRING, sex STRING, birth_age STRING, birth_place STRING, registr_dt STRING, precinct_abbrv STRING, precinct_desc STRING,municipality_abbrv STRING, municipality_desc STRING, ward_abbrv STRING, ward_desc STRING, cong_dist_abbrv STRING, cong_dist_desc STRING, super_court_abbrv STRING, super_court_desc STRING, judic_dist_abbrv STRING, judic_dist_desc STRING, nc_senate_abbrv STRING, nc_senate_desc STRING, nc_house_abbrv STRING, nc_house_desc STRING,county_commiss_abbrv STRING, county_commiss_desc STRING, township_abbrv STRING, township_desc STRING,school_dist_abbrv STRING, school_dist_desc STRING, fire_dist_abbrv STRING, fire_dist_desc STRING, water_dist_abbrv STRING, water_dist_desc STRING, sewer_dist_abbrv STRING, sewer_dist_desc STRING, sanit_dist_abbrv STRING, sanit_dist_desc STRING, rescue_dist_abbrv STRING, rescue_dist_desc STRING, munic_dist_abbrv STRING, munic_dist_desc STRING, dist_1_abbrv STRING, dist_1_desc STRING, dist_2_abbrv STRING, dist_2_desc STRING, confidential_ind STRING, age STRING, ncid STRING, vtd_abbrv STRING, vtd_desc STRING);
 query I
-COPY ncvoters FROM 'data/csv/real/ncvoter.csv' (FORMAT CSV, AUTO_DETECT TRUE);
+COPY ncvoters FROM '{DATA_DIR}/csv/real/ncvoter.csv' (FORMAT CSV, AUTO_DETECT TRUE);
 ----
 10
@@ -35,7 +35,7 @@ statement ok
 CREATE TABLE ncvoters2 AS SELECT * FROM ncvoters LIMIT 0
 statement ok
-COPY ncvoters2 FROM 'data/csv/real/ncvoter.csv' (FORMAT CSV, AUTO_DETECT TRUE);
+COPY ncvoters2 FROM '{DATA_DIR}/csv/real/ncvoter.csv' (FORMAT CSV, AUTO_DETECT TRUE);
 query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
 (SELECT * FROM ncvoters EXCEPT SELECT * FROM ncvoters2)
diff --git a/test/sql/copy/csv/auto/test_auto_greek_utf8.test b/test/sql/copy/csv/auto/test_auto_greek_utf8.test
index 57f8b85c7028..8cfe7316abde 100644
--- a/test/sql/copy/csv/auto/test_auto_greek_utf8.test
+++ b/test/sql/copy/csv/auto/test_auto_greek_utf8.test
@@ -11,7 +11,7 @@ PRAGMA verify_parallelism
 statement ok
-CREATE TABLE greek_utf8 AS SELECT i, nfc_normalize(j) j, k FROM read_csv_auto ('data/csv/real/greek_utf8.csv') t(i, j, k)
+CREATE TABLE greek_utf8 AS SELECT i, nfc_normalize(j) j, k FROM read_csv_auto ('{DATA_DIR}/csv/real/greek_utf8.csv') t(i, j, k)
 query I
 SELECT COUNT(*) FROM greek_utf8;
@@ -32,7 +32,7 @@ SELECT * FROM greek_utf8 ORDER BY 1;
 # can also do this
 query ITI
-SELECT i, nfc_normalize(j) j, k FROM 'data/csv/real/greek_utf8.csv' t(i, j, k)
+SELECT i, nfc_normalize(j) j, k FROM '{DATA_DIR}/csv/real/greek_utf8.csv' t(i, j, k)
 ----
 1689 00i\047m 2
 1690 00i\047v 2
diff --git a/test/sql/copy/csv/auto/test_auto_imdb.test b/test/sql/copy/csv/auto/test_auto_imdb.test
index 94b01a7d6635..c9b5e90219a5 100644
--- a/test/sql/copy/csv/auto/test_auto_imdb.test
+++ b/test/sql/copy/csv/auto/test_auto_imdb.test
@@ -7,7 +7,7 @@ PRAGMA enable_verification
 statement ok
-CREATE TABLE movie_info AS SELECT * FROM read_csv_auto ('data/csv/real/imdb_movie_info_escaped.csv');
+CREATE TABLE movie_info AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/real/imdb_movie_info_escaped.csv');
 query I
 SELECT COUNT(*) FROM movie_info;
@@ -16,7 +16,7 @@ SELECT COUNT(*) FROM movie_info;
 statement ok
-CREATE TABLE movie_info2 AS SELECT * FROM read_csv_auto ('data/csv/real/imdb_movie_info_escaped.csv');
+CREATE TABLE movie_info2 AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/real/imdb_movie_info_escaped.csv');
 query IIIII
 (FROM movie_info EXCEPT FROM movie_info2)
diff --git a/test/sql/copy/csv/auto/test_auto_lineitem.test b/test/sql/copy/csv/auto/test_auto_lineitem.test
index fbf227ea98fc..384e80906174 100644
--- a/test/sql/copy/csv/auto/test_auto_lineitem.test
+++ b/test/sql/copy/csv/auto/test_auto_lineitem.test
@@ -10,7 +10,7 @@ statement ok
 CREATE TABLE lineitem(l_orderkey INT NOT NULL, l_partkey INT NOT NULL, l_suppkey INT NOT NULL, l_linenumber INT NOT NULL, l_quantity INTEGER NOT NULL, l_extendedprice DECIMAL(15,2) NOT NULL, l_discount DECIMAL(15,2) NOT NULL, l_tax DECIMAL(15,2) NOT NULL, l_returnflag VARCHAR(1) NOT NULL, l_linestatus VARCHAR(1) NOT NULL, l_shipdate DATE NOT NULL, l_commitdate DATE NOT NULL, l_receiptdate DATE NOT NULL, l_shipinstruct VARCHAR(25) NOT NULL, l_shipmode VARCHAR(10) NOT NULL, l_comment VARCHAR(44) NOT NULL);
 query I
-COPY lineitem FROM 'data/csv/real/lineitem_sample.csv' (FORMAT CSV, AUTO_DETECT TRUE);
+COPY lineitem FROM '{DATA_DIR}/csv/real/lineitem_sample.csv' (FORMAT CSV, AUTO_DETECT TRUE);
 ----
 10
@@ -36,7 +36,7 @@ statement ok
 CREATE TABLE lineitem2 AS SELECT * FROM lineitem LIMIT 0
 statement ok
-COPY lineitem2 FROM 'data/csv/real/lineitem_sample.csv' (FORMAT CSV, AUTO_DETECT TRUE);
+COPY lineitem2 FROM '{DATA_DIR}/csv/real/lineitem_sample.csv' (FORMAT CSV, AUTO_DETECT TRUE);
 query IIIIIIIIIIIIIIII
 (SELECT * FROM lineitem EXCEPT SELECT * FROM lineitem2)
diff --git a/test/sql/copy/csv/auto/test_auto_ontime.test b/test/sql/copy/csv/auto/test_auto_ontime.test
index c7a367bc20ab..161d5bafe22d 100644
--- a/test/sql/copy/csv/auto/test_auto_ontime.test
+++ b/test/sql/copy/csv/auto/test_auto_ontime.test
@@ -10,7 +10,7 @@ statement ok
 CREATE TABLE ontime(year SMALLINT, quarter SMALLINT, month SMALLINT, dayofmonth SMALLINT, dayofweek SMALLINT, flightdate DATE, uniquecarrier CHAR(7), airlineid DECIMAL(8,2), carrier CHAR(2), tailnum VARCHAR(50), flightnum VARCHAR(10), originairportid INTEGER, originairportseqid INTEGER, origincitymarketid INTEGER, origin CHAR(5), origincityname VARCHAR(100), originstate CHAR(2), originstatefips VARCHAR(10), originstatename VARCHAR(100), originwac DECIMAL(8,2), destairportid INTEGER, destairportseqid INTEGER, destcitymarketid INTEGER, dest CHAR(5), destcityname VARCHAR(100), deststate CHAR(2), deststatefips VARCHAR(10), deststatename VARCHAR(100), destwac DECIMAL(8,2), crsdeptime DECIMAL(8,2), deptime DECIMAL(8,2), depdelay DECIMAL(8,2), depdelayminutes DECIMAL(8,2), depdel15 DECIMAL(8,2), departuredelaygroups DECIMAL(8,2), deptimeblk VARCHAR(20), taxiout DECIMAL(8,2), wheelsoff DECIMAL(8,2), wheelson DECIMAL(8,2), taxiin DECIMAL(8,2), crsarrtime DECIMAL(8,2), arrtime DECIMAL(8,2), arrdelay DECIMAL(8,2), arrdelayminutes DECIMAL(8,2), arrdel15 DECIMAL(8,2), arrivaldelaygroups DECIMAL(8,2), arrtimeblk VARCHAR(20), cancelled DECIMAL(8,2), cancellationcode CHAR(1), diverted DECIMAL(8,2), crselapsedtime DECIMAL(8,2), actualelapsedtime DECIMAL(8,2), airtime DECIMAL(8,2), flights DECIMAL(8,2), distance DECIMAL(8,2), distancegroup DECIMAL(8,2), carrierdelay DECIMAL(8,2), weatherdelay DECIMAL(8,2), nasdelay DECIMAL(8,2), securitydelay DECIMAL(8,2), lateaircraftdelay DECIMAL(8,2), firstdeptime VARCHAR(10), totaladdgtime VARCHAR(10), longestaddgtime VARCHAR(10), divairportlandings VARCHAR(10), divreacheddest VARCHAR(10), divactualelapsedtime VARCHAR(10), divarrdelay VARCHAR(10), divdistance VARCHAR(10), div1airport VARCHAR(10), div1aiportid INTEGER, div1airportseqid INTEGER, div1wheelson VARCHAR(10), div1totalgtime VARCHAR(10), div1longestgtime VARCHAR(10), div1wheelsoff VARCHAR(10), div1tailnum VARCHAR(10), div2airport VARCHAR(10), div2airportid INTEGER, div2airportseqid INTEGER, div2wheelson VARCHAR(10), div2totalgtime VARCHAR(10), div2longestgtime VARCHAR(10), div2wheelsoff VARCHAR(10), div2tailnum VARCHAR(10), div3airport VARCHAR(10), div3airportid INTEGER, div3airportseqid INTEGER, div3wheelson VARCHAR(10), div3totalgtime VARCHAR(10), div3longestgtime VARCHAR(10), div3wheelsoff VARCHAR(10), div3tailnum VARCHAR(10), div4airport VARCHAR(10), div4airportid INTEGER, div4airportseqid INTEGER, div4wheelson VARCHAR(10), div4totalgtime VARCHAR(10), div4longestgtime VARCHAR(10), div4wheelsoff VARCHAR(10), div4tailnum VARCHAR(10), div5airport VARCHAR(10), div5airportid INTEGER, div5airportseqid INTEGER, div5wheelson VARCHAR(10), div5totalgtime VARCHAR(10), div5longestgtime VARCHAR(10), div5wheelsoff VARCHAR(10), div5tailnum VARCHAR(10));
 query I
-COPY ontime FROM 'data/csv/real/ontime_sample.csv';
+COPY ontime FROM '{DATA_DIR}/csv/real/ontime_sample.csv';
 ----
 9
@@ -35,7 +35,7 @@ statement ok
 CREATE TABLE ontime2 AS SELECT * FROM ontime LIMIT 0
 statement ok
-COPY ontime2 FROM 'data/csv/real/ontime_sample.csv';
+COPY ontime2 FROM '{DATA_DIR}/csv/real/ontime_sample.csv';
 query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
 (SELECT * FROM ontime EXCEPT SELECT * FROM ontime2)
diff --git a/test/sql/copy/csv/auto/test_auto_voter.test_slow b/test/sql/copy/csv/auto/test_auto_voter.test_slow
index c42f0a7549e5..ff440b2e69f5 100644
--- a/test/sql/copy/csv/auto/test_auto_voter.test_slow
+++ b/test/sql/copy/csv/auto/test_auto_voter.test_slow
@@ -7,7 +7,7 @@ PRAGMA enable_verification
 statement ok
-CREATE TABLE voters AS SELECT * FROM read_csv_auto ('data/csv/real/voter.tsv');
+CREATE TABLE voters AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/real/voter.tsv');
 query I
 SELECT COUNT(*) FROM voters;
@@ -15,7 +15,7 @@ SELECT COUNT(*) FROM voters;
 5300
 query I
-SELECT COUNT(*) FROM "data/csv/real/voter.tsv";
+SELECT COUNT(*) FROM "{DATA_DIR}/csv/real/voter.tsv";
 ----
 5300
@@ -30,7 +30,7 @@ statement ok
 PRAGMA verify_parallelism
 statement ok
-CREATE TABLE voters2 AS SELECT * FROM read_csv_auto ('data/csv/real/voter.tsv');
+CREATE TABLE voters2 AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/real/voter.tsv');
 query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
 (SELECT * FROM voters EXCEPT SELECT * FROM voters2)
diff --git a/test/sql/copy/csv/auto/test_auto_web_page.test b/test/sql/copy/csv/auto/test_auto_web_page.test
index 4fb874280e3f..7449069b0529 100644
--- a/test/sql/copy/csv/auto/test_auto_web_page.test
+++ b/test/sql/copy/csv/auto/test_auto_web_page.test
@@ -3,7 +3,7 @@
 # group: [auto]
 statement ok
-CREATE TABLE web_page AS SELECT * FROM read_csv_auto ('data/csv/real/web_page.csv');
+CREATE TABLE web_page AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/real/web_page.csv');
 query I
 SELECT COUNT(*) FROM web_page;
@@ -22,7 +22,7 @@ statement ok
 PRAGMA verify_parallelism
 statement ok
-CREATE TABLE web_page2 AS SELECT * FROM read_csv_auto ('data/csv/real/web_page.csv');
+CREATE TABLE web_page2 AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/real/web_page.csv');
 query IIIIIIIIIIIIII
 (SELECT * FROM web_page EXCEPT SELECT * FROM web_page2)
diff --git a/test/sql/copy/csv/auto/test_csv_auto.test b/test/sql/copy/csv/auto/test_csv_auto.test
index a362929b55ab..4261f8bc3097 100644
--- a/test/sql/copy/csv/auto/test_csv_auto.test
+++ b/test/sql/copy/csv/auto/test_csv_auto.test
@@ -9,7 +9,7 @@ statement ok
 PRAGMA verify_parallelism
 query II
-FROM read_csv('data/csv/repromarket.csv',
+FROM read_csv('{DATA_DIR}/csv/repromarket.csv',
 columns={
 'email': 'varchar',
 'password': 'varchar'
@@ -29,33 +29,33 @@ Vega-Inject bogus
 mirkofoto@gmail.com mirko
 query I
-FROM read_csv('data/csv/pipe_delim.csv', columns={'a': 'VARCHAR'}, auto_detect=False)
+FROM read_csv('{DATA_DIR}/csv/pipe_delim.csv', columns={'a': 'VARCHAR'}, auto_detect=False)
 ----
 one|two|three|four
 1|2|3|4
 query I
-FROM read_csv('data/csv/nullterm.csv')
+FROM read_csv('{DATA_DIR}/csv/nullterm.csv')
 ----
 \0world\0
 query I
-FROM read_csv('data/csv/nullterm.csv', quote = '"', escape = '"')
+FROM read_csv('{DATA_DIR}/csv/nullterm.csv', quote = '"', escape = '"')
 ----
 \0world\0
 query I
-FROM read_csv('data/csv/single_quote.csv', quote = '"')
+FROM read_csv('{DATA_DIR}/csv/single_quote.csv', quote = '"')
 ----
 'Doc'
 query I
-select columns FROM sniff_csv('data/csv/auto/mock_duckdb_test_data.csv', ignore_errors = true);
+select columns FROM sniff_csv('{DATA_DIR}/csv/auto/mock_duckdb_test_data.csv', ignore_errors = true);
 ----
 [{'name': id, 'type': BIGINT}, {'name': name, 'type': VARCHAR}, {'name': age, 'type': BIGINT}, {'name': sex, 'type': VARCHAR}, {'name': state, 'type': VARCHAR}]
 query IIIII
-FROM read_csv('data/csv/auto/mock_duckdb_test_data.csv', ignore_errors = true,
+FROM read_csv('{DATA_DIR}/csv/auto/mock_duckdb_test_data.csv', ignore_errors = true,
 strict_mode=true)
 ----
 1 James 30 M AL
@@ -67,18 +67,18 @@ FROM read_csv('data/csv/auto/mock_duckdb_test_data.csv', ignore_errors = true,
 9 Titus 38 M WY
 statement error
-select * from read_csv_auto('data/csv/dates.csv', auto_detect=false, delim=',', quote='"', columns={'a': 'VARCHAR'},
+select * from read_csv_auto('{DATA_DIR}/csv/dates.csv', auto_detect=false, delim=',', quote='"', columns={'a': 'VARCHAR'},
 strict_mode=true)
 ----
 Expected Number of Columns: 1 Found: 2
 query II
-select * from read_csv_auto('data/csv/dates.csv')
+select * from read_csv_auto('{DATA_DIR}/csv/dates.csv')
 ----
 919 304
 6161 2008-08-10
 query II
-select * from read_csv_auto('data/csv/from_df.csv', quote='''')
+select * from read_csv_auto('{DATA_DIR}/csv/from_df.csv', quote='''')
 ----
 'a,b,c' 45
 NULL 234
@@ -87,7 +87,7 @@ bye 2
 # CSV file with RFC-conform dialect
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/rfc_conform.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/rfc_conform.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -102,7 +102,7 @@ DROP TABLE test;
 # CSV file with RFC-conform dialect quote
 # read_csv is an alias to read_csv_auto when no extra parameters are supplied
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/rfc_conform_quote.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/rfc_conform_quote.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -116,7 +116,7 @@ DROP TABLE test;
 # CSV file with RFC-conform dialect quote/leading space of numerics
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/leading_space_numerics.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/leading_space_numerics.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -130,7 +130,7 @@ DROP TABLE test;
 # CSV file with bar delimiter
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/pipe_delim.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/pipe_delim.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -144,7 +144,7 @@ DROP TABLE test;
 # CSV file with bar delimiter and double quotes
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/pipe_delim_quote.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/pipe_delim_quote.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -158,7 +158,7 @@ DROP TABLE test;
 # CSV file with bar delimiter and double quotes and double escape
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/quote_escape.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/quote_escape.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -172,7 +172,7 @@ DROP TABLE test;
 # CSV file with bar delimiter and double quotes and backslash escape
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/backslash_escape.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/backslash_escape.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -186,7 +186,7 @@ DROP TABLE test;
 # CSV file with bar delimiter and single quotes and backslash escape
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/single_quote_backslash.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/single_quote_backslash.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -200,7 +200,7 @@ DROP TABLE test;
 # CSV file with semicolon delimiter
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/semicolon_delim.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/semicolon_delim.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -214,7 +214,7 @@ DROP TABLE test;
 # CSV file with semicolon delimiter and double quotes
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/semicolon_quote.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/semicolon_quote.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -228,7 +228,7 @@ DROP TABLE test;
 # CSV file with semicolon delimiter, double quotes and RFC escape
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/semicolon_escape.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/semicolon_escape.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -242,7 +242,7 @@ DROP TABLE test;
 # CSV file with tab delimiter
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/tab.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/tab.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -256,7 +256,7 @@ DROP TABLE test;
 # CSV file with tab delimiter and single quotes
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/tab_single_quote.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/tab_single_quote.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -276,7 +276,7 @@ DROP TABLE test;
 # CSV file with tab delimiter and single quotes without type-hint
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/tab_single_quote_varchar.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/tab_single_quote_varchar.csv');
 query ITT
 SELECT * FROM test ORDER BY column0;
@@ -297,7 +297,7 @@ DROP TABLE test;
 # CSV file with trailing empty lines
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/issue_1254.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/issue_1254.csv');
 query II
 SELECT a, b FROM test;
diff --git a/test/sql/copy/csv/auto/test_date_format_bug_linux.test b/test/sql/copy/csv/auto/test_date_format_bug_linux.test
index 453ea6ad753c..a7f8737433b9 100644
--- a/test/sql/copy/csv/auto/test_date_format_bug_linux.test
+++ b/test/sql/copy/csv/auto/test_date_format_bug_linux.test
@@ -6,7 +6,7 @@ PRAGMA enable_verification
 query I
-SELECT * FROM read_csv_auto('data/csv/auto/date_format_bug_linux.csv')
+SELECT * FROM read_csv_auto('{DATA_DIR}/csv/auto/date_format_bug_linux.csv')
 ----
 8cb123cb8
 34fd321
diff --git a/test/sql/copy/csv/auto/test_describe_order.test b/test/sql/copy/csv/auto/test_describe_order.test
index a86899c58557..4c4368616941 100644
--- a/test/sql/copy/csv/auto/test_describe_order.test
+++ b/test/sql/copy/csv/auto/test_describe_order.test
@@ -5,7 +5,7 @@ statement ok
 PRAGMA enable_verification
 statement ok
-create view v as select * from read_csv_auto('data/csv/who.csv.gz');
+create view v as select * from read_csv_auto('{DATA_DIR}/csv/who.csv.gz');
 query IIIIII
 describe v;
diff --git a/test/sql/copy/csv/auto/test_double_quoted_header.test b/test/sql/copy/csv/auto/test_double_quoted_header.test
index fbf04965c15e..13edc88a5ba3 100644
--- a/test/sql/copy/csv/auto/test_double_quoted_header.test
+++ b/test/sql/copy/csv/auto/test_double_quoted_header.test
@@ -5,13 +5,13 @@ statement ok
 PRAGMA enable_verification
 query IIIIII
-describe from 'data/csv/double_quoted_header.csv';
+describe from '{DATA_DIR}/csv/double_quoted_header.csv';
 ----
 foo "bar BIGINT YES NULL NULL NULL
 name VARCHAR YES NULL NULL NULL
 query II
-from 'data/csv/double_quoted_header.csv';
+from '{DATA_DIR}/csv/double_quoted_header.csv';
 ----
 1 rob
 2 sally
\ No newline at end of file
diff --git a/test/sql/copy/csv/auto/test_early_out.test b/test/sql/copy/csv/auto/test_early_out.test
index e447427cf0ce..77a7557cfe69 100644
--- a/test/sql/copy/csv/auto/test_early_out.test
+++ b/test/sql/copy/csv/auto/test_early_out.test
@@ -6,6 +6,6 @@ PRAGMA enable_verification
 statement error
 SELECT *
-FROM read_csv('data/csv/auto/early_out_error.csv', buffer_size = 8, maximum_line_size = 8, auto_detect = false, columns = {'a': 'integer','b': 'integer','c': 'integer'}, header = true)
+FROM read_csv('{DATA_DIR}/csv/auto/early_out_error.csv', buffer_size = 8, maximum_line_size = 8, auto_detect = false, columns = {'a': 'integer','b': 'integer','c': 'integer'}, header = true)
 ----
 Error when converting column "b". Could not convert string "\n" to 'INTEGER'
\ No newline at end of file
diff --git a/test/sql/copy/csv/auto/test_fallback_all_varchar.test_slow b/test/sql/copy/csv/auto/test_fallback_all_varchar.test_slow
index 227fdad8a376..f85357f08482 100644
--- a/test/sql/copy/csv/auto/test_fallback_all_varchar.test_slow
+++ b/test/sql/copy/csv/auto/test_fallback_all_varchar.test_slow
@@ -11,7 +11,7 @@ PRAGMA verify_parallelism
 # CSV file with irregularity in first column and default sample size
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/test_fallback.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/test_fallback.csv');
 query TTTT
 SELECT typeof(TestDoubleError), typeof(TestDouble), typeof(TestText), typeof(TestInteger) FROM test LIMIT 1
@@ -25,14 +25,14 @@ loop i 1 100
 # CSV file with irregularity in first column and small sample size
 statement error
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/test_fallback.csv', SAMPLE_SIZE=1);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/test_fallback.csv', SAMPLE_SIZE=1);
 ----
 Column TestDoubleError is being converted as type DOUBLE
 endloop
 # CSV file with irregularity in first column, small sample size and fallback activated
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/test_fallback.csv', SAMPLE_SIZE=1, ALL_VARCHAR=1);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/test_fallback.csv', SAMPLE_SIZE=1, ALL_VARCHAR=1);
 query TTTT
 SELECT typeof(TestDoubleError), typeof(TestDouble), typeof(TestText), typeof(TestInteger) FROM test LIMIT 1
diff --git a/test/sql/copy/csv/auto/test_header_completion.test b/test/sql/copy/csv/auto/test_header_completion.test
index b4812ca22dd1..ad889327f221 100644
--- a/test/sql/copy/csv/auto/test_header_completion.test
+++ b/test/sql/copy/csv/auto/test_header_completion.test
@@ -8,7 +8,7 @@ PRAGMA enable_verification
 # CSV file with one missing header
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/missing_header_col.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/missing_header_col.csv');
 query ITT
 SELECT a, column1, c FROM test ORDER BY a;
@@ -21,7 +21,7 @@ DROP TABLE test;
 # CSV file with one duplicate header
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/duplicate_header_col.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/duplicate_header_col.csv');
 query ITT
 SELECT a, b, a_1 FROM test ORDER BY a;
@@ -34,7 +34,7 @@ DROP TABLE test;
 # CSV file with one duplicate header and collision
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/duplicate_header_collision.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/duplicate_header_collision.csv');
 query ITTT
 SELECT a, b, a_1, a_1_1 FROM test ORDER BY a;
@@ -47,7 +47,7 @@ DROP TABLE test;
 # CSV file with all column names missing
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/empty_header.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/empty_header.csv');
 query ITT
 SELECT column0, column1, column2 FROM test ORDER BY column0;
@@ -60,7 +60,7 @@ DROP TABLE test;
 # CSV file with 12 columns and all but one column name missing
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/missing_many_col.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/missing_many_col.csv');
 query ITT
 SELECT a, column01, column12 FROM test;
@@ -73,7 +73,7 @@ DROP TABLE test;
 # CSV file with 12 equally called columns
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/duplicate_header_columns.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/duplicate_header_columns.csv');
 query IIIT
 SELECT a, a_8, a_9, column12 FROM test;
@@ -86,7 +86,7 @@ DROP TABLE test;
 # CSV file with 10 equally called columns, one named column12 and column 11 and 12 missing
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/test_header_mix.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/test_header_mix.csv');
 query IIIIIT
 SELECT a, a_8, a_9, column12, column11, column12_1 FROM test;
@@ -99,7 +99,7 @@ DROP TABLE test;
 # CSV file with 12 unnamed columns and check for correct naming
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/unnamed_columns.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/unnamed_columns.csv');
 query ITTIIIIIIIIIT
 SELECT column00, column01, column02, column03, column04, column05, column06, column07, column08, column09, column10, column11, column12 FROM test;
diff --git a/test/sql/copy/csv/auto/test_header_detection.test b/test/sql/copy/csv/auto/test_header_detection.test
index b5d17a6fb16d..0203c8ff3bd8 100644
--- a/test/sql/copy/csv/auto/test_header_detection.test
+++ b/test/sql/copy/csv/auto/test_header_detection.test
@@ -8,7 +8,7 @@ PRAGMA enable_verification
 # CSV file with two lines, none header
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/no_header.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/no_header.csv');
 query RTT
 SELECT column0, column1, column2 FROM test ORDER BY column0;
@@ -21,7 +21,7 @@ DROP TABLE test;
 # CSV file with two lines, one header
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/single_header.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/single_header.csv');
 query RTT
 SELECT number, text, date FROM test ORDER BY number;
@@ -33,7 +33,7 @@ DROP TABLE test;
 # CSV file with three lines, one header, one skip row
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/skip_row.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/skip_row.csv');
 query RTT
 SELECT number, text, date FROM test ORDER BY number;
@@ -45,7 +45,7 @@ DROP TABLE test;
 # CSV file with three lines, one header, two skip rows
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/multiple_skip_row.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/multiple_skip_row.csv');
 query RTT
 SELECT number, text, date FROM test ORDER BY number;
@@ -57,7 +57,7 @@ DROP TABLE test;
 # CSV file with two lines both only strings
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/varchar_multi_line.csv', header = 0);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/varchar_multi_line.csv', header = 0);
 query TTT
 SELECT * FROM test ORDER BY column0;
@@ -70,7 +70,7 @@ DROP TABLE test;
 # CSV file with one line, two columns, only strings
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/varchar_single_line.csv', header = 0);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/varchar_single_line.csv', header = 0);
 query TT
 SELECT column0, column1 FROM test ORDER BY column0;
@@ -82,7 +82,7 @@ DROP TABLE test;
 # CSV file with one line, two columns - one numeric, one string
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/mixed_single_line.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/mixed_single_line.csv');
 query IT
 SELECT column0, column1 FROM test ORDER BY column0;
@@ -94,7 +94,7 @@ DROP TABLE test;
 # CSV file with one line, one string column
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/single_value.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/single_value.csv');
 query T
 SELECT * FROM test;
@@ -105,7 +105,7 @@ DROP TABLE test;
 # CSV file with one line, one numeric column
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/single_numeric.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/single_numeric.csv');
 query I
 SELECT * FROM test;
@@ -117,7 +117,7 @@ DROP TABLE test;
 # CSV with UTF-8 BOM marker that could mess up the header line parsing
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto('data/csv/auto/utf8bom.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/auto/utf8bom.csv');
 query II
 SELECT * FROM test;
@@ -141,16 +141,16 @@ statement ok
 INSERT INTO my_varchars VALUES ('Hello', 'Beautiful', 'World');
 statement ok
-COPY my_varchars TO '__TEST_DIR__/varchar_header.csv' (HEADER 1);
+COPY my_varchars TO '{TEMP_DIR}/varchar_header.csv' (HEADER 1);
 statement ok
-COPY my_varchars TO '__TEST_DIR__/varchar_no_header.csv' (HEADER 0);
+COPY my_varchars TO '{TEMP_DIR}/varchar_no_header.csv' (HEADER 0);
 statement ok
-COPY my_varchars FROM '__TEST_DIR__/varchar_header.csv' ;
+COPY my_varchars FROM '{TEMP_DIR}/varchar_header.csv' ;
 statement ok
-COPY my_varchars FROM '__TEST_DIR__/varchar_no_header.csv' (HEADER 0);
+COPY my_varchars FROM '{TEMP_DIR}/varchar_no_header.csv' (HEADER 0);
 query III
 FROM my_varchars ;
@@ -160,10 +160,10 @@ Hello Beautiful World
 Hello Beautiful World
 statement ok
-COPY my_varchars TO '__TEST_DIR__/big_varchar.csv';
+COPY my_varchars TO '{TEMP_DIR}/big_varchar.csv';
 statement ok
-COPY my_varchars FROM '__TEST_DIR__/big_varchar.csv';
+COPY my_varchars FROM '{TEMP_DIR}/big_varchar.csv';
 query III
 FROM my_varchars;
diff --git a/test/sql/copy/csv/auto/test_normalize_names.test b/test/sql/copy/csv/auto/test_normalize_names.test
index 25e73786a720..658e17a05268 100644
--- a/test/sql/copy/csv/auto/test_normalize_names.test
+++ b/test/sql/copy/csv/auto/test_normalize_names.test
@@ -6,13 +6,13 @@ statement ok
 PRAGMA enable_verification
 query I
-select columns from sniff_csv('data/csv/test_commit_rollback.csv', normalize_names = true)
+select columns from sniff_csv('{DATA_DIR}/csv/test_commit_rollback.csv', normalize_names = true)
 ----
 [{'name': _commit, 'type': BIGINT}, {'name': _rollback, 'type': BIGINT}, {'name': _abort, 'type': BIGINT}]
 # CSV file with uppercase header
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/normalize_names_1.csv', normalize_names=TRUE);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/normalize_names_1.csv', normalize_names=TRUE);
 query ITT
 SELECT a, b, c FROM test ORDER BY a;
@@ -25,7 +25,7 @@ DROP TABLE test;
 # CSV file with uppercase header and normalize names off
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/normalize_names_1.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/normalize_names_1.csv');
 query ITT
 SELECT A, B, C FROM test ORDER BY a;
@@ -38,14 +38,14 @@ DROP TABLE test;
 query I
-select columns from sniff_csv('data/csv/auto/normalize_names_2.csv', normalize_names = true)
+select columns from sniff_csv('{DATA_DIR}/csv/auto/normalize_names_2.csv', normalize_names = true)
 ----
 [{'name': _select, 'type': BIGINT}, {'name': _insert, 'type': VARCHAR}, {'name': _join, 'type': VARCHAR}]
 # CSV file with keywords in header
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/normalize_names_2.csv', normalize_names=TRUE);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/normalize_names_2.csv', normalize_names=TRUE);
 query ITT
 SELECT _select, _insert, _join FROM test ORDER BY _select;
@@ -58,7 +58,7 @@ DROP TABLE test;
 # CSV file with names starting with numerics
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/normalize_names_3.csv', normalize_names=TRUE);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/normalize_names_3.csv', normalize_names=TRUE);
 query ITT
 SELECT _0_a, _1_b, _9_c FROM test ORDER BY _0_a;
@@ -71,7 +71,7 @@ DROP TABLE test;
 # CSV file with accents and UTF8 characters
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/normalize_names_4.csv', normalize_names=TRUE);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/normalize_names_4.csv', normalize_names=TRUE);
 query ITT
 SELECT allo, teost, _ FROM test ORDER BY allo;
@@ -84,7 +84,7 @@ DROP TABLE test;
 # CSV file with accents and UTF8 characters
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/normalize_names_5.csv', normalize_names=TRUE);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/normalize_names_5.csv', normalize_names=TRUE);
 query ITT
 SELECT a, b, c FROM test ORDER BY a;
@@ -97,7 +97,7 @@ DROP TABLE test;
 # CSV file with superscripts and UTF8 characters
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/normalize_names_6.csv', normalize_names=TRUE);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/normalize_names_6.csv', normalize_names=TRUE);
 query ITT
 SELECT aax, hello_world, qty_m2 FROM test ORDER BY aax;
@@ -109,6 +109,6 @@ statement ok
 DROP TABLE test;
 query I
-select columns from sniff_csv('data/csv/normalize.csv', normalize_names = true)
+select columns from sniff_csv('{DATA_DIR}/csv/normalize.csv', normalize_names = true)
 ----
 [{'name': _name, 'type': VARCHAR}, {'name': _text, 'type': VARCHAR}]
diff --git a/test/sql/copy/csv/auto/test_sample_size.test b/test/sql/copy/csv/auto/test_sample_size.test
index c81e4e062295..8604ecb04a51 100644
--- a/test/sql/copy/csv/auto/test_sample_size.test
+++ b/test/sql/copy/csv/auto/test_sample_size.test
@@ -8,7 +8,7 @@ PRAGMA enable_verification
 # CSV file with very sparse column
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/issue_811.csv', SAMPLE_SIZE=1);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/issue_811.csv', SAMPLE_SIZE=1);
 query IIII
 SELECT typeof(TestInteger), typeof(TestDouble), typeof(TestDate), typeof(TestText) FROM test LIMIT 1
@@ -20,7 +20,7 @@ DROP TABLE test
 # CSV file with very sparse column
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/issue_811.csv', SAMPLE_SIZE=-1);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/issue_811.csv', SAMPLE_SIZE=-1);
 query IIII
@@ -33,7 +33,7 @@ DROP TABLE test
 # CSV file with very sparse column and sample size 500
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/issue_811.csv', SAMPLE_SIZE = -1);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/issue_811.csv', SAMPLE_SIZE = -1);
 query IRTT
 SELECT TestInteger, TestDouble, TestDate, TestText FROM test WHERE TestDouble is not NULL ;
@@ -50,7 +50,7 @@ drop table test;
 # CSV file with very sparse column and number of samples 50
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/issue_811.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/issue_811.csv');
 query IRTT
 SELECT TestInteger, TestDouble, TestDate, TestText FROM test WHERE TestDouble is not NULL ;
@@ -67,7 +67,7 @@ drop table test;
 # CSV file with very sparse column with sample size 200 and number of samples 20
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/issue_811.csv', SAMPLE_SIZE = -1);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/issue_811.csv', SAMPLE_SIZE = -1);
 query IRTT
 SELECT TestInteger, TestDouble, TestDate, TestText FROM test WHERE TestDouble is not NULL ;
@@ -88,7 +88,7 @@ CREATE TABLE test (TestInteger integer, TestDouble double, TestDate varchar, Tes
 # CSV file with very sparse column, automatically aligns column types, small sample size
 statement ok
-COPY test FROM 'data/csv/auto/issue_811.csv' (AUTO_DETECT TRUE);
+COPY test FROM '{DATA_DIR}/csv/auto/issue_811.csv' (AUTO_DETECT TRUE);
 statement ok
 drop table test;
@@ -99,7 +99,7 @@ CREATE TABLE test (TestInteger integer, TestDouble double, TestDate varchar, Tes
 # CSV file with very sparse column, automatically aligns column types, small sample size
 statement ok
-COPY test FROM 'data/csv/auto/issue_811.csv' (SAMPLE_SIZE -1, AUTO_DETECT TRUE);
+COPY test FROM '{DATA_DIR}/csv/auto/issue_811.csv' (SAMPLE_SIZE -1, AUTO_DETECT TRUE);
 statement ok
 drop table test;
diff --git a/test/sql/copy/csv/auto/test_sniffer_blob.test b/test/sql/copy/csv/auto/test_sniffer_blob.test
index 56444d261a75..f5f3dde42a99 100644
--- a/test/sql/copy/csv/auto/test_sniffer_blob.test
+++ b/test/sql/copy/csv/auto/test_sniffer_blob.test
@@ -8,19 +8,19 @@ PRAGMA enable_verification
 # This is the only way to try to trick the sniffer into checking blobs and it is not valid
 statement error
-select count(*) from read_csv('data/csv/test/blob.csv',auto_type_candidates=['blob'])
+select count(*) from read_csv('{DATA_DIR}/csv/test/blob.csv',auto_type_candidates=['blob'])
 ----
 Auto Type Candidate of type BLOB is not accepted as a valid input
 # All this is cool and should work.
 query I
-select count(*) from read_csv('data/csv/test/blob.csv',types=['blob'], header = 0)
+select count(*) from read_csv('{DATA_DIR}/csv/test/blob.csv',types=['blob'], header = 0)
 ----
 1
 query I
-select count(*) from read_csv('data/csv/test/blob.csv',columns={'col1': 'BLOB'})
+select count(*) from read_csv('{DATA_DIR}/csv/test/blob.csv',columns={'col1': 'BLOB'})
 ----
 1
@@ -28,9 +28,9 @@ statement ok
 create table t ( a blob)
 statement ok
-COPY t FROM 'data/csv/test/blob.csv';
+COPY t FROM '{DATA_DIR}/csv/test/blob.csv';
 query I
-select count(*) from read_csv('data/csv/test/blob.csv',columns={'col1': 'BLOB'})
+select count(*) from read_csv('{DATA_DIR}/csv/test/blob.csv',columns={'col1': 'BLOB'})
 ----
 1
diff --git a/test/sql/copy/csv/auto/test_sniffer_empty_start_value.test b/test/sql/copy/csv/auto/test_sniffer_empty_start_value.test
index 3825a51908cb..684b8f57ef81 100644
--- a/test/sql/copy/csv/auto/test_sniffer_empty_start_value.test
+++ b/test/sql/copy/csv/auto/test_sniffer_empty_start_value.test
@@ -6,7 +6,7 @@ statement ok
 PRAGMA enable_verification
 query III
-from read_csv('data/csv/empty_space_start_value.csv')
+from read_csv('{DATA_DIR}/csv/empty_space_start_value.csv')
 ----
 1968 86 Greetings
 1970 17 Bloody Mama
diff --git a/test/sql/copy/csv/auto/test_timings_csv.test b/test/sql/copy/csv/auto/test_timings_csv.test
index bd14b406d728..12c34ec49b9d 100644
--- a/test/sql/copy/csv/auto/test_timings_csv.test
+++ b/test/sql/copy/csv/auto/test_timings_csv.test
@@ -13,6 +13,6 @@ statement ok
 CREATE OR REPLACE TABLE timings(tool string, sf float, day string, batch_type string, q string, parameters string, time float);
 query I
-COPY timings FROM 'data/csv/timings.csv' (HEADER, DELIMITER '|')
+COPY timings FROM '{DATA_DIR}/csv/timings.csv' (HEADER, DELIMITER '|')
 ----
 1095
diff --git a/test/sql/copy/csv/auto/test_type_detection.test b/test/sql/copy/csv/auto/test_type_detection.test
index 4a6098a3a87c..4367e1944142 100644
--- a/test/sql/copy/csv/auto/test_type_detection.test
+++ b/test/sql/copy/csv/auto/test_type_detection.test
@@ -8,7 +8,7 @@ PRAGMA enable_verification
 # a CSV file with many strings
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/large_mixed_data.csv', SAMPLE_SIZE=-1);
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/large_mixed_data.csv', SAMPLE_SIZE=-1);
 query ITR
 SELECT linenr, mixed_string, mixed_double FROM test LIMIT 3;
@@ -39,7 +39,7 @@ DROP TABLE test;
 # a CSV file containing time, date and timestamp columns
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/time_date_timestamp.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/time_date_timestamp.csv');
 query ITTTT
 SELECT a, b, t, d, ts FROM test ORDER BY a;
@@ -59,7 +59,7 @@ DROP TABLE test;
 # a CSV file containing time and date columns with leading/trailing chars
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/time_date_timestamp_trailing.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/time_date_timestamp_trailing.csv');
 query ITTTTT
 SELECT a, b, t, tf, d, df FROM test ORDER BY a;
@@ -78,7 +78,7 @@ DROP TABLE test;
 # a CSV file containing time, date and timestamp columns in mm-dd-yyyy (12 hour)
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/time_date_timestamp_mm-dd-yyyy.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/time_date_timestamp_mm-dd-yyyy.csv');
 query ITTTT
 SELECT a, b, t, d, ts FROM test ORDER BY a;
@@ -97,7 +97,7 @@ DROP TABLE test;
 # a CSV file containing time, date and timestamp columns in mm-dd-yy format (12 hour)
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/time_date_timestamp_mm-dd-yy.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/time_date_timestamp_mm-dd-yy.csv');
 query ITTTT
 SELECT a, b, t, d, ts FROM test ORDER BY a;
@@ -116,7 +116,7 @@ DROP TABLE test;
 # a CSV file containing time, date and timestamp columns in dd-mm-yyyy format
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/time_date_timestamp_dd-mm-yyyy.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/time_date_timestamp_dd-mm-yyyy.csv');
 query ITTTT
 SELECT a, b, t, d, ts FROM test ORDER BY a;
@@ -135,7 +135,7 @@ DROP TABLE test;
 # a CSV file containing time, date and timestamp columns in dd-mm-yy format
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/time_date_timestamp_dd-mm-yy.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/time_date_timestamp_dd-mm-yy.csv');
 query ITTTT
 SELECT a, b, t, d, ts FROM test ORDER BY a;
@@ -154,7 +154,7 @@ DROP TABLE test;
 # a CSV file containing time, date and timestamp columns in yyyy.mm.dd format
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/time_date_timestamp_yyyy.mm.dd.csv');
 query ITTTT
 SELECT a, b, t, d, ts FROM test ORDER BY a;
@@ -174,7 +174,7 @@ DROP TABLE test;
 # a CSV file containing time, date and timestamp columns in yy.mm.dd format
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/time_date_timestamp_yy.mm.dd.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/time_date_timestamp_yy.mm.dd.csv');
 query ITTTT
 SELECT a, b, t, d, ts FROM test ORDER BY a;
@@ -193,7 +193,7 @@ DROP TABLE test;
 # a CSV file containing integer bool value
 statement ok
-CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/auto/int_bol.csv');
+CREATE TABLE test AS SELECT * FROM read_csv_auto ('{DATA_DIR}/csv/auto/int_bol.csv');
 query I
 SELECT i FROM test ORDER BY i;
diff --git a/test/sql/copy/csv/bug_10283.test_slow b/test/sql/copy/csv/bug_10283.test_slow
index f4f985584bcd..6f830620f118 100644
--- a/test/sql/copy/csv/bug_10283.test_slow
+++ b/test/sql/copy/csv/bug_10283.test_slow
@@ -6,7 +6,7 @@ statement ok
 PRAGMA enable_verification
 query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
-from read_csv("data/csv/bug_10283.csv",
+from read_csv("{DATA_DIR}/csv/bug_10283.csv",
 Skip=1,header =0,all_varchar=1,sample_size=-1,
 columns={
 'I': 'VARCHAR','UNIT': 'VARCHAR','XX': 'VARCHAR','VERSION': 'VARCHAR','SETTLEMENTDATE': 'VARCHAR','RUNNO': 'VARCHAR',
@@ -29,298 +29,298 @@ columns={
 }, filename =1,null_padding = true,ignore_errors=1,auto_detect=false,
 strict_mode=True)
 ----
-I DISPATCH CASESOLUTION 1 SETTLEMENTDATE RUNNO INTERVENTION CASESUBTYPE SOLUTIONSTATUS SPDVERSION NONPHYSICALLOSSES TOTALOBJECTIVE TOTALAREAGENVIOLATION TOTALINTERCONNECTORVIOLATION TOTALGENERICVIOLATION TOTALRAMPRATEVIOLATION TOTALUNITMWCAPACITYVIOLATION TOTAL5MINVIOLATION TOTALREGVIOLATION TOTAL6SECVIOLATION TOTAL60SECVIOLATION TOTALASPROFILEVIOLATION TOTALFASTSTARTVIOLATION TOTALENERGYOFFERVIOLATION LASTCHANGED NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 04:05:00 1 0 NULL 0 NULL 0 -18891916.3260 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 04:10:00 1 0 NULL 0 NULL 0 -18991572.1050 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 04:15:00 1 0 NULL 0 NULL 0 -18873654.0430 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 04:20:00 1 0 NULL 0 NULL 0 -18814533.9560 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 04:25:00 1 0 NULL 0 NULL 0 -18955411.5810 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 04:30:00 1 0 NULL 1 NULL 0 -16453600.9790 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 04:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 04:35:00 1 0 NULL 0 NULL 0 -18918229.6050 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 04:40:00 1 0 NULL 0 NULL 0 -18903552.1320 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 04:45:00 1 0 NULL 0 NULL 0 -18956195.7840 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 04:50:00 1 0 NULL 0 NULL 0 -19068752.4310 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 04:55:00 1 0 NULL 1 NULL 0 -16547387.8360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 04:50:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:00:00 1 0 NULL 1 NULL 0 -14134931.5130 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 04:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:05:00 1 0 NULL 0 NULL 0 -19099921.3580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:00:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:10:00 1 0 NULL 0 NULL 0 -18967112.4580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:15:00 1 0 NULL 0 NULL 0 -19075483.1820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:20:00 1 0 NULL 0 NULL 0 -19184913.3440 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:25:00 1 0 NULL 1 NULL 0 -14343450.8660 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 05:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:30:00 1 0 NULL 0 NULL 0 -19331879.1490 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:35:00 1 0 NULL 0 NULL 0 -19582540.7460 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:40:00 1 0 NULL 0 NULL 0 -19995441.8250 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:45:00 1 0 NULL 0 NULL 0 -20392189.2680 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:50:00 1 0 NULL 0 NULL 0 -20620333.3450 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 05:55:00 1 0 NULL 0 NULL 0 -21362524.3070 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:00:00 1 0 NULL 0 NULL 0 -21489793.8170 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:05:00 1 0 NULL 1 NULL 0 2094013133.35 0 0 0 0 86.091 NULL NULL NULL NULL 0 0 0 2023/11/17 06:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:10:00 1 0 NULL 1 NULL 0 1338468636.7650 0 0 0 0 55.418 NULL NULL NULL NULL 0 0 0 2023/11/17 06:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:15:00 1 0 NULL 1 NULL 0 548289035.8710 0 0 0 0 23.281 NULL NULL NULL NULL 0 0 0 2023/11/17 06:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:20:00 1 0 NULL 1 NULL 0 2773061028.0780 0 0 0 0 113.806 NULL NULL NULL NULL 0 0 0 2023/11/17 06:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:25:00 1 0 NULL 1 NULL 0 1505021017.9440 0 0 0 0 62.326 NULL NULL NULL NULL 0 0 0 2023/11/17 06:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:30:00 1 0 NULL 1 NULL 0 490360523.2630 0 0 0 0 21.085 NULL NULL NULL NULL 0 0 0 2023/11/17 06:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:35:00 1 0 NULL 1 NULL 0 2187250227.8070 0 0 0 0 90.128 NULL NULL NULL NULL 0 0 0 2023/11/17 06:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:40:00 1 0 NULL 1 NULL 0 2664659481.8670 0 0 8.851 0 91.522 NULL NULL NULL NULL 0 0 0 2023/11/17 06:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:45:00 1 0 NULL 1 NULL 0 1062603019.6170 0 0 0 0 44.402 NULL NULL NULL NULL 0 0 0 2023/11/17 06:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:50:00 1 0 NULL 1 NULL 0 586841408.2250 0 0 0 0 25.053 NULL NULL NULL NULL 0 0 0 2023/11/17 06:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 06:55:00 1 0 NULL 0 NULL 0 -28726654.8030 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 06:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:00:00 1 0 NULL 1 NULL 0 1485033818.2130 0 0 0 0 61.621 NULL NULL NULL NULL 0 0 0 2023/11/17 06:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:05:00 1 0 NULL 1 NULL 0 1216625476.3170 0 0 0 0 50.703 NULL NULL NULL NULL 0 0 0 2023/11/17 07:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:10:00 1 0 NULL 1 NULL 0 1305259053.6630 0 0 0 0 54.355 NULL NULL NULL NULL 0 0 0 2023/11/17 07:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:15:00 1 0 NULL 1 NULL 0 591119444.4970 0 0 0 0 25.29 NULL NULL NULL NULL 0 0 0 2023/11/17 07:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:20:00 1 0 NULL 1 NULL 0 742149171.6520 0 0 0 0.416 30.151 NULL NULL NULL NULL 0 0 0 2023/11/17 07:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:25:00 1 0 NULL 1 NULL 0 766417739.3210 0 0 0 0.416 31.141 NULL NULL NULL NULL 0 0 0 2023/11/17 07:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:30:00 1 0 NULL 1 NULL 0 352861875.1030 0 0 0 0.416 14.315 NULL NULL NULL NULL 0 0 0 2023/11/17 07:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:35:00 1 0 NULL 1 NULL 0 8817834722.1480 0 0 0 0.416 358.351 NULL NULL NULL NULL 0 0 0 2023/11/17 07:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:40:00 1 0 NULL 1 NULL 0 2359869476.2250 0 0 0 0.416 95.668 NULL NULL NULL NULL 0 0 0 2023/11/17 07:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:45:00 1 0 NULL 1 NULL 0 1365028867.05 0 0 0 0.416 55.296 NULL NULL NULL NULL 0 0 0 2023/11/17 07:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:50:00 1 0 NULL 1 NULL 0 15272157.7030 0 0 0 0.416 0.51 NULL NULL NULL NULL 0 0 0 2023/11/17 07:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv
-D DISPATCH CASESOLUTION 1 2023/11/17 07:55:00 1 0 NULL 1 NULL 0 5022568.40 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 07:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:00:00 1 0 NULL 1 NULL 0 2071536.4830 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 07:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:05:00 1 0 NULL 1 NULL 0 5881035925.2910 0 0 0 0.416 239.213 NULL NULL NULL NULL 0 0 0 2023/11/17 08:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:10:00 1 0 NULL 1 NULL 0 4892605710.2240 0 0 0 0.416 198.989 NULL NULL NULL NULL 0 0 0 2023/11/17 08:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:15:00 1 0 NULL 1 NULL 0 3819130532.2320 0 0 0 0.416 155.302 NULL NULL NULL NULL 0 0 0 2023/11/17 08:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:20:00 1 0 NULL 1 NULL 0 2677901325.5920 0 0 0 0.416 108.846 NULL NULL NULL NULL 0 0 0 2023/11/17 08:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:25:00 1 0 NULL 1 NULL 0 1509812889.8560 0 0 0 0.416 61.311 NULL NULL NULL NULL 0 0 0 2023/11/17 08:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:30:00 1 0 NULL 1 NULL 0 168787982.9490 0 0 0 0.416 6.736 NULL NULL NULL NULL 0 0 0 2023/11/17 08:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:35:00 1 0 NULL 1 NULL 0 7564605.6680 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 08:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:40:00 1 0 NULL 1 NULL 0 3536720.6290 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:45:00 1 0 NULL 1 NULL 0 3379078.5810 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:50:00 1 0 NULL 1 NULL 0 123950548.9330 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:55:00 1 0 NULL 1 NULL 0 122839634.4540 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 
08:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:00:00 1 0 NULL 1 NULL 0 122216116.48 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:05:00 1 0 NULL 1 NULL 0 3424718.80 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:10:00 1 0 NULL 1 NULL 0 3271843.5390 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:15:00 1 0 NULL 1 NULL 0 3257726.6290 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:10:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:20:00 1 0 NULL 1 NULL 0 3586935.9820 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:25:00 1 0 NULL 1 NULL 0 5994413.4070 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:20:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:30:00 1 0 NULL 1 NULL 0 3610284.7060 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:25:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:35:00 1 0 NULL 1 NULL 0 4456441.6050 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:30:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:40:00 1 0 NULL 1 NULL 0 18731332.1660 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 09:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:45:00 1 0 NULL 1 NULL 0 6422848.2190 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:50:00 1 0 NULL 1 NULL 0 6444921.5360 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:55:00 1 0 NULL 1 NULL 
0 6859042.8620 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:00:00 1 0 NULL 1 NULL 0 5911282.9530 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/17 09:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:05:00 1 0 NULL 1 NULL 0 5201965.0710 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:10:00 1 0 NULL 1 NULL 0 4376118.4030 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:15:00 1 0 NULL 1 NULL 0 4013503.9750 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:20:00 1 0 NULL 1 NULL 0 6348990.8130 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 10:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:25:00 1 0 NULL 1 NULL 0 13747527.9270 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 10:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:30:00 1 0 NULL 1 NULL 0 6571442.6830 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 10:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:35:00 1 0 NULL 1 NULL 0 14211057.60 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 10:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:40:00 1 0 NULL 1 NULL 0 7504324.2550 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 10:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:45:00 1 0 NULL 1 NULL 0 8528550.0170 0 0 0 0.416 0.16 NULL NULL NULL NULL 0 0 0 2023/11/17 10:40:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:50:00 1 0 NULL 1 NULL 0 4225305.82 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:55:00 1 0 NULL 1 NULL 0 21485872.2540 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/17 10:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:00:00 1 0 NULL 1 NULL 0 4226007.7930 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:05:00 1 0 NULL 1 NULL 0 4172215.6160 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:10:00 1 0 NULL 1 NULL 0 6683973.9840 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 11:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:15:00 1 0 NULL 1 NULL 0 3830504.4820 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:20:00 1 0 NULL 1 NULL 0 11791856.5180 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 11:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:25:00 1 0 NULL 1 NULL 0 6975406.9030 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 11:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:30:00 1 0 NULL 1 NULL 0 4534686.4890 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:35:00 1 0 NULL 1 NULL 0 4487944.2080 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:30:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:40:00 1 0 NULL 1 NULL 0 4280498.3490 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:45:00 1 0 NULL 1 NULL 0 4225721.8810 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:40:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:50:00 1 0 NULL 1 NULL 0 4102987.3650 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:55:00 1 0 NULL 1 NULL 0 22144271.3020 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 11:50:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:00:00 1 0 NULL 1 NULL 0 9140815.3220 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:05:00 1 0 NULL 1 NULL 0 9116493.9280 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:10:00 1 0 NULL 1 NULL 0 10003655.0370 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:05:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:15:00 1 0 NULL 1 NULL 0 18079517.01 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:20:00 1 0 NULL 1 NULL 0 27406117.0570 0 0 0 0.416 0.91 NULL NULL NULL NULL 0 0 0 2023/11/17 12:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:25:00 1 0 NULL 1 NULL 0 4320521.9670 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:20:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:30:00 1 0 NULL 1 NULL 0 2476488974.4520 0 0 0 0.416 100.343 NULL NULL NULL NULL 0 0 0 2023/11/17 12:25:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:35:00 1 0 NULL 1 NULL 0 1244271669.4330 0 0 0 0.416 50.318 NULL NULL NULL NULL 0 0 0 2023/11/17 12:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:40:00 1 0 NULL 1 NULL 0 27240398.8090 0 0 0 0.416 0.912 NULL NULL NULL NULL 0 0 0 2023/11/17 12:35:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:45:00 1 0 NULL 1 NULL 0 165936351.37 0 0 6.3 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 12:40:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:50:00 1 0 NULL 1 NULL 0 165859462.5740 0 0 5.5 0.416 0.92 NULL NULL NULL 
NULL 0 0 0 2023/11/17 12:45:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:55:00 1 0 NULL 1 NULL 0 8521883.3220 0 0 0 0.416 0.16 NULL NULL NULL NULL 0 0 0 2023/11/17 12:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:00:00 1 0 NULL 1 NULL 0 7561068.9480 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 12:55:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:05:00 1 0 NULL 1 NULL 0 4643264.7460 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/17 13:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:10:00 1 0 NULL 1 NULL 0 7234913.6530 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:15:00 1 0 NULL 1 NULL 0 7173770.6820 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:10:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:20:00 1 0 NULL 1 NULL 0 7113525.2630 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:15:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:25:00 1 0 NULL 1 NULL 0 4677430.2130 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:20:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:30:00 1 0 NULL 1 NULL 0 7137757.2140 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:25:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:35:00 1 0 NULL 1 NULL 0 3968739.0110 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:40:00 1 0 NULL 1 NULL 0 6180673.3160 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:45:00 1 0 NULL 1 NULL 0 11551903.7220 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 13:40:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 
2023/11/17 13:50:00 1 0 NULL 1 NULL 0 3872835.5480 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:55:00 1 0 NULL 1 NULL 0 17473325.51 0 0 0 0.416 0.56 NULL NULL NULL NULL 0 0 0 2023/11/17 13:50:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:00:00 1 0 NULL 1 NULL 0 3750810.7370 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/17 13:55:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:05:00 1 0 NULL 1 NULL 0 13604150.2940 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 14:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:10:00 1 0 NULL 1 NULL 0 527721659.4740 0 0 0 0.416 21.32 NULL NULL NULL NULL 0 0 0 2023/11/17 14:05:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:15:00 1 0 NULL 1 NULL 0 6294109.5750 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 14:10:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:20:00 1 0 NULL 1 NULL 0 21318486.3070 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/17 14:15:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:25:00 1 0 NULL 1 NULL 0 6046850.4370 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 14:20:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:30:00 1 0 NULL 1 NULL 0 3612256.4120 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:25:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:35:00 1 0 NULL 1 NULL 0 5906741.4970 0 0 0 0.416 0.21 NULL NULL NULL NULL 0 0 0 2023/11/17 14:30:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:40:00 1 0 NULL 1 NULL 0 72617.3270 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:45:00 1 0 NULL 1 NULL 0 272811.3590 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:40:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:50:00 1 0 NULL 1 NULL 0 548861020.6880 0 0 7.337 0.416 7.337 NULL NULL NULL NULL 0 0 0 2023/11/17 14:45:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:55:00 1 0 NULL 1 NULL 0 14953704.1040 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 14:50:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:00:00 1 0 NULL 1 NULL 0 76782.3890 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:05:00 1 0 NULL 1 NULL 0 1829348191.2050 0 0 72.095 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 15:00:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:10:00 1 0 NULL 1 NULL 0 1410714484.4390 0 0 55.817 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:05:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:15:00 1 0 NULL 1 NULL 0 1070070453.6940 0 0 42.332 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:20:00 1 0 NULL 1 NULL 0 741446570.9290 0 0 29.271 0.416 0.16 NULL NULL NULL NULL 0 0 0 2023/11/17 15:15:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:25:00 1 0 NULL 1 NULL 0 604683647.8940 0 0 14.659 2.808 2.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:20:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:30:00 1 0 NULL 1 NULL 0 182919191.4570 0 0 0 2.808 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:25:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:35:00 1 0 NULL 1 NULL 0 351791624.4190 0 0 0 5.002 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:40:00 1 0 NULL 0 NULL 0 -32479057.79 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:35:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:45:00 1 0 NULL 1 NULL 0 -21112431.6520 0 0 0 0 0.46 NULL NULL NULL NULL 0 0 0 
2023/11/17 15:40:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:50:00 1 0 NULL 1 NULL 0 -29981122.65 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:45:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:55:00 1 0 NULL 1 NULL 0 -30107374.6380 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:50:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:00:00 1 0 NULL 0 NULL 0 -32339890.8670 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:05:00 1 0 NULL 1 NULL 0 80864316.1390 0 0 0 0 4.65 NULL NULL NULL NULL 0 0 0 2023/11/17 16:00:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:10:00 1 0 NULL 0 NULL 0 -34074408.4470 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:05:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:15:00 1 0 NULL 0 NULL 0 -35444084.7180 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:20:00 1 0 NULL 1 NULL 0 -30480486.3150 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 16:15:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:25:00 1 0 NULL 1 NULL 0 -33015540.6270 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:20:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:30:00 1 0 NULL 1 NULL 0 -24879655.5480 0 0 0 0 0.46 NULL NULL NULL NULL 0 0 0 2023/11/17 16:25:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:35:00 1 0 NULL 0 NULL 0 -41333053.0260 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:40:00 1 0 NULL 0 NULL 0 -41329313.6780 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:45:00 1 0 NULL 0 NULL 0 
-41833547.9990 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:40:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:50:00 1 0 NULL 0 NULL 0 -42551070.1150 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:45:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:55:00 1 0 NULL 1 NULL 0 -40165172.80 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:50:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:00:00 1 0 NULL 1 NULL 0 -43463696.7360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:05:00 1 0 NULL 1 NULL 0 402372864.7860 0 0 0 0 18.24 NULL NULL NULL NULL 0 0 0 2023/11/17 17:00:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:10:00 1 0 NULL 1 NULL 0 179581774.6110 0 0 0 0 9.16 NULL NULL NULL NULL 0 0 0 2023/11/17 17:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:15:00 1 0 NULL 0 NULL 0 -44774717.5690 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:20:00 1 0 NULL 0 NULL 0 -44827330.6740 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:25:00 1 0 NULL 0 NULL 0 -44178924.7790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:30:00 1 0 NULL 0 NULL 0 -43916506.8830 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:35:00 1 0 NULL 0 NULL 0 -41189433.84 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:40:00 1 0 NULL 0 NULL 0 -40402133.1480 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 
1 2023/11/17 17:45:00 1 0 NULL 1 NULL 0 -34454714.9820 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 17:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:50:00 1 0 NULL 1 NULL 0 -33579028.50 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 17:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:55:00 1 0 NULL 1 NULL 0 -36094950.5570 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 17:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:00:00 1 0 NULL 1 NULL 0 -36433457.0950 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 17:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:05:00 1 0 NULL 1 NULL 0 -34430611.6970 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:10:00 1 0 NULL 0 NULL 0 -36891093.4820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:15:00 1 0 NULL 0 NULL 0 -37374801.1820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:20:00 1 0 NULL 1 NULL 0 -34255085.9680 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:25:00 1 0 NULL 1 NULL 0 -33215504.3040 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:30:00 1 0 NULL 0 NULL 0 -35196263.3820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:35:00 1 0 NULL 1 NULL 0 -32334724.6370 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:40:00 1 0 NULL 0 NULL 0 -34191415.0880 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:45:00 1 0 NULL 1 NULL 0 -31336602.66 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:50:00 1 0 NULL 0 NULL 0 -34153952.3790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:55:00 1 0 NULL 0 NULL 0 -34067481.2860 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:00:00 1 0 NULL 0 NULL 0 -34161469.6580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:05:00 1 0 NULL 0 NULL 0 -25740682.46 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 19:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:10:00 1 0 NULL 1 NULL 0 888159560.5950 0 0 0 0 37.27 NULL NULL NULL NULL 0 0 0 2023/11/17 19:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:15:00 1 0 NULL 1 NULL 0 883712099.9530 0 0 0 0 37.23 NULL NULL NULL NULL 0 0 0 2023/11/17 19:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:20:00 1 0 NULL 1 NULL 0 881329177.0250 0 0 0 0 37.22 NULL NULL NULL NULL 0 0 0 2023/11/17 19:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:25:00 1 0 NULL 1 NULL 0 899723467.1060 0 0 0 0 37.98 NULL NULL NULL NULL 0 0 0 2023/11/17 19:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:30:00 1 0 NULL 1 NULL 0 872181619.7320 0 0 0 0 36.87 NULL NULL NULL NULL 0 0 0 2023/11/17 19:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:35:00 1 0 NULL 1 NULL 0 6225595163.4330 0 0 0 0 254.68 NULL NULL NULL NULL 0 0 0 2023/11/17 19:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:40:00 1 0 NULL 1 NULL 0 4900041508.0970 0 0 0 0 200.82 NULL NULL NULL NULL 0 0 0 2023/11/17 19:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:45:00 1 0 NULL 1 NULL 0 2374895739.2760 0 0 0 0 98 NULL NULL NULL NULL 0 0 0 2023/11/17 19:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:50:00 1 0 NULL 1 NULL 0 1051496257.6480 0 0 0 0 44.1 NULL NULL NULL NULL 0 0 0 2023/11/17 19:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:55:00 1 0 NULL 0 NULL 0 -32267107.2450 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 19:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:00:00 1 0 NULL 1 NULL 0 -30322764.1280 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 19:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:05:00 1 0 NULL 1 NULL 0 4027187861.5690 0 0 0 0 165.964 NULL NULL NULL NULL 0 0 0 2023/11/17 20:00:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:10:00 1 0 NULL 1 NULL 0 3623567899.1380 0 0 0 0 149.595 NULL NULL NULL NULL 0 0 0 2023/11/17 20:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:15:00 1 0 NULL 1 NULL 0 2841880378.5970 0 0 0 0 117.613 NULL NULL NULL NULL 0 0 0 2023/11/17 20:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:20:00 1 0 NULL 1 NULL 0 2244933416.7980 0 0 0 0 93.477 NULL NULL NULL NULL 0 0 0 2023/11/17 20:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:25:00 1 0 NULL 1 NULL 0 2863030435.6560 0 0 0 0 118.77 NULL NULL NULL NULL 0 0 0 2023/11/17 20:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:30:00 1 0 NULL 1 NULL 0 1904503784.6410 0 0 0 0 79.764 NULL NULL NULL NULL 0 0 0 2023/11/17 20:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:35:00 1 0 NULL 1 NULL 0 2438952364.8470 0 0 0 0 101.415 NULL NULL NULL NULL 0 0 0 2023/11/17 20:30:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:40:00 1 0 NULL 1 NULL 0 2404374386.37 0 0 0 0 100.112 NULL NULL NULL NULL 0 0 0 
2023/11/17 20:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:45:00 1 0 NULL 1 NULL 0 483838402.8310 0 0 0 0 21.94 NULL NULL NULL NULL 0 0 0 2023/11/17 20:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:50:00 1 0 NULL 1 NULL 0 229032920.0450 0 0 0 0 11.564 NULL NULL NULL NULL 0 0 0 2023/11/17 20:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:55:00 1 0 NULL 0 NULL 0 -55281754.6920 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 20:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:00:00 1 0 NULL 0 NULL 0 -55407449.3540 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 20:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:05:00 1 0 NULL 1 NULL 0 638042862.1320 0 0 0 0 27.755 NULL NULL NULL NULL 0 0 0 2023/11/17 21:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:10:00 1 0 NULL 1 NULL 0 634495334.57 0 0 0 0 27.846 NULL NULL NULL NULL 0 0 0 2023/11/17 21:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:15:00 1 0 NULL 1 NULL 0 1074013155.4160 0 0 0 0 45.937 NULL NULL NULL NULL 0 0 0 2023/11/17 21:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:20:00 1 0 NULL 1 NULL 0 -52304385.61 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:25:00 1 0 NULL 0 NULL 0 -54778930.8770 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:30:00 1 0 NULL 1 NULL 0 -49933804.9660 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 21:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:35:00 1 0 NULL 1 NULL 0 -12582643.9120 0 0 0 0 1.487 NULL NULL NULL NULL 0 0 0 2023/11/17 21:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:40:00 1 0 
NULL 0 NULL 0 -52297928.4560 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:45:00 1 0 NULL 1 NULL 0 3636295874.3410 0 0 0 0 150.225 NULL NULL NULL NULL 0 0 0 2023/11/17 21:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:50:00 1 0 NULL 1 NULL 0 -51935117.2980 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:55:00 1 0 NULL 0 NULL 0 -54402795.21 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:00:00 1 0 NULL 1 NULL 0 -51909940.0510 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:05:00 1 0 NULL 1 NULL 0 -46272644.3910 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:10:00 1 0 NULL 1 NULL 0 -48524238.9060 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:15:00 1 0 NULL 1 NULL 0 365490378.6370 0 0 0 0 16.983 NULL NULL NULL NULL 0 0 0 2023/11/17 22:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:20:00 1 0 NULL 1 NULL 0 319918669.0540 0 0 0 0 15.099 NULL NULL NULL NULL 0 0 0 2023/11/17 22:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:25:00 1 0 NULL 1 NULL 0 279776932.5480 0 0 0 0 13.463 NULL NULL NULL NULL 0 0 0 2023/11/17 22:20:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:30:00 1 0 NULL 0 NULL 0 -50757875.4790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:35:00 1 0 NULL 0 NULL 0 -49364760.0730 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:40:00 1 0 NULL 1 NULL 0 -47241907.5760 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:45:00 1 0 NULL 1 NULL 0 -47915767.6690 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:50:00 1 0 NULL 0 NULL 0 -50172567.1080 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:55:00 1 0 NULL 0 NULL 0 -50056650.4350 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:00:00 1 0 NULL 1 NULL 0 -47573958.7360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:05:00 1 0 NULL 1 NULL 0 -45568109.9610 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 23:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:10:00 1 0 NULL 1 NULL 0 -45494464.1590 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 23:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:15:00 1 0 NULL 0 NULL 0 -47995783.19 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:20:00 1 0 NULL 1 NULL 0 -16025744.1730 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:25:00 1 0 NULL 1 NULL 0 -15966430.1110 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:30:00 1 0 NULL 1 NULL 0 -15990262.8730 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:35:00 1 0 NULL 1 NULL 0 -15922778.3050 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:40:00 1 0 NULL 1 NULL 0 -7172455.1420 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:45:00 1 0 NULL 1 NULL 0 -3762172.28 0 0 0 0.416 0.26 NULL NULL NULL NULL 0 0 0 2023/11/17 23:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:50:00 1 0 NULL 1 NULL 0 -14168482.45 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:55:00 1 0 NULL 1 NULL 0 -15275361.3130 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:00:00 1 0 NULL 1 NULL 0 -5203508.2810 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 23:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:05:00 1 0 NULL 1 NULL 0 2006985581.7150 0 0 79.765 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 00:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:10:00 1 0 NULL 1 NULL 0 1713933363.3980 0 0 68.28 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 00:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:15:00 1 0 NULL 1 NULL 0 1324747813.8940 0 0 52.898 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 00:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:20:00 1 0 NULL 1 NULL 0 933413090.4160 0 0 37.427 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 00:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:25:00 1 0 NULL 1 NULL 0 550689839.8880 0 0 22.2 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 00:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:30:00 1 0 NULL 1 NULL 0 218937418.1080 0 0 8.395 0.416 0.81 NULL NULL NULL NULL 0 0 0 2023/11/18 00:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:35:00 1 0 NULL 1 NULL 0 7579525926.7580 0 0 0 0.416 308.996 
NULL NULL NULL NULL 0 0 0 2023/11/18 00:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:40:00 1 0 NULL 1 NULL 0 7336915650.9690 0 0 0 0.416 299.12 NULL NULL NULL NULL 0 0 0 2023/11/18 00:35:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:45:00 1 0 NULL 1 NULL 0 6045182791.2170 0 0 0 0.416 246.546 NULL NULL NULL NULL 0 0 0 2023/11/18 00:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:50:00 1 0 NULL 1 NULL 0 4782443540.40 0 0 0 0.416 195.149 NULL NULL NULL NULL 0 0 0 2023/11/18 00:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:55:00 1 0 NULL 1 NULL 0 3533422992.0770 0 0 0 0.416 144.306 NULL NULL NULL NULL 0 0 0 2023/11/18 00:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:00:00 1 0 NULL 1 NULL 0 2826754356.2650 0 0 0 0.416 115.54 NULL NULL NULL NULL 0 0 0 2023/11/18 00:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:05:00 1 0 NULL 1 NULL 0 1453322777.0730 0 0 0 0.416 59.628 NULL NULL NULL NULL 0 0 0 2023/11/18 01:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:10:00 1 0 NULL 1 NULL 0 499129476.38 0 0 0 0.416 20.781 NULL NULL NULL NULL 0 0 0 2023/11/18 01:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:15:00 1 0 NULL 1 NULL 0 485407380.7220 0 0 0 0.416 20.229 NULL NULL NULL NULL 0 0 0 2023/11/18 01:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:20:00 1 0 NULL 1 NULL 0 17265665.6770 0 0 0 0.416 1.17 NULL NULL NULL NULL 0 0 0 2023/11/18 01:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:25:00 1 0 NULL 1 NULL 0 -11434172.2560 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 01:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:30:00 1 0 NULL 1 NULL 0 -3823519.21 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 01:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:35:00 1 0 NULL 1 NULL 0 1498287664.4310 0 0 0 0.416 61.408 NULL NULL NULL NULL 0 0 0 2023/11/18 01:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:40:00 1 0 NULL 1 NULL 0 608001450.7020 0 0 0 0.416 25.168 NULL NULL NULL NULL 0 0 0 2023/11/18 01:35:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:45:00 1 0 NULL 1 NULL 0 -7888393.7570 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 01:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:50:00 1 0 NULL 1 NULL 0 -10396306.4160 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 01:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:55:00 1 0 NULL 1 NULL 0 8316084.7680 0 0 0 0.416 0.76 NULL NULL NULL NULL 0 0 0 2023/11/18 01:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:00:00 1 0 NULL 1 NULL 0 -2731101.26 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 01:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:05:00 1 0 NULL 1 NULL 0 -8814993.7150 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/18 02:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:10:00 1 0 NULL 1 NULL 0 -7598613.6930 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 02:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:15:00 1 0 NULL 1 NULL 0 -7628193.4030 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 02:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:20:00 1 0 NULL 1 NULL 0 -7639055.3820 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 02:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:25:00 1 0 NULL 1 NULL 0 -2417482.6810 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:30:00 1 0 NULL 1 NULL 0 -9895427.42 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/18 02:25:03 NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:35:00 1 0 NULL 1 NULL 0 -2318028.8590 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:40:00 1 0 NULL 1 NULL 0 -9895921.9650 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 02:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:45:00 1 0 NULL 1 NULL 0 -8500955.7880 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/18 02:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:50:00 1 0 NULL 1 NULL 0 -9903525.7990 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 02:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:55:00 1 0 NULL 1 NULL 0 -3502096.8430 0 0 0 0.416 0.26 NULL NULL NULL NULL 0 0 0 2023/11/18 02:50:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:00:00 1 0 NULL 1 NULL 0 -2234303.9630 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:05:00 1 0 NULL 1 NULL 0 -9867178.7940 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:10:00 1 0 NULL 1 NULL 0 -9517507.4350 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/18 03:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:15:00 1 0 NULL 1 NULL 0 -7178170.5820 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 03:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:20:00 1 0 NULL 1 NULL 0 -9624785.8940 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:25:00 1 0 NULL 1 NULL 0 -878688.5970 0 0 0 0.416 0.36 NULL NULL NULL NULL 0 0 0 2023/11/18 03:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:30:00 1 0 NULL 1 NULL 0 
14145537.0950 0 0 0 0.416 0.97 NULL NULL NULL NULL 0 0 0 2023/11/18 03:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:35:00 1 0 NULL 1 NULL 0 -7149770.39 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 03:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:40:00 1 0 NULL 1 NULL 0 -9596739.4250 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:45:00 1 0 NULL 1 NULL 0 88789129.03 0 0 0 0.416 3.995 NULL NULL NULL NULL 0 0 0 2023/11/18 03:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:50:00 1 0 NULL 1 NULL 0 4426114.4320 0 0 0 0.416 0.56 NULL NULL NULL NULL 0 0 0 2023/11/18 03:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:55:00 1 0 NULL 1 NULL 0 5669292.5630 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/18 03:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 04:00:00 1 0 NULL 1 NULL 0 8250793.4510 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/18 03:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv +I DISPATCH CASESOLUTION 1 SETTLEMENTDATE RUNNO INTERVENTION CASESUBTYPE SOLUTIONSTATUS SPDVERSION NONPHYSICALLOSSES TOTALOBJECTIVE TOTALAREAGENVIOLATION TOTALINTERCONNECTORVIOLATION TOTALGENERICVIOLATION TOTALRAMPRATEVIOLATION TOTALUNITMWCAPACITYVIOLATION TOTAL5MINVIOLATION TOTALREGVIOLATION TOTAL6SECVIOLATION TOTAL60SECVIOLATION TOTALASPROFILEVIOLATION TOTALFASTSTARTVIOLATION TOTALENERGYOFFERVIOLATION LASTCHANGED NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:05:00 1 0 NULL 0 NULL 0 -18891916.3260 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:10:00 1 0 NULL 0 NULL 0 -18991572.1050 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:15:00 1 0 NULL 0 NULL 0 -18873654.0430 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
{DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:20:00 1 0 NULL 0 NULL 0 -18814533.9560 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:25:00 1 0 NULL 0 NULL 0 -18955411.5810 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:30:00 1 0 NULL 1 NULL 0 -16453600.9790 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 04:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:35:00 1 0 NULL 0 NULL 0 -18918229.6050 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:40:00 1 0 NULL 0 NULL 0 -18903552.1320 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:45:00 1 0 NULL 0 NULL 0 -18956195.7840 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:50:00 1 0 NULL 0 NULL 0 -19068752.4310 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:55:00 1 0 NULL 1 NULL 0 -16547387.8360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 04:50:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:00:00 1 0 NULL 1 NULL 0 -14134931.5130 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 04:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:05:00 1 0 NULL 0 NULL 0 -19099921.3580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:00:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:10:00 1 0 NULL 0 NULL 0 -18967112.4580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:15:00 1 0 NULL 0 NULL 0 -19075483.1820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:10:03 NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:20:00 1 0 NULL 0 NULL 0 -19184913.3440 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:25:00 1 0 NULL 1 NULL 0 -14343450.8660 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 05:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:30:00 1 0 NULL 0 NULL 0 -19331879.1490 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:35:00 1 0 NULL 0 NULL 0 -19582540.7460 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:40:00 1 0 NULL 0 NULL 0 -19995441.8250 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:45:00 1 0 NULL 0 NULL 0 -20392189.2680 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:50:00 1 0 NULL 0 NULL 0 -20620333.3450 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:55:00 1 0 NULL 0 NULL 0 -21362524.3070 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:00:00 1 0 NULL 0 NULL 0 -21489793.8170 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:05:00 1 0 NULL 1 NULL 0 2094013133.35 0 0 0 0 86.091 NULL NULL NULL NULL 0 0 0 2023/11/17 06:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:10:00 1 0 NULL 1 NULL 0 1338468636.7650 0 0 0 0 55.418 NULL NULL NULL NULL 0 0 0 2023/11/17 06:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 
06:15:00 1 0 NULL 1 NULL 0 548289035.8710 0 0 0 0 23.281 NULL NULL NULL NULL 0 0 0 2023/11/17 06:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:20:00 1 0 NULL 1 NULL 0 2773061028.0780 0 0 0 0 113.806 NULL NULL NULL NULL 0 0 0 2023/11/17 06:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:25:00 1 0 NULL 1 NULL 0 1505021017.9440 0 0 0 0 62.326 NULL NULL NULL NULL 0 0 0 2023/11/17 06:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:30:00 1 0 NULL 1 NULL 0 490360523.2630 0 0 0 0 21.085 NULL NULL NULL NULL 0 0 0 2023/11/17 06:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:35:00 1 0 NULL 1 NULL 0 2187250227.8070 0 0 0 0 90.128 NULL NULL NULL NULL 0 0 0 2023/11/17 06:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:40:00 1 0 NULL 1 NULL 0 2664659481.8670 0 0 8.851 0 91.522 NULL NULL NULL NULL 0 0 0 2023/11/17 06:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:45:00 1 0 NULL 1 NULL 0 1062603019.6170 0 0 0 0 44.402 NULL NULL NULL NULL 0 0 0 2023/11/17 06:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:50:00 1 0 NULL 1 NULL 0 586841408.2250 0 0 0 0 25.053 NULL NULL NULL NULL 0 0 0 2023/11/17 06:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:55:00 1 0 NULL 0 NULL 0 -28726654.8030 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 06:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:00:00 1 0 NULL 1 NULL 0 1485033818.2130 0 0 0 0 61.621 NULL NULL NULL NULL 0 0 0 2023/11/17 06:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:05:00 1 0 NULL 1 NULL 0 1216625476.3170 0 0 0 0 50.703 NULL NULL NULL NULL 0 0 0 2023/11/17 07:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:10:00 1 0 NULL 1 NULL 0 1305259053.6630 0 0 0 0 54.355 NULL NULL NULL NULL 0 0 0 2023/11/17 07:05:03 NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:15:00 1 0 NULL 1 NULL 0 591119444.4970 0 0 0 0 25.29 NULL NULL NULL NULL 0 0 0 2023/11/17 07:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:20:00 1 0 NULL 1 NULL 0 742149171.6520 0 0 0 0.416 30.151 NULL NULL NULL NULL 0 0 0 2023/11/17 07:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:25:00 1 0 NULL 1 NULL 0 766417739.3210 0 0 0 0.416 31.141 NULL NULL NULL NULL 0 0 0 2023/11/17 07:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:30:00 1 0 NULL 1 NULL 0 352861875.1030 0 0 0 0.416 14.315 NULL NULL NULL NULL 0 0 0 2023/11/17 07:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:35:00 1 0 NULL 1 NULL 0 8817834722.1480 0 0 0 0.416 358.351 NULL NULL NULL NULL 0 0 0 2023/11/17 07:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:40:00 1 0 NULL 1 NULL 0 2359869476.2250 0 0 0 0.416 95.668 NULL NULL NULL NULL 0 0 0 2023/11/17 07:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:45:00 1 0 NULL 1 NULL 0 1365028867.05 0 0 0 0.416 55.296 NULL NULL NULL NULL 0 0 0 2023/11/17 07:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:50:00 1 0 NULL 1 NULL 0 15272157.7030 0 0 0 0.416 0.51 NULL NULL NULL NULL 0 0 0 2023/11/17 07:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:55:00 1 0 NULL 1 NULL 0 5022568.40 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 07:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:00:00 1 0 NULL 1 NULL 0 2071536.4830 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 07:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:05:00 1 0 NULL 1 NULL 0 5881035925.2910 0 0 0 0.416 239.213 NULL NULL NULL NULL 0 0 0 2023/11/17 08:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
{DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:10:00 1 0 NULL 1 NULL 0 4892605710.2240 0 0 0 0.416 198.989 NULL NULL NULL NULL 0 0 0 2023/11/17 08:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:15:00 1 0 NULL 1 NULL 0 3819130532.2320 0 0 0 0.416 155.302 NULL NULL NULL NULL 0 0 0 2023/11/17 08:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:20:00 1 0 NULL 1 NULL 0 2677901325.5920 0 0 0 0.416 108.846 NULL NULL NULL NULL 0 0 0 2023/11/17 08:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:25:00 1 0 NULL 1 NULL 0 1509812889.8560 0 0 0 0.416 61.311 NULL NULL NULL NULL 0 0 0 2023/11/17 08:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:30:00 1 0 NULL 1 NULL 0 168787982.9490 0 0 0 0.416 6.736 NULL NULL NULL NULL 0 0 0 2023/11/17 08:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:35:00 1 0 NULL 1 NULL 0 7564605.6680 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 08:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:40:00 1 0 NULL 1 NULL 0 3536720.6290 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:45:00 1 0 NULL 1 NULL 0 3379078.5810 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:50:00 1 0 NULL 1 NULL 0 123950548.9330 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:55:00 1 0 NULL 1 NULL 0 122839634.4540 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:00:00 1 0 NULL 1 NULL 0 122216116.48 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:05:00 1 0 NULL 1 NULL 0 3424718.80 0 0 0 0.416 0 NULL NULL 
NULL NULL 0 0 0 2023/11/17 09:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:10:00 1 0 NULL 1 NULL 0 3271843.5390 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:15:00 1 0 NULL 1 NULL 0 3257726.6290 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:10:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:20:00 1 0 NULL 1 NULL 0 3586935.9820 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:25:00 1 0 NULL 1 NULL 0 5994413.4070 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:20:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:30:00 1 0 NULL 1 NULL 0 3610284.7060 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:25:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:35:00 1 0 NULL 1 NULL 0 4456441.6050 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:30:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:40:00 1 0 NULL 1 NULL 0 18731332.1660 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 09:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:45:00 1 0 NULL 1 NULL 0 6422848.2190 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:50:00 1 0 NULL 1 NULL 0 6444921.5360 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:55:00 1 0 NULL 1 NULL 0 6859042.8620 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:00:00 1 0 NULL 1 NULL 0 5911282.9530 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/17 09:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:05:00 1 0 NULL 1 NULL 0 5201965.0710 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:10:00 1 0 NULL 1 NULL 0 4376118.4030 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:15:00 1 0 NULL 1 NULL 0 4013503.9750 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:20:00 1 0 NULL 1 NULL 0 6348990.8130 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 10:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:25:00 1 0 NULL 1 NULL 0 13747527.9270 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 10:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:30:00 1 0 NULL 1 NULL 0 6571442.6830 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 10:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:35:00 1 0 NULL 1 NULL 0 14211057.60 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 10:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:40:00 1 0 NULL 1 NULL 0 7504324.2550 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 10:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:45:00 1 0 NULL 1 NULL 0 8528550.0170 0 0 0 0.416 0.16 NULL NULL NULL NULL 0 0 0 2023/11/17 10:40:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:50:00 1 0 NULL 1 NULL 0 4225305.82 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:55:00 1 0 NULL 1 NULL 0 21485872.2540 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/17 10:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:00:00 1 0 NULL 1 NULL 0 4226007.7930 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 
0 2023/11/17 10:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:05:00 1 0 NULL 1 NULL 0 4172215.6160 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:10:00 1 0 NULL 1 NULL 0 6683973.9840 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 11:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:15:00 1 0 NULL 1 NULL 0 3830504.4820 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:20:00 1 0 NULL 1 NULL 0 11791856.5180 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 11:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:25:00 1 0 NULL 1 NULL 0 6975406.9030 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 11:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:30:00 1 0 NULL 1 NULL 0 4534686.4890 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:35:00 1 0 NULL 1 NULL 0 4487944.2080 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:30:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:40:00 1 0 NULL 1 NULL 0 4280498.3490 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:45:00 1 0 NULL 1 NULL 0 4225721.8810 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:40:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:50:00 1 0 NULL 1 NULL 0 4102987.3650 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:55:00 1 0 NULL 1 NULL 0 22144271.3020 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 11:50:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
{DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:00:00 1 0 NULL 1 NULL 0 9140815.3220 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:05:00 1 0 NULL 1 NULL 0 9116493.9280 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:10:00 1 0 NULL 1 NULL 0 10003655.0370 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:05:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:15:00 1 0 NULL 1 NULL 0 18079517.01 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:20:00 1 0 NULL 1 NULL 0 27406117.0570 0 0 0 0.416 0.91 NULL NULL NULL NULL 0 0 0 2023/11/17 12:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:25:00 1 0 NULL 1 NULL 0 4320521.9670 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:20:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:30:00 1 0 NULL 1 NULL 0 2476488974.4520 0 0 0 0.416 100.343 NULL NULL NULL NULL 0 0 0 2023/11/17 12:25:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:35:00 1 0 NULL 1 NULL 0 1244271669.4330 0 0 0 0.416 50.318 NULL NULL NULL NULL 0 0 0 2023/11/17 12:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:40:00 1 0 NULL 1 NULL 0 27240398.8090 0 0 0 0.416 0.912 NULL NULL NULL NULL 0 0 0 2023/11/17 12:35:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:45:00 1 0 NULL 1 NULL 0 165936351.37 0 0 6.3 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 12:40:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:50:00 1 0 NULL 1 NULL 0 165859462.5740 0 0 5.5 0.416 0.92 NULL NULL NULL NULL 0 0 0 2023/11/17 12:45:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:55:00 1 0 NULL 1 NULL 0 8521883.3220 0 0 0 0.416 0.16 NULL NULL NULL 
NULL 0 0 0 2023/11/17 12:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:00:00 1 0 NULL 1 NULL 0 7561068.9480 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 12:55:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:05:00 1 0 NULL 1 NULL 0 4643264.7460 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/17 13:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:10:00 1 0 NULL 1 NULL 0 7234913.6530 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:15:00 1 0 NULL 1 NULL 0 7173770.6820 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:10:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:20:00 1 0 NULL 1 NULL 0 7113525.2630 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:15:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:25:00 1 0 NULL 1 NULL 0 4677430.2130 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:20:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:30:00 1 0 NULL 1 NULL 0 7137757.2140 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:25:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:35:00 1 0 NULL 1 NULL 0 3968739.0110 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:40:00 1 0 NULL 1 NULL 0 6180673.3160 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:45:00 1 0 NULL 1 NULL 0 11551903.7220 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 13:40:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:50:00 1 0 NULL 1 NULL 0 3872835.5480 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:55:00 1 0 NULL 1 NULL 0 17473325.51 0 0 0 0.416 0.56 NULL NULL NULL NULL 0 0 0 2023/11/17 13:50:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:00:00 1 0 NULL 1 NULL 0 3750810.7370 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/17 13:55:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:05:00 1 0 NULL 1 NULL 0 13604150.2940 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 14:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:10:00 1 0 NULL 1 NULL 0 527721659.4740 0 0 0 0.416 21.32 NULL NULL NULL NULL 0 0 0 2023/11/17 14:05:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:15:00 1 0 NULL 1 NULL 0 6294109.5750 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 14:10:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:20:00 1 0 NULL 1 NULL 0 21318486.3070 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/17 14:15:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:25:00 1 0 NULL 1 NULL 0 6046850.4370 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 14:20:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:30:00 1 0 NULL 1 NULL 0 3612256.4120 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:25:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:35:00 1 0 NULL 1 NULL 0 5906741.4970 0 0 0 0.416 0.21 NULL NULL NULL NULL 0 0 0 2023/11/17 14:30:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:40:00 1 0 NULL 1 NULL 0 72617.3270 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:45:00 1 0 NULL 1 NULL 0 272811.3590 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:40:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:50:00 1 0 NULL 1 NULL 0 548861020.6880 0 0 7.337 0.416 7.337 NULL 
NULL NULL NULL 0 0 0 2023/11/17 14:45:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:55:00 1 0 NULL 1 NULL 0 14953704.1040 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 14:50:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:00:00 1 0 NULL 1 NULL 0 76782.3890 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:05:00 1 0 NULL 1 NULL 0 1829348191.2050 0 0 72.095 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 15:00:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:10:00 1 0 NULL 1 NULL 0 1410714484.4390 0 0 55.817 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:05:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:15:00 1 0 NULL 1 NULL 0 1070070453.6940 0 0 42.332 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:20:00 1 0 NULL 1 NULL 0 741446570.9290 0 0 29.271 0.416 0.16 NULL NULL NULL NULL 0 0 0 2023/11/17 15:15:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:25:00 1 0 NULL 1 NULL 0 604683647.8940 0 0 14.659 2.808 2.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:20:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:30:00 1 0 NULL 1 NULL 0 182919191.4570 0 0 0 2.808 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:25:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:35:00 1 0 NULL 1 NULL 0 351791624.4190 0 0 0 5.002 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:40:00 1 0 NULL 0 NULL 0 -32479057.79 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:35:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:45:00 1 0 NULL 1 NULL 0 -21112431.6520 0 0 0 0 0.46 NULL NULL NULL NULL 0 0 0 2023/11/17 15:40:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:50:00 1 0 NULL 1 NULL 0 -29981122.65 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:45:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:55:00 1 0 NULL 1 NULL 0 -30107374.6380 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:50:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:00:00 1 0 NULL 0 NULL 0 -32339890.8670 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:05:00 1 0 NULL 1 NULL 0 80864316.1390 0 0 0 0 4.65 NULL NULL NULL NULL 0 0 0 2023/11/17 16:00:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:10:00 1 0 NULL 0 NULL 0 -34074408.4470 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:05:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:15:00 1 0 NULL 0 NULL 0 -35444084.7180 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:20:00 1 0 NULL 1 NULL 0 -30480486.3150 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 16:15:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:25:00 1 0 NULL 1 NULL 0 -33015540.6270 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:20:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:30:00 1 0 NULL 1 NULL 0 -24879655.5480 0 0 0 0 0.46 NULL NULL NULL NULL 0 0 0 2023/11/17 16:25:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:35:00 1 0 NULL 0 NULL 0 -41333053.0260 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:40:00 1 0 NULL 0 NULL 0 -41329313.6780 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:45:00 1 0 NULL 0 NULL 0 -41833547.9990 0 0 0 0 0 NULL NULL 
NULL NULL 0 0 0 2023/11/17 16:40:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:50:00 1 0 NULL 0 NULL 0 -42551070.1150 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:45:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:55:00 1 0 NULL 1 NULL 0 -40165172.80 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:50:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:00:00 1 0 NULL 1 NULL 0 -43463696.7360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:05:00 1 0 NULL 1 NULL 0 402372864.7860 0 0 0 0 18.24 NULL NULL NULL NULL 0 0 0 2023/11/17 17:00:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:10:00 1 0 NULL 1 NULL 0 179581774.6110 0 0 0 0 9.16 NULL NULL NULL NULL 0 0 0 2023/11/17 17:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:15:00 1 0 NULL 0 NULL 0 -44774717.5690 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:20:00 1 0 NULL 0 NULL 0 -44827330.6740 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:25:00 1 0 NULL 0 NULL 0 -44178924.7790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:30:00 1 0 NULL 0 NULL 0 -43916506.8830 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:35:00 1 0 NULL 0 NULL 0 -41189433.84 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:40:00 1 0 NULL 0 NULL 0 -40402133.1480 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
{DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:45:00 1 0 NULL 1 NULL 0 -34454714.9820 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 17:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:50:00 1 0 NULL 1 NULL 0 -33579028.50 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 17:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:55:00 1 0 NULL 1 NULL 0 -36094950.5570 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 17:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:00:00 1 0 NULL 1 NULL 0 -36433457.0950 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 17:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:05:00 1 0 NULL 1 NULL 0 -34430611.6970 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:10:00 1 0 NULL 0 NULL 0 -36891093.4820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:15:00 1 0 NULL 0 NULL 0 -37374801.1820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:20:00 1 0 NULL 1 NULL 0 -34255085.9680 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:25:00 1 0 NULL 1 NULL 0 -33215504.3040 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:30:00 1 0 NULL 0 NULL 0 -35196263.3820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:35:00 1 0 NULL 1 NULL 0 -32334724.6370 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:40:00 1 0 NULL 0 NULL 0 -34191415.0880 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:35:03 NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:45:00 1 0 NULL 1 NULL 0 -31336602.66 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:50:00 1 0 NULL 0 NULL 0 -34153952.3790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:55:00 1 0 NULL 0 NULL 0 -34067481.2860 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:00:00 1 0 NULL 0 NULL 0 -34161469.6580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:05:00 1 0 NULL 0 NULL 0 -25740682.46 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 19:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:10:00 1 0 NULL 1 NULL 0 888159560.5950 0 0 0 0 37.27 NULL NULL NULL NULL 0 0 0 2023/11/17 19:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:15:00 1 0 NULL 1 NULL 0 883712099.9530 0 0 0 0 37.23 NULL NULL NULL NULL 0 0 0 2023/11/17 19:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:20:00 1 0 NULL 1 NULL 0 881329177.0250 0 0 0 0 37.22 NULL NULL NULL NULL 0 0 0 2023/11/17 19:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:25:00 1 0 NULL 1 NULL 0 899723467.1060 0 0 0 0 37.98 NULL NULL NULL NULL 0 0 0 2023/11/17 19:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:30:00 1 0 NULL 1 NULL 0 872181619.7320 0 0 0 0 36.87 NULL NULL NULL NULL 0 0 0 2023/11/17 19:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:35:00 1 0 NULL 1 NULL 0 6225595163.4330 0 0 0 0 254.68 NULL NULL NULL NULL 0 0 0 2023/11/17 19:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 
2023/11/17 19:40:00 1 0 NULL 1 NULL 0 4900041508.0970 0 0 0 0 200.82 NULL NULL NULL NULL 0 0 0 2023/11/17 19:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:45:00 1 0 NULL 1 NULL 0 2374895739.2760 0 0 0 0 98 NULL NULL NULL NULL 0 0 0 2023/11/17 19:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:50:00 1 0 NULL 1 NULL 0 1051496257.6480 0 0 0 0 44.1 NULL NULL NULL NULL 0 0 0 2023/11/17 19:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:55:00 1 0 NULL 0 NULL 0 -32267107.2450 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 19:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:00:00 1 0 NULL 1 NULL 0 -30322764.1280 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 19:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:05:00 1 0 NULL 1 NULL 0 4027187861.5690 0 0 0 0 165.964 NULL NULL NULL NULL 0 0 0 2023/11/17 20:00:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:10:00 1 0 NULL 1 NULL 0 3623567899.1380 0 0 0 0 149.595 NULL NULL NULL NULL 0 0 0 2023/11/17 20:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:15:00 1 0 NULL 1 NULL 0 2841880378.5970 0 0 0 0 117.613 NULL NULL NULL NULL 0 0 0 2023/11/17 20:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:20:00 1 0 NULL 1 NULL 0 2244933416.7980 0 0 0 0 93.477 NULL NULL NULL NULL 0 0 0 2023/11/17 20:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:25:00 1 0 NULL 1 NULL 0 2863030435.6560 0 0 0 0 118.77 NULL NULL NULL NULL 0 0 0 2023/11/17 20:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:30:00 1 0 NULL 1 NULL 0 1904503784.6410 0 0 0 0 79.764 NULL NULL NULL NULL 0 0 0 2023/11/17 20:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:35:00 1 0 NULL 1 NULL 0 2438952364.8470 0 0 0 0 101.415 NULL NULL NULL NULL 0 0 0 2023/11/17 20:30:02 NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:40:00 1 0 NULL 1 NULL 0 2404374386.37 0 0 0 0 100.112 NULL NULL NULL NULL 0 0 0 2023/11/17 20:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:45:00 1 0 NULL 1 NULL 0 483838402.8310 0 0 0 0 21.94 NULL NULL NULL NULL 0 0 0 2023/11/17 20:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:50:00 1 0 NULL 1 NULL 0 229032920.0450 0 0 0 0 11.564 NULL NULL NULL NULL 0 0 0 2023/11/17 20:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:55:00 1 0 NULL 0 NULL 0 -55281754.6920 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 20:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:00:00 1 0 NULL 0 NULL 0 -55407449.3540 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 20:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:05:00 1 0 NULL 1 NULL 0 638042862.1320 0 0 0 0 27.755 NULL NULL NULL NULL 0 0 0 2023/11/17 21:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:10:00 1 0 NULL 1 NULL 0 634495334.57 0 0 0 0 27.846 NULL NULL NULL NULL 0 0 0 2023/11/17 21:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:15:00 1 0 NULL 1 NULL 0 1074013155.4160 0 0 0 0 45.937 NULL NULL NULL NULL 0 0 0 2023/11/17 21:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:20:00 1 0 NULL 1 NULL 0 -52304385.61 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:25:00 1 0 NULL 0 NULL 0 -54778930.8770 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:30:00 1 0 NULL 1 NULL 0 -49933804.9660 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 21:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 
2023/11/17 21:35:00 1 0 NULL 1 NULL 0 -12582643.9120 0 0 0 0 1.487 NULL NULL NULL NULL 0 0 0 2023/11/17 21:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:40:00 1 0 NULL 0 NULL 0 -52297928.4560 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:45:00 1 0 NULL 1 NULL 0 3636295874.3410 0 0 0 0 150.225 NULL NULL NULL NULL 0 0 0 2023/11/17 21:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:50:00 1 0 NULL 1 NULL 0 -51935117.2980 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:55:00 1 0 NULL 0 NULL 0 -54402795.21 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:00:00 1 0 NULL 1 NULL 0 -51909940.0510 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:05:00 1 0 NULL 1 NULL 0 -46272644.3910 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:10:00 1 0 NULL 1 NULL 0 -48524238.9060 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:15:00 1 0 NULL 1 NULL 0 365490378.6370 0 0 0 0 16.983 NULL NULL NULL NULL 0 0 0 2023/11/17 22:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:20:00 1 0 NULL 1 NULL 0 319918669.0540 0 0 0 0 15.099 NULL NULL NULL NULL 0 0 0 2023/11/17 22:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:25:00 1 0 NULL 1 NULL 0 279776932.5480 0 0 0 0 13.463 NULL NULL NULL NULL 0 0 0 2023/11/17 22:20:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:30:00 1 0 NULL 0 NULL 0 -50757875.4790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:35:00 1 0 NULL 0 NULL 0 -49364760.0730 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:40:00 1 0 NULL 1 NULL 0 -47241907.5760 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:45:00 1 0 NULL 1 NULL 0 -47915767.6690 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:50:00 1 0 NULL 0 NULL 0 -50172567.1080 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:55:00 1 0 NULL 0 NULL 0 -50056650.4350 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:00:00 1 0 NULL 1 NULL 0 -47573958.7360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:05:00 1 0 NULL 1 NULL 0 -45568109.9610 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 23:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:10:00 1 0 NULL 1 NULL 0 -45494464.1590 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 23:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:15:00 1 0 NULL 0 NULL 0 -47995783.19 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:20:00 1 0 NULL 1 NULL 0 -16025744.1730 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:25:00 1 0 NULL 1 NULL 0 -15966430.1110 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:30:00 1 0 NULL 1 NULL 0 
-15990262.8730 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:35:00 1 0 NULL 1 NULL 0 -15922778.3050 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:40:00 1 0 NULL 1 NULL 0 -7172455.1420 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:45:00 1 0 NULL 1 NULL 0 -3762172.28 0 0 0 0.416 0.26 NULL NULL NULL NULL 0 0 0 2023/11/17 23:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:50:00 1 0 NULL 1 NULL 0 -14168482.45 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:55:00 1 0 NULL 1 NULL 0 -15275361.3130 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:00:00 1 0 NULL 1 NULL 0 -5203508.2810 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 23:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:05:00 1 0 NULL 1 NULL 0 2006985581.7150 0 0 79.765 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 00:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:10:00 1 0 NULL 1 NULL 0 1713933363.3980 0 0 68.28 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 00:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:15:00 1 0 NULL 1 NULL 0 1324747813.8940 0 0 52.898 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 00:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:20:00 1 0 NULL 1 NULL 0 933413090.4160 0 0 37.427 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 00:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:25:00 1 0 NULL 1 NULL 0 550689839.8880 0 0 22.2 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 00:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:30:00 1 0 NULL 1 NULL 0 218937418.1080 0 0 8.395 0.416 0.81 NULL NULL NULL NULL 0 0 0 2023/11/18 00:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:35:00 1 0 NULL 1 NULL 0 7579525926.7580 0 0 0 0.416 308.996 NULL NULL NULL NULL 0 0 0 2023/11/18 00:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:40:00 1 0 NULL 1 NULL 0 7336915650.9690 0 0 0 0.416 299.12 NULL NULL NULL NULL 0 0 0 2023/11/18 00:35:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:45:00 1 0 NULL 1 NULL 0 6045182791.2170 0 0 0 0.416 246.546 NULL NULL NULL NULL 0 0 0 2023/11/18 00:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:50:00 1 0 NULL 1 NULL 0 4782443540.40 0 0 0 0.416 195.149 NULL NULL NULL NULL 0 0 0 2023/11/18 00:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:55:00 1 0 NULL 1 NULL 0 3533422992.0770 0 0 0 0.416 144.306 NULL NULL NULL NULL 0 0 0 2023/11/18 00:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:00:00 1 0 NULL 1 NULL 0 2826754356.2650 0 0 0 0.416 115.54 NULL NULL NULL NULL 0 0 0 2023/11/18 00:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:05:00 1 0 NULL 1 NULL 0 1453322777.0730 0 0 0 0.416 59.628 NULL NULL NULL NULL 0 0 0 2023/11/18 01:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:10:00 1 0 NULL 1 NULL 0 499129476.38 0 0 0 0.416 20.781 NULL NULL NULL NULL 0 0 0 2023/11/18 01:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:15:00 1 0 NULL 1 NULL 0 485407380.7220 0 0 0 0.416 20.229 NULL NULL NULL NULL 0 0 0 2023/11/18 01:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:20:00 1 0 NULL 1 NULL 0 17265665.6770 0 0 0 0.416 1.17 NULL NULL NULL NULL 0 0 0 2023/11/18 01:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
{DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:25:00 1 0 NULL 1 NULL 0 -11434172.2560 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 01:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:30:00 1 0 NULL 1 NULL 0 -3823519.21 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 01:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:35:00 1 0 NULL 1 NULL 0 1498287664.4310 0 0 0 0.416 61.408 NULL NULL NULL NULL 0 0 0 2023/11/18 01:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:40:00 1 0 NULL 1 NULL 0 608001450.7020 0 0 0 0.416 25.168 NULL NULL NULL NULL 0 0 0 2023/11/18 01:35:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:45:00 1 0 NULL 1 NULL 0 -7888393.7570 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 01:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:50:00 1 0 NULL 1 NULL 0 -10396306.4160 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 01:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:55:00 1 0 NULL 1 NULL 0 8316084.7680 0 0 0 0.416 0.76 NULL NULL NULL NULL 0 0 0 2023/11/18 01:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:00:00 1 0 NULL 1 NULL 0 -2731101.26 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 01:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:05:00 1 0 NULL 1 NULL 0 -8814993.7150 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/18 02:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:10:00 1 0 NULL 1 NULL 0 -7598613.6930 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 02:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:15:00 1 0 NULL 1 NULL 0 -7628193.4030 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 02:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:20:00 1 0 NULL 1 NULL 0 -7639055.3820 0 0 0 0.416 0.1 NULL NULL NULL 
NULL 0 0 0 2023/11/18 02:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:25:00 1 0 NULL 1 NULL 0 -2417482.6810 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:30:00 1 0 NULL 1 NULL 0 -9895427.42 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/18 02:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:35:00 1 0 NULL 1 NULL 0 -2318028.8590 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:40:00 1 0 NULL 1 NULL 0 -9895921.9650 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 02:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:45:00 1 0 NULL 1 NULL 0 -8500955.7880 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/18 02:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:50:00 1 0 NULL 1 NULL 0 -9903525.7990 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 02:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:55:00 1 0 NULL 1 NULL 0 -3502096.8430 0 0 0 0.416 0.26 NULL NULL NULL NULL 0 0 0 2023/11/18 02:50:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:00:00 1 0 NULL 1 NULL 0 -2234303.9630 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:05:00 1 0 NULL 1 NULL 0 -9867178.7940 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:10:00 1 0 NULL 1 NULL 0 -9517507.4350 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/18 03:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:15:00 1 0 NULL 1 NULL 0 -7178170.5820 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 03:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:20:00 1 0 NULL 1 NULL 0 -9624785.8940 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:25:00 1 0 NULL 1 NULL 0 -878688.5970 0 0 0 0.416 0.36 NULL NULL NULL NULL 0 0 0 2023/11/18 03:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:30:00 1 0 NULL 1 NULL 0 14145537.0950 0 0 0 0.416 0.97 NULL NULL NULL NULL 0 0 0 2023/11/18 03:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:35:00 1 0 NULL 1 NULL 0 -7149770.39 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 03:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:40:00 1 0 NULL 1 NULL 0 -9596739.4250 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:45:00 1 0 NULL 1 NULL 0 88789129.03 0 0 0 0.416 3.995 NULL NULL NULL NULL 0 0 0 2023/11/18 03:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:50:00 1 0 NULL 1 NULL 0 4426114.4320 0 0 0 0.416 0.56 NULL NULL NULL NULL 0 0 0 2023/11/18 03:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:55:00 1 0 NULL 1 NULL 0 5669292.5630 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/18 03:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 04:00:00 1 0 NULL 1 NULL 0 8250793.4510 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/18 03:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv query I -select UNIT from read_csv("data/csv/bug_10283.csv", +select UNIT from read_csv("{DATA_DIR}/csv/bug_10283.csv", Skip=1,header =0,all_varchar=1,sample_size=-1, columns={ 'I': 'VARCHAR','UNIT': 'VARCHAR','XX': 'VARCHAR','VERSION': 'VARCHAR','SETTLEMENTDATE': 'VARCHAR','RUNNO': 'VARCHAR', @@ -634,7 +634,7 @@ DISPATCH DISPATCH query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII -from read_csv("data/csv/bug_10283.csv", +from read_csv("{DATA_DIR}/csv/bug_10283.csv", Skip=1, header =0, sample_size=-1, @@ -776,7 +776,7 @@ auto_detect=false) ---- query 
IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII -from read_csv("data/csv/bug_10283.csv", +from read_csv("{DATA_DIR}/csv/bug_10283.csv", Skip=1, header =0, sample_size=-1, @@ -1210,7 +1210,7 @@ D DREGION NULL 2 2023/11/17 04:05:00 1 NSW1 0 88.56012 0 88.56012 0 0 6392.2 23 D DREGION NULL 2 2023/11/17 04:05:00 1 QLD1 0 83.25 0 83.25 0 0 6012.9 1 6405.92 0 393.02 0 NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 16 NULL NULL NULL NULL NULL NULL NULL 69 NULL NULL NULL NULL NULL NULL NULL 165 NULL NULL NULL NULL NULL NULL NULL 158.59 NULL NULL NULL NULL NULL 12.04781 9607.10393 98 6020.25977 6017.04 NULL 10 NULL NULL NULL 121 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII -from read_csv("data/csv/bug_10283.csv", +from read_csv("{DATA_DIR}/csv/bug_10283.csv", Skip=1, header =0, sample_size=-1, @@ -1645,7 +1645,7 @@ D DREGION NULL 2 2023/11/17 04:05:00 1 NSW1 0 88.56012 0 88.56012 0 0 6392.2 23 D DREGION NULL 2 2023/11/17 04:05:00 1 QLD1 0 83.25 0 83.25 0 0 6012.9 1 6405.92 0 393.02 0 NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 16 NULL NULL NULL NULL NULL NULL NULL 69 NULL NULL NULL NULL NULL NULL NULL 165 NULL NULL NULL NULL NULL NULL NULL 158.59 NULL NULL NULL NULL NULL 12.04781 9607.10393 98 6020.25977 6017.04 NULL 10 NULL NULL NULL 121 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII -from read_csv("data/csv/bug_10283.csv", +from read_csv("{DATA_DIR}/csv/bug_10283.csv", Skip=1, header =0, sample_size=-1, @@ -1786,301 +1786,301 @@ null_padding=true, filename=true, auto_detect=false) ---- -I DISPATCH CASESOLUTION 1 SETTLEMENTDATE RUNNO INTERVENTION CASESUBTYPE SOLUTIONSTATUS SPDVERSION NONPHYSICALLOSSES TOTALOBJECTIVE TOTALAREAGENVIOLATION TOTALINTERCONNECTORVIOLATION TOTALGENERICVIOLATION TOTALRAMPRATEVIOLATION TOTALUNITMWCAPACITYVIOLATION TOTAL5MINVIOLATION TOTALREGVIOLATION TOTAL6SECVIOLATION TOTAL60SECVIOLATION TOTALASPROFILEVIOLATION TOTALFASTSTARTVIOLATION TOTALENERGYOFFERVIOLATION LASTCHANGED NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 04:05:00 1 0 NULL 0 NULL 0 -18891916.3260 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 04:10:00 1 0 NULL 0 NULL 0 -18991572.1050 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 04:15:00 1 0 NULL 0 NULL 0 -18873654.0430 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 04:20:00 1 0 NULL 0 NULL 0 -18814533.9560 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 04:25:00 1 0 NULL 0 NULL 0 -18955411.5810 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 04:30:00 1 0 NULL 1 NULL 0 -16453600.9790 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 04:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 04:35:00 1 0 NULL 0 NULL 0 -18918229.6050 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 04:40:00 1 0 NULL 0 NULL 0 -18903552.1320 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 04:45:00 1 0 NULL 0 NULL 0 -18956195.7840 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 04:50:00 1 0 NULL 0 NULL 0 -19068752.4310 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 04:55:00 1 0 NULL 1 NULL 0 -16547387.8360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 04:50:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:00:00 1 0 NULL 1 NULL 0 -14134931.5130 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 04:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:05:00 1 0 NULL 0 NULL 0 -19099921.3580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:00:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:10:00 1 0 NULL 0 NULL 0 -18967112.4580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:15:00 1 0 NULL 0 NULL 0 -19075483.1820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:20:00 1 0 NULL 0 NULL 0 -19184913.3440 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:25:00 1 0 NULL 1 NULL 0 -14343450.8660 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 05:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:30:00 1 0 NULL 0 NULL 0 -19331879.1490 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:35:00 1 0 NULL 0 NULL 0 -19582540.7460 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:40:00 1 0 NULL 0 NULL 0 -19995441.8250 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:45:00 1 0 NULL 0 NULL 0 -20392189.2680 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:50:00 1 0 NULL 0 NULL 0 -20620333.3450 0 0 0 0 0 NULL NULL 
NULL NULL 0 0 0 2023/11/17 05:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 05:55:00 1 0 NULL 0 NULL 0 -21362524.3070 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:00:00 1 0 NULL 0 NULL 0 -21489793.8170 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:05:00 1 0 NULL 1 NULL 0 2094013133.35 0 0 0 0 86.091 NULL NULL NULL NULL 0 0 0 2023/11/17 06:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:10:00 1 0 NULL 1 NULL 0 1338468636.7650 0 0 0 0 55.418 NULL NULL NULL NULL 0 0 0 2023/11/17 06:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:15:00 1 0 NULL 1 NULL 0 548289035.8710 0 0 0 0 23.281 NULL NULL NULL NULL 0 0 0 2023/11/17 06:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:20:00 1 0 NULL 1 NULL 0 2773061028.0780 0 0 0 0 113.806 NULL NULL NULL NULL 0 0 0 2023/11/17 06:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:25:00 1 0 NULL 1 NULL 0 1505021017.9440 0 0 0 0 62.326 NULL NULL NULL NULL 0 0 0 2023/11/17 06:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:30:00 1 0 NULL 1 NULL 0 490360523.2630 0 0 0 0 21.085 NULL NULL NULL NULL 0 0 0 2023/11/17 06:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:35:00 1 0 NULL 1 NULL 0 2187250227.8070 0 0 0 0 90.128 NULL NULL NULL NULL 0 0 0 2023/11/17 06:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:40:00 1 0 NULL 1 NULL 0 2664659481.8670 0 0 8.851 0 91.522 NULL NULL NULL NULL 0 0 0 2023/11/17 06:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:45:00 1 0 NULL 1 NULL 0 1062603019.6170 0 0 0 0 44.402 NULL NULL NULL NULL 0 0 0 2023/11/17 06:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:50:00 1 0 NULL 1 NULL 0 586841408.2250 0 0 0 0 25.053 NULL NULL NULL NULL 0 0 0 2023/11/17 06:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 06:55:00 1 0 NULL 0 NULL 0 -28726654.8030 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 06:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:00:00 1 0 NULL 1 NULL 0 1485033818.2130 0 0 0 0 61.621 NULL NULL NULL NULL 0 0 0 2023/11/17 06:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:05:00 1 0 NULL 1 NULL 0 1216625476.3170 0 0 0 0 50.703 NULL NULL NULL NULL 0 0 0 2023/11/17 07:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:10:00 1 0 NULL 1 NULL 0 1305259053.6630 0 0 0 0 54.355 NULL NULL NULL NULL 0 0 0 2023/11/17 07:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:15:00 1 0 NULL 1 NULL 0 591119444.4970 0 0 0 0 25.29 NULL NULL NULL NULL 0 0 0 2023/11/17 07:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:20:00 1 0 NULL 1 NULL 0 742149171.6520 0 0 0 0.416 30.151 NULL NULL NULL NULL 0 0 0 2023/11/17 07:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:25:00 1 0 NULL 1 NULL 0 766417739.3210 0 0 0 0.416 31.141 NULL NULL NULL NULL 0 0 0 2023/11/17 07:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:30:00 1 0 NULL 1 NULL 0 352861875.1030 0 0 0 0.416 14.315 NULL NULL NULL NULL 0 0 0 2023/11/17 07:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:35:00 1 0 NULL 1 NULL 0 8817834722.1480 0 0 0 0.416 358.351 NULL NULL NULL NULL 0 0 0 2023/11/17 07:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:40:00 1 0 NULL 1 NULL 0 2359869476.2250 0 0 0 0.416 95.668 NULL NULL NULL NULL 0 0 0 2023/11/17 07:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:45:00 1 0 NULL 1 NULL 0 1365028867.05 0 0 0 0.416 55.296 NULL NULL NULL NULL 0 0 0 2023/11/17 07:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:50:00 1 0 NULL 1 NULL 0 15272157.7030 0 0 0 0.416 0.51 NULL NULL NULL NULL 0 0 0 2023/11/17 07:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 07:55:00 1 0 NULL 1 NULL 0 5022568.40 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 07:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:00:00 1 0 NULL 1 NULL 0 2071536.4830 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 07:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:05:00 1 0 NULL 1 NULL 0 5881035925.2910 0 0 0 0.416 239.213 NULL NULL NULL NULL 0 0 0 2023/11/17 08:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:10:00 1 0 NULL 1 NULL 0 4892605710.2240 0 0 0 0.416 198.989 NULL NULL NULL NULL 0 0 0 2023/11/17 08:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:15:00 1 0 NULL 1 NULL 0 3819130532.2320 0 0 0 0.416 155.302 NULL NULL NULL NULL 0 0 0 2023/11/17 08:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:20:00 1 0 NULL 1 NULL 0 2677901325.5920 0 0 0 0.416 108.846 NULL NULL NULL NULL 0 0 0 2023/11/17 08:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH 
CASESOLUTION 1 2023/11/17 08:25:00 1 0 NULL 1 NULL 0 1509812889.8560 0 0 0 0.416 61.311 NULL NULL NULL NULL 0 0 0 2023/11/17 08:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:30:00 1 0 NULL 1 NULL 0 168787982.9490 0 0 0 0.416 6.736 NULL NULL NULL NULL 0 0 0 2023/11/17 08:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:35:00 1 0 NULL 1 NULL 0 7564605.6680 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 08:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:40:00 1 0 NULL 1 NULL 0 3536720.6290 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:45:00 1 0 NULL 1 NULL 0 3379078.5810 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:50:00 1 0 NULL 1 NULL 0 123950548.9330 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 
2023/11/17 08:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 08:55:00 1 0 NULL 1 NULL 0 122839634.4540 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:00:00 1 0 NULL 1 NULL 0 122216116.48 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:05:00 1 0 NULL 1 NULL 0 3424718.80 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:10:00 1 0 NULL 1 NULL 0 3271843.5390 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:15:00 1 0 NULL 1 NULL 0 3257726.6290 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:10:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:20:00 1 0 NULL 1 NULL 0 3586935.9820 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:25:00 1 0 NULL 1 NULL 0 5994413.4070 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:20:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:30:00 1 0 NULL 1 NULL 0 3610284.7060 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:25:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:35:00 1 0 NULL 1 NULL 0 4456441.6050 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:30:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:40:00 1 0 NULL 1 NULL 0 18731332.1660 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 09:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:45:00 1 0 NULL 1 NULL 0 6422848.2190 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:50:00 1 0 NULL 1 NULL 0 6444921.5360 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 09:55:00 1 0 NULL 1 NULL 0 6859042.8620 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:00:00 1 0 NULL 1 NULL 0 5911282.9530 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/17 09:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:05:00 1 0 NULL 1 NULL 0 5201965.0710 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:10:00 1 0 NULL 1 NULL 0 4376118.4030 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:15:00 1 0 NULL 1 NULL 0 4013503.9750 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:20:00 1 0 NULL 1 NULL 0 6348990.8130 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 10:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:25:00 1 0 NULL 1 NULL 0 13747527.9270 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 10:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:30:00 1 0 NULL 1 NULL 0 6571442.6830 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 10:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:35:00 1 0 NULL 1 NULL 0 14211057.60 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 10:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:40:00 1 0 NULL 1 NULL 0 7504324.2550 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 10:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:45:00 1 0 NULL 1 NULL 0 8528550.0170 0 0 0 0.416 0.16 NULL NULL NULL NULL 0 0 0 2023/11/17 10:40:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:50:00 1 0 NULL 1 NULL 0 4225305.82 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 10:55:00 1 0 NULL 1 NULL 0 21485872.2540 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/17 10:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH 
CASESOLUTION 1 2023/11/17 11:00:00 1 0 NULL 1 NULL 0 4226007.7930 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:05:00 1 0 NULL 1 NULL 0 4172215.6160 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:10:00 1 0 NULL 1 NULL 0 6683973.9840 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 11:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:15:00 1 0 NULL 1 NULL 0 3830504.4820 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:20:00 1 0 NULL 1 NULL 0 11791856.5180 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 11:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:25:00 1 0 NULL 1 NULL 0 6975406.9030 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 
11:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:30:00 1 0 NULL 1 NULL 0 4534686.4890 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:35:00 1 0 NULL 1 NULL 0 4487944.2080 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:30:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:40:00 1 0 NULL 1 NULL 0 4280498.3490 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:45:00 1 0 NULL 1 NULL 0 4225721.8810 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:40:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:50:00 1 0 NULL 1 NULL 0 4102987.3650 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 11:55:00 1 0 NULL 1 NULL 0 22144271.3020 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 11:50:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:00:00 1 0 NULL 1 NULL 0 9140815.3220 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:05:00 1 0 NULL 1 NULL 0 9116493.9280 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:10:00 1 0 NULL 1 NULL 0 10003655.0370 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:05:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:15:00 1 0 NULL 1 NULL 0 18079517.01 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:20:00 1 0 NULL 1 NULL 0 27406117.0570 0 0 0 0.416 0.91 NULL NULL NULL NULL 0 0 0 2023/11/17 12:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:25:00 1 0 NULL 1 NULL 0 4320521.9670 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:20:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:30:00 1 0 NULL 1 NULL 0 2476488974.4520 0 0 0 0.416 100.343 NULL NULL NULL NULL 0 0 0 2023/11/17 12:25:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:35:00 1 0 NULL 1 NULL 0 1244271669.4330 0 0 0 0.416 50.318 NULL NULL NULL NULL 0 0 0 2023/11/17 12:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:40:00 1 0 NULL 1 NULL 0 27240398.8090 0 0 0 0.416 0.912 NULL NULL NULL NULL 0 0 0 2023/11/17 12:35:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:45:00 1 0 NULL 1 NULL 0 165936351.37 0 0 6.3 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 12:40:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:50:00 1 0 NULL 1 NULL 0 165859462.5740 0 0 5.5 0.416 0.92 NULL NULL NULL NULL 0 0 0 2023/11/17 12:45:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 12:55:00 1 0 NULL 1 NULL 0 8521883.3220 0 0 0 0.416 0.16 NULL NULL NULL NULL 0 0 0 2023/11/17 12:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:00:00 1 0 NULL 1 NULL 0 7561068.9480 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 12:55:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:05:00 1 0 NULL 1 NULL 0 4643264.7460 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/17 13:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:10:00 1 0 NULL 1 NULL 0 7234913.6530 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:15:00 1 0 NULL 1 NULL 0 7173770.6820 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:10:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:20:00 1 0 NULL 1 NULL 0 7113525.2630 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:15:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:25:00 1 0 NULL 1 NULL 0 4677430.2130 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:20:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:30:00 1 0 NULL 1 NULL 0 7137757.2140 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:25:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D 
DISPATCH CASESOLUTION 1 2023/11/17 13:35:00 1 0 NULL 1 NULL 0 3968739.0110 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:40:00 1 0 NULL 1 NULL 0 6180673.3160 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:45:00 1 0 NULL 1 NULL 0 11551903.7220 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 13:40:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:50:00 1 0 NULL 1 NULL 0 3872835.5480 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 13:55:00 1 0 NULL 1 NULL 0 17473325.51 0 0 0 0.416 0.56 NULL NULL NULL NULL 0 0 0 2023/11/17 13:50:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:00:00 1 0 NULL 1 NULL 0 3750810.7370 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 
0 2023/11/17 13:55:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:05:00 1 0 NULL 1 NULL 0 13604150.2940 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 14:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:10:00 1 0 NULL 1 NULL 0 527721659.4740 0 0 0 0.416 21.32 NULL NULL NULL NULL 0 0 0 2023/11/17 14:05:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:15:00 1 0 NULL 1 NULL 0 6294109.5750 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 14:10:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:20:00 1 0 NULL 1 NULL 0 21318486.3070 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/17 14:15:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:25:00 1 0 NULL 1 NULL 0 6046850.4370 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 14:20:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:30:00 1 0 NULL 1 NULL 0 3612256.4120 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:25:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:35:00 1 0 NULL 1 NULL 0 5906741.4970 0 0 0 0.416 0.21 NULL NULL NULL NULL 0 0 0 2023/11/17 14:30:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:40:00 1 0 NULL 1 NULL 0 72617.3270 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:45:00 1 0 NULL 1 NULL 0 272811.3590 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:40:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:50:00 1 0 NULL 1 NULL 0 548861020.6880 0 0 7.337 0.416 7.337 NULL NULL NULL NULL 0 0 0 2023/11/17 14:45:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 14:55:00 1 0 NULL 1 NULL 0 14953704.1040 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 14:50:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:00:00 1 0 NULL 1 NULL 0 76782.3890 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:05:00 1 0 NULL 1 NULL 0 1829348191.2050 0 0 72.095 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 15:00:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:10:00 1 0 NULL 1 NULL 0 1410714484.4390 0 0 55.817 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:05:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:15:00 1 0 NULL 1 NULL 0 1070070453.6940 0 0 42.332 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:20:00 1 0 NULL 1 NULL 0 741446570.9290 0 0 29.271 0.416 0.16 NULL NULL NULL NULL 0 0 0 2023/11/17 15:15:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:25:00 1 0 NULL 1 NULL 0 604683647.8940 0 0 14.659 2.808 2.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:20:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:30:00 1 0 NULL 1 NULL 0 182919191.4570 0 0 0 2.808 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:25:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:35:00 1 0 NULL 1 NULL 0 351791624.4190 0 0 0 5.002 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:40:00 1 0 NULL 0 NULL 0 -32479057.79 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:35:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:45:00 1 0 NULL 1 NULL 0 -21112431.6520 0 0 0 0 0.46 NULL NULL NULL NULL 0 0 0 2023/11/17 15:40:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:50:00 1 0 NULL 1 NULL 0 -29981122.65 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:45:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 15:55:00 1 0 NULL 1 NULL 0 -30107374.6380 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:50:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:00:00 1 0 NULL 0 NULL 0 -32339890.8670 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:05:00 1 0 NULL 1 NULL 0 80864316.1390 0 0 0 0 4.65 NULL NULL NULL NULL 0 0 0 2023/11/17 16:00:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:10:00 1 0 NULL 0 NULL 0 -34074408.4470 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:05:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:15:00 1 0 NULL 0 NULL 0 -35444084.7180 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:20:00 1 0 NULL 1 NULL 0 -30480486.3150 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 16:15:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:25:00 1 0 NULL 1 NULL 0 -33015540.6270 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:20:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:30:00 1 0 NULL 1 NULL 0 -24879655.5480 0 0 0 0 0.46 NULL NULL NULL NULL 0 0 0 2023/11/17 16:25:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:35:00 1 0 NULL 0 NULL 0 -41333053.0260 0 0 0 0 0 NULL NULL 
NULL NULL 0 0 0 2023/11/17 16:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:40:00 1 0 NULL 0 NULL 0 -41329313.6780 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:45:00 1 0 NULL 0 NULL 0 -41833547.9990 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:40:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:50:00 1 0 NULL 0 NULL 0 -42551070.1150 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:45:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 16:55:00 1 0 NULL 1 NULL 0 -40165172.80 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:50:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:00:00 1 0 NULL 1 NULL 0 -43463696.7360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:05:00 1 0 NULL 1 NULL 0 402372864.7860 0 0 0 0 18.24 NULL NULL NULL NULL 0 0 0 2023/11/17 17:00:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:10:00 1 0 NULL 1 NULL 0 179581774.6110 0 0 0 0 9.16 NULL NULL NULL NULL 0 0 0 2023/11/17 17:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:15:00 1 0 NULL 0 NULL 0 -44774717.5690 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:20:00 1 0 NULL 0 NULL 0 -44827330.6740 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:25:00 1 0 NULL 0 NULL 0 -44178924.7790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:30:00 1 0 NULL 0 NULL 0 -43916506.8830 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:35:00 1 0 NULL 0 NULL 0 -41189433.84 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:40:00 1 0 NULL 0 NULL 0 -40402133.1480 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:45:00 1 0 NULL 1 NULL 0 -34454714.9820 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 17:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:50:00 1 0 NULL 1 NULL 0 -33579028.50 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 17:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 17:55:00 1 0 NULL 1 NULL 0 -36094950.5570 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 17:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:00:00 1 0 NULL 1 NULL 0 -36433457.0950 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 17:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:05:00 1 0 NULL 1 NULL 0 -34430611.6970 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:10:00 1 0 NULL 0 NULL 0 -36891093.4820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:15:00 1 0 NULL 0 NULL 0 -37374801.1820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:20:00 1 0 NULL 1 NULL 0 -34255085.9680 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:25:00 1 0 NULL 1 NULL 0 -33215504.3040 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:30:00 1 0 NULL 0 NULL 0 -35196263.3820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:35:00 1 0 NULL 1 NULL 0 -32334724.6370 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:40:00 1 0 NULL 0 NULL 0 -34191415.0880 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:45:00 1 0 NULL 1 
NULL 0 -31336602.66 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:50:00 1 0 NULL 0 NULL 0 -34153952.3790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 18:55:00 1 0 NULL 0 NULL 0 -34067481.2860 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:00:00 1 0 NULL 0 NULL 0 -34161469.6580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:05:00 1 0 NULL 0 NULL 0 -25740682.46 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 19:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:10:00 1 0 NULL 1 NULL 0 888159560.5950 0 0 0 0 37.27 NULL NULL NULL NULL 0 0 0 2023/11/17 19:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:15:00 1 0 NULL 1 NULL 0 883712099.9530 0 0 0 0 37.23 NULL NULL NULL NULL 0 0 0 2023/11/17 19:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:20:00 1 0 NULL 1 NULL 0 881329177.0250 0 0 0 0 37.22 NULL NULL NULL NULL 0 0 0 2023/11/17 19:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:25:00 1 0 NULL 1 NULL 0 899723467.1060 0 0 0 0 37.98 NULL NULL NULL NULL 0 0 0 2023/11/17 19:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:30:00 1 0 NULL 1 NULL 0 872181619.7320 0 0 0 0 36.87 NULL NULL NULL NULL 0 0 0 2023/11/17 19:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:35:00 1 0 NULL 1 NULL 0 6225595163.4330 0 0 0 0 254.68 NULL NULL NULL NULL 0 0 0 2023/11/17 19:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:40:00 1 0 NULL 1 NULL 0 4900041508.0970 0 0 0 0 200.82 NULL NULL NULL NULL 0 0 0 2023/11/17 19:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:45:00 1 0 NULL 1 NULL 0 2374895739.2760 0 0 0 0 98 NULL NULL NULL NULL 0 0 0 2023/11/17 19:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:50:00 1 0 NULL 1 NULL 0 1051496257.6480 0 0 0 0 44.1 NULL NULL NULL NULL 0 0 0 2023/11/17 19:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 19:55:00 1 0 NULL 0 NULL 0 -32267107.2450 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 19:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:00:00 1 0 NULL 1 NULL 0 -30322764.1280 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 19:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:05:00 1 0 NULL 1 NULL 0 4027187861.5690 0 0 0 0 165.964 NULL NULL NULL NULL 0 0 0 2023/11/17 20:00:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:10:00 1 0 NULL 1 NULL 0 3623567899.1380 0 0 0 0 149.595 NULL NULL NULL NULL 0 0 0 2023/11/17 20:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:15:00 1 0 NULL 1 NULL 0 2841880378.5970 0 0 0 0 117.613 NULL NULL NULL NULL 0 0 0 2023/11/17 20:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:20:00 1 0 NULL 1 NULL 0 2244933416.7980 0 0 0 0 93.477 NULL NULL NULL NULL 0 0 0 2023/11/17 20:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:25:00 1 0 NULL 1 NULL 0 2863030435.6560 0 0 0 0 118.77 NULL NULL NULL NULL 0 0 0 2023/11/17 20:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:30:00 1 0 NULL 1 NULL 0 1904503784.6410 0 0 0 0 79.764 NULL NULL NULL NULL 0 0 0 2023/11/17 20:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:35:00 1 0 NULL 1 NULL 0 2438952364.8470 0 0 0 0 101.415 NULL NULL NULL NULL 0 0 0 2023/11/17 20:30:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:40:00 1 0 NULL 1 NULL 0 2404374386.37 0 0 0 0 100.112 NULL NULL NULL NULL 0 0 0 2023/11/17 20:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:45:00 1 0 NULL 1 NULL 0 483838402.8310 0 0 0 0 21.94 NULL NULL NULL NULL 0 0 0 2023/11/17 20:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:50:00 1 0 NULL 1 NULL 0 229032920.0450 0 0 0 0 11.564 NULL NULL NULL NULL 0 0 0 2023/11/17 20:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 20:55:00 1 0 NULL 0 NULL 0 -55281754.6920 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 20:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:00:00 1 0 NULL 0 NULL 0 -55407449.3540 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 20:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:05:00 1 0 NULL 1 NULL 0 638042862.1320 0 0 0 0 27.755 NULL NULL NULL NULL 0 0 0 2023/11/17 21:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:10:00 1 0 NULL 1 NULL 0 634495334.57 0 0 0 0 27.846 NULL NULL NULL NULL 0 0 0 2023/11/17 21:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:15:00 1 0 NULL 1 NULL 0 1074013155.4160 0 0 0 0 45.937 NULL NULL NULL NULL 0 0 0 2023/11/17 21:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:20:00 1 0 NULL 1 NULL 
0 -52304385.61 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:25:00 1 0 NULL 0 NULL 0 -54778930.8770 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:30:00 1 0 NULL 1 NULL 0 -49933804.9660 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 21:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:35:00 1 0 NULL 1 NULL 0 -12582643.9120 0 0 0 0 1.487 NULL NULL NULL NULL 0 0 0 2023/11/17 21:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:40:00 1 0 NULL 0 NULL 0 -52297928.4560 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:45:00 1 0 NULL 1 NULL 0 3636295874.3410 0 0 0 0 150.225 NULL NULL NULL NULL 0 0 0 2023/11/17 21:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:50:00 1 0 NULL 1 NULL 0 -51935117.2980 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 21:55:00 1 0 NULL 0 NULL 0 -54402795.21 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:00:00 1 0 NULL 1 NULL 0 -51909940.0510 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:05:00 1 0 NULL 1 NULL 0 -46272644.3910 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:10:00 1 0 NULL 1 NULL 0 -48524238.9060 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:15:00 1 0 NULL 1 NULL 0 365490378.6370 0 0 0 0 16.983 NULL NULL NULL NULL 0 0 0 2023/11/17 22:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:20:00 1 0 NULL 1 NULL 0 319918669.0540 0 0 0 0 15.099 NULL NULL NULL NULL 0 0 0 2023/11/17 22:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:25:00 1 0 NULL 1 NULL 0 279776932.5480 0 0 0 0 13.463 NULL NULL NULL NULL 0 0 0 2023/11/17 22:20:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:30:00 1 0 NULL 0 NULL 0 -50757875.4790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:35:00 1 0 NULL 0 NULL 0 -49364760.0730 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:40:00 1 0 NULL 1 NULL 0 -47241907.5760 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:45:00 1 0 NULL 1 NULL 0 -47915767.6690 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:50:00 1 0 NULL 0 NULL 0 -50172567.1080 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 22:55:00 1 0 NULL 0 NULL 0 -50056650.4350 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:00:00 1 0 NULL 1 NULL 0 -47573958.7360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:05:00 1 0 NULL 1 NULL 0 -45568109.9610 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 23:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:10:00 1 0 NULL 1 NULL 0 -45494464.1590 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 23:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:15:00 1 0 NULL 0 NULL 0 -47995783.19 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:20:00 1 0 NULL 1 NULL 0 -16025744.1730 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:25:00 1 0 NULL 1 NULL 0 -15966430.1110 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:30:00 1 0 NULL 1 NULL 0 -15990262.8730 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:35:00 1 0 NULL 1 NULL 0 -15922778.3050 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:40:00 1 0 NULL 1 NULL 0 -7172455.1420 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:45:00 1 0 NULL 1 NULL 0 -3762172.28 0 0 0 0.416 0.26 NULL NULL NULL NULL 0 0 0 2023/11/17 23:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:50:00 1 0 NULL 1 NULL 0 -14168482.45 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/17 23:55:00 1 0 NULL 1 NULL 0 -15275361.3130 0 0 0 0.416 0 
NULL NULL NULL NULL 0 0 0 2023/11/17 23:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:00:00 1 0 NULL 1 NULL 0 -5203508.2810 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 23:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:05:00 1 0 NULL 1 NULL 0 2006985581.7150 0 0 79.765 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 00:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:10:00 1 0 NULL 1 NULL 0 1713933363.3980 0 0 68.28 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 00:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:15:00 1 0 NULL 1 NULL 0 1324747813.8940 0 0 52.898 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 00:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:20:00 1 0 NULL 1 NULL 0 933413090.4160 0 0 37.427 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 00:15:02 NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:25:00 1 0 NULL 1 NULL 0 550689839.8880 0 0 22.2 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 00:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:30:00 1 0 NULL 1 NULL 0 218937418.1080 0 0 8.395 0.416 0.81 NULL NULL NULL NULL 0 0 0 2023/11/18 00:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:35:00 1 0 NULL 1 NULL 0 7579525926.7580 0 0 0 0.416 308.996 NULL NULL NULL NULL 0 0 0 2023/11/18 00:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:40:00 1 0 NULL 1 NULL 0 7336915650.9690 0 0 0 0.416 299.12 NULL NULL NULL NULL 0 0 0 2023/11/18 00:35:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:45:00 1 0 NULL 1 NULL 0 6045182791.2170 0 0 0 0.416 246.546 NULL NULL NULL NULL 0 0 0 2023/11/18 00:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:50:00 1 0 NULL 1 NULL 0 4782443540.40 0 0 0 0.416 195.149 NULL NULL NULL NULL 0 0 0 2023/11/18 00:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 00:55:00 1 0 NULL 1 NULL 0 3533422992.0770 0 0 0 0.416 144.306 NULL NULL NULL NULL 0 0 0 2023/11/18 00:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:00:00 1 0 NULL 1 NULL 0 2826754356.2650 0 0 0 0.416 115.54 NULL NULL NULL NULL 0 0 0 2023/11/18 00:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:05:00 1 0 NULL 1 NULL 0 1453322777.0730 0 0 0 0.416 59.628 NULL NULL NULL NULL 0 0 0 2023/11/18 01:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:10:00 1 0 NULL 1 NULL 0 499129476.38 0 0 0 0.416 20.781 NULL NULL NULL NULL 0 0 0 2023/11/18 01:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:15:00 1 0 NULL 1 NULL 0 485407380.7220 0 0 0 0.416 20.229 NULL NULL NULL NULL 0 0 0 2023/11/18 01:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:20:00 1 0 NULL 1 NULL 0 17265665.6770 0 0 0 0.416 1.17 NULL NULL NULL NULL 0 0 0 2023/11/18 01:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:25:00 1 0 NULL 1 NULL 0 -11434172.2560 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 01:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:30:00 1 0 NULL 1 NULL 0 -3823519.21 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 01:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:35:00 1 0 NULL 1 NULL 0 1498287664.4310 0 0 0 0.416 61.408 NULL NULL NULL NULL 0 0 0 2023/11/18 01:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:40:00 1 0 NULL 1 NULL 0 608001450.7020 0 0 0 0.416 25.168 NULL NULL NULL NULL 0 0 0 2023/11/18 01:35:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:45:00 1 0 NULL 1 NULL 0 -7888393.7570 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 01:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:50:00 1 0 NULL 1 NULL 0 -10396306.4160 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 01:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 01:55:00 1 0 NULL 1 NULL 0 8316084.7680 0 0 0 0.416 0.76 NULL NULL NULL NULL 0 0 0 2023/11/18 01:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:00:00 1 0 NULL 1 NULL 0 -2731101.26 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 01:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:05:00 1 0 NULL 1 NULL 0 -8814993.7150 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/18 02:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:10:00 1 0 NULL 1 NULL 0 -7598613.6930 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 02:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:15:00 1 0 NULL 1 NULL 0 -7628193.4030 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 02:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:20:00 1 0 NULL 1 NULL 0 -7639055.3820 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 02:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:25:00 1 0 NULL 1 NULL 0 -2417482.6810 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:30:00 1 0 NULL 1 NULL 0 -9895427.42 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/18 02:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:35:00 1 0 NULL 1 NULL 0 -2318028.8590 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:40:00 1 0 NULL 1 NULL 0 -9895921.9650 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 02:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:45:00 1 0 NULL 1 NULL 0 -8500955.7880 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/18 02:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:50:00 1 0 NULL 1 NULL 0 -9903525.7990 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 02:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 02:55:00 1 0 NULL 1 NULL 0 -3502096.8430 
0 0 0 0.416 0.26 NULL NULL NULL NULL 0 0 0 2023/11/18 02:50:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:00:00 1 0 NULL 1 NULL 0 -2234303.9630 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:05:00 1 0 NULL 1 NULL 0 -9867178.7940 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:10:00 1 0 NULL 1 NULL 0 -9517507.4350 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/18 03:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:15:00 1 0 NULL 1 NULL 0 -7178170.5820 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 03:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:20:00 1 0 NULL 1 NULL 0 -9624785.8940 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:25:00 1 0 NULL 1 NULL 0 -878688.5970 0 0 0 0.416 0.36 NULL NULL NULL NULL 0 0 0 2023/11/18 03:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:30:00 1 0 NULL 1 NULL 0 14145537.0950 0 0 0 0.416 0.97 NULL NULL NULL NULL 0 0 0 2023/11/18 03:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:35:00 1 0 NULL 1 NULL 0 -7149770.39 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 03:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:40:00 1 0 NULL 1 NULL 0 -9596739.4250 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:45:00 1 0 NULL 1 NULL 0 88789129.03 0 0 0 0.416 3.995 NULL NULL NULL NULL 0 0 0 2023/11/18 03:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:50:00 1 0 NULL 1 NULL 0 4426114.4320 0 0 0 0.416 0.56 NULL NULL NULL NULL 0 0 0 2023/11/18 03:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 03:55:00 1 0 NULL 1 NULL 0 5669292.5630 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/18 03:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DISPATCH CASESOLUTION 1 2023/11/18 04:00:00 1 0 NULL 1 NULL 0 8250793.4510 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/18 03:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -I DREGION NULL 2 SETTLEMENTDATE RUNNO REGIONID INTERVENTION RRP EEP ROP APCFLAG MARKETSUSPENDEDFLAG TOTALDEMAND DEMANDFORECAST DISPATCHABLEGENERATION DISPATCHABLELOAD NETINTERCHANGE EXCESSGENERATION LOWER5MINDISPATCH LOWER5MINIMPORT LOWER5MINLOCALDISPATCH LOWER5MINLOCALPRICE LOWER5MINLOCALREQ LOWER5MINPRICE LOWER5MINREQ LOWER5MINSUPPLYPRICE LOWER60SECDISPATCH LOWER60SECIMPORT LOWER60SECLOCALDISPATCH LOWER60SECLOCALPRICE LOWER60SECLOCALREQ LOWER60SECPRICE LOWER60SECREQ LOWER60SECSUPPLYPRICE LOWER6SECDISPATCH LOWER6SECIMPORT LOWER6SECLOCALDISPATCH LOWER6SECLOCALPRICE LOWER6SECLOCALREQ LOWER6SECPRICE LOWER6SECREQ LOWER6SECSUPPLYPRICE RAISE5MINDISPATCH RAISE5MINIMPORT RAISE5MINLOCALDISPATCH RAISE5MINLOCALPRICE RAISE5MINLOCALREQ RAISE5MINPRICE RAISE5MINREQ RAISE5MINSUPPLYPRICE RAISE60SECDISPATCH RAISE60SECIMPORT RAISE60SECLOCALDISPATCH RAISE60SECLOCALPRICE RAISE60SECLOCALREQ RAISE60SECPRICE RAISE60SECREQ RAISE60SECSUPPLYPRICE RAISE6SECDISPATCH RAISE6SECIMPORT RAISE6SECLOCALDISPATCH RAISE6SECLOCALPRICE RAISE6SECLOCALREQ RAISE6SECPRICE RAISE6SECREQ RAISE6SECSUPPLYPRICE 
AGGREGATEDISPATCHERROR AVAILABLEGENERATION AVAILABLELOAD INITIALSUPPLY CLEAREDSUPPLY LOWERREGIMPORT LOWERREGLOCALDISPATCH LOWERREGLOCALREQ LOWERREGREQ RAISEREGIMPORT RAISEREGLOCALDISPATCH RAISEREGLOCALREQ RAISEREGREQ RAISE5MINLOCALVIOLATION RAISEREGLOCALVIOLATION RAISE60SECLOCALVIOLATION RAISE6SECLOCALVIOLATION LOWER5MINLOCALVIOLATION LOWERREGLOCALVIOLATION LOWER60SECLOCALVIOLATION LOWER6SECLOCALVIOLATION RAISE5MINVIOLATION RAISEREGVIOLATION RAISE60SECVIOLATION RAISE6SECVIOLATION LOWER5MINVIOLATION LOWERREGVIOLATION LOWER60SECVIOLATION LOWER6SECVIOLATION RAISE6SECRRP RAISE6SECROP RAISE6SECAPCFLAG RAISE60SECRRP RAISE60SECROP RAISE60SECAPCFLAG RAISE5MINRRP RAISE5MINROP RAISE5MINAPCFLAG RAISEREGRRP RAISEREGROP RAISEREGAPCFLAG LOWER6SECRRP LOWER6SECROP LOWER6SECAPCFLAG LOWER60SECRRP LOWER60SECROP LOWER60SECAPCFLAG LOWER5MINRRP LOWER5MINROP LOWER5MINAPCFLAG LOWERREGRRP LOWERREGROP LOWERREGAPCFLAG NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DREGION NULL 2 2023/11/17 04:05:00 1 NSW1 0 88.56012 0 88.56012 0 0 6392.2 23 5694.97 0 -697.23 0 NULL NULL 98.43 NULL NULL NULL NULL NULL NULL NULL 103 NULL NULL NULL NULL NULL NULL NULL 91 NULL NULL NULL NULL NULL NULL NULL 105 NULL NULL NULL NULL NULL NULL NULL 164 NULL NULL NULL NULL NULL NULL NULL 171.23 NULL NULL NULL NULL NULL 2.14388 8571.20053 207 6391.82764 6408.98 NULL 72 NULL NULL NULL 32 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DREGION NULL 2 2023/11/17 04:05:00 1 QLD1 0 83.25 0 83.25 0 0 6012.9 1 6405.92 0 393.02 0 NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 16 NULL NULL NULL NULL NULL NULL NULL 69 NULL NULL NULL NULL NULL NULL NULL 165 NULL NULL NULL NULL NULL NULL NULL 158.59 NULL NULL NULL NULL NULL 12.04781 9607.10393 98 6020.25977 6017.04 NULL 10 NULL NULL NULL 121 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv +I DISPATCH CASESOLUTION 1 SETTLEMENTDATE RUNNO INTERVENTION CASESUBTYPE SOLUTIONSTATUS SPDVERSION NONPHYSICALLOSSES TOTALOBJECTIVE TOTALAREAGENVIOLATION TOTALINTERCONNECTORVIOLATION TOTALGENERICVIOLATION TOTALRAMPRATEVIOLATION TOTALUNITMWCAPACITYVIOLATION TOTAL5MINVIOLATION TOTALREGVIOLATION TOTAL6SECVIOLATION TOTAL60SECVIOLATION TOTALASPROFILEVIOLATION TOTALFASTSTARTVIOLATION TOTALENERGYOFFERVIOLATION LASTCHANGED NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:05:00 1 0 NULL 0 NULL 0 -18891916.3260 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:10:00 1 0 NULL 0 NULL 0 -18991572.1050 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:15:00 1 0 NULL 0 NULL 0 -18873654.0430 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:20:00 1 0 NULL 0 NULL 0 -18814533.9560 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:25:00 1 0 NULL 0 NULL 0 -18955411.5810 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:30:00 1 0 NULL 1 NULL 0 -16453600.9790 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 04:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:35:00 1 0 NULL 0 NULL 0 -18918229.6050 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:40:00 1 0 NULL 0 NULL 0 -18903552.1320 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:45:00 1 0 NULL 0 NULL 0 -18956195.7840 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:50:00 1 0 NULL 0 NULL 0 -19068752.4310 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 04:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 04:55:00 1 0 NULL 1 NULL 0 -16547387.8360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 04:50:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:00:00 1 0 NULL 1 NULL 0 -14134931.5130 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 04:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:05:00 1 0 NULL 0 NULL 0 -19099921.3580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:00:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:10:00 1 0 NULL 0 NULL 0 -18967112.4580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:15:00 1 0 NULL 0 NULL 0 -19075483.1820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:20:00 1 0 NULL 0 NULL 0 -19184913.3440 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:25:00 1 0 NULL 1 NULL 0 -14343450.8660 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 05:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:30:00 1 0 NULL 0 NULL 0 -19331879.1490 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:35:00 1 0 NULL 0 NULL 0 -19582540.7460 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:40:00 1 0 NULL 0 NULL 0 -19995441.8250 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:45:00 1 0 NULL 0 NULL 0 -20392189.2680 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
{DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:50:00 1 0 NULL 0 NULL 0 -20620333.3450 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 05:55:00 1 0 NULL 0 NULL 0 -21362524.3070 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:00:00 1 0 NULL 0 NULL 0 -21489793.8170 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 05:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:05:00 1 0 NULL 1 NULL 0 2094013133.35 0 0 0 0 86.091 NULL NULL NULL NULL 0 0 0 2023/11/17 06:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:10:00 1 0 NULL 1 NULL 0 1338468636.7650 0 0 0 0 55.418 NULL NULL NULL NULL 0 0 0 2023/11/17 06:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:15:00 1 0 NULL 1 NULL 0 
548289035.8710 0 0 0 0 23.281 NULL NULL NULL NULL 0 0 0 2023/11/17 06:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:20:00 1 0 NULL 1 NULL 0 2773061028.0780 0 0 0 0 113.806 NULL NULL NULL NULL 0 0 0 2023/11/17 06:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:25:00 1 0 NULL 1 NULL 0 1505021017.9440 0 0 0 0 62.326 NULL NULL NULL NULL 0 0 0 2023/11/17 06:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:30:00 1 0 NULL 1 NULL 0 490360523.2630 0 0 0 0 21.085 NULL NULL NULL NULL 0 0 0 2023/11/17 06:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:35:00 1 0 NULL 1 NULL 0 2187250227.8070 0 0 0 0 90.128 NULL NULL NULL NULL 0 0 0 2023/11/17 06:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:40:00 1 0 NULL 1 NULL 0 2664659481.8670 0 0 8.851 0 91.522 NULL NULL NULL NULL 0 0 0 2023/11/17 
06:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:45:00 1 0 NULL 1 NULL 0 1062603019.6170 0 0 0 0 44.402 NULL NULL NULL NULL 0 0 0 2023/11/17 06:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:50:00 1 0 NULL 1 NULL 0 586841408.2250 0 0 0 0 25.053 NULL NULL NULL NULL 0 0 0 2023/11/17 06:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 06:55:00 1 0 NULL 0 NULL 0 -28726654.8030 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 06:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:00:00 1 0 NULL 1 NULL 0 1485033818.2130 0 0 0 0 61.621 NULL NULL NULL NULL 0 0 0 2023/11/17 06:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:05:00 1 0 NULL 1 NULL 0 1216625476.3170 0 0 0 0 50.703 NULL NULL NULL NULL 0 0 0 2023/11/17 07:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:10:00 1 0 NULL 1 NULL 0 1305259053.6630 0 0 0 0 54.355 NULL NULL NULL NULL 0 0 0 2023/11/17 07:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:15:00 1 0 NULL 1 NULL 0 591119444.4970 0 0 0 0 25.29 NULL NULL NULL NULL 0 0 0 2023/11/17 07:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:20:00 1 0 NULL 1 NULL 0 742149171.6520 0 0 0 0.416 30.151 NULL NULL NULL NULL 0 0 0 2023/11/17 07:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:25:00 1 0 NULL 1 NULL 0 766417739.3210 0 0 0 0.416 31.141 NULL NULL NULL NULL 0 0 0 2023/11/17 07:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:30:00 1 0 NULL 1 NULL 0 352861875.1030 0 0 0 0.416 14.315 NULL NULL NULL NULL 0 0 0 2023/11/17 07:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:35:00 1 0 NULL 1 NULL 0 8817834722.1480 0 0 0 0.416 358.351 NULL NULL NULL NULL 0 0 0 2023/11/17 07:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:40:00 1 0 NULL 1 NULL 0 2359869476.2250 0 0 0 0.416 95.668 NULL NULL NULL NULL 0 0 0 2023/11/17 07:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:45:00 1 0 NULL 1 NULL 0 1365028867.05 0 0 0 0.416 55.296 NULL NULL NULL NULL 0 0 0 2023/11/17 07:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:50:00 1 0 NULL 1 NULL 0 15272157.7030 0 0 0 0.416 0.51 NULL NULL NULL NULL 0 0 0 2023/11/17 07:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 07:55:00 1 0 NULL 1 NULL 0 5022568.40 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 07:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:00:00 1 0 NULL 1 NULL 0 2071536.4830 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 07:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:05:00 1 0 NULL 1 NULL 0 5881035925.2910 0 0 0 0.416 239.213 NULL NULL NULL NULL 0 0 0 2023/11/17 08:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:10:00 1 0 NULL 1 NULL 0 4892605710.2240 0 0 0 0.416 198.989 NULL NULL NULL NULL 0 0 0 2023/11/17 08:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:15:00 1 0 NULL 1 NULL 0 3819130532.2320 0 0 0 0.416 155.302 NULL NULL NULL NULL 0 0 0 2023/11/17 08:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:20:00 1 0 NULL 1 NULL 0 2677901325.5920 0 0 0 0.416 108.846 NULL NULL NULL NULL 0 0 0 2023/11/17 08:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:25:00 1 0 NULL 1 NULL 0 1509812889.8560 0 0 0 0.416 61.311 NULL NULL NULL NULL 0 0 0 2023/11/17 08:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:30:00 1 0 NULL 1 NULL 0 168787982.9490 0 0 0 0.416 6.736 NULL NULL NULL NULL 0 0 0 2023/11/17 08:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:35:00 1 0 NULL 1 NULL 0 7564605.6680 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 08:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:40:00 1 0 NULL 1 NULL 0 3536720.6290 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:45:00 1 0 NULL 1 NULL 0 3379078.5810 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:50:00 1 0 NULL 1 NULL 0 123950548.9330 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 08:55:00 1 0 NULL 1 NULL 0 122839634.4540 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:00:00 1 0 NULL 1 NULL 0 122216116.48 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 08:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:05:00 1 0 NULL 1 NULL 0 3424718.80 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:10:00 1 0 NULL 1 NULL 0 3271843.5390 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:15:00 1 0 NULL 1 NULL 0 3257726.6290 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:10:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:20:00 1 0 NULL 1 NULL 0 3586935.9820 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:25:00 1 0 NULL 1 NULL 0 5994413.4070 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:20:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:30:00 1 0 NULL 1 NULL 0 3610284.7060 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:25:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:35:00 1 0 NULL 1 NULL 0 4456441.6050 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 09:30:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:40:00 1 0 NULL 1 NULL 0 18731332.1660 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 09:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:45:00 1 0 NULL 1 NULL 0 6422848.2190 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:50:00 1 0 NULL 1 NULL 0 6444921.5360 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 09:55:00 1 0 NULL 1 NULL 0 6859042.8620 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 09:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:00:00 1 0 NULL 1 NULL 0 5911282.9530 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/17 09:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 
10:05:00 1 0 NULL 1 NULL 0 5201965.0710 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:10:00 1 0 NULL 1 NULL 0 4376118.4030 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:15:00 1 0 NULL 1 NULL 0 4013503.9750 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:20:00 1 0 NULL 1 NULL 0 6348990.8130 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 10:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:25:00 1 0 NULL 1 NULL 0 13747527.9270 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 10:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:30:00 1 0 NULL 1 NULL 0 6571442.6830 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 
2023/11/17 10:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:35:00 1 0 NULL 1 NULL 0 14211057.60 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 10:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:40:00 1 0 NULL 1 NULL 0 7504324.2550 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 10:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:45:00 1 0 NULL 1 NULL 0 8528550.0170 0 0 0 0.416 0.16 NULL NULL NULL NULL 0 0 0 2023/11/17 10:40:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:50:00 1 0 NULL 1 NULL 0 4225305.82 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 10:55:00 1 0 NULL 1 NULL 0 21485872.2540 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/17 10:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:00:00 1 0 NULL 1 NULL 0 4226007.7930 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 10:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:05:00 1 0 NULL 1 NULL 0 4172215.6160 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:10:00 1 0 NULL 1 NULL 0 6683973.9840 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 11:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:15:00 1 0 NULL 1 NULL 0 3830504.4820 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:20:00 1 0 NULL 1 NULL 0 11791856.5180 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 11:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:25:00 1 0 NULL 1 NULL 0 6975406.9030 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 11:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:30:00 1 0 NULL 1 NULL 0 4534686.4890 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:35:00 1 0 NULL 1 NULL 0 4487944.2080 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:30:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:40:00 1 0 NULL 1 NULL 0 4280498.3490 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:45:00 1 0 NULL 1 NULL 0 4225721.8810 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:40:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:50:00 1 0 NULL 1 NULL 0 4102987.3650 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 11:55:00 1 0 NULL 1 NULL 0 22144271.3020 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 11:50:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:00:00 1 0 NULL 1 NULL 0 9140815.3220 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 11:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:05:00 1 0 NULL 1 NULL 0 9116493.9280 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:10:00 1 0 NULL 1 NULL 0 10003655.0370 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:05:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:15:00 1 0 NULL 1 NULL 0 18079517.01 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:20:00 1 0 NULL 1 NULL 0 27406117.0570 0 0 0 0.416 0.91 NULL NULL NULL NULL 0 0 0 2023/11/17 12:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:25:00 1 0 NULL 1 NULL 0 4320521.9670 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 12:20:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:30:00 1 0 NULL 1 NULL 0 2476488974.4520 0 0 0 0.416 100.343 NULL NULL NULL NULL 0 0 0 2023/11/17 12:25:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:35:00 1 0 NULL 1 NULL 0 1244271669.4330 0 0 0 0.416 50.318 NULL NULL NULL NULL 0 0 0 2023/11/17 12:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:40:00 1 0 NULL 1 NULL 0 27240398.8090 0 0 0 0.416 0.912 NULL NULL NULL NULL 0 0 0 2023/11/17 12:35:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:45:00 1 0 NULL 1 NULL 0 165936351.37 0 0 6.3 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 12:40:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:50:00 1 0 NULL 1 NULL 0 165859462.5740 0 0 5.5 0.416 0.92 NULL NULL NULL NULL 0 0 0 2023/11/17 12:45:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 12:55:00 1 0 NULL 1 NULL 0 8521883.3220 0 0 0 0.416 0.16 NULL NULL NULL NULL 0 0 0 2023/11/17 12:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:00:00 1 0 NULL 1 NULL 0 7561068.9480 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 12:55:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:05:00 1 0 NULL 1 NULL 0 4643264.7460 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/17 13:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:10:00 1 0 NULL 1 NULL 0 7234913.6530 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:15:00 1 0 NULL 1 NULL 0 7173770.6820 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:10:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:20:00 1 0 NULL 1 NULL 0 7113525.2630 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:15:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:25:00 1 0 NULL 1 NULL 0 4677430.2130 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:20:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D 
DISPATCH CASESOLUTION 1 2023/11/17 13:30:00 1 0 NULL 1 NULL 0 7137757.2140 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:25:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:35:00 1 0 NULL 1 NULL 0 3968739.0110 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:40:00 1 0 NULL 1 NULL 0 6180673.3160 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 13:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:45:00 1 0 NULL 1 NULL 0 11551903.7220 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/17 13:40:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:50:00 1 0 NULL 1 NULL 0 3872835.5480 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 13:45:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 13:55:00 1 0 NULL 1 NULL 0 17473325.51 0 0 0 0.416 
0.56 NULL NULL NULL NULL 0 0 0 2023/11/17 13:50:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:00:00 1 0 NULL 1 NULL 0 3750810.7370 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/17 13:55:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:05:00 1 0 NULL 1 NULL 0 13604150.2940 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 14:00:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:10:00 1 0 NULL 1 NULL 0 527721659.4740 0 0 0 0.416 21.32 NULL NULL NULL NULL 0 0 0 2023/11/17 14:05:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:15:00 1 0 NULL 1 NULL 0 6294109.5750 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 14:10:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:20:00 1 0 NULL 1 NULL 0 21318486.3070 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/17 14:15:09 NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:25:00 1 0 NULL 1 NULL 0 6046850.4370 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 14:20:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:30:00 1 0 NULL 1 NULL 0 3612256.4120 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:25:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:35:00 1 0 NULL 1 NULL 0 5906741.4970 0 0 0 0.416 0.21 NULL NULL NULL NULL 0 0 0 2023/11/17 14:30:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:40:00 1 0 NULL 1 NULL 0 72617.3270 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:45:00 1 0 NULL 1 NULL 0 272811.3590 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:40:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:50:00 1 0 NULL 1 NULL 0 548861020.6880 0 0 7.337 0.416 7.337 NULL NULL NULL NULL 0 0 0 2023/11/17 14:45:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 14:55:00 1 0 NULL 1 NULL 0 14953704.1040 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/17 14:50:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:00:00 1 0 NULL 1 NULL 0 76782.3890 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 14:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:05:00 1 0 NULL 1 NULL 0 1829348191.2050 0 0 72.095 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 15:00:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:10:00 1 0 NULL 1 NULL 0 1410714484.4390 0 0 55.817 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:05:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:15:00 1 0 NULL 1 NULL 0 1070070453.6940 0 0 42.332 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:20:00 1 0 NULL 1 NULL 0 741446570.9290 0 0 29.271 0.416 0.16 NULL NULL NULL NULL 0 0 0 2023/11/17 15:15:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:25:00 1 0 NULL 1 NULL 0 604683647.8940 0 0 14.659 2.808 2.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:20:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:30:00 1 0 NULL 1 NULL 0 182919191.4570 0 0 0 2.808 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:25:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:35:00 1 0 NULL 1 NULL 0 351791624.4190 0 0 0 5.002 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:40:00 1 0 NULL 0 NULL 0 -32479057.79 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:35:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:45:00 1 0 NULL 1 NULL 0 -21112431.6520 0 0 0 0 0.46 NULL NULL NULL NULL 0 0 0 2023/11/17 15:40:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:50:00 1 0 NULL 1 NULL 0 -29981122.65 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:45:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 15:55:00 1 0 NULL 1 NULL 0 -30107374.6380 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 15:50:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:00:00 1 0 NULL 0 NULL 0 -32339890.8670 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 15:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:05:00 1 0 NULL 1 NULL 0 80864316.1390 0 0 0 0 4.65 NULL NULL NULL NULL 0 0 0 2023/11/17 16:00:07 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:10:00 1 0 NULL 0 NULL 0 -34074408.4470 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:05:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:15:00 1 0 NULL 0 NULL 0 -35444084.7180 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:10:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:20:00 1 0 NULL 1 NULL 0 -30480486.3150 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 16:15:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:25:00 1 0 NULL 1 NULL 0 -33015540.6270 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:20:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:30:00 1 0 NULL 1 NULL 0 -24879655.5480 0 0 0 0 0.46 NULL NULL NULL NULL 0 0 0 2023/11/17 16:25:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:35:00 1 0 NULL 0 NULL 0 -41333053.0260 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:30:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:40:00 1 0 NULL 0 NULL 0 -41329313.6780 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:35:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:45:00 1 0 NULL 0 NULL 0 -41833547.9990 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:40:10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:50:00 1 0 NULL 0 NULL 0 -42551070.1150 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 16:45:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
{DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 16:55:00 1 0 NULL 1 NULL 0 -40165172.80 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:50:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:00:00 1 0 NULL 1 NULL 0 -43463696.7360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 16:55:09 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:05:00 1 0 NULL 1 NULL 0 402372864.7860 0 0 0 0 18.24 NULL NULL NULL NULL 0 0 0 2023/11/17 17:00:08 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:10:00 1 0 NULL 1 NULL 0 179581774.6110 0 0 0 0 9.16 NULL NULL NULL NULL 0 0 0 2023/11/17 17:05:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:15:00 1 0 NULL 0 NULL 0 -44774717.5690 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:20:00 1 0 NULL 0 NULL 0 
-44827330.6740 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:15:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:25:00 1 0 NULL 0 NULL 0 -44178924.7790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:30:00 1 0 NULL 0 NULL 0 -43916506.8830 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:35:00 1 0 NULL 0 NULL 0 -41189433.84 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:40:00 1 0 NULL 0 NULL 0 -40402133.1480 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 17:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:45:00 1 0 NULL 1 NULL 0 -34454714.9820 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 17:40:02 NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:50:00 1 0 NULL 1 NULL 0 -33579028.50 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 17:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 17:55:00 1 0 NULL 1 NULL 0 -36094950.5570 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 17:50:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:00:00 1 0 NULL 1 NULL 0 -36433457.0950 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 17:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:05:00 1 0 NULL 1 NULL 0 -34430611.6970 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:10:00 1 0 NULL 0 NULL 0 -36891093.4820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:05:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:15:00 1 0 NULL 0 NULL 0 -37374801.1820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:20:00 1 0 NULL 1 NULL 0 -34255085.9680 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:25:00 1 0 NULL 1 NULL 0 -33215504.3040 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:30:00 1 0 NULL 0 NULL 0 -35196263.3820 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:35:00 1 0 NULL 1 NULL 0 -32334724.6370 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:40:00 1 0 NULL 0 NULL 0 -34191415.0880 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:45:00 1 0 NULL 1 NULL 0 -31336602.66 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 18:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:50:00 1 0 NULL 0 NULL 0 -34153952.3790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 18:55:00 1 0 NULL 0 NULL 0 -34067481.2860 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:00:00 1 0 NULL 0 NULL 0 -34161469.6580 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 18:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:05:00 1 0 NULL 0 NULL 0 -25740682.46 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 19:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:10:00 1 0 NULL 1 NULL 0 888159560.5950 0 0 0 0 37.27 NULL NULL NULL NULL 0 0 0 2023/11/17 19:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:15:00 1 0 NULL 1 NULL 0 883712099.9530 0 0 0 0 37.23 NULL NULL NULL NULL 0 0 0 2023/11/17 19:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:20:00 1 0 NULL 1 NULL 0 881329177.0250 0 0 0 0 37.22 NULL NULL NULL NULL 0 0 0 2023/11/17 19:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:25:00 1 0 NULL 1 NULL 0 899723467.1060 0 0 0 0 37.98 NULL NULL NULL NULL 0 0 0 2023/11/17 19:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:30:00 1 0 NULL 1 NULL 0 872181619.7320 0 0 0 0 36.87 NULL NULL NULL NULL 0 0 0 2023/11/17 19:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:35:00 1 0 NULL 1 NULL 0 6225595163.4330 0 0 0 0 254.68 NULL NULL NULL NULL 0 0 0 2023/11/17 19:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:40:00 1 0 NULL 1 NULL 0 4900041508.0970 0 0 0 0 200.82 NULL NULL NULL NULL 0 0 0 2023/11/17 19:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:45:00 1 0 NULL 1 NULL 0 2374895739.2760 0 0 0 0 98 NULL NULL NULL NULL 0 0 0 2023/11/17 19:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:50:00 1 0 NULL 1 NULL 0 1051496257.6480 0 0 0 0 44.1 NULL NULL NULL NULL 0 0 0 2023/11/17 19:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 19:55:00 1 0 NULL 0 NULL 0 -32267107.2450 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 19:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:00:00 1 0 NULL 1 NULL 0 -30322764.1280 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 19:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:05:00 1 0 NULL 1 NULL 0 4027187861.5690 0 0 0 0 165.964 NULL NULL NULL NULL 0 0 0 2023/11/17 20:00:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:10:00 1 0 NULL 1 NULL 0 3623567899.1380 0 0 0 0 149.595 NULL NULL NULL NULL 0 0 0 2023/11/17 20:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:15:00 1 0 NULL 1 NULL 0 2841880378.5970 0 0 0 0 117.613 NULL NULL NULL NULL 0 0 0 2023/11/17 20:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH 
CASESOLUTION 1 2023/11/17 20:20:00 1 0 NULL 1 NULL 0 2244933416.7980 0 0 0 0 93.477 NULL NULL NULL NULL 0 0 0 2023/11/17 20:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:25:00 1 0 NULL 1 NULL 0 2863030435.6560 0 0 0 0 118.77 NULL NULL NULL NULL 0 0 0 2023/11/17 20:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:30:00 1 0 NULL 1 NULL 0 1904503784.6410 0 0 0 0 79.764 NULL NULL NULL NULL 0 0 0 2023/11/17 20:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:35:00 1 0 NULL 1 NULL 0 2438952364.8470 0 0 0 0 101.415 NULL NULL NULL NULL 0 0 0 2023/11/17 20:30:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:40:00 1 0 NULL 1 NULL 0 2404374386.37 0 0 0 0 100.112 NULL NULL NULL NULL 0 0 0 2023/11/17 20:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:45:00 1 0 NULL 1 NULL 0 483838402.8310 0 0 0 
0 21.94 NULL NULL NULL NULL 0 0 0 2023/11/17 20:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:50:00 1 0 NULL 1 NULL 0 229032920.0450 0 0 0 0 11.564 NULL NULL NULL NULL 0 0 0 2023/11/17 20:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 20:55:00 1 0 NULL 0 NULL 0 -55281754.6920 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 20:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:00:00 1 0 NULL 0 NULL 0 -55407449.3540 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 20:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:05:00 1 0 NULL 1 NULL 0 638042862.1320 0 0 0 0 27.755 NULL NULL NULL NULL 0 0 0 2023/11/17 21:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:10:00 1 0 NULL 1 NULL 0 634495334.57 0 0 0 0 27.846 NULL NULL NULL NULL 0 0 0 2023/11/17 21:05:03 NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:15:00 1 0 NULL 1 NULL 0 1074013155.4160 0 0 0 0 45.937 NULL NULL NULL NULL 0 0 0 2023/11/17 21:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:20:00 1 0 NULL 1 NULL 0 -52304385.61 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:25:00 1 0 NULL 0 NULL 0 -54778930.8770 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:30:00 1 0 NULL 1 NULL 0 -49933804.9660 0 0 0 0 0.2 NULL NULL NULL NULL 0 0 0 2023/11/17 21:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:35:00 1 0 NULL 1 NULL 0 -12582643.9120 0 0 0 0 1.487 NULL NULL NULL NULL 0 0 0 2023/11/17 21:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:40:00 1 0 NULL 0 NULL 0 -52297928.4560 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:45:00 1 0 NULL 1 NULL 0 3636295874.3410 0 0 0 0 150.225 NULL NULL NULL NULL 0 0 0 2023/11/17 21:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:50:00 1 0 NULL 1 NULL 0 -51935117.2980 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 21:55:00 1 0 NULL 0 NULL 0 -54402795.21 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 21:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:00:00 1 0 NULL 1 NULL 0 -51909940.0510 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 21:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:05:00 1 0 NULL 1 NULL 0 -46272644.3910 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:10:00 1 0 NULL 1 NULL 0 -48524238.9060 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:15:00 1 0 NULL 1 NULL 0 365490378.6370 0 0 0 0 16.983 NULL NULL NULL NULL 0 0 0 2023/11/17 22:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:20:00 1 0 NULL 1 NULL 0 319918669.0540 0 0 0 0 15.099 NULL NULL NULL NULL 0 0 0 2023/11/17 22:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:25:00 1 0 NULL 1 NULL 0 279776932.5480 0 0 0 0 13.463 NULL NULL NULL NULL 0 0 0 2023/11/17 22:20:05 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:30:00 1 0 NULL 0 NULL 0 -50757875.4790 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:35:00 1 0 NULL 0 NULL 0 -49364760.0730 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:40:00 1 0 NULL 1 NULL 0 -47241907.5760 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:45:00 1 0 NULL 1 NULL 0 -47915767.6690 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:50:00 1 0 NULL 0 NULL 0 -50172567.1080 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 22:55:00 1 0 NULL 0 NULL 0 -50056650.4350 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 22:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:00:00 1 0 NULL 1 NULL 0 -47573958.7360 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 22:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:05:00 1 0 NULL 1 NULL 0 -45568109.9610 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 23:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:10:00 1 0 NULL 1 NULL 0 -45494464.1590 0 0 0 0 0.1 NULL NULL NULL NULL 0 0 0 2023/11/17 23:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:15:00 1 0 NULL 0 NULL 0 -47995783.19 0 0 0 0 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:20:00 1 0 NULL 1 NULL 0 -16025744.1730 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:25:00 1 0 NULL 1 NULL 0 -15966430.1110 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:30:00 1 0 NULL 1 NULL 0 -15990262.8730 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:35:00 1 0 NULL 1 NULL 0 -15922778.3050 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:40:00 1 0 NULL 1 NULL 0 -7172455.1420 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:35:04 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 
2023/11/17 23:45:00 1 0 NULL 1 NULL 0 -3762172.28 0 0 0 0.416 0.26 NULL NULL NULL NULL 0 0 0 2023/11/17 23:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:50:00 1 0 NULL 1 NULL 0 -14168482.45 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/17 23:55:00 1 0 NULL 1 NULL 0 -15275361.3130 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/17 23:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:00:00 1 0 NULL 1 NULL 0 -5203508.2810 0 0 0 0.416 0.41 NULL NULL NULL NULL 0 0 0 2023/11/17 23:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:05:00 1 0 NULL 1 NULL 0 2006985581.7150 0 0 79.765 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 00:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:10:00 1 0 NULL 1 NULL 0 1713933363.3980 0 0 68.28 0.416 0 NULL 
NULL NULL NULL 0 0 0 2023/11/18 00:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:15:00 1 0 NULL 1 NULL 0 1324747813.8940 0 0 52.898 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 00:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:20:00 1 0 NULL 1 NULL 0 933413090.4160 0 0 37.427 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 00:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:25:00 1 0 NULL 1 NULL 0 550689839.8880 0 0 22.2 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 00:20:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:30:00 1 0 NULL 1 NULL 0 218937418.1080 0 0 8.395 0.416 0.81 NULL NULL NULL NULL 0 0 0 2023/11/18 00:25:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:35:00 1 0 NULL 1 NULL 0 7579525926.7580 0 0 0 0.416 308.996 NULL NULL NULL NULL 0 0 0 2023/11/18 00:30:03 NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:40:00 1 0 NULL 1 NULL 0 7336915650.9690 0 0 0 0.416 299.12 NULL NULL NULL NULL 0 0 0 2023/11/18 00:35:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:45:00 1 0 NULL 1 NULL 0 6045182791.2170 0 0 0 0.416 246.546 NULL NULL NULL NULL 0 0 0 2023/11/18 00:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:50:00 1 0 NULL 1 NULL 0 4782443540.40 0 0 0 0.416 195.149 NULL NULL NULL NULL 0 0 0 2023/11/18 00:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 00:55:00 1 0 NULL 1 NULL 0 3533422992.0770 0 0 0 0.416 144.306 NULL NULL NULL NULL 0 0 0 2023/11/18 00:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:00:00 1 0 NULL 1 NULL 0 2826754356.2650 0 0 0 0.416 115.54 NULL NULL NULL NULL 0 0 0 2023/11/18 00:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:05:00 1 0 NULL 1 NULL 0 1453322777.0730 0 0 0 0.416 59.628 NULL NULL NULL NULL 0 0 0 2023/11/18 01:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:10:00 1 0 NULL 1 NULL 0 499129476.38 0 0 0 0.416 20.781 NULL NULL NULL NULL 0 0 0 2023/11/18 01:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:15:00 1 0 NULL 1 NULL 0 485407380.7220 0 0 0 0.416 20.229 NULL NULL NULL NULL 0 0 0 2023/11/18 01:10:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:20:00 1 0 NULL 1 NULL 0 17265665.6770 0 0 0 0.416 1.17 NULL NULL NULL NULL 0 0 0 2023/11/18 01:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:25:00 1 0 NULL 1 NULL 0 -11434172.2560 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 01:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:30:00 1 0 NULL 1 NULL 0 -3823519.21 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 01:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:35:00 1 0 NULL 1 NULL 0 1498287664.4310 0 0 0 0.416 61.408 NULL NULL NULL NULL 0 0 0 2023/11/18 01:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:40:00 1 0 NULL 1 NULL 0 608001450.7020 0 0 0 0.416 25.168 NULL NULL NULL NULL 0 0 0 2023/11/18 01:35:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:45:00 1 0 NULL 1 NULL 0 -7888393.7570 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 01:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:50:00 1 0 NULL 1 NULL 0 -10396306.4160 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 01:45:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 01:55:00 1 0 NULL 1 NULL 0 8316084.7680 0 0 0 0.416 0.76 NULL NULL NULL NULL 0 0 0 2023/11/18 01:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:00:00 1 0 NULL 1 NULL 0 -2731101.26 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 01:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:05:00 1 0 NULL 1 NULL 0 -8814993.7150 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/18 02:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:10:00 1 0 NULL 1 NULL 0 -7598613.6930 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 02:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:15:00 1 0 NULL 1 NULL 0 -7628193.4030 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 02:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:20:00 1 0 NULL 1 NULL 0 -7639055.3820 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 02:15:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:25:00 1 0 NULL 1 NULL 0 -2417482.6810 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:30:00 1 0 NULL 1 NULL 0 -9895427.42 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/18 02:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:35:00 1 0 NULL 1 NULL 0 -2318028.8590 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:40:00 1 0 NULL 1 NULL 0 -9895921.9650 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 02:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:45:00 1 0 NULL 1 NULL 0 -8500955.7880 0 0 0 0.416 0.06 NULL NULL NULL NULL 0 0 0 2023/11/18 02:40:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:50:00 1 0 NULL 1 NULL 0 -9903525.7990 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 02:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 02:55:00 1 0 NULL 1 NULL 0 -3502096.8430 0 0 0 0.416 0.26 NULL NULL NULL NULL 0 0 0 2023/11/18 02:50:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:00:00 1 0 NULL 1 NULL 0 -2234303.9630 0 0 0 0.416 0.31 NULL NULL NULL NULL 0 0 0 2023/11/18 02:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:05:00 1 0 NULL 1 NULL 0 -9867178.7940 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:00:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:10:00 1 0 NULL 1 NULL 0 -9517507.4350 0 0 0 0.416 0.01 NULL NULL NULL NULL 0 0 0 2023/11/18 03:05:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:15:00 1 0 NULL 1 NULL 0 -7178170.5820 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 03:10:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:20:00 1 0 NULL 1 NULL 0 -9624785.8940 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:15:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:25:00 1 0 NULL 1 NULL 0 -878688.5970 0 0 0 0.416 0.36 NULL NULL NULL NULL 0 0 0 2023/11/18 03:20:02 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:30:00 1 0 NULL 1 NULL 0 14145537.0950 0 0 0 0.416 0.97 NULL NULL NULL NULL 0 0 0 2023/11/18 03:25:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:35:00 1 0 NULL 1 NULL 0 -7149770.39 0 0 0 0.416 0.1 NULL NULL NULL NULL 0 0 0 2023/11/18 03:30:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:40:00 1 0 NULL 1 NULL 0 -9596739.4250 0 0 0 0.416 0 NULL NULL NULL NULL 0 0 0 2023/11/18 03:35:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:45:00 1 0 NULL 1 NULL 0 88789129.03 0 0 0 0.416 3.995 NULL NULL NULL NULL 0 0 0 2023/11/18 03:40:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:50:00 1 0 NULL 1 NULL 0 4426114.4320 0 0 0 0.416 0.56 NULL NULL NULL NULL 0 0 0 2023/11/18 03:45:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 03:55:00 1 0 NULL 1 NULL 0 5669292.5630 0 0 0 0.416 0.61 NULL NULL NULL NULL 0 0 0 2023/11/18 03:50:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DISPATCH CASESOLUTION 1 2023/11/18 
04:00:00 1 0 NULL 1 NULL 0 8250793.4510 0 0 0 0.416 0.71 NULL NULL NULL NULL 0 0 0 2023/11/18 03:55:03 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +I DREGION NULL 2 SETTLEMENTDATE RUNNO REGIONID INTERVENTION RRP EEP ROP APCFLAG MARKETSUSPENDEDFLAG TOTALDEMAND DEMANDFORECAST DISPATCHABLEGENERATION DISPATCHABLELOAD NETINTERCHANGE EXCESSGENERATION LOWER5MINDISPATCH LOWER5MINIMPORT LOWER5MINLOCALDISPATCH LOWER5MINLOCALPRICE LOWER5MINLOCALREQ LOWER5MINPRICE LOWER5MINREQ LOWER5MINSUPPLYPRICE LOWER60SECDISPATCH LOWER60SECIMPORT LOWER60SECLOCALDISPATCH LOWER60SECLOCALPRICE LOWER60SECLOCALREQ LOWER60SECPRICE LOWER60SECREQ LOWER60SECSUPPLYPRICE LOWER6SECDISPATCH LOWER6SECIMPORT LOWER6SECLOCALDISPATCH LOWER6SECLOCALPRICE LOWER6SECLOCALREQ LOWER6SECPRICE LOWER6SECREQ LOWER6SECSUPPLYPRICE RAISE5MINDISPATCH RAISE5MINIMPORT RAISE5MINLOCALDISPATCH RAISE5MINLOCALPRICE RAISE5MINLOCALREQ RAISE5MINPRICE RAISE5MINREQ RAISE5MINSUPPLYPRICE RAISE60SECDISPATCH RAISE60SECIMPORT RAISE60SECLOCALDISPATCH RAISE60SECLOCALPRICE RAISE60SECLOCALREQ RAISE60SECPRICE RAISE60SECREQ RAISE60SECSUPPLYPRICE RAISE6SECDISPATCH RAISE6SECIMPORT RAISE6SECLOCALDISPATCH RAISE6SECLOCALPRICE RAISE6SECLOCALREQ RAISE6SECPRICE RAISE6SECREQ RAISE6SECSUPPLYPRICE AGGREGATEDISPATCHERROR AVAILABLEGENERATION AVAILABLELOAD INITIALSUPPLY CLEAREDSUPPLY LOWERREGIMPORT LOWERREGLOCALDISPATCH LOWERREGLOCALREQ LOWERREGREQ RAISEREGIMPORT RAISEREGLOCALDISPATCH RAISEREGLOCALREQ RAISEREGREQ RAISE5MINLOCALVIOLATION RAISEREGLOCALVIOLATION RAISE60SECLOCALVIOLATION RAISE6SECLOCALVIOLATION LOWER5MINLOCALVIOLATION LOWERREGLOCALVIOLATION LOWER60SECLOCALVIOLATION LOWER6SECLOCALVIOLATION RAISE5MINVIOLATION RAISEREGVIOLATION RAISE60SECVIOLATION RAISE6SECVIOLATION LOWER5MINVIOLATION LOWERREGVIOLATION LOWER60SECVIOLATION LOWER6SECVIOLATION RAISE6SECRRP RAISE6SECROP RAISE6SECAPCFLAG RAISE60SECRRP RAISE60SECROP RAISE60SECAPCFLAG RAISE5MINRRP RAISE5MINROP RAISE5MINAPCFLAG RAISEREGRRP RAISEREGROP RAISEREGAPCFLAG LOWER6SECRRP LOWER6SECROP LOWER6SECAPCFLAG LOWER60SECRRP LOWER60SECROP LOWER60SECAPCFLAG LOWER5MINRRP LOWER5MINROP LOWER5MINAPCFLAG LOWERREGRRP LOWERREGROP LOWERREGAPCFLAG NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DREGION NULL 2 2023/11/17 04:05:00 1 NSW1 0 88.56012 0 88.56012 0 0 6392.2 23 5694.97 0 -697.23 0 NULL NULL 98.43 NULL NULL NULL NULL NULL NULL NULL 103 NULL NULL NULL NULL NULL NULL NULL 91 NULL NULL NULL NULL NULL NULL NULL 105 NULL NULL NULL NULL NULL NULL NULL 164 NULL NULL NULL NULL NULL NULL NULL 171.23 NULL NULL NULL NULL NULL 2.14388 8571.20053 207 6391.82764 6408.98 NULL 72 NULL NULL NULL 32 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DREGION NULL 2 2023/11/17 04:05:00 1 QLD1 0 83.25 0 83.25 0 0 6012.9 1 6405.92 0 393.02 0 NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 47 NULL NULL NULL 
NULL NULL NULL NULL 16 NULL NULL NULL NULL NULL NULL NULL 69 NULL NULL NULL NULL NULL NULL NULL 165 NULL NULL NULL NULL NULL NULL NULL 158.59 NULL NULL NULL NULL NULL 12.04781 9607.10393 98 6020.25977 6017.04 NULL 10 NULL NULL NULL 121 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII -from read_csv("data/csv/bug_10283.csv", +from read_csv("{DATA_DIR}/csv/bug_10283.csv", Skip=1, header =0, sample_size=-1, @@ -2222,11 +2222,11 @@ filename=true, auto_detect=false) where C1 = 'D' and C2 = 'DREGION' and C4 = '2' ---- -D DREGION NULL 2 2023/11/17 04:05:00 1 NSW1 0 88.56012 0 88.56012 0 0 6392.2 23 5694.97 0 -697.23 0 NULL NULL 98.43 NULL NULL NULL NULL NULL NULL NULL 103 NULL NULL NULL NULL NULL NULL NULL 91 NULL NULL NULL NULL NULL NULL NULL 105 NULL NULL NULL NULL NULL NULL NULL 164 NULL NULL NULL NULL NULL NULL NULL 171.23 NULL NULL NULL NULL NULL 2.14388 8571.20053 207 6391.82764 6408.98 NULL 72 NULL NULL NULL 32 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DREGION NULL 2 2023/11/17 04:05:00 1 QLD1 0 83.25 0 83.25 0 0 6012.9 1 6405.92 0 393.02 0 NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 16 NULL NULL NULL NULL NULL NULL NULL 69 NULL NULL NULL NULL NULL NULL NULL 165 NULL NULL NULL NULL NULL NULL NULL 158.59 NULL NULL NULL NULL NULL 12.04781 9607.10393 98 6020.25977 6017.04 NULL 10 NULL NULL NULL 121 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv +D DREGION NULL 2 2023/11/17 04:05:00 1 NSW1 0 88.56012 0 88.56012 0 0 6392.2 23 5694.97 0 -697.23 0 NULL NULL 98.43 NULL NULL NULL NULL NULL NULL NULL 103 NULL NULL NULL NULL NULL NULL NULL 91 NULL NULL NULL NULL NULL NULL NULL 105 NULL NULL NULL NULL NULL NULL NULL 164 NULL NULL NULL NULL NULL NULL NULL 171.23 NULL NULL NULL NULL NULL 2.14388 8571.20053 207 6391.82764 6408.98 NULL 72 NULL NULL NULL 32 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DREGION NULL 2 2023/11/17 04:05:00 1 QLD1 0 83.25 0 83.25 0 0 6012.9 1 6405.92 0 393.02 0 NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 16 NULL NULL NULL NULL NULL NULL NULL 69 NULL NULL NULL NULL NULL NULL NULL 165 NULL NULL NULL NULL NULL NULL NULL 158.59 NULL NULL NULL NULL NULL 12.04781 9607.10393 98 6020.25977 6017.04 NULL 10 NULL NULL NULL 121 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv query 
IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII -from read_csv("data/csv/bug_10283.csv", +from read_csv("{DATA_DIR}/csv/bug_10283.csv", Skip=1, header =0, sample_size=-1, @@ -2369,5 +2369,5 @@ ignore_errors = true, auto_detect=false) where C1 = 'D' and C2 = 'DREGION' and C4 = '2' ---- -D DREGION NULL 2 2023/11/17 04:05:00 1 NSW1 0 88.56012 0 88.56012 0 0 6392.2 23 5694.97 0 -697.23 0 NULL NULL 98.43 NULL NULL NULL NULL NULL NULL NULL 103 NULL NULL NULL NULL NULL NULL NULL 91 NULL NULL NULL NULL NULL NULL NULL 105 NULL NULL NULL NULL NULL NULL NULL 164 NULL NULL NULL NULL NULL NULL NULL 171.23 NULL NULL NULL NULL NULL 2.14388 8571.20053 207 6391.82764 6408.98 NULL 72 NULL NULL NULL 32 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv -D DREGION NULL 2 2023/11/17 04:05:00 1 QLD1 0 83.25 0 83.25 0 0 6012.9 1 6405.92 0 393.02 0 NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 16 NULL NULL NULL NULL NULL NULL NULL 69 NULL NULL NULL NULL NULL NULL NULL 165 NULL NULL NULL NULL NULL NULL NULL 158.59 NULL NULL NULL NULL NULL 12.04781 9607.10393 98 6020.25977 6017.04 NULL 10 NULL NULL NULL 121 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL data/csv/bug_10283.csv +D DREGION NULL 2 2023/11/17 04:05:00 1 NSW1 0 88.56012 0 88.56012 0 0 6392.2 23 5694.97 0 -697.23 0 NULL NULL 98.43 NULL NULL NULL NULL NULL NULL NULL 103 NULL NULL NULL NULL NULL NULL NULL 91 NULL NULL NULL NULL NULL NULL NULL 105 NULL NULL NULL NULL NULL NULL NULL 164 NULL NULL NULL NULL NULL NULL NULL 171.23 NULL NULL NULL NULL NULL 2.14388 8571.20053 207 6391.82764 6408.98 NULL 72 NULL NULL NULL 32 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv +D DREGION NULL 2 2023/11/17 04:05:00 1 QLD1 0 83.25 0 83.25 0 0 6012.9 1 6405.92 0 393.02 0 NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 47 NULL NULL NULL NULL NULL NULL NULL 16 NULL NULL NULL NULL NULL NULL NULL 69 NULL NULL NULL NULL NULL NULL NULL 165 NULL NULL NULL NULL NULL NULL NULL 158.59 NULL NULL NULL NULL NULL 12.04781 9607.10393 98 6020.25977 6017.04 NULL 10 NULL NULL NULL 121 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 47.48 47.48 0 8.25 8.25 0 0.38 0.38 0 25.18 25.18 0 0.39 0.39 0 0.39 0.39 0 0.15 0.15 0 5.9 5.9 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL {DATA_DIR}/csv/bug_10283.csv diff --git a/test/sql/copy/csv/code_cov/buffer_manager_finalize.test b/test/sql/copy/csv/code_cov/buffer_manager_finalize.test index 6bcb5f512b26..a676b38eac72 100644 --- a/test/sql/copy/csv/code_cov/buffer_manager_finalize.test +++ b/test/sql/copy/csv/code_cov/buffer_manager_finalize.test @@ -9,14 +9,14 @@ statement ok CREATE TABLE t1 AS select i, (i+1) as j from range(0,3000) tbl(i) statement ok -COPY t1 TO '__TEST_DIR__/t1.csv' (FORMAT CSV, DELIMITER '|', HEADER); +COPY t1 TO '{TEMP_DIR}/t1.csv' (FORMAT CSV, 
DELIMITER '|', HEADER); query I -select count(*) from '__TEST_DIR__/t1.csv' +select count(*) from '{TEMP_DIR}/t1.csv' ---- 3000 query I -select count(*) from read_csv('data/csv/empty.csv', columns=STRUCT_PACK(d := 'BIGINT'), header=0, auto_detect = false) +select count(*) from read_csv('{DATA_DIR}/csv/empty.csv', columns=STRUCT_PACK(d := 'BIGINT'), header=0, auto_detect = false) ---- 0 diff --git a/test/sql/copy/csv/code_cov/csv_dialect_detection.test b/test/sql/copy/csv/code_cov/csv_dialect_detection.test index 9d7ef2ea749d..b27a78954984 100644 --- a/test/sql/copy/csv/code_cov/csv_dialect_detection.test +++ b/test/sql/copy/csv/code_cov/csv_dialect_detection.test @@ -6,16 +6,16 @@ statement ok PRAGMA enable_verification query I -SELECT * from read_csv_auto('data/csv/escape.csv', escape=']', header = 0) +SELECT * from read_csv_auto('{DATA_DIR}/csv/escape.csv', escape=']', header = 0) ---- "bla" query I -SELECT * from read_csv_auto('data/csv/escape.csv', header = 0) +SELECT * from read_csv_auto('{DATA_DIR}/csv/escape.csv', header = 0) ---- "]"bla]"" statement error -SELECT * from read_csv_auto('data/csv/no_opt.csv', delim = ';') +SELECT * from read_csv_auto('{DATA_DIR}/csv/no_opt.csv', delim = ';') ---- It was not possible to automatically detect the CSV parsing dialect diff --git a/test/sql/copy/csv/code_cov/csv_exact_buffer_size.test b/test/sql/copy/csv/code_cov/csv_exact_buffer_size.test index 7f53ce9f28a3..943ea4f47e91 100644 --- a/test/sql/copy/csv/code_cov/csv_exact_buffer_size.test +++ b/test/sql/copy/csv/code_cov/csv_exact_buffer_size.test @@ -6,18 +6,18 @@ statement ok PRAGMA enable_verification query II -FROM read_csv('data/csv/auto/issue_1254_rn.csv', buffer_size=10) +FROM read_csv('{DATA_DIR}/csv/auto/issue_1254_rn.csv', buffer_size=10) ---- 1 2 1 2 query II -FROM read_csv('data/csv/auto/issue_1254_rn.csv', buffer_size=8) +FROM read_csv('{DATA_DIR}/csv/auto/issue_1254_rn.csv', buffer_size=8) ---- 1 2 1 2 query I -select count(*) from read_csv_auto('data/csv/small_file.csv', buffer_size = 7) +select count(*) from read_csv_auto('{DATA_DIR}/csv/small_file.csv', buffer_size = 7) ---- 2 diff --git a/test/sql/copy/csv/code_cov/csv_sniffer_header.test b/test/sql/copy/csv/code_cov/csv_sniffer_header.test index 8982a42c9235..3300bed86d27 100644 --- a/test/sql/copy/csv/code_cov/csv_sniffer_header.test +++ b/test/sql/copy/csv/code_cov/csv_sniffer_header.test @@ -6,12 +6,12 @@ statement ok PRAGMA enable_verification query I -SELECT count(*) from read_csv_auto('data/csv/header_left_space.csv') +SELECT count(*) from read_csv_auto('{DATA_DIR}/csv/header_left_space.csv') ---- 3 statement ok -create table t as select * from read_csv_auto('data/csv/header_normalize.csv', normalize_names=1) +create table t as select * from read_csv_auto('{DATA_DIR}/csv/header_normalize.csv', normalize_names=1) query IIIIII describe t @@ -24,7 +24,7 @@ _3b BIGINT YES NULL NULL NULL query III -FROM read_csv(['data/csv/auto/sample.csv','data/csv/auto/sample.csv','data/csv/auto/sample.csv']) +FROM read_csv(['{DATA_DIR}/csv/auto/sample.csv','{DATA_DIR}/csv/auto/sample.csv','{DATA_DIR}/csv/auto/sample.csv']) ---- c1 pedro 1992 c2 mark 1992 diff --git a/test/sql/copy/csv/code_cov/csv_state_machine_invalid_utf.test b/test/sql/copy/csv/code_cov/csv_state_machine_invalid_utf.test index 4b2eb46de75c..cb1d740c5315 100644 --- a/test/sql/copy/csv/code_cov/csv_state_machine_invalid_utf.test +++ b/test/sql/copy/csv/code_cov/csv_state_machine_invalid_utf.test @@ -4,35 +4,35 @@ # Error during sniffing statement error -from 
read_csv_auto('data/csv/test/invalid_utf.csv') +from read_csv_auto('{DATA_DIR}/csv/test/invalid_utf.csv') ---- Invalid unicode (byte sequence mismatch) detected statement error -from read_csv_auto('data/csv/test/invalid_utf.csv') +from read_csv_auto('{DATA_DIR}/csv/test/invalid_utf.csv') ---- CSV Error on Line: 1 # Error during parsing statement error -from read_csv('data/csv/test/invalid_utf.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',') +from read_csv('{DATA_DIR}/csv/test/invalid_utf.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',') ---- Invalid unicode (byte sequence mismatch) detected. statement error -from read_csv('data/csv/test/invalid_utf.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',') +from read_csv('{DATA_DIR}/csv/test/invalid_utf.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',') ---- CSV Error on Line: 1 # Test ignore errors over a more complex file statement error -from read_csv('data/csv/test/invalid_utf_complex.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',') +from read_csv('{DATA_DIR}/csv/test/invalid_utf_complex.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',') ---- CSV Error on Line: 11 query III -from read_csv('data/csv/test/invalid_utf_complex.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', ignore_errors=true) +from read_csv('{DATA_DIR}/csv/test/invalid_utf_complex.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', ignore_errors=true) ---- valid valid valid valid valid valid @@ -57,7 +57,7 @@ valid valid valid # Test error in the second vector statement ok -create table t as from read_csv('data/csv/test/invalid_utf_big.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', ignore_errors=true) +create table t as from read_csv('{DATA_DIR}/csv/test/invalid_utf_big.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', ignore_errors=true) query I select count(*) from t @@ -65,18 +65,18 @@ select count(*) from t 3030 statement error -from read_csv('data/csv/test/invalid_utf_big.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',') +from read_csv('{DATA_DIR}/csv/test/invalid_utf_big.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',') ---- CSV Error on Line: 3001 # Test borked utf-8 within quotes statement error -from read_csv('data/csv/test/invalid_utf_quoted.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', quote = '"') +from read_csv('{DATA_DIR}/csv/test/invalid_utf_quoted.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', quote = '"') ---- CSV Error on Line: 11 query III -from
read_csv('{DATA_DIR}/csv/test/invalid_utf_complex.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', quote = '"', ignore_errors=true) ---- valid valid valid valid valid valid @@ -101,17 +101,17 @@ valid valid valid # Test Invalid Header statement error -from read_csv('data/csv/test/invalid_utf_header.csv', delim = ',', quote = '"') +from read_csv('{DATA_DIR}/csv/test/invalid_utf_header.csv', delim = ',', quote = '"') ---- Invalid unicode (byte sequence mismatch) detected. statement error -from read_csv('data/csv/test/invalid_utf_header.csv', header=1, delim = ',', quote = '"') +from read_csv('{DATA_DIR}/csv/test/invalid_utf_header.csv', header=1, delim = ',', quote = '"') ---- Invalid unicode (byte sequence mismatch) detected. query III -from read_csv('data/csv/test/invalid_utf_header.csv', header=1, delim = ',', quote = '"', ignore_errors = true) +from read_csv('{DATA_DIR}/csv/test/invalid_utf_header.csv', header=1, delim = ',', quote = '"', ignore_errors = true) ---- valid valid valid valid valid valid @@ -126,12 +126,12 @@ valid valid valid # Test invalid unicode in between a quoted newline statement error -from read_csv('data/csv/test/invalid_utf_quoted_nl.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', quote = '"') +from read_csv('{DATA_DIR}/csv/test/invalid_utf_quoted_nl.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', quote = '"') ---- CSV Error on Line: 11 query III -from read_csv('data/csv/test/invalid_utf_quoted_nl.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', quote = '"', ignore_errors=true) +from read_csv('{DATA_DIR}/csv/test/invalid_utf_quoted_nl.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', quote = '"', ignore_errors=true) ---- valid valid valid valid valid valid @@ -156,13 +156,13 @@ valid valid valid # Test error between buffers statement error -from read_csv('data/csv/test/invalid_utf_complex.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', quote = '"', buffer_size = 198) +from read_csv('{DATA_DIR}/csv/test/invalid_utf_complex.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', quote = '"', buffer_size = 198) ---- CSV Error on Line: 11 # Test error between buffers (with ignore_errors set) query III -from read_csv('data/csv/test/invalid_utf_complex.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', ignore_errors=true, buffer_size = 198) +from read_csv('{DATA_DIR}/csv/test/invalid_utf_complex.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'}, auto_detect=false, header = 0, delim = ',', ignore_errors=true, buffer_size = 198) ---- valid valid valid valid valid valid @@ -188,22 +188,22 @@ valid valid valid # We get a casting error statement error -SELECT * FROM read_csv('data/csv/test/invalid_utf_list.csv', header=0, auto_detect=false, quote = '"',columns = {'col1': 'INTEGER[]'} ) +SELECT * FROM read_csv('{DATA_DIR}/csv/test/invalid_utf_list.csv', header=0, auto_detect=false, quote = '"',columns = {'col1': 'INTEGER[]'} ) ---- Invalid unicode (byte sequence mismatch) detected. 
statement error -SELECT * FROM read_csv('data/csv/test/invalid_utf_list.csv', header=0, auto_detect=false, quote = '"',columns = {'col1': 'INTEGER[]'} ) +SELECT * FROM read_csv('{DATA_DIR}/csv/test/invalid_utf_list.csv', header=0, auto_detect=false, quote = '"',columns = {'col1': 'INTEGER[]'} ) ---- CSV Error on Line: 11 # We get an invalid unicode error statement error -SELECT * FROM read_csv('data/csv/test/invalid_utf_list.csv', header=0, auto_detect=false, quote = '"',columns = {'col1': 'VARCHAR'} ) +SELECT * FROM read_csv('{DATA_DIR}/csv/test/invalid_utf_list.csv', header=0, auto_detect=false, quote = '"',columns = {'col1': 'VARCHAR'} ) ---- Invalid unicode (byte sequence mismatch) detected. statement error -SELECT * FROM read_csv('data/csv/test/invalid_utf_list.csv', header=0, auto_detect=false, quote = '"',columns = {'col1': 'VARCHAR'} ) +SELECT * FROM read_csv('{DATA_DIR}/csv/test/invalid_utf_list.csv', header=0, auto_detect=false, quote = '"',columns = {'col1': 'VARCHAR'} ) ---- CSV Error on Line: 11 \ No newline at end of file diff --git a/test/sql/copy/csv/code_cov/csv_type_detection.test b/test/sql/copy/csv/code_cov/csv_type_detection.test index 6c81c9c28abf..d5377da910ba 100644 --- a/test/sql/copy/csv/code_cov/csv_type_detection.test +++ b/test/sql/copy/csv/code_cov/csv_type_detection.test @@ -6,21 +6,21 @@ statement ok PRAGMA enable_verification statement error -from read_csv_auto('data/csv/invalid_utf8.csv', auto_detect = false, columns={'c01': 'VARCHAR'} ) +from read_csv_auto('{DATA_DIR}/csv/invalid_utf8.csv', auto_detect = false, columns={'c01': 'VARCHAR'} ) ---- Invalid unicode (byte sequence mismatch) detected query I -select * from read_csv_auto('data/csv/empty.csv') +select * from read_csv_auto('{DATA_DIR}/csv/empty.csv') ---- query II -select * from read_csv_auto('data/csv/small_file.csv', sample_size=1) +select * from read_csv_auto('{DATA_DIR}/csv/small_file.csv', sample_size=1) ---- 1 2 5 3 query I -select * from read_csv_auto('data/csv/date_format_percentage.csv') +select * from read_csv_auto('{DATA_DIR}/csv/date_format_percentage.csv') ---- 336%584%3205 diff --git a/test/sql/copy/csv/code_cov/csv_type_refinement.test b/test/sql/copy/csv/code_cov/csv_type_refinement.test index 7f2869f77f0d..bad12942bf8c 100644 --- a/test/sql/copy/csv/code_cov/csv_type_refinement.test +++ b/test/sql/copy/csv/code_cov/csv_type_refinement.test @@ -6,11 +6,11 @@ statement ok PRAGMA enable_verification query I -select count(*) from read_csv_auto('data/csv/borked_date.csv', header = 0) +select count(*) from read_csv_auto('{DATA_DIR}/csv/borked_date.csv', header = 0) ---- 2070 query I -select count(*) from read_csv_auto('data/csv/big_not_bool.csv', header = 0) +select count(*) from read_csv_auto('{DATA_DIR}/csv/big_not_bool.csv', header = 0) ---- 2450 diff --git a/test/sql/copy/csv/column_names.test b/test/sql/copy/csv/column_names.test index ad02054d6e27..50ae00dcd04c 100644 --- a/test/sql/copy/csv/column_names.test +++ b/test/sql/copy/csv/column_names.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification statement ok -CREATE TABLE t1 AS SELECT * FROM read_csv_auto('data/csv/test/issue2518.csv', header=False, columns={'rsID':'INT', 'CHR': 'VARCHAR', 'POS': 'INT','REFB': 'VARCHAR','ALTB':'VARCHAR'}, auto_detect = false) +CREATE TABLE t1 AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/test/issue2518.csv', header=False, columns={'rsID':'INT', 'CHR': 'VARCHAR', 'POS': 'INT','REFB': 'VARCHAR','ALTB':'VARCHAR'}, auto_detect = false) query IIIII SELECT rsID, chr, pos, refb, altb FROM t1 @@
-23,7 +23,7 @@ SELECT rsID, chr, pos, refb, altb FROM t1 1184 6 187649 T A,C,G statement ok -CREATE TABLE t2 AS SELECT * FROM read_csv_auto('data/csv/test/issue2518.csv', header=False, columns={'rsID':'INT', 'CHR': 'VARCHAR', 'POS': 'INT','REFB': 'VARCHAR','ALTB':'VARCHAR'}, AUTO_DETECT=0) +CREATE TABLE t2 AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/test/issue2518.csv', header=False, columns={'rsID':'INT', 'CHR': 'VARCHAR', 'POS': 'INT','REFB': 'VARCHAR','ALTB':'VARCHAR'}, AUTO_DETECT=0) query IIIII SELECT rsID, chr, pos, refb, altb FROM t2 @@ -41,7 +41,7 @@ SELECT rsID, chr, pos, refb, altb FROM t2 statement ok -CREATE TABLE t3 AS SELECT * FROM read_csv_auto('data/csv/test/issue2518.csv', columns={'rsID':'INT', 'CHR': 'VARCHAR', 'POS': 'INT','REFB': 'VARCHAR','ALTB':'VARCHAR'}, auto_detect = false) +CREATE TABLE t3 AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/test/issue2518.csv', columns={'rsID':'INT', 'CHR': 'VARCHAR', 'POS': 'INT','REFB': 'VARCHAR','ALTB':'VARCHAR'}, auto_detect = false) query IIIII SELECT rsID, chr, pos, refb, altb FROM t3 diff --git a/test/sql/copy/csv/copy_disable_parallelism.test b/test/sql/copy/csv/copy_disable_parallelism.test index 7b6206580895..ed1448242e06 100644 --- a/test/sql/copy/csv/copy_disable_parallelism.test +++ b/test/sql/copy/csv/copy_disable_parallelism.test @@ -10,6 +10,6 @@ statement ok CREATE TABLE test (a INTEGER, b INTEGER, c VARCHAR(10)); query I -COPY test FROM 'data/csv/test/test.csv'; +COPY test FROM '{DATA_DIR}/csv/test/test.csv'; ---- 5000 diff --git a/test/sql/copy/csv/csv_copy_sniffer.test b/test/sql/copy/csv/csv_copy_sniffer.test index 106488cc0b62..f647215a0cd4 100644 --- a/test/sql/copy/csv/csv_copy_sniffer.test +++ b/test/sql/copy/csv/csv_copy_sniffer.test @@ -19,4 +19,4 @@ CREATE TABLE sales ( saletime TIMESTAMP); statement ok -COPY sales FROM 'data/csv/sales_snippet.csv' (TIMESTAMPFORMAT '%m/%d/%Y %I:%M:%S', IGNORE_ERRORS true); +COPY sales FROM '{DATA_DIR}/csv/sales_snippet.csv' (TIMESTAMPFORMAT '%m/%d/%Y %I:%M:%S', IGNORE_ERRORS true); diff --git a/test/sql/copy/csv/csv_decimal_separator.test b/test/sql/copy/csv/csv_decimal_separator.test index 4cf6d7bb0bbb..446cc6fbe82a 100644 --- a/test/sql/copy/csv/csv_decimal_separator.test +++ b/test/sql/copy/csv/csv_decimal_separator.test @@ -7,12 +7,12 @@ PRAGMA enable_verification # period-separated decimal doesn't parse statement error -CREATE TABLE decimal_separators AS SELECT * FROM read_csv_auto('data/csv/decimal_separators/decimal_separators.csv', column_types={'commas': 'double', 'periods': 'double'}, delim=';', decimal_separator=',') +CREATE TABLE decimal_separators AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/decimal_separators/decimal_separators.csv', column_types={'commas': 'double', 'periods': 'double'}, delim=';', decimal_separator=',') ---- Line: 2 statement ok -CREATE TABLE decimal_separators AS SELECT * FROM read_csv_auto('data/csv/decimal_separators/decimal_separators.csv', column_types={'commas': 'double'}, delim=';', decimal_separator=',') +CREATE TABLE decimal_separators AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/decimal_separators/decimal_separators.csv', column_types={'commas': 'double'}, delim=';', decimal_separator=',') query II SELECT commas, periods FROM decimal_separators; @@ -30,18 +30,18 @@ DOUBLE VARCHAR # reading the commas column as decimal fails when decimal separator is set to '.' 
statement error -CREATE TABLE decimal_separators2 AS SELECT * FROM read_csv_auto('data/csv/decimal_separators/decimal_separators.csv', column_types={'commas': 'decimal', 'periods': 'decimal'}, delim=';', decimal_separator='.') +CREATE TABLE decimal_separators2 AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/decimal_separators/decimal_separators.csv', column_types={'commas': 'decimal', 'periods': 'decimal'}, delim=';', decimal_separator='.') ---- Line: 2 # reading the commas column as float fails when decimal separator is set to '.' statement error -CREATE TABLE decimal_separators2 AS SELECT * FROM read_csv_auto('data/csv/decimal_separators/decimal_separators.csv', column_types={'commas': 'float', 'periods': 'decimal'}, delim=';', decimal_separator='.') +CREATE TABLE decimal_separators2 AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/decimal_separators/decimal_separators.csv', column_types={'commas': 'float', 'periods': 'decimal'}, delim=';', decimal_separator='.') ---- Line: 2 statement ok -CREATE TABLE decimal_separators2 AS SELECT * FROM read_csv_auto('data/csv/decimal_separators/decimal_separators.csv', column_types={'commas': 'decimal'}, delim=';', decimal_separator=',') +CREATE TABLE decimal_separators2 AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/decimal_separators/decimal_separators.csv', column_types={'commas': 'decimal'}, delim=';', decimal_separator=',') query II SELECT commas, periods FROM decimal_separators2; @@ -60,7 +60,7 @@ DECIMAL(18,3) VARCHAR # no separator specified => commas get read as varchar statement ok -CREATE TABLE decimal_separators3 AS SELECT * FROM read_csv_auto('data/csv/decimal_separators/decimal_separators.csv', column_types={'periods': 'decimal'}, delim=';') +CREATE TABLE decimal_separators3 AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/decimal_separators/decimal_separators.csv', column_types={'periods': 'decimal'}, delim=';') query II SELECT commas, periods FROM decimal_separators3; @@ -74,7 +74,7 @@ SELECT commas, periods FROM decimal_separators3; # in a comma-delimited file, comma as decimal separator is OK when quoted statement ok -CREATE TABLE decimal_separators4 AS SELECT * FROM read_csv_auto('data/csv/decimal_separators/decimal_separators_csv.csv', column_types={'commas': 'double'}, quote='"',delim=',',decimal_separator=',') +CREATE TABLE decimal_separators4 AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/decimal_separators/decimal_separators_csv.csv', column_types={'commas': 'double'}, quote='"',delim=',',decimal_separator=',') query II SELECT commas, periods FROM decimal_separators4; @@ -89,12 +89,12 @@ DOUBLE VARCHAR # unsupported separator characters result in error statement error -SELECT * FROM read_csv_auto('data/csv/decimal_separators/invalid_char.csv', column_types={'foo': 'double'}, decimal_separator='ö') +SELECT * FROM read_csv_auto('{DATA_DIR}/csv/decimal_separators/invalid_char.csv', column_types={'foo': 'double'}, decimal_separator='ö') ---- Binder Error: Unsupported parameter for DECIMAL_SEPARATOR: should be '.' 
or ',' # data with mixed separators will fail reading statement error -SELECT * FROM read_csv_auto('data/csv/decimal_separators/mixed_format_fail.csv', column_types={'foo': 'double'}, decimal_separator=',', skip=0) +SELECT * FROM read_csv_auto('{DATA_DIR}/csv/decimal_separators/mixed_format_fail.csv', column_types={'foo': 'double'}, decimal_separator=',', skip=0) ---- Line: 4 diff --git a/test/sql/copy/csv/csv_dtypes.test b/test/sql/copy/csv/csv_dtypes.test index 6c6532ef29c0..777a4fc51841 100644 --- a/test/sql/copy/csv/csv_dtypes.test +++ b/test/sql/copy/csv/csv_dtypes.test @@ -6,52 +6,52 @@ statement ok PRAGMA enable_verification query II -select typeof(Year), typeof(Quarter) from 'data/csv/real/ontime_sample.csv' LIMIT 1; +select typeof(Year), typeof(Quarter) from '{DATA_DIR}/csv/real/ontime_sample.csv' LIMIT 1; ---- BIGINT BIGINT query II -select typeof(Year), typeof(Quarter) from read_csv_auto('data/csv/real/ontime_sample.csv', dtypes={'Quarter': 'TINYINT'}) LIMIT 1 +select typeof(Year), typeof(Quarter) from read_csv_auto('{DATA_DIR}/csv/real/ontime_sample.csv', dtypes={'Quarter': 'TINYINT'}) LIMIT 1 ---- BIGINT TINYINT # case insensitivity for struct query II -select typeof(Year), typeof(Quarter) from read_csv_auto('data/csv/real/ontime_sample.csv', dtypes={'quArTeR': 'TINYINT'}) LIMIT 1 +select typeof(Year), typeof(Quarter) from read_csv_auto('{DATA_DIR}/csv/real/ontime_sample.csv', dtypes={'quArTeR': 'TINYINT'}) LIMIT 1 ---- BIGINT TINYINT query II -select typeof(Year), typeof(Quarter) from read_csv_auto('data/csv/real/ontime_sample.csv', dtypes=['INT', 'TINYINT']) LIMIT 1 +select typeof(Year), typeof(Quarter) from read_csv_auto('{DATA_DIR}/csv/real/ontime_sample.csv', dtypes=['INT', 'TINYINT']) LIMIT 1 ---- INTEGER TINYINT # mix of struct and list parameters statement error -select * from read_csv_auto('data/csv/real/ontime_sample.csv', dtypes=['INT'], column_types={'Quarter': 'TINYINT'}) LIMIT 1 +select * from read_csv_auto('{DATA_DIR}/csv/real/ontime_sample.csv', dtypes=['INT'], column_types={'Quarter': 'TINYINT'}) LIMIT 1 ---- can only be supplied once # invalid list type statement error -select * from read_csv_auto('data/csv/real/ontime_sample.csv', dtypes=[42]) LIMIT 1 +select * from read_csv_auto('{DATA_DIR}/csv/real/ontime_sample.csv', dtypes=[42]) LIMIT 1 ---- requires a list of types # invalid type statement error -select * from read_csv_auto('data/csv/real/ontime_sample.csv', dtypes=['unknown_type']) LIMIT 1 +select * from read_csv_auto('{DATA_DIR}/csv/real/ontime_sample.csv', dtypes=['unknown_type']) LIMIT 1 ---- unknown_type # invalid struct type statement error -select * from read_csv_auto('data/csv/real/ontime_sample.csv', dtypes={'Quarter': 42}) LIMIT 1 +select * from read_csv_auto('{DATA_DIR}/csv/real/ontime_sample.csv', dtypes={'Quarter': 42}) LIMIT 1 ---- requires a type specification as string # too many sql types provided in list statement error -select * from read_csv_auto('data/csv/auto/int_bol.csv', dtypes=['varchar', 'varchar', 'varchar']) LIMIT 1 +select * from read_csv_auto('{DATA_DIR}/csv/auto/int_bol.csv', dtypes=['varchar', 'varchar', 'varchar']) LIMIT 1 ---- 3 types were provided, but CSV file only has 2 columns diff --git a/test/sql/copy/csv/csv_duck_fuzz.test b/test/sql/copy/csv/csv_duck_fuzz.test index a7f766b73fe6..12560f3b5b95 100644 --- a/test/sql/copy/csv/csv_duck_fuzz.test +++ b/test/sql/copy/csv/csv_duck_fuzz.test @@ -11,11 +11,11 @@ PRAGMA enable_verification foreach parameter sep delim quote new_line escape nullstr columns 
auto_type_candidates header auto_detect sample_size all_varchar dateformat timestampformat normalize_names compression skip max_line_size maximum_line_size ignore_errors store_rejects rejects_table rejects_scan rejects_limit force_not_null buffer_size decimal_separator parallel null_padding allow_quoted_nulls column_types dtypes types names column_names comment encoding strict_mode statement maybe -SELECT NULL FROM sniff_csv('data/csv/14512.csv', ${parameter} := NULL) +SELECT NULL FROM sniff_csv('{DATA_DIR}/csv/14512.csv', ${parameter} := NULL) ---- statement maybe -SELECT NULL FROM read_csv('data/csv/14512.csv', ${parameter} := NULL) +SELECT NULL FROM read_csv('{DATA_DIR}/csv/14512.csv', ${parameter} := NULL) ---- endloop diff --git a/test/sql/copy/csv/csv_enum.test b/test/sql/copy/csv/csv_enum.test index c329a089200b..eaf1f82b5d12 100644 --- a/test/sql/copy/csv/csv_enum.test +++ b/test/sql/copy/csv/csv_enum.test @@ -9,7 +9,7 @@ statement ok CREATE TYPE bla AS ENUM ('Y', 'N'); query I -select * from read_csv_auto('data/csv/response.csv', header = 0) +select * from read_csv_auto('{DATA_DIR}/csv/response.csv', header = 0) ---- Y Y @@ -17,7 +17,7 @@ N Null query I -FROM read_csv('data/csv/response.csv', columns={'response': 'bla'}, nullstr = 'Null'); +FROM read_csv('{DATA_DIR}/csv/response.csv', columns={'response': 'bla'}, nullstr = 'Null'); ---- Y Y @@ -26,7 +26,7 @@ NULL query I -FROM read_csv_auto('data/csv/response.csv', types={'column0': 'bla'}, nullstr = 'Null', header = 0); +FROM read_csv_auto('{DATA_DIR}/csv/response.csv', types={'column0': 'bla'}, nullstr = 'Null', header = 0); ---- Y Y @@ -34,7 +34,7 @@ N NULL statement error -FROM read_csv_auto('data/csv/response.csv', auto_type_candidates=['bla'], nullstr = 'Null'); +FROM read_csv_auto('{DATA_DIR}/csv/response.csv', auto_type_candidates=['bla'], nullstr = 'Null'); ---- Auto Type Candidate of type ENUM is not accepted as a valid input diff --git a/test/sql/copy/csv/csv_enum_storage.test b/test/sql/copy/csv/csv_enum_storage.test index e63b2b4d2417..ea8818b89825 100644 --- a/test/sql/copy/csv/csv_enum_storage.test +++ b/test/sql/copy/csv/csv_enum_storage.test @@ -7,7 +7,7 @@ PRAGMA enable_verification # load the DB from disk -load __TEST_DIR__/test_csv_enum.db +load {TEMP_DIR}/test_csv_enum.db statement ok CREATE TYPE bla AS ENUM ('Y', 'N'); @@ -15,7 +15,7 @@ CREATE TYPE bla AS ENUM ('Y', 'N'); restart query I -select * from read_csv_auto('data/csv/response.csv', header = 0) +select * from read_csv_auto('{DATA_DIR}/csv/response.csv', header = 0) ---- Y Y @@ -23,7 +23,7 @@ N Null query I -FROM read_csv('data/csv/response.csv', columns={'response': 'bla'}, nullstr = 'Null'); +FROM read_csv('{DATA_DIR}/csv/response.csv', columns={'response': 'bla'}, nullstr = 'Null'); ---- Y Y @@ -32,7 +32,7 @@ NULL query I -FROM read_csv_auto('data/csv/response.csv', types={'column0': 'bla'}, nullstr = 'Null', header = 0); +FROM read_csv_auto('{DATA_DIR}/csv/response.csv', types={'column0': 'bla'}, nullstr = 'Null', header = 0); ---- Y Y @@ -40,6 +40,6 @@ N NULL statement error -FROM read_csv_auto('data/csv/response.csv', auto_type_candidates=['bla'], nullstr = 'Null'); +FROM read_csv_auto('{DATA_DIR}/csv/response.csv', auto_type_candidates=['bla'], nullstr = 'Null'); ---- Auto Type Candidate of type ENUM is not accepted as a valid input diff --git a/test/sql/copy/csv/csv_error_message.test b/test/sql/copy/csv/csv_error_message.test index 0d88461178da..43ffee7005ea 100644 --- a/test/sql/copy/csv/csv_error_message.test +++ 
b/test/sql/copy/csv/csv_error_message.test @@ -8,32 +8,32 @@ PRAGMA enable_verification # Test columns error statement error -FROM read_csv('data/csv/15473.csv', delim = ',', columns = {'A' : 'VARCHAR','B' : 'VARCHAR','C' : 'VARCHAR','D' : 'VARCHAR'}) +FROM read_csv('{DATA_DIR}/csv/15473.csv', delim = ',', columns = {'A' : 'VARCHAR','B' : 'VARCHAR','C' : 'VARCHAR','D' : 'VARCHAR'}) ---- Columns are set as: "columns = { 'A' : 'VARCHAR', 'B' : 'VARCHAR', 'C' : 'VARCHAR', 'D' : 'VARCHAR'}", and they contain: 4 columns. It does not match the number of columns found by the sniffer: 3. Verify the columns parameter is correctly set. statement ok -COPY (SELECT i::VARCHAR i FROM range(103) tbl(i) UNION ALL SELECT 'hello') TO '__TEST_DIR__/int_parse_error.csv' (HEADER, DELIMITER '|') +COPY (SELECT i::VARCHAR i FROM range(103) tbl(i) UNION ALL SELECT 'hello') TO '{TEMP_DIR}/int_parse_error.csv' (HEADER, DELIMITER '|') statement error -SELECT * FROM read_csv('__TEST_DIR__/int_parse_error.csv', columns={'i': 'INT'}) +SELECT * FROM read_csv('{TEMP_DIR}/int_parse_error.csv', columns={'i': 'INT'}) ---- Column at position: 0 Set type: INTEGER Sniffed type: VARCHAR statement error -SELECT * FROM read_csv('__TEST_DIR__/int_parse_error.csv', columns={'i': 'INT'}, header=True, auto_detect=false) +SELECT * FROM read_csv('{TEMP_DIR}/int_parse_error.csv', columns={'i': 'INT'}, header=True, auto_detect=false) ---- Line: 105 statement ok -COPY (SELECT i::VARCHAR i FROM range(103) tbl(i) UNION ALL SELECT 'hello') TO '__TEST_DIR__/int_parse_error.csv' (HEADER 0, DELIMITER '|') +COPY (SELECT i::VARCHAR i FROM range(103) tbl(i) UNION ALL SELECT 'hello') TO '{TEMP_DIR}/int_parse_error.csv' (HEADER 0, DELIMITER '|') statement error -SELECT * FROM read_csv('__TEST_DIR__/int_parse_error.csv', columns={'i': 'INT'}, header=False, auto_detect=false) +SELECT * FROM read_csv('{TEMP_DIR}/int_parse_error.csv', columns={'i': 'INT'}, header=False, auto_detect=false) ---- Line: 104 statement error -SELECT * FROM read_csv('__TEST_DIR__/int_parse_error.csv', columns={'i': 'INT'}, header=False, auto_detect=false) +SELECT * FROM read_csv('{TEMP_DIR}/int_parse_error.csv', columns={'i': 'INT'}, header=False, auto_detect=false) ---- Original Line: hello \ No newline at end of file diff --git a/test/sql/copy/csv/csv_external_access.test b/test/sql/copy/csv/csv_external_access.test index 9753303df7d2..12ffc1add718 100644 --- a/test/sql/copy/csv/csv_external_access.test +++ b/test/sql/copy/csv/csv_external_access.test @@ -9,28 +9,28 @@ statement ok CREATE TABLE date_test(d date); statement ok -COPY date_test FROM 'data/csv/test/date.csv'; +COPY date_test FROM '{DATA_DIR}/csv/test/date.csv'; statement ok SET enable_external_access=false; statement error -SELECT * FROM read_csv('data/csv/test/date.csv', columns = {'d': 'DATE'}); +SELECT * FROM read_csv('{DATA_DIR}/csv/test/date.csv', columns = {'d': 'DATE'}); ---- Permission Error statement error -SELECT * FROM read_csv_auto('data/csv/test/date.csv'); +SELECT * FROM read_csv_auto('{DATA_DIR}/csv/test/date.csv'); ---- Permission Error statement error -COPY date_test FROM 'data/csv/test/date.csv'; +COPY date_test FROM '{DATA_DIR}/csv/test/date.csv'; ---- Permission Error statement error -COPY date_test TO '__TEST_DIR__/date.csv' +COPY date_test TO '{TEMP_DIR}/date.csv' ---- Permission Error @@ -42,6 +42,6 @@ Cannot change enable_external_access setting while database is running # sniffer also respects external access flag statement error -FROM sniff_csv('data/csv/test/date.csv'); +FROM 
sniff_csv('{DATA_DIR}/csv/test/date.csv'); ---- Permission Error diff --git a/test/sql/copy/csv/csv_glob_fallback.test b/test/sql/copy/csv/csv_glob_fallback.test index 144ace96c1da..3ee0f9b4456b 100644 --- a/test/sql/copy/csv/csv_glob_fallback.test +++ b/test/sql/copy/csv/csv_glob_fallback.test @@ -6,12 +6,12 @@ statement ok PRAGMA enable_verification query III -SELECT * FROM 'data/csv/[avalon]_daily-avg.csv' +SELECT * FROM '{DATA_DIR}/csv/[avalon]_daily-avg.csv' ---- 1 2 3 3 4 5 4 5 6 statement error -SELECT * FROM 'data/csv/[avxalon]_daily-avg.csv' +SELECT * FROM '{DATA_DIR}/csv/[avxalon]_daily-avg.csv' ---- diff --git a/test/sql/copy/csv/csv_hive.test b/test/sql/copy/csv/csv_hive.test index e6899a1454dc..a2c55ea82ba5 100644 --- a/test/sql/copy/csv/csv_hive.test +++ b/test/sql/copy/csv/csv_hive.test @@ -11,71 +11,71 @@ CREATE TABLE test2 AS SELECT 2 as id, 'value2' as value; # filenames could allow you to parse hive partitions manually using SQL query III -select id, value, replace(filename, '\', '/') from read_csv_auto('data/csv/hive-partitioning/simple/*/*/test.csv', FILENAME=1) order by id +SELECT id, value, parse_path(filename)[-4:] from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/simple/*/*/test.csv', FILENAME=1) order by id ---- -1 value1 data/csv/hive-partitioning/simple/part=a/date=2012-01-01/test.csv -2 value2 data/csv/hive-partitioning/simple/part=b/date=2013-01-01/test.csv +1 value1 [simple, 'part=a', 'date=2012-01-01', test.csv] +2 value2 [simple, 'part=b', 'date=2013-01-01', test.csv] # however this is just a lot nicer query IIII -select id, value, part, date from read_csv_auto('data/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1) order by id +select id, value, part, date from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1) order by id ---- 1 value1 a 2012-01-01 2 value2 b 2013-01-01 # As long as the names match, we don't really mind since everything is a string anyway query IIII -select id, value, part, date from read_csv_auto('data/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) order by id +select id, value, part, date from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) order by id ---- 1 value1 a 2012-01-01 2 value2 b 2013-01-01 # If the key names don't add up, we throw statement error -select * from read_csv_auto('data/csv/hive-partitioning/mismatching_names/*/*/test.csv', HIVE_PARTITIONING=1) +select * from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/mismatching_names/*/*/test.csv', HIVE_PARTITIONING=1) ---- Hive partition mismatch # If the key names don't add up, we throw statement error -select * from read_csv_auto('data/csv/hive-partitioning/mismatching_count/*/*/test.csv', HIVE_PARTITIONING=1) +select * from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/mismatching_count/*/*/test.csv', HIVE_PARTITIONING=1) ---- Hive partition mismatch # Now we do a bunch of filtering on the partitions, to test the file skipping mechanism query IIII -select id, value, part, date from read_csv_auto('data/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) where part='a' +select id, value, part, date from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) where part='a' ---- 1 value1 a 2012-01-01 query IIII -select id, value, part, date from read_csv_auto('data/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) where part='b' +select id, value, 
part, date from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) where part='b' ---- 2 value2 b 2013-01-01 query IIII -select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where part_cast > 0 and part_cast < 5000; +select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where part_cast > 0 and part_cast < 5000; ---- 1 value1 1000 2012-01-01 query IIII -select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where part_cast > 5000; +select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where part_cast > 5000; ---- 2 value2 9000 2013-01-01 query IIII -select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where date_cast > CAST('2000-01-01' as DATE) and date_cast < CAST('2012-12-12' as DATE); +select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where date_cast > CAST('2000-01-01' as DATE) and date_cast < CAST('2012-12-12' as DATE); ---- 1 value1 1000 2012-01-01 query IIII -select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where date_cast > CAST('2000-01-01' as DATE) order by date_cast; +select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where date_cast > CAST('2000-01-01' as DATE) order by date_cast; ---- 1 value1 1000 2012-01-01 2 value2 9000 2013-01-01 query IIII -select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where date_cast=CAST('2012-01-01' as DATE) OR part_cast=9000 ORDER BY date_cast; +select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where date_cast=CAST('2012-01-01' as DATE) OR part_cast=9000 ORDER BY date_cast; ---- 1 value1 1000 2012-01-01 2 value2 9000 2013-01-01 @@ -84,68 +84,68 @@ select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cas # Filtering out 0/2 files query IIII -select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == '2012-01-011000') OR (part_cast=9000) ORDER BY date_cast; +select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == '2012-01-011000') OR (part_cast=9000) ORDER BY 
date_cast; ---- 1 value1 1000 2012-01-01 2 value2 9000 2013-01-01 # There should not be any filter operation remaining since it can be handled completely during pushdown by pruning file list query II -EXPLAIN select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == '2012-01-011000') OR (part_cast=9000) ORDER BY date_cast; +EXPLAIN select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == '2012-01-011000') OR (part_cast=9000) ORDER BY date_cast; ---- physical_plan :.*FILTER.* # Query filtering out first file query IIII -select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == 'foobar') OR (part_cast=9000) ORDER BY date_cast; +select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == 'foobar') OR (part_cast=9000) ORDER BY date_cast; ---- 2 value2 9000 2013-01-01 # Again, we should not have a filter operator here query II -explain select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == 'foobar') OR (part_cast=9000) ORDER BY date_cast; +explain select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == 'foobar') OR (part_cast=9000) ORDER BY date_cast; ---- physical_plan :.*FILTER.* # Query filtering out second file query IIII -select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == '2012-01-011000') OR (part_cast=1337) ORDER BY date_cast; +select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == '2012-01-011000') OR (part_cast=1337) ORDER BY date_cast; ---- 1 value1 1000 2012-01-01 # Again, we should not have a filter operator here query II -explain select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == '2012-01-011000') OR (part_cast=1337) ORDER BY date_cast; +explain select id, value, CAST(part AS 
INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == '2012-01-011000') OR (part_cast=1337) ORDER BY date_cast; ---- physical_plan :.*FILTER.* # Filtering out both files query IIII -select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == 'foobar') OR (part_cast=1337) ORDER BY date_cast; +select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == 'foobar') OR (part_cast=1337) ORDER BY date_cast; ---- # Again, we should not have a filter operator here query II -EXPLAIN select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('data/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == 'foobar') OR (part_cast=1337) ORDER BY date_cast; +EXPLAIN select id, value, CAST(part AS INT) as part_cast, CAST(date AS DATE) as date_cast from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/types/*/*/test.csv', HIVE_PARTITIONING=1) where (date_cast=CAST('2012-01-01' as DATE) AND concat(date_cast::VARCHAR, part_cast::VARCHAR) == 'foobar') OR (part_cast=1337) ORDER BY date_cast; ---- physical_plan :.*FILTER.* # projection pushdown query I -select value from read_csv_auto('data/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) order by 1 +select value from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) order by 1 ---- value1 value2 query I -select part from read_csv_auto('data/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) order by 1 +select part from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) order by 1 ---- a b # project only some columns from a hive partition query I -select date from read_csv_auto('data/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) order by 1 +select date from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/different_order/*/*/test.csv', HIVE_PARTITIONING=1) order by 1 ---- 2012-01-01 2013-01-01 diff --git a/test/sql/copy/csv/csv_hive_filename_union.test b/test/sql/copy/csv/csv_hive_filename_union.test index 15c10c80fabc..f76adc54424a 100644 --- a/test/sql/copy/csv/csv_hive_filename_union.test +++ b/test/sql/copy/csv/csv_hive_filename_union.test @@ -7,49 +7,49 @@ PRAGMA enable_verification # projection pushdown query I -select filename.replace('\', '/').split('/')[-2] from read_csv_auto('data/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1, FILENAME=1) order by 1 +select filename.replace('\', '/').split('/')[-2] from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1, FILENAME=1) order by 1 ---- date=2012-01-01 date=2013-01-01 query III -select part, filename.replace('\', '/').split('/')[-2], value from read_csv_auto('data/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1, 
FILENAME=1) order by 1 +select part, filename.replace('\', '/').split('/')[-2], value from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1, FILENAME=1) order by 1 ---- a date=2012-01-01 value1 b date=2013-01-01 value2 query III -select part, filename.replace('\', '/').split('/')[-2], value from read_csv_auto('data/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1, FILENAME=1, UNION_BY_NAME=1) order by 1 +select part, filename.replace('\', '/').split('/')[-2], value from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1, FILENAME=1, UNION_BY_NAME=1) order by 1 ---- a date=2012-01-01 value1 b date=2013-01-01 value2 query III -select * exclude(filename) from read_csv_auto('data/csv/hive-partitioning/mismatching_types/*/*.csv', HIVE_PARTITIONING=1, FILENAME=1) order by 1 +select * exclude(filename) from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/mismatching_types/*/*.csv', HIVE_PARTITIONING=1, FILENAME=1) order by 1 ---- 99 world 2 xxx 42 1 query III -select * from read_csv_auto('data/csv/hive-partitioning/mismatching_types/*/*.csv', HIVE_PARTITIONING=1, UNION_BY_NAME=1) order by 1 +select * from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/mismatching_types/*/*.csv', HIVE_PARTITIONING=1, UNION_BY_NAME=1) order by 1 ---- 99 world 2 xxx 42 1 query IIII -select * exclude(filename), filename.replace('\', '/').split('/')[-2] from read_csv_auto('data/csv/hive-partitioning/mismatching_types/*/*.csv', HIVE_PARTITIONING=1, FILENAME=1, UNION_BY_NAME=1) order by 1 +select * exclude(filename), filename.replace('\', '/').split('/')[-2] from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/mismatching_types/*/*.csv', HIVE_PARTITIONING=1, FILENAME=1, UNION_BY_NAME=1) order by 1 ---- 99 world 2 part=2 xxx 42 1 part=1 query IIII -select part, filename.replace('\', '/').split('/')[-2], a, b from read_csv_auto('data/csv/hive-partitioning/mismatching_types/*/*.csv', HIVE_PARTITIONING=1, FILENAME=1, UNION_BY_NAME=1) order by 1 +select part, filename.replace('\', '/').split('/')[-2], a, b from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/mismatching_types/*/*.csv', HIVE_PARTITIONING=1, FILENAME=1, UNION_BY_NAME=1) order by 1 ---- 1 part=1 xxx 42 2 part=2 99 world query II -select * exclude (filename) from read_csv_auto('data/csv/hive-partitioning/mismatching_types/*/*.csv', HIVE_PARTITIONING=0, FILENAME=1, UNION_BY_NAME=1) order by 1 +select * exclude (filename) from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/mismatching_types/*/*.csv', HIVE_PARTITIONING=0, FILENAME=1, UNION_BY_NAME=1) order by 1 ---- 99 world xxx 42 @@ -57,18 +57,18 @@ xxx 42 # This can either throw a cast error or a schema mismatch error depending on what is executed first because of # parallelism statement error -select * from read_csv_auto(['data/csv/hive-partitioning/mismatching_contents/part=1/test.csv', 'data/csv/hive-partitioning/mismatching_contents/part=2/test.csv']) order by 1 +select * from read_csv_auto(['{DATA_DIR}/csv/hive-partitioning/mismatching_contents/part=1/test.csv', '{DATA_DIR}/csv/hive-partitioning/mismatching_contents/part=2/test.csv']) order by 1 ---- If you are trying to read files with different schemas, try setting union_by_name=True query III -select a, b, c from read_csv_auto('data/csv/hive-partitioning/mismatching_contents/*/*.csv', UNION_BY_NAME=1) order by 2 NULLS LAST +select a, b, c from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/mismatching_contents/*/*.csv', UNION_BY_NAME=1) order by 2 NULLS LAST 
---- 42 world NULL 42 NULL 1992-01-01 query IIII -select a, b, part, c from read_csv_auto('data/csv/hive-partitioning/mismatching_contents/*/*.csv', UNION_BY_NAME=1, HIVE_PARTITIONING=1) order by 2 NULLS LAST +select a, b, part, c from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/mismatching_contents/*/*.csv', UNION_BY_NAME=1, HIVE_PARTITIONING=1) order by 2 NULLS LAST ---- 42 world 2 NULL 42 NULL 1 1992-01-01 diff --git a/test/sql/copy/csv/csv_line_too_long.test b/test/sql/copy/csv/csv_line_too_long.test index 46788c741021..4de0c0cb2650 100644 --- a/test/sql/copy/csv/csv_line_too_long.test +++ b/test/sql/copy/csv/csv_line_too_long.test @@ -11,17 +11,17 @@ CREATE TABLE T1 (name VARCHAR); foreach header true false statement error -COPY T1(name) from 'data/csv/line_too_long.csv.gz' (DELIMITER ',', HEADER ${header} , COMPRESSION gzip, ALLOW_QUOTED_NULLS false); +COPY T1(name) from '{DATA_DIR}/csv/line_too_long.csv.gz' (DELIMITER ',', HEADER ${header} , COMPRESSION gzip, ALLOW_QUOTED_NULLS false); ---- Maximum line size of 2000000 bytes exceeded statement error -COPY T1(name) from 'data/csv/line_too_long_with_newline.csv.gz' (DELIMITER ',', HEADER ${header} , COMPRESSION gzip, ALLOW_QUOTED_NULLS false); +COPY T1(name) from '{DATA_DIR}/csv/line_too_long_with_newline.csv.gz' (DELIMITER ',', HEADER ${header} , COMPRESSION gzip, ALLOW_QUOTED_NULLS false); ---- Possible Solution: Change the maximum length size, e.g., max_line_size=2097165 statement error -COPY T1(name) from 'data/csv/multiple_line_too_long.csv.gz' (DELIMITER ',', HEADER ${header} , COMPRESSION gzip, ALLOW_QUOTED_NULLS false); +COPY T1(name) from '{DATA_DIR}/csv/multiple_line_too_long.csv.gz' (DELIMITER ',', HEADER ${header} , COMPRESSION gzip, ALLOW_QUOTED_NULLS false); ---- Possible Solution: Change the maximum length size, e.g., max_line_size=2097165 diff --git a/test/sql/copy/csv/csv_names.test b/test/sql/copy/csv/csv_names.test index 940e6047bbd8..1c51ed069a61 100644 --- a/test/sql/copy/csv/csv_names.test +++ b/test/sql/copy/csv/csv_names.test @@ -7,138 +7,138 @@ PRAGMA enable_verification # Duplicate names should not be accepted statement error -from read_csv('data/csv/header_bug.csv', names=['col1', 'col1']) LIMIT 1; +from read_csv('{DATA_DIR}/csv/header_bug.csv', names=['col1', 'col1']) LIMIT 1; ---- read_csv names must have unique values # Empty Names should not be accepted statement error -from read_csv('data/csv/header_bug.csv', names=['']) LIMIT 1; +from read_csv('{DATA_DIR}/csv/header_bug.csv', names=['']) LIMIT 1; ---- read_csv names cannot have empty (or all whitespace) value statement error -from read_csv('data/csv/header_bug.csv', names=[' ', ' '], header = 0); +from read_csv('{DATA_DIR}/csv/header_bug.csv', names=[' ', ' '], header = 0); ---- read_csv names cannot have empty (or all whitespace) value # no names provided query IIII -select column00, column01, column02, column03 from 'data/csv/real/lineitem_sample.csv' LIMIT 1; +select column00, column01, column02, column03 from '{DATA_DIR}/csv/real/lineitem_sample.csv' LIMIT 1; ---- 1 15519 785 1 # override the names partially query IIII -select l_orderkey, l_partkey, column02, column03 from read_csv_auto('data/csv/real/lineitem_sample.csv', names=['l_orderkey', 'l_partkey']) LIMIT 1; +select l_orderkey, l_partkey, column02, column03 from read_csv_auto('{DATA_DIR}/csv/real/lineitem_sample.csv', names=['l_orderkey', 'l_partkey']) LIMIT 1; ---- 1 15519 785 1 # empty list query IIII -select column00, column01, column02, column03 from
read_csv_auto('data/csv/real/lineitem_sample.csv', names=[]) LIMIT 1; +select column00, column01, column02, column03 from read_csv_auto('{DATA_DIR}/csv/real/lineitem_sample.csv', names=[]) LIMIT 1; ---- 1 15519 785 1 # specify all names query IIII -select l_orderkey, l_partkey, l_commitdate, l_comment from read_csv_auto('data/csv/real/lineitem_sample.csv', column_names=['l_orderkey', 'l_partkey', 'l_suppkey', 'l_linenumber', 'l_quantity', 'l_extendedprice', 'l_discount', 'l_tax', 'l_returnflag', 'l_linestatus', 'l_shipdate', 'l_commitdate', 'l_receiptdate', 'l_shipinstruct', 'l_shipmode', 'l_comment']) LIMIT 1; +select l_orderkey, l_partkey, l_commitdate, l_comment from read_csv_auto('{DATA_DIR}/csv/real/lineitem_sample.csv', column_names=['l_orderkey', 'l_partkey', 'l_suppkey', 'l_linenumber', 'l_quantity', 'l_extendedprice', 'l_discount', 'l_tax', 'l_returnflag', 'l_linestatus', 'l_shipdate', 'l_commitdate', 'l_receiptdate', 'l_shipinstruct', 'l_shipmode', 'l_comment']) LIMIT 1; ---- 1 15519 1996-02-12 egular courts above the # specify too many names statement error -select l_orderkey, l_partkey, l_commitdate, l_comment from read_csv_auto('data/csv/real/lineitem_sample.csv', names=['l_orderkey', 'l_partkey', 'l_suppkey', 'l_linenumber', 'l_quantity', 'l_extendedprice', 'l_discount', 'l_tax', 'l_returnflag', 'l_linestatus', 'l_shipdate', 'l_commitdate', 'l_receiptdate', 'l_shipinstruct', 'l_shipmode', 'l_comment', 'xx']) LIMIT 1; +select l_orderkey, l_partkey, l_commitdate, l_comment from read_csv_auto('{DATA_DIR}/csv/real/lineitem_sample.csv', names=['l_orderkey', 'l_partkey', 'l_suppkey', 'l_linenumber', 'l_quantity', 'l_extendedprice', 'l_discount', 'l_tax', 'l_returnflag', 'l_linestatus', 'l_shipdate', 'l_commitdate', 'l_receiptdate', 'l_shipinstruct', 'l_shipmode', 'l_comment', 'xx']) LIMIT 1; ---- -Error when sniffing file "data/csv/real/lineitem_sample.csv". +Error when sniffing file "{DATA_DIR}/csv/real/lineitem_sample.csv". 
# specify names on a file with a header query II -select yr, Quarter from read_csv_auto('data/csv/real/ontime_sample.csv', names=['yr']) LIMIT 1; +select yr, Quarter from read_csv_auto('{DATA_DIR}/csv/real/ontime_sample.csv', names=['yr']) LIMIT 1; ---- 1988 1 # NULL statement error -select column00, column01, column02, column03 from read_csv_auto('data/csv/real/lineitem_sample.csv', names=NULL) LIMIT 1; +select column00, column01, column02, column03 from read_csv_auto('{DATA_DIR}/csv/real/lineitem_sample.csv', names=NULL) LIMIT 1; ---- read_csv names cannot be NULL # specify the names twice statement error -select l_orderkey, l_partkey, column02, column03 from read_csv_auto('data/csv/real/lineitem_sample.csv', names=['l_orderkey', 'l_partkey'], column_names=['l_orderkey']) LIMIT 1; +select l_orderkey, l_partkey, column02, column03 from read_csv_auto('{DATA_DIR}/csv/real/lineitem_sample.csv', names=['l_orderkey', 'l_partkey'], column_names=['l_orderkey']) LIMIT 1; ---- read_csv column_names/names can only be supplied once statement error -select l_orderkey, l_partkey, column02, column03 from read_csv_auto('data/csv/real/lineitem_sample.csv', names=42) LIMIT 1; +select l_orderkey, l_partkey, column02, column03 from read_csv_auto('{DATA_DIR}/csv/real/lineitem_sample.csv', names=42) LIMIT 1; ---- Failed to cast value: Unimplemented type for cast (INTEGER -> VARCHAR[]) # specify options delim and sep statement error -select column00 from read_csv_auto('data/csv/real/lineitem_sample.csv', delim='|', sep='|') LIMIT 1; +select column00 from read_csv_auto('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', sep='|') LIMIT 1; ---- CSV Reader function option delim and sep are aliases, only one can be supplied # duplicate names statement error -select l_orderkey, l_partkey, column02, column03 from read_csv_auto('data/csv/real/lineitem_sample.csv', names=['l_orderkey', 'l_orderkey']) LIMIT 1; +select l_orderkey, l_partkey, column02, column03 from read_csv_auto('{DATA_DIR}/csv/real/lineitem_sample.csv', names=['l_orderkey', 'l_orderkey']) LIMIT 1; ---- read_csv names must have unique values. "l_orderkey" is repeated. 
query I -select Columns FROM sniff_csv('data/csv/header.csv', names = ['a']) +select Columns FROM sniff_csv('{DATA_DIR}/csv/header.csv', names = ['a']) ---- [{'name': a, 'type': VARCHAR}] query I -FROM read_csv('data/csv/header.csv', names = ['a']) +FROM read_csv('{DATA_DIR}/csv/header.csv', names = ['a']) ---- line2 line3 query I -select Columns FROM sniff_csv('data/csv/header.csv', names = ['a'], header = false) +select Columns FROM sniff_csv('{DATA_DIR}/csv/header.csv', names = ['a'], header = false) ---- [{'name': a, 'type': VARCHAR}] query I -FROM read_csv('data/csv/header.csv', names = ['a'], header = false) +FROM read_csv('{DATA_DIR}/csv/header.csv', names = ['a'], header = false) ---- line1 line2 line3 query I -select Columns FROM sniff_csv('data/csv/header_2.csv', names = ['a']) +select Columns FROM sniff_csv('{DATA_DIR}/csv/header_2.csv', names = ['a']) ---- [{'name': a, 'type': VARCHAR}, {'name': line1_2, 'type': VARCHAR}, {'name': line1_3, 'type': VARCHAR}] query III -FROM read_csv('data/csv/header_2.csv', names = ['a']) +FROM read_csv('{DATA_DIR}/csv/header_2.csv', names = ['a']) ---- line2 line2_2 line2_3 line3 line3_2 line3_3 query I -select Columns FROM sniff_csv('data/csv/header_2.csv', names = ['a'], header=False) +select Columns FROM sniff_csv('{DATA_DIR}/csv/header_2.csv', names = ['a'], header=False) ---- [{'name': a, 'type': VARCHAR}, {'name': column1, 'type': VARCHAR}, {'name': column2, 'type': VARCHAR}] statement error -select Columns FROM sniff_csv('data/csv/header_2.csv', names = ['a','b','c','d']) +select Columns FROM sniff_csv('{DATA_DIR}/csv/header_2.csv', names = ['a','b','c','d']) ---- -Error when sniffing file "data/csv/header_2.csv" +Error when sniffing file "{DATA_DIR}/csv/header_2.csv" query I -select Columns FROM sniff_csv('data/csv/header_2.csv', names = ['a','b','c','d'], null_padding = True) +select Columns FROM sniff_csv('{DATA_DIR}/csv/header_2.csv', names = ['a','b','c','d'], null_padding = True) ---- [{'name': a, 'type': VARCHAR}, {'name': b, 'type': VARCHAR}, {'name': c, 'type': VARCHAR}, {'name': d, 'type': VARCHAR}] query IIII -FROM read_csv('data/csv/header_2.csv', names = ['a','b','c','d'], null_padding = True) +FROM read_csv('{DATA_DIR}/csv/header_2.csv', names = ['a','b','c','d'], null_padding = True) ---- line2 line2_2 line2_3 NULL line3 line3_2 line3_3 NULL \ No newline at end of file diff --git a/test/sql/copy/csv/csv_null_byte.test b/test/sql/copy/csv/csv_null_byte.test index a81d0a1a51d6..fef05ac6044a 100644 --- a/test/sql/copy/csv/csv_null_byte.test +++ b/test/sql/copy/csv/csv_null_byte.test @@ -6,27 +6,27 @@ statement ok PRAGMA enable_verification query III -select * from 'data/csv/nullbyte.csv'; +select * from '{DATA_DIR}/csv/nullbyte.csv'; ---- val1 val\02 val3 query III -select * from read_csv('data/csv/nullbyte.csv', columns={'col1': 'VARCHAR', 'col2': 'VARCHAR', 'col3': 'VARCHAR'}, delim='|'); +select * from read_csv('{DATA_DIR}/csv/nullbyte.csv', columns={'col1': 'VARCHAR', 'col2': 'VARCHAR', 'col3': 'VARCHAR'}, delim='|'); ---- val1 val\02 val3 query II -select * from 'data/csv/nullbyte_header.csv'; +select * from '{DATA_DIR}/csv/nullbyte_header.csv'; ---- val1 val2 query II -select * from read_csv('data/csv/nullbyte_header.csv', columns={'col1': 'VARCHAR', 'col2': 'VARCHAR'}, delim='|', header=False); +select * from read_csv('{DATA_DIR}/csv/nullbyte_header.csv', columns={'col1': 'VARCHAR', 'col2': 'VARCHAR'}, delim='|', header=False); ---- col1 col\02 val1 val2 query II -select * from read_csv('data/csv/nullbyte_header.csv', 
columns={'col1': 'VARCHAR', 'col2': 'VARCHAR'}, delim='|', header=True); +select * from read_csv('{DATA_DIR}/csv/nullbyte_header.csv', columns={'col1': 'VARCHAR', 'col2': 'VARCHAR'}, delim='|', header=True); ---- val1 val2 diff --git a/test/sql/copy/csv/csv_null_padding.test b/test/sql/copy/csv/csv_null_padding.test index de3d5893436b..bca6b1f5d69e 100644 --- a/test/sql/copy/csv/csv_null_padding.test +++ b/test/sql/copy/csv/csv_null_padding.test @@ -7,7 +7,7 @@ PRAGMA enable_verification # null padding with a header query IIII -FROM read_csv_auto('data/csv/nullpadding_header.csv', null_padding=True, comment = '') +FROM read_csv_auto('{DATA_DIR}/csv/nullpadding_header.csv', null_padding=True, comment = '') ---- one two three four 1 a alice NULL @@ -15,7 +15,7 @@ one two three four # without null padding we can only read one column query I -FROM read_csv_auto('data/csv/nullpadding_header.csv', null_padding=False, header = 0) +FROM read_csv_auto('{DATA_DIR}/csv/nullpadding_header.csv', null_padding=False, header = 0) ---- # this file has a bunch of gunk at the top one,two,three,four @@ -23,20 +23,20 @@ one,two,three,four 2,b,bob query I -FROM read_csv_auto('data/csv/nullpadding_header.csv', null_padding=False, skip=1, header = 0, comment = '') +FROM read_csv_auto('{DATA_DIR}/csv/nullpadding_header.csv', null_padding=False, skip=1, header = 0, comment = '') ---- one,two,three,four 1,a,alice 2,b,bob query III -FROM read_csv_auto('data/csv/nullpadding_header.csv', null_padding=False, skip=2) +FROM read_csv_auto('{DATA_DIR}/csv/nullpadding_header.csv', null_padding=False, skip=2) ---- 1 a alice 2 b bob query I -FROM read_csv_auto('data/csv/blank_line.csv', null_padding=True) +FROM read_csv_auto('{DATA_DIR}/csv/blank_line.csv', null_padding=True) ---- 1 2 diff --git a/test/sql/copy/csv/csv_nullstr_list.test b/test/sql/copy/csv/csv_nullstr_list.test index 06ef99f55206..7c043334ee9d 100644 --- a/test/sql/copy/csv/csv_nullstr_list.test +++ b/test/sql/copy/csv/csv_nullstr_list.test @@ -7,7 +7,7 @@ PRAGMA enable_verification # Test List query III -FROM read_csv('data/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null']); +FROM read_csv('{DATA_DIR}/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null']); ---- Pedro 31 1.73 Mark NULL NULL @@ -15,7 +15,7 @@ Thijs 26 NULL # Test Quoted query III -FROM read_csv('data/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null']); +FROM read_csv('{DATA_DIR}/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null']); ---- Pedro 31 1.73 Mark NULL NULL @@ -23,7 +23,7 @@ Thijs 26 NULL #allow_quoted_nulls = false query III -FROM read_csv('data/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null'], allow_quoted_nulls = false); +FROM read_csv('{DATA_DIR}/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', 
escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null'], allow_quoted_nulls = false); ---- Pedro 31 1.73 Mark null (empty) @@ -31,47 +31,47 @@ Thijs 26 none # Test nullstr = [] statement error -FROM read_csv('data/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = []); +FROM read_csv('{DATA_DIR}/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = []); ---- CSV Reader function option nullstr requires a non-empty list of possible null strings (varchar) as input # Test nullstr = ['a', NULL] statement error -FROM read_csv('data/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['a', NULL]); +FROM read_csv('{DATA_DIR}/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['a', NULL]); ---- CSV Reader function option nullstr does not accept NULL values as a valid nullstr option # Test nullstr = NULL statement error -FROM read_csv('data/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = NULL); +FROM read_csv('{DATA_DIR}/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = NULL); ---- CSV Reader function option nullstr requires a string or a list as input # Test nullstr = [42] statement error -FROM read_csv('data/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = [42]); +FROM read_csv('{DATA_DIR}/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = [42]); ---- CSV Reader function option nullstr requires a non-empty list of possible null strings (varchar) as input # Test Null Strings equal to delim quote escape statement error -FROM read_csv('data/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['',',','null'], allow_quoted_nulls = false); +FROM read_csv('{DATA_DIR}/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['',',','null'], allow_quoted_nulls = false); ---- DELIMITER must not appear in the NULL specification and vice versa statement error -FROM read_csv('data/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='\', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','"','null'], allow_quoted_nulls = false); +FROM read_csv('{DATA_DIR}/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='\', skip=0, 
header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','"','null'], allow_quoted_nulls = false); ---- QUOTE must not appear in the NULL specification and vice versa statement error -FROM read_csv('data/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='\', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','\','null'], allow_quoted_nulls = false); +FROM read_csv('{DATA_DIR}/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='\', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','\','null'], allow_quoted_nulls = false); ---- ESCAPE must not appear in the NULL specification and vice versa # What if we have repeated values in our nullstr list? query III -FROM read_csv('data/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null','','none','null'], allow_quoted_nulls = false); +FROM read_csv('{DATA_DIR}/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null','','none','null'], allow_quoted_nulls = false); ---- Pedro 31 1.73 Mark null (empty) @@ -79,14 +79,14 @@ Thijs 26 none # Test with force_not_null query III -FROM read_csv('data/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null'], force_not_null = ['height']); +FROM read_csv('{DATA_DIR}/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null'], force_not_null = ['height']); ---- Pedro 31 1.73 Mark NULL (empty) Thijs 26 (empty) query III -FROM read_csv('data/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null'], force_not_null = ['age','height']); +FROM read_csv('{DATA_DIR}/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null'], force_not_null = ['age','height']); ---- Pedro 31 1.73 Mark (empty) (empty) @@ -94,14 +94,14 @@ Thijs 26 (empty) # Test Quoted query III -FROM read_csv('data/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null'], force_not_null = ['height']); +FROM read_csv('{DATA_DIR}/csv/null/multiple_quoted_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null'], force_not_null = ['height']); ---- Pedro 31 1.73 Mark NULL (empty) Thijs 26 (empty) query III -FROM read_csv('data/csv/null/multiple_quoted_nulls.csv', auto_detect=true, delim=',', quote='"', escape='"', skip=0, header=true, nullstr = ['','none','null'], force_not_null = ['age','height'], ALL_VARCHAR = 1); +FROM 
read_csv('{DATA_DIR}/csv/null/multiple_quoted_nulls.csv', auto_detect=true, delim=',', quote='"', escape='"', skip=0, header=true, nullstr = ['','none','null'], force_not_null = ['age','height'], ALL_VARCHAR = 1); ---- Pedro 31 1.73 Mark (empty) (empty) @@ -109,14 +109,14 @@ Thijs 26 (empty) # Test with projection push-down query I -select height FROM read_csv('data/csv/null/multiple_nulls.csv', delim=',', quote='"', escape='"', skip=0, header=true, nullstr = ['','none','null']); +select height FROM read_csv('{DATA_DIR}/csv/null/multiple_nulls.csv', delim=',', quote='"', escape='"', skip=0, header=true, nullstr = ['','none','null']); ---- 1.73 NULL NULL query I -select age FROM read_csv('data/csv/null/multiple_nulls.csv', delim=',', quote='"', escape='"', skip=0, header=true, nullstr = ['','none','null']); +select age FROM read_csv('{DATA_DIR}/csv/null/multiple_nulls.csv', delim=',', quote='"', escape='"', skip=0, header=true, nullstr = ['','none','null']); ---- 31 NULL @@ -124,7 +124,7 @@ NULL # Test force_not_null fails for made-up column statement error -FROM read_csv('data/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null'], force_not_null = ['dont_exist']); +FROM read_csv('{DATA_DIR}/csv/null/multiple_nulls.csv', auto_detect=false, delim=',', quote='"', escape='"', skip=0, header=true, columns={'name': 'VARCHAR', 'age': 'VARCHAR', 'height': 'VARCHAR'}, nullstr = ['','none','null'], force_not_null = ['dont_exist']); ---- "force_not_null" expected to find dont_exist, but it was not found in the table @@ -133,20 +133,20 @@ statement ok CREATE TABLE data (a VARCHAR, b VARCHAR, c VARCHAR) statement ok -COPY data FROM 'data/csv/null/multiple_nulls.csv' (nullstr ['','none','null'], HEADER 1); +COPY data FROM '{DATA_DIR}/csv/null/multiple_nulls.csv' (nullstr ['','none','null'], HEADER 1); statement error -COPY data FROM 'data/csv/null/multiple_nulls.csv' (nullstr NULL, HEADER 1); +COPY data FROM '{DATA_DIR}/csv/null/multiple_nulls.csv' (nullstr NULL, HEADER 1); ---- NULL is not supported statement error -COPY data FROM 'data/csv/null/multiple_nulls.csv' (nullstr [NULL], HEADER 1); +COPY data FROM '{DATA_DIR}/csv/null/multiple_nulls.csv' (nullstr [NULL], HEADER 1); ---- Binder Error: CSV Reader function option nullstr requires a non-empty list of possible null strings (varchar) as input statement error -COPY data FROM 'data/csv/null/multiple_nulls.csv' (nullstr [42], HEADER 1); +COPY data FROM '{DATA_DIR}/csv/null/multiple_nulls.csv' (nullstr [42], HEADER 1); ---- Binder Error: CSV Reader function option nullstr requires a non-empty list of possible null strings (varchar) as input @@ -158,6 +158,6 @@ Mark NULL NULL Thijs 26 NULL statement error -COPY data TO '__TEST_DIR__/multiple_nulls.csv' (nullstr ['a', 'b']); +COPY data TO '{TEMP_DIR}/multiple_nulls.csv' (nullstr ['a', 'b']); ---- CSV Writer function option nullstr only accepts one nullstr value. 
\ No newline at end of file diff --git a/test/sql/copy/csv/csv_quoted_newline_incorrect.test b/test/sql/copy/csv/csv_quoted_newline_incorrect.test index edc70a3a7967..4b6e41b62481 100644 --- a/test/sql/copy/csv/csv_quoted_newline_incorrect.test +++ b/test/sql/copy/csv/csv_quoted_newline_incorrect.test @@ -11,5 +11,5 @@ PRAGMA verify_parallelism # CSV reader skips malformed lines statement ok -from 'data/csv/csv_quoted_newline_odd.csv'; +from '{DATA_DIR}/csv/csv_quoted_newline_odd.csv'; diff --git a/test/sql/copy/csv/csv_windows_mixed_separators.test b/test/sql/copy/csv/csv_windows_mixed_separators.test index 64218404faeb..677874f40503 100644 --- a/test/sql/copy/csv/csv_windows_mixed_separators.test +++ b/test/sql/copy/csv/csv_windows_mixed_separators.test @@ -9,18 +9,18 @@ PRAGMA enable_verification # \ and / are interchangeable on Windows query I -SELECT * FROM 'data\csv/test/date.csv' +SELECT * FROM '{DATA_DIR}\csv/test/date.csv' ---- 2019-06-05 query I -SELECT * FROM glob('data/csv\test/*.csv') t(g) WHERE g LIKE '%date.csv' +SELECT parse_path(g)[-3:] FROM glob('{DATA_DIR}/csv\test/*.csv') t(g) WHERE g LIKE '%date.csv' ---- -data\csv\test\date.csv +[csv, test, date.csv] # also for attach statement ok -ATTACH '__TEST_DIR__/windows_test.db' AS s1 +ATTACH '{TEMP_DIR}/windows_test.db' AS s1 statement ok CREATE TABLE s1.tbl AS SELECT * FROM range(10) t(i); @@ -34,7 +34,7 @@ statement ok DETACH s1 statement ok -ATTACH '__TEST_DIR__\windows_test.db' AS s1 +ATTACH '{TEMP_DIR}\windows_test.db' AS s1 query I SELECT SUM(i) FROM s1.tbl diff --git a/test/sql/copy/csv/csv_write_gz.test_slow b/test/sql/copy/csv/csv_write_gz.test_slow index f17213fe82f9..cc3b84b7418d 100644 --- a/test/sql/copy/csv/csv_write_gz.test_slow +++ b/test/sql/copy/csv/csv_write_gz.test_slow @@ -8,13 +8,13 @@ PRAGMA enable_verification foreach csv_name greek_utf8.csv imdb_movie_info_escaped.csv lineitem_sample.csv ncvoter.csv nfc_normalization.csv ontime_sample.csv voter.tsv web_page.csv statement ok -CREATE TABLE csv_data AS SELECT * FROM 'data/csv/real/${csv_name}'; +CREATE TABLE csv_data AS SELECT * FROM '{DATA_DIR}/csv/real/${csv_name}'; statement ok -COPY csv_data TO '__TEST_DIR__/${csv_name}.gz' (COMPRESSION GZIP, HEADER 0); +COPY csv_data TO '{TEMP_DIR}/${csv_name}.gz' (COMPRESSION GZIP, HEADER 0); statement ok -CREATE TABLE csv_data_gz AS SELECT * FROM '__TEST_DIR__/${csv_name}.gz'; +CREATE TABLE csv_data_gz AS SELECT * FROM '{TEMP_DIR}/${csv_name}.gz'; query I SELECT COUNT(*) FROM (SELECT * FROM csv_data EXCEPT SELECT * FROM csv_data_gz) diff --git a/test/sql/copy/csv/csv_write_zstd.test_slow b/test/sql/copy/csv/csv_write_zstd.test_slow index 96dcf002c348..7e37b6019c02 100644 --- a/test/sql/copy/csv/csv_write_zstd.test_slow +++ b/test/sql/copy/csv/csv_write_zstd.test_slow @@ -12,13 +12,13 @@ PRAGMA enable_verification foreach csv_name greek_utf8.csv imdb_movie_info_escaped.csv lineitem_sample.csv ncvoter.csv nfc_normalization.csv ontime_sample.csv voter.tsv web_page.csv statement ok -CREATE TABLE csv_data AS SELECT * FROM 'data/csv/real/${csv_name}'; +CREATE TABLE csv_data AS SELECT * FROM '{DATA_DIR}/csv/real/${csv_name}'; statement ok -COPY csv_data TO '__TEST_DIR__/${csv_name}.zst' (COMPRESSION ZSTD, HEADER 0); +COPY csv_data TO '{TEMP_DIR}/${csv_name}.zst' (COMPRESSION ZSTD, HEADER 0); statement ok -CREATE TABLE csv_data_zst AS SELECT * FROM '__TEST_DIR__/${csv_name}.zst'; +CREATE TABLE csv_data_zst AS SELECT * FROM '{TEMP_DIR}/${csv_name}.zst'; query I SELECT COUNT(*) FROM (SELECT * FROM csv_data EXCEPT SELECT * 
FROM csv_data_zst) diff --git a/test/sql/copy/csv/duck_fuzz/test_internal_4048.test b/test/sql/copy/csv/duck_fuzz/test_internal_4048.test index 24dad061b6a7..2e4961994f02 100644 --- a/test/sql/copy/csv/duck_fuzz/test_internal_4048.test +++ b/test/sql/copy/csv/duck_fuzz/test_internal_4048.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification statement error -FROM sniff_csv('data/csv/14512.csv', names := [NULL]); +FROM sniff_csv('{DATA_DIR}/csv/14512.csv', names := [NULL]); ---- Binder Error: read_csv names parameter cannot have a NULL value diff --git a/test/sql/copy/csv/empty_first_line.test b/test/sql/copy/csv/empty_first_line.test index 614e01616e40..937c1e69d39e 100644 --- a/test/sql/copy/csv/empty_first_line.test +++ b/test/sql/copy/csv/empty_first_line.test @@ -6,14 +6,14 @@ statement ok PRAGMA enable_verification query II -SELECT * FROM read_csv_auto('data/csv/empty_first_line.csv', delim=' '); +SELECT * FROM read_csv_auto('{DATA_DIR}/csv/empty_first_line.csv', delim=' '); ---- a 1 b 2 c 3 query I -SELECT * FROM read_csv_auto('data/csv/empty_first_line.csv', delim='|', auto_detect=false, columns={'column00': 'VARCHAR'}, skip = 1); +SELECT * FROM read_csv_auto('{DATA_DIR}/csv/empty_first_line.csv', delim='|', auto_detect=false, columns={'column00': 'VARCHAR'}, skip = 1); ---- a 1 b 2 diff --git a/test/sql/copy/csv/empty_string_quote.test b/test/sql/copy/csv/empty_string_quote.test index c9fe5a502383..430767b9d376 100644 --- a/test/sql/copy/csv/empty_string_quote.test +++ b/test/sql/copy/csv/empty_string_quote.test @@ -9,7 +9,7 @@ statement ok CREATE TABLE customer(c_customer_sk INTEGER, c_customer_id VARCHAR, c_current_cdemo_sk INTEGER, c_current_hdemo_sk INTEGER, c_current_addr_sk INTEGER, c_first_shipto_date_sk INTEGER, c_first_sales_date_sk INTEGER, c_salutation VARCHAR, c_first_name VARCHAR, c_last_name VARCHAR, c_preferred_cust_flag VARCHAR, c_birth_day INTEGER, c_birth_month INTEGER, c_birth_year INTEGER, c_birth_country VARCHAR, c_login VARCHAR, c_email_address VARCHAR, c_last_review_date_sk INTEGER); statement ok -COPY customer FROM 'data/csv/customer.csv' (FORMAT 'csv', quote '"', delimiter ',', header 0); +COPY customer FROM '{DATA_DIR}/csv/customer.csv' (FORMAT 'csv', quote '"', delimiter ',', header 0); query IIIIIIIIIIIIIIIIII SELECT * FROM customer @@ -29,7 +29,7 @@ statement ok CREATE TABLE customer_quoted_nulls(c_customer_sk INTEGER, c_customer_id VARCHAR, c_current_cdemo_sk INTEGER, c_current_hdemo_sk INTEGER, c_current_addr_sk INTEGER, c_first_shipto_date_sk INTEGER, c_first_sales_date_sk INTEGER, c_salutation VARCHAR, c_first_name VARCHAR, c_last_name VARCHAR, c_preferred_cust_flag VARCHAR, c_birth_day INTEGER, c_birth_month INTEGER, c_birth_year INTEGER, c_birth_country VARCHAR, c_login VARCHAR, c_email_address VARCHAR, c_last_review_date_sk INTEGER); statement ok -insert into customer_quoted_nulls select * from read_csv_auto('data/csv/customer.csv', allow_quoted_nulls=False) +insert into customer_quoted_nulls select * from read_csv_auto('{DATA_DIR}/csv/customer.csv', allow_quoted_nulls=False) query I SELECT COUNT(c_login) FROM customer_quoted_nulls diff --git a/test/sql/copy/csv/glob/copy_csv_glob.test b/test/sql/copy/csv/glob/copy_csv_glob.test index 9a765d543fde..0350d4fc6350 100644 --- a/test/sql/copy/csv/glob/copy_csv_glob.test +++ b/test/sql/copy/csv/glob/copy_csv_glob.test @@ -10,7 +10,7 @@ CREATE TABLE dates(d DATE); # simple globbing statement ok -COPY dates FROM 'data/csv/glob/a?/*.csv' (AUTO_DETECT 1); +COPY dates FROM 
'{DATA_DIR}/csv/glob/a?/*.csv' (AUTO_DETECT 1); query I SELECT * FROM dates ORDER BY 1 @@ -27,7 +27,7 @@ SELECT * FROM dates ORDER BY 1 # nothing matches the glob statement error -COPY dates FROM read_csv('data/csv/glob/*/a*a.csv', auto_detect=1) +COPY dates FROM read_csv('{DATA_DIR}/csv/glob/*/a*a.csv', auto_detect=1) ---- -syntax error at or near "'data/csv/glob/*/a*a.csv'" +syntax error at or near "'{DATA_DIR}/csv/glob/*/a*a.csv'" diff --git a/test/sql/copy/csv/glob/read_csv_glob.test b/test/sql/copy/csv/glob/read_csv_glob.test index 73806d513515..b8b2db7d464d 100644 --- a/test/sql/copy/csv/glob/read_csv_glob.test +++ b/test/sql/copy/csv/glob/read_csv_glob.test @@ -6,23 +6,23 @@ statement ok PRAGMA enable_verification query IIIII -select typeof(#1),typeof(#2),typeof(#3),typeof(#4),typeof(#5) FROM read_csv('data/csv/per_thread/*.csv') limit 1 +select typeof(#1),typeof(#2),typeof(#3),typeof(#4),typeof(#5) FROM read_csv('{DATA_DIR}/csv/per_thread/*.csv') limit 1 ---- VARCHAR BOOLEAN DOUBLE DOUBLE VARCHAR query IIIII -select typeof(#1),typeof(#2),typeof(#3),typeof(#4),typeof(#5) FROM read_csv(['data/csv/per_thread/c1.csv', 'data/csv/per_thread/c2.csv']) limit 1 +select typeof(#1),typeof(#2),typeof(#3),typeof(#4),typeof(#5) FROM read_csv(['{DATA_DIR}/csv/per_thread/c1.csv', '{DATA_DIR}/csv/per_thread/c2.csv']) limit 1 ---- VARCHAR BOOLEAN DOUBLE DOUBLE VARCHAR query IIIII -select typeof(#1),typeof(#2),typeof(#3),typeof(#4),typeof(#5) FROM read_csv(['data/csv/per_thread/c2.csv', 'data/csv/per_thread/c1.csv', 'data/csv/per_thread/c3.csv']) limit 1 +select typeof(#1),typeof(#2),typeof(#3),typeof(#4),typeof(#5) FROM read_csv(['{DATA_DIR}/csv/per_thread/c2.csv', '{DATA_DIR}/csv/per_thread/c1.csv', '{DATA_DIR}/csv/per_thread/c3.csv']) limit 1 ---- VARCHAR BOOLEAN DOUBLE DOUBLE VARCHAR # simple globbing query I -SELECT * FROM read_csv('data/csv/glob/a?/*.csv') ORDER BY 1 +SELECT * FROM read_csv('{DATA_DIR}/csv/glob/a?/*.csv') ORDER BY 1 ---- 2019-06-05 2019-06-15 @@ -35,7 +35,7 @@ SELECT * FROM read_csv('data/csv/glob/a?/*.csv') ORDER BY 1 2019-08-25 query I -SELECT * FROM read_csv('data/csv/glob/a?/a*.csv') ORDER BY 1 +SELECT * FROM read_csv('{DATA_DIR}/csv/glob/a?/a*.csv') ORDER BY 1 ---- 2019-06-05 2019-06-15 @@ -46,7 +46,7 @@ SELECT * FROM read_csv('data/csv/glob/a?/a*.csv') ORDER BY 1 # list parameter query I -SELECT * FROM read_csv(['data/csv/glob/a1/a1.csv', 'data/csv/glob/a2/a2.csv']) ORDER BY 1 +SELECT * FROM read_csv(['{DATA_DIR}/csv/glob/a1/a1.csv', '{DATA_DIR}/csv/glob/a2/a2.csv']) ORDER BY 1 ---- 2019-06-05 2019-06-15 @@ -56,7 +56,7 @@ SELECT * FROM read_csv(['data/csv/glob/a1/a1.csv', 'data/csv/glob/a2/a2.csv']) O 2019-07-25 query I -SELECT * FROM read_csv_auto(['data/csv/glob/a1/a1.csv', 'data/csv/glob/a2/a2.csv']) ORDER BY 1 +SELECT * FROM read_csv_auto(['{DATA_DIR}/csv/glob/a1/a1.csv', '{DATA_DIR}/csv/glob/a2/a2.csv']) ORDER BY 1 ---- 2019-06-05 2019-06-15 @@ -67,7 +67,7 @@ SELECT * FROM read_csv_auto(['data/csv/glob/a1/a1.csv', 'data/csv/glob/a2/a2.csv # multiple globs query I -SELECT * FROM read_csv(['data/csv/glob/a?/a*.csv', 'data/csv/glob/a?/a*.csv']) ORDER BY 1 +SELECT * FROM read_csv(['{DATA_DIR}/csv/glob/a?/a*.csv', '{DATA_DIR}/csv/glob/a?/a*.csv']) ORDER BY 1 ---- 2019-06-05 2019-06-05 @@ -84,7 +84,7 @@ SELECT * FROM read_csv(['data/csv/glob/a?/a*.csv', 'data/csv/glob/a?/a*.csv']) O # more asterisks for directories query I -SELECT * FROM read_csv('data/csv/*/a?/a*.csv') ORDER BY 1 +SELECT * FROM read_csv('{DATA_DIR}/csv/*/a?/a*.csv') ORDER BY 1 ---- 2019-06-05 2019-06-15 
@@ -94,7 +94,7 @@ SELECT * FROM read_csv('data/csv/*/a?/a*.csv') ORDER BY 1 2019-07-25 query II -SELECT a, b LIKE '%a1.csv%' FROM read_csv('data/csv/*/a?/a*.csv', filename=1) t1(a,b) ORDER BY 1 +SELECT a, b LIKE '%a1.csv%' FROM read_csv('{DATA_DIR}/csv/*/a?/a*.csv', filename=1) t1(a,b) ORDER BY 1 ---- 2019-06-05 1 2019-06-15 1 @@ -105,13 +105,13 @@ SELECT a, b LIKE '%a1.csv%' FROM read_csv('data/csv/*/a?/a*.csv', filename=1) t1 # read-csv auto fails here because of a type mismatch: most files contain dates, but one file contains integers statement error -SELECT * FROM read_csv('data/csv/glob/*/*.csv') ORDER BY 1 +SELECT * FROM read_csv('{DATA_DIR}/csv/glob/*/*.csv') ORDER BY 1 ---- Schema mismatch between globbed files. # forcing string parsing works query I -SELECT * FROM read_csv('data/csv/glob/*/*.csv', columns=STRUCT_PACK(d := 'STRING')) ORDER BY 1 +SELECT * FROM read_csv('{DATA_DIR}/csv/glob/*/*.csv', columns=STRUCT_PACK(d := 'STRING')) ORDER BY 1 ---- 1 2 @@ -127,7 +127,7 @@ SELECT * FROM read_csv('data/csv/glob/*/*.csv', columns=STRUCT_PACK(d := 'STRING 3 query II -SELECT a, b LIKE '%a_.csv' FROM read_csv('data/csv/glob/*/*.csv', columns=STRUCT_PACK(d := 'STRING'), filename=1) t(a,b) ORDER BY 1 +SELECT a, b LIKE '%a_.csv' FROM read_csv('{DATA_DIR}/csv/glob/*/*.csv', columns=STRUCT_PACK(d := 'STRING'), filename=1) t(a,b) ORDER BY 1 ---- 1 0 2 0 @@ -144,51 +144,51 @@ SELECT a, b LIKE '%a_.csv' FROM read_csv('data/csv/glob/*/*.csv', columns=STRUCT # test glob parsing query I -SELECT COUNT(*) FROM glob('data/csv/glob/*/*.csv') +SELECT COUNT(*) FROM glob('{DATA_DIR}/csv/glob/*/*.csv') ---- 5 query I -SELECT COUNT(*) FROM glob(['data/csv/glob/*/*.csv']) +SELECT COUNT(*) FROM glob(['{DATA_DIR}/csv/glob/*/*.csv']) ---- 5 query I -SELECT COUNT(*) FROM glob(['data/csv/glob/*/*.csv', 'data/csv/glob/*/*.csv']) +SELECT COUNT(*) FROM glob(['{DATA_DIR}/csv/glob/*/*.csv', '{DATA_DIR}/csv/glob/*/*.csv']) ---- 10 # we can also use windows file slashes query I -SELECT COUNT(*) FROM glob('data\csv\glob\*\*.csv') +SELECT COUNT(*) FROM glob('{DATA_DIR}\csv\glob\*\*.csv') ---- 5 # consecutive slashes are ignored query I -SELECT COUNT(*) FROM glob('data//csv///glob///*//////*.csv') +SELECT COUNT(*) FROM glob('{DATA_DIR}//csv///glob///*//////*.csv') ---- 5 # nothing matches the glob statement error -SELECT * FROM read_csv('data/csv/glob/*/a*a.csv') ORDER BY 1 +SELECT * FROM read_csv('{DATA_DIR}/csv/glob/*/a*a.csv') ORDER BY 1 ---- -No files found that match the pattern "data/csv/glob/*/a*a.csv" +No files found that match the pattern "{DATA_DIR}/csv/glob/*/a*a.csv" statement error -SELECT * FROM read_csv(['data/csv/glob/*/a*a.csv']) ORDER BY 1 +SELECT * FROM read_csv(['{DATA_DIR}/csv/glob/*/a*a.csv']) ORDER BY 1 ---- -No files found that match the pattern "data/csv/glob/*/a*a.csv" +No files found that match the pattern "{DATA_DIR}/csv/glob/*/a*a.csv" statement error -SELECT * FROM read_csv_auto(['data/csv/glob/*/a*a.csv']) ORDER BY 1 +SELECT * FROM read_csv_auto(['{DATA_DIR}/csv/glob/*/a*a.csv']) ORDER BY 1 ---- -No files found that match the pattern "data/csv/glob/*/a*a.csv" +No files found that match the pattern "{DATA_DIR}/csv/glob/*/a*a.csv" query I -SELECT COUNT(*) FROM glob('data/csv/glob/*/a*a.csv') +SELECT COUNT(*) FROM glob('{DATA_DIR}/csv/glob/*/a*a.csv') ---- 0 @@ -206,7 +206,7 @@ require skip_reload # file_search_path with one path statement ok -set file_search_path='data/csv/glob'; +set file_search_path='{DATA_DIR}/csv/glob'; query I SELECT COUNT(*) FROM glob('*/*.csv'); @@ -215,7 +215,7 @@ 
SELECT COUNT(*) FROM glob('*/*.csv'); # file_search_path with multiple paths statement ok -set file_search_path='data/csv/glob/a1,data/csv/glob/a2'; +set file_search_path='{DATA_DIR}/csv/glob/a1,{DATA_DIR}/csv/glob/a2'; query I SELECT COUNT(*) FROM glob('*.csv'); @@ -224,7 +224,7 @@ SELECT COUNT(*) FROM glob('*.csv'); # file_search_path with a non-existent path statement ok -set file_search_path='data/csv/glob,garbage'; +set file_search_path='{DATA_DIR}/csv/glob,garbage'; query I SELECT COUNT(*) FROM glob('*/*.csv'); @@ -233,7 +233,7 @@ SELECT COUNT(*) FROM glob('*/*.csv'); # Only file_search_path is searched query I -SELECT COUNT(*) FROM glob('data/csv/glob/*/*.csv'); +SELECT COUNT(*) FROM glob('csv/glob/*/*.csv'); ---- 0 @@ -242,7 +242,7 @@ statement ok set file_search_path=''; query I -SELECT COUNT(*) FROM glob('data/csv/glob/*/*.csv'); +SELECT COUNT(*) FROM glob('{DATA_DIR}/csv/glob/*/*.csv'); ---- 5 @@ -282,6 +282,6 @@ statement ok SET threads=1; statement error -FROM read_csv('data/csv/glob/*/*.csv'); +FROM read_csv('{DATA_DIR}/csv/glob/*/*.csv'); ---- -Schema mismatch between globbed files. \ No newline at end of file +Schema mismatch between globbed files. diff --git a/test/sql/copy/csv/glob/read_csv_glob_crawl_partitioned.test_slow b/test/sql/copy/csv/glob/read_csv_glob_crawl_partitioned.test_slow index 9da0c0c8095e..5418e754e865 100644 --- a/test/sql/copy/csv/glob/read_csv_glob_crawl_partitioned.test_slow +++ b/test/sql/copy/csv/glob/read_csv_glob_crawl_partitioned.test_slow @@ -2,7 +2,7 @@ # description: Test glob ** # group: [glob] -# files from: 'data/csv/glob/crawl/' +# files from: '{DATA_DIR}/csv/glob/crawl/' statement ok PRAGMA enable_verification @@ -10,7 +10,7 @@ PRAGMA enable_verification # simple crawling # example from: https://stackoverflow.com/a/66744400 query II -SELECT * FROM read_csv('data/csv/glob/crawl/stackoverflow/**/*.csv', auto_detect=1) ORDER BY 2; +SELECT * FROM read_csv('{DATA_DIR}/csv/glob/crawl/stackoverflow/**/*.csv', auto_detect=1) ORDER BY 2; ---- 0 0 1 1 @@ -21,7 +21,7 @@ SELECT * FROM read_csv('data/csv/glob/crawl/stackoverflow/**/*.csv', auto_detect # test with ** as the last entry query II -SELECT * FROM read_csv('data/csv/glob/crawl/stackoverflow/**', auto_detect=1) ORDER BY 2; +SELECT * FROM read_csv('{DATA_DIR}/csv/glob/crawl/stackoverflow/**', auto_detect=1) ORDER BY 2; ---- 0 0 1 1 @@ -32,7 +32,7 @@ SELECT * FROM read_csv('data/csv/glob/crawl/stackoverflow/**', auto_detect=1) OR # test with nested same name dirs query I -SELECT * FROM read_csv('data/csv/glob/crawl/samename/**/*.csv', auto_detect=1); +SELECT * FROM read_csv('{DATA_DIR}/csv/glob/crawl/samename/**/*.csv', auto_detect=1); ---- 42 42 @@ -45,7 +45,7 @@ SELECT * FROM read_csv('data/csv/glob/crawl/samename/**/*.csv', auto_detect=1); # test with nested same name dirs, but with ** as last entry query I -SELECT sum(column0) FROM read_csv('data/csv/glob/crawl/samename/**', auto_detect=1); +SELECT sum(column0) FROM read_csv('{DATA_DIR}/csv/glob/crawl/samename/**', auto_detect=1); ---- 336 @@ -55,118 +55,118 @@ SELECT sum(column0) FROM read_csv('data/csv/glob/crawl/samename/**', auto_detect # touch file.csv {d00,d01,d02}/file.csv && touch {d00,d01,d02}/{d10,d11,d12}/file.csv && touch {d00,d01,d02}/{d10,d11,d12}/{d20,d21,d22}/file.csv && touch {d00,d01,d02}/{d10,d11,d12}/{d20,d21,d22}/mid/file.csv && touch {d00,d01,d02}/{d10,d11,d12}/{d20,d21,d22}/mid/{d40,d41,d42}/file.csv query I -SELECT count(*) FROM read_csv('data/csv/glob/crawl/d/**/*.csv', auto_detect=1); +SELECT count(*) FROM
read_csv('{DATA_DIR}/csv/glob/crawl/d/**/*.csv', auto_detect=1); ---- 148 query I -SELECT count(*) FROM glob('data/csv/glob/crawl/d/**'); +SELECT count(*) FROM glob('{DATA_DIR}/csv/glob/crawl/d/**'); ---- 148 query I -SELECT sum(column0) FROM read_csv('data/csv/glob/crawl/d/**', auto_detect=1); +SELECT sum(column0) FROM read_csv('{DATA_DIR}/csv/glob/crawl/d/**', auto_detect=1); ---- 6216 query I -SELECT count(*) FROM glob('data/csv/glob/crawl/d/**/'); +SELECT count(*) FROM glob('{DATA_DIR}/csv/glob/crawl/d/**/'); ---- 148 query I -SELECT count(*) FROM glob('data/csv/glob/crawl/d/**/mid/*.csv'); +SELECT count(*) FROM glob('{DATA_DIR}/csv/glob/crawl/d/**/mid/*.csv'); ---- 27 query I -SELECT count(*) FROM 'data/csv/glob/crawl/d/**/mid/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/**/mid/*.csv'; ---- 27 query I -SELECT count(*) FROM 'data/csv/glob/crawl/d/**/mid/*/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/**/mid/*/*.csv'; ---- 81 query I -SELECT count(*) FROM glob('data/csv/glob/crawl/d/**/mid/*/'); +SELECT count(*) FROM glob('{DATA_DIR}/csv/glob/crawl/d/**/mid/*/'); ---- 81 statement error -SELECT count(*) FROM 'data/csv/glob/crawl/d/**/mid/**/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/**/mid/**/*.csv'; ---- Cannot use multiple '**' in one path query I -SELECT count(*) FROM 'data/csv/glob/crawl/d/**/???/*/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/**/???/*/*.csv'; ---- 144 query I -SELECT count(*) FROM 'data/csv/glob/crawl/d/*/???/**/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/*/???/**/*.csv'; ---- 144 statement error -SELECT count(*) FROM 'data/csv/glob/crawl/d/*/mid/**/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/*/mid/**/*.csv'; ---- No files found that match the pattern query I -SELECT count(*) FROM 'data/csv/glob/crawl/d/*/*/*/mid/**/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/*/*/*/mid/**/*.csv'; ---- 108 query I -SELECT count(*) FROM 'data/csv/glob/crawl/d/**/???/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/**/???/*.csv'; ---- 147 query I -SELECT count(*) FROM 'data/csv/glob/crawl/d/*/???/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/*/???/*.csv'; ---- 9 statement error -SELECT count(*) FROM 'data/csv/glob/crawl/d/**/*/**/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/**/*/**/*.csv'; ---- Cannot use multiple '**' in one path query I -SELECT count(*) FROM 'data/csv/glob/crawl/d/**/d2?/*/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/**/d2?/*/*.csv'; ---- 27 query I -SELECT count(*) FROM 'data/csv/glob/crawl/d/*/*/d2?/**/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/*/*/d2?/**/*.csv'; ---- 135 query I -SELECT sum(column0) FROM read_csv('data/csv/glob/crawl/d/*/*/d2?/**', auto_detect=1); +SELECT sum(column0) FROM read_csv('{DATA_DIR}/csv/glob/crawl/d/*/*/d2?/**', auto_detect=1); ---- 5670 query I -SELECT count(*) FROM 'data/csv/glob/crawl/d/**/d?0/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/**/d?0/*.csv'; ---- 40 query I -SELECT count(*) FROM 'data/csv/glob/crawl/d/*/**/d?0/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/*/**/d?0/*.csv'; ---- 39 statement error -SELECT count(*) FROM 'data/csv/glob/crawl/d/**/**/**/**/*.csv'; +SELECT count(*) FROM '{DATA_DIR}/csv/glob/crawl/d/**/**/**/**/*.csv'; ---- Cannot use multiple '**' in one path # Test with hidden files. By default python does not return hidden files, but duckdb does. 
query II -FROM read_csv_auto('data/csv/glob/crawl/hidden/**'); +FROM read_csv_auto('{DATA_DIR}/csv/glob/crawl/hidden/**'); ---- 42 42 42 42 @@ -177,64 +177,64 @@ statement ok CREATE TABLE t0 AS SELECT (i%2) AS c_2, (i%3) AS c_3, (i*i) AS c_pow FROM RANGE(0,10) tbl(i); statement ok -COPY t0 TO '__TEST_DIR__/partitioned0' (PARTITION_BY(c_2,c_3)); +COPY t0 TO '{TEMP_DIR}/partitioned0' (PARTITION_BY(c_2,c_3)); query I -from glob('__TEST_DIR__/partitioned0/*'); +from glob('{TEMP_DIR}/partitioned0/*'); ---- query I -select count(*) from glob('__TEST_DIR__/partitioned0/*'); +select count(*) from glob('{TEMP_DIR}/partitioned0/*'); ---- 0 query I -select count(*) from glob('__TEST_DIR__/partitioned0/*/*/*'); +select count(*) from glob('{TEMP_DIR}/partitioned0/*/*/*'); ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/*/*/**') +select count(*) from glob('{TEMP_DIR}/partitioned0/*/*/**') ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/*/*/*/**') +select count(*) from glob('{TEMP_DIR}/partitioned0/*/*/*/**') ---- 0 query I -select count(*) from glob('__TEST_DIR__/partitioned0/**'); +select count(*) from glob('{TEMP_DIR}/partitioned0/**'); ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/**/*'); +select count(*) from glob('{TEMP_DIR}/partitioned0/**/*'); ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/*/**'); +select count(*) from glob('{TEMP_DIR}/partitioned0/*/**'); ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/**/data_0.csv'); +select count(*) from glob('{TEMP_DIR}/partitioned0/**/data_0.csv'); ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/**/*/data_0.csv') +select count(*) from glob('{TEMP_DIR}/partitioned0/**/*/data_0.csv') ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/**/c_3=0/data_0.csv') +select count(*) from glob('{TEMP_DIR}/partitioned0/**/c_3=0/data_0.csv') ---- 2 query I -select count(*) from glob('__TEST_DIR__/partitioned0/c_2=0/**/data_0.csv') +select count(*) from glob('{TEMP_DIR}/partitioned0/c_2=0/**/data_0.csv') ---- 3 @@ -242,65 +242,65 @@ select count(*) from glob('__TEST_DIR__/partitioned0/c_2=0/**/data_0.csv') # put a file with a different name in the partitioned0 directory statement ok -COPY t0 TO '__TEST_DIR__/partitioned0/data_1.csv'; +COPY t0 TO '{TEMP_DIR}/partitioned0/data_1.csv'; query I -select count(*) from glob('__TEST_DIR__/partitioned0/*'); +select count(*) from glob('{TEMP_DIR}/partitioned0/*'); ---- 1 query I -select count(*) from glob('__TEST_DIR__/partitioned0/*'); +select count(*) from glob('{TEMP_DIR}/partitioned0/*'); ---- 1 query I -select count(*) from glob('__TEST_DIR__/partitioned0/*/*/*'); +select count(*) from glob('{TEMP_DIR}/partitioned0/*/*/*'); ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/*/*/**') +select count(*) from glob('{TEMP_DIR}/partitioned0/*/*/**') ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/*/*/*/**') +select count(*) from glob('{TEMP_DIR}/partitioned0/*/*/*/**') ---- 0 query I -select count(*) from glob('__TEST_DIR__/partitioned0/**'); +select count(*) from glob('{TEMP_DIR}/partitioned0/**'); ---- 7 query I -select count(*) from glob('__TEST_DIR__/partitioned0/**/*'); +select count(*) from glob('{TEMP_DIR}/partitioned0/**/*'); ---- 7 query I -select count(*) from glob('__TEST_DIR__/partitioned0/*/**'); +select count(*) from glob('{TEMP_DIR}/partitioned0/*/**'); ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/**/data_0.csv'); 
+select count(*) from glob('{TEMP_DIR}/partitioned0/**/data_0.csv'); ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/**/*/data_0.csv') +select count(*) from glob('{TEMP_DIR}/partitioned0/**/*/data_0.csv') ---- 6 query I -select count(*) from glob('__TEST_DIR__/partitioned0/**/c_3=0/data_0.csv') +select count(*) from glob('{TEMP_DIR}/partitioned0/**/c_3=0/data_0.csv') ---- 2 query I -select count(*) from glob('__TEST_DIR__/partitioned0/c_2=0/**/data_0.csv') +select count(*) from glob('{TEMP_DIR}/partitioned0/c_2=0/**/data_0.csv') ---- 3 @@ -308,12 +308,12 @@ require notwindows # symlink test: symlinks will not be searched query I -FROM read_csv_auto('data/csv/glob/crawl/.symbolic_link/**'); +FROM read_csv_auto('{DATA_DIR}/csv/glob/crawl/.symbolic_link/**'); ---- -42 statement error -SELECT sum(column0) FROM read_csv('data/csv/glob/crawl/d/**/', auto_detect=1); +SELECT sum(column0) FROM read_csv('{DATA_DIR}/csv/glob/crawl/d/**/', auto_detect=1); ---- Could not read from file diff --git a/test/sql/copy/csv/glob/test_unmatch_globs.test b/test/sql/copy/csv/glob/test_unmatch_globs.test index d6ade5557fb6..433f4939b381 100644 --- a/test/sql/copy/csv/glob/test_unmatch_globs.test +++ b/test/sql/copy/csv/glob/test_unmatch_globs.test @@ -6,26 +6,26 @@ statement ok PRAGMA enable_verification query III -FROM 'data/csv/glob_dif_dialect/14166/__200*.csv'; +FROM '{DATA_DIR}/csv/glob_dif_dialect/14166/__200*.csv'; ---- 2000-01-01 10 80.9189441112103 2000-01-02 5 109.16581782022259 query III -FROM read_csv(['data/csv/glob_dif_dialect/14166/__2000.csv', 'data/csv/glob_dif_dialect/14166/__2001.csv', 'data/csv/glob_dif_dialect/14166/empty.csv']); +FROM read_csv(['{DATA_DIR}/csv/glob_dif_dialect/14166/__2000.csv', '{DATA_DIR}/csv/glob_dif_dialect/14166/__2001.csv', '{DATA_DIR}/csv/glob_dif_dialect/14166/empty.csv']); ---- 2000-01-01 10 80.9189441112103 2000-01-02 5 109.16581782022259 query III -FROM read_csv(['data/csv/glob_dif_dialect/14166/__2000.csv','data/csv/glob_dif_dialect/14166/matching_types.csv']); +FROM read_csv(['{DATA_DIR}/csv/glob_dif_dialect/14166/__2000.csv','{DATA_DIR}/csv/glob_dif_dialect/14166/matching_types.csv']); ---- 2000-01-01 10 80.9189441112103 2000-01-02 5 109.16581782022259 # Globbing with different dialects query III -FROM 'data/csv/glob_dif_dialect/f_*.csv' order by all +FROM '{DATA_DIR}/csv/glob_dif_dialect/f_*.csv' order by all ---- 1 alice alice@email.com 1 alice alice@email.com diff --git a/test/sql/copy/csv/inconsistent_cells_error.test b/test/sql/copy/csv/inconsistent_cells_error.test index e9a0c7f3054f..10f3ff1b5660 100644 --- a/test/sql/copy/csv/inconsistent_cells_error.test +++ b/test/sql/copy/csv/inconsistent_cells_error.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query IIIII -from 'data/csv/inconsistent_cells.csv' +from '{DATA_DIR}/csv/inconsistent_cells.csv' ---- 1 2 3 4 5 1 2 3 4 5 diff --git a/test/sql/copy/csv/integer_exponent.test b/test/sql/copy/csv/integer_exponent.test index 711ac18560f8..e627b499cc89 100644 --- a/test/sql/copy/csv/integer_exponent.test +++ b/test/sql/copy/csv/integer_exponent.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query III -from 'data/csv/integer_exponent.csv' +from '{DATA_DIR}/csv/integer_exponent.csv' ---- 1 2 3.0 4 5 6.0 diff --git a/test/sql/copy/csv/issue_6690.test b/test/sql/copy/csv/issue_6690.test index 82b981f0cea0..ea8e05e207de 100644 --- a/test/sql/copy/csv/issue_6690.test +++ b/test/sql/copy/csv/issue_6690.test @@ -6,10 +6,10 @@ statement ok PRAGMA enable_verification 
statement ok -FROM read_csv_auto('data/csv/comma_decimal_null.csv',SEP=',',SAMPLE_SIZE=-1,decimal_separator=',') +FROM read_csv_auto('{DATA_DIR}/csv/comma_decimal_null.csv',SEP=',',SAMPLE_SIZE=-1,decimal_separator=',') query I -SELECT FINANZ_STATO_FSC FROM read_csv_auto('data/csv/comma_decimal_null.csv',SEP=',',SAMPLE_SIZE=-1,decimal_separator=',') +SELECT FINANZ_STATO_FSC FROM read_csv_auto('{DATA_DIR}/csv/comma_decimal_null.csv',SEP=',',SAMPLE_SIZE=-1,decimal_separator=',') ---- 80000000.0 1400000.0 diff --git a/test/sql/copy/csv/issue_6764.test b/test/sql/copy/csv/issue_6764.test index 3e256586e292..a351c1837642 100644 --- a/test/sql/copy/csv/issue_6764.test +++ b/test/sql/copy/csv/issue_6764.test @@ -6,38 +6,38 @@ statement ok PRAGMA enable_verification query III -FROM read_csv_auto('data/csv/issue6764.csv', all_varchar=true, skip=1, null_padding=True) +FROM read_csv_auto('{DATA_DIR}/csv/issue6764.csv', all_varchar=true, skip=1, null_padding=True) ---- 1 a alice 2 b bob query III -FROM read_csv_auto('data/csv/issue6764.csv', all_varchar=true, header=false, skip=1, null_padding=True) +FROM read_csv_auto('{DATA_DIR}/csv/issue6764.csv', all_varchar=true, header=false, skip=1, null_padding=True) ---- 1 a alice 2 b bob query IIII -FROM read_csv_auto('data/csv/issue6764.csv', null_padding=True) +FROM read_csv_auto('{DATA_DIR}/csv/issue6764.csv', null_padding=True) ---- 1 a alice NULL 2 b bob NULL query IIII -FROM read_csv_auto('data/csv/issue6764.csv', all_varchar=true, null_padding=True) +FROM read_csv_auto('{DATA_DIR}/csv/issue6764.csv', all_varchar=true, null_padding=True) ---- 1 a alice NULL 2 b bob NULL query IIII -FROM read_csv_auto('data/csv/issue6764.csv', header=false, null_padding=True) +FROM read_csv_auto('{DATA_DIR}/csv/issue6764.csv', header=false, null_padding=True) ---- one two three four 1 a alice NULL 2 b bob NULL query IIII -FROM read_csv_auto('data/csv/issue6764.csv', all_varchar=true, header=false, skip=0, sep=',', null_padding=True) +FROM read_csv_auto('{DATA_DIR}/csv/issue6764.csv', all_varchar=true, header=false, skip=0, sep=',', null_padding=True) ---- one two three four 1 a alice NULL diff --git a/test/sql/copy/csv/leading_zeros_autodetect.test b/test/sql/copy/csv/leading_zeros_autodetect.test index 446bf8d1562d..555160682e18 100644 --- a/test/sql/copy/csv/leading_zeros_autodetect.test +++ b/test/sql/copy/csv/leading_zeros_autodetect.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification statement ok -CREATE TABLE leading_zeros AS SELECT * FROM read_csv_auto('data/csv/leading_zeros.csv') +CREATE TABLE leading_zeros AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/leading_zeros.csv') query I SELECT CODGEO FROM leading_zeros LIMIT 1; @@ -19,7 +19,7 @@ SELECT typeof(CODGEO) FROM leading_zeros LIMIT 1; VARCHAR statement ok -CREATE TABLE leading_zeros2 AS SELECT * FROM read_csv_auto('data/csv/leading_zeros2.csv') +CREATE TABLE leading_zeros2 AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/leading_zeros2.csv') query IIIIIII SELECT * FROM leading_zeros2; diff --git a/test/sql/copy/csv/maximum_line_size.test_slow b/test/sql/copy/csv/maximum_line_size.test_slow index f0b9adab98a8..ae1512f0ec6a 100644 --- a/test/sql/copy/csv/maximum_line_size.test_slow +++ b/test/sql/copy/csv/maximum_line_size.test_slow @@ -10,33 +10,33 @@ CREATE TABLE test (a INTEGER, b VARCHAR, c INTEGER); # Linesize exceeds maximum_line_size statement error -insert into test select * from read_csv('data/csv/test/test_long_line.csv', columns={'a': 'INTEGER', 'b': 'VARCHAR', 'c': 'INTEGER'}, 
maximum_line_size=0); +insert into test select * from read_csv('{DATA_DIR}/csv/test/test_long_line.csv', columns={'a': 'INTEGER', 'b': 'VARCHAR', 'c': 'INTEGER'}, maximum_line_size=0); ---- Possible Solution: Change the maximum length size, e.g., max_line_size=10009 # Single line too long # "a".repeat(2 * 1024 * 1024 + 10); statement error -select * from read_csv_auto('data/csv/issue_8320_1.csv.gz'); +select * from read_csv_auto('{DATA_DIR}/csv/issue_8320_1.csv.gz'); ---- Maximum line size of 2000000 bytes exceeded # Single line too long, but with actual newline at the end # "a".repeat(2 * 1024 * 1024 + 10) + "\n"; statement error -select * from read_csv_auto('data/csv/issue_8320_2.csv.gz'); +select * from read_csv_auto('{DATA_DIR}/csv/issue_8320_2.csv.gz'); ---- Possible Solution: Change the maximum length size, e.g., max_line_size=2097165 # Multiple lines too long # String value = "a".repeat(2 * 1024 * 1024 + 10) + "\n"; String data = value + value + value + value; statement error -select * from read_csv_auto('data/csv/issue_8320_3.csv.gz'); +select * from read_csv_auto('{DATA_DIR}/csv/issue_8320_3.csv.gz'); ---- Possible Solution: Change the maximum length size, e.g., max_line_size=2097165 # Add a test to verify we throw if max line size below a buffer size statement error -select * from read_csv_auto('data/csv/issue_8320_3.csv.gz', max_line_size = 2097152, buffer_size = 10); +select * from read_csv_auto('{DATA_DIR}/csv/issue_8320_3.csv.gz', max_line_size = 2097152, buffer_size = 10); ---- Buffer Size of 10 must be a higher value than the maximum line size 2097152 \ No newline at end of file diff --git a/test/sql/copy/csv/multidelimiter/test_2_byte_delimiter.test b/test/sql/copy/csv/multidelimiter/test_2_byte_delimiter.test index 3a0ef461f270..d82c91253cc9 100644 --- a/test/sql/copy/csv/multidelimiter/test_2_byte_delimiter.test +++ b/test/sql/copy/csv/multidelimiter/test_2_byte_delimiter.test @@ -7,7 +7,7 @@ PRAGMA enable_verification query II -FROM read_csv('data/csv/multidelimiter/aa_delim_small.csv', delim = 'aa', header = False, buffer_size = 8) +FROM read_csv('{DATA_DIR}/csv/multidelimiter/aa_delim_small.csv', delim = 'aa', header = False, buffer_size = 8) ---- 1 2 1 a2 @@ -17,7 +17,7 @@ FROM read_csv('data/csv/multidelimiter/aa_delim_small.csv', delim = 'aa', header loop buffer_size 9 13 query II -FROM read_csv('data/csv/multidelimiter/ab_delim.csv', delim = 'ab', header = False, buffer_size = ${buffer_size}) +FROM read_csv('{DATA_DIR}/csv/multidelimiter/ab_delim.csv', delim = 'ab', header = False, buffer_size = ${buffer_size}) ---- 1 2 1 2 @@ -40,7 +40,7 @@ FROM read_csv('data/csv/multidelimiter/ab_delim.csv', delim = 'ab', header = Fal 1a b2 query II -FROM read_csv('data/csv/multidelimiter/aa_delim.csv', delim = 'aa', header = False, buffer_size = ${buffer_size}) +FROM read_csv('{DATA_DIR}/csv/multidelimiter/aa_delim.csv', delim = 'aa', header = False, buffer_size = ${buffer_size}) ---- 1 2 1 2 @@ -67,7 +67,7 @@ endloop loop buffer_size 13 17 query II -FROM read_csv('data/csv/multidelimiter/aa_delim_quoted.csv', delim = 'aa', header = False, buffer_size = ${buffer_size}) +FROM read_csv('{DATA_DIR}/csv/multidelimiter/aa_delim_quoted.csv', delim = 'aa', header = False, buffer_size = ${buffer_size}) ---- 1 2 1 2 @@ -90,7 +90,7 @@ FROM read_csv('data/csv/multidelimiter/aa_delim_quoted.csv', delim = 'aa', heade 1a a2 query II -FROM read_csv('data/csv/multidelimiter/aa_delim_quoted_2.csv', delim = 'aa', header = False, buffer_size = ${buffer_size}) +FROM 
read_csv('{DATA_DIR}/csv/multidelimiter/aa_delim_quoted_2.csv', delim = 'aa', header = False, buffer_size = ${buffer_size}) ---- 1 2 1 2 @@ -116,7 +116,7 @@ endloop query IIII -FROM read_csv('data/csv/multidelimiter/many_bytes.csv', delim = '\|', header = False) +FROM read_csv('{DATA_DIR}/csv/multidelimiter/many_bytes.csv', delim = '\|', header = False) ---- thisisaverysuberverylargestring thisisaverysuberverylargestring thisisaverysuberverylargestring NULL thisisaverysuberverylargestring thisisaverysuberverylargestring thisisaverysuberverylargestring NULL diff --git a/test/sql/copy/csv/multidelimiter/test_3_4_byte_delimiter.test b/test/sql/copy/csv/multidelimiter/test_3_4_byte_delimiter.test index c980a871f8c2..fb62a23fece1 100644 --- a/test/sql/copy/csv/multidelimiter/test_3_4_byte_delimiter.test +++ b/test/sql/copy/csv/multidelimiter/test_3_4_byte_delimiter.test @@ -9,7 +9,7 @@ PRAGMA enable_verification loop buffer_size 10 15 query II -FROM read_csv('data/csv/multidelimiter/aaa_delim.csv', delim = 'aaa', header = False, buffer_size = ${buffer_size}) +FROM read_csv('{DATA_DIR}/csv/multidelimiter/aaa_delim.csv', delim = 'aaa', header = False, buffer_size = ${buffer_size}) ---- 1 2 1 2 @@ -32,7 +32,7 @@ FROM read_csv('data/csv/multidelimiter/aaa_delim.csv', delim = 'aaa', header = F 1ab 2 query II -FROM read_csv('data/csv/multidelimiter/aaaa_delim.csv', delim = 'aaaa', header = False, buffer_size = ${buffer_size}) +FROM read_csv('{DATA_DIR}/csv/multidelimiter/aaaa_delim.csv', delim = 'aaaa', header = False, buffer_size = ${buffer_size}) ---- 1 2 1 2 @@ -55,7 +55,7 @@ FROM read_csv('data/csv/multidelimiter/aaaa_delim.csv', delim = 'aaaa', header = 1ab 2 query II -FROM read_csv('data/csv/multidelimiter/aaaa_delim_rn.csv', delim = 'aaaa', header = False, buffer_size = ${buffer_size}) +FROM read_csv('{DATA_DIR}/csv/multidelimiter/aaaa_delim_rn.csv', delim = 'aaaa', header = False, buffer_size = ${buffer_size}) ---- 1 2 1 2 diff --git a/test/sql/copy/csv/multidelimiter/test_abac.test b/test/sql/copy/csv/multidelimiter/test_abac.test index a452211be823..e34f3fe85ef7 100644 --- a/test/sql/copy/csv/multidelimiter/test_abac.test +++ b/test/sql/copy/csv/multidelimiter/test_abac.test @@ -3,12 +3,12 @@ # group: [multidelimiter] query II -FROM read_csv('data/csv/multidelimiter/aaab_delim.csv', delim = 'AAAB') +FROM read_csv('{DATA_DIR}/csv/multidelimiter/aaab_delim.csv', delim = 'AAAB') ---- A C query II -FROM read_csv('data/csv/multidelimiter/aab_delim.csv', delim = 'AAB') +FROM read_csv('{DATA_DIR}/csv/multidelimiter/aab_delim.csv', delim = 'AAB') ---- A C @@ -19,7 +19,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR, b VARCHAR, c VARCHAR); query I -COPY abac_tbl FROM 'data/csv/multidelimiter/abac.csv' (DELIMITER 'ABAC', AUTO_DETECT FALSE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/abac.csv' (DELIMITER 'ABAC', AUTO_DETECT FALSE); ---- 1 @@ -33,7 +33,7 @@ statement ok DELETE FROM abac_tbl; # query I -# COPY abac_tbl FROM 'data/csv/multidelimiter/abac.csv' (DELIMITER 'ABAC', QUOTE 'ABABABABABAB', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/abac.csv' (DELIMITER 'ABAC', QUOTE 'ABABABABABAB', AUTO_DETECT FALSE); # ---- # 1 # @@ -57,7 +57,7 @@ DELETE FROM abac_tbl; # CREATE TABLE abac_tbl (a VARCHAR, b VARCHAR, c VARCHAR); # # query I -# COPY abac_tbl FROM 'data/csv/multidelimiter/abac_mix.csv' (DELIMITER 'ABAD', QUOTE 'ABAB', ESCAPE 'ABAC', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/abac_mix.csv' (DELIMITER 'ABAD', QUOTE 'ABAB', ESCAPE 
'ABAC', AUTO_DETECT FALSE); # ---- # 1 # @@ -75,7 +75,7 @@ DELETE FROM abac_tbl; # CREATE TABLE abac_tbl (a VARCHAR); # # query I -# COPY abac_tbl FROM 'data/csv/multidelimiter/abac_incomplete_quote.csv' (QUOTE 'ABABABABAB', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/abac_incomplete_quote.csv' (QUOTE 'ABABABABAB', AUTO_DETECT FALSE); # ---- # 1 # @@ -94,7 +94,7 @@ DELETE FROM abac_tbl; # CREATE TABLE abac_tbl (a VARCHAR); # # query I -# COPY abac_tbl FROM 'data/csv/multidelimiter/abac_newline_in_quote.csv' (QUOTE 'ABABABABAB', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/abac_newline_in_quote.csv' (QUOTE 'ABABABABAB', AUTO_DETECT FALSE); # ---- # 2 # @@ -112,7 +112,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR); statement error -COPY abac_tbl FROM 'data/csv/multidelimiter/simple_unterminated_quote.csv' (QUOTE '"', AUTO_DETECT FALSE, strict_mode TRUE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/simple_unterminated_quote.csv' (QUOTE '"', AUTO_DETECT FALSE, strict_mode TRUE); ---- Line: 1 @@ -125,7 +125,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR); query I -COPY abac_tbl FROM 'data/csv/multidelimiter/file_ends_in_quoted_value.csv' (QUOTE '"', AUTO_DETECT FALSE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/file_ends_in_quoted_value.csv' (QUOTE '"', AUTO_DETECT FALSE); ---- 1 @@ -143,7 +143,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR); query I -COPY abac_tbl FROM 'data/csv/multidelimiter/file_ends_in_quoted_value.csv' (QUOTE '"', DELIMITER 'AAAB', AUTO_DETECT FALSE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/file_ends_in_quoted_value.csv' (QUOTE '"', DELIMITER 'AAAB', AUTO_DETECT FALSE); ---- 1 @@ -161,7 +161,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR); statement error -COPY abac_tbl FROM 'data/csv/multidelimiter/unterminated_quote_with_escape.csv' (QUOTE '"', ESCAPE '|', AUTO_DETECT FALSE, strict_mode TRUE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/unterminated_quote_with_escape.csv' (QUOTE '"', ESCAPE '|', AUTO_DETECT FALSE, strict_mode TRUE); ---- Value with unterminated quote found. @@ -173,7 +173,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR); statement error -COPY abac_tbl FROM 'data/csv/multidelimiter/unterminated_quote_escape.csv' (QUOTE '"', ESCAPE '"', AUTO_DETECT FALSE, strict_mode TRUE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/unterminated_quote_escape.csv' (QUOTE '"', ESCAPE '"', AUTO_DETECT FALSE, strict_mode TRUE); ---- Value with unterminated quote found. 
@@ -185,7 +185,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR); statement error -COPY abac_tbl FROM 'data/csv/multidelimiter/unterminated_escape.csv' (QUOTE '"', ESCAPE '''', AUTO_DETECT FALSE, strict_mode TRUE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/unterminated_escape.csv' (QUOTE '"', ESCAPE '''', AUTO_DETECT FALSE, strict_mode TRUE); ---- 0 @@ -197,7 +197,7 @@ DROP TABLE abac_tbl # CREATE TABLE abac_tbl (a VARCHAR); # # statement error -# COPY abac_tbl FROM 'data/csv/multidelimiter/complex_unterminated_quote.csv' (QUOTE 'ABABAC', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/complex_unterminated_quote.csv' (QUOTE 'ABABAC', AUTO_DETECT FALSE); # ---- # 0 # @@ -209,12 +209,12 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR); statement error -COPY abac_tbl FROM 'data/csv/multidelimiter/incomplete_multibyte_delimiter.csv' (DELIMITER 'ABAC', AUTO_DETECT FALSE, quote '"', strict_mode TRUE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/incomplete_multibyte_delimiter.csv' (DELIMITER 'ABAC', AUTO_DETECT FALSE, quote '"', strict_mode TRUE); ---- Value with unterminated quote found. query I -COPY abac_tbl FROM 'data/csv/multidelimiter/incomplete_multibyte_delimiter.csv' (DELIMITER 'AB', AUTO_DETECT FALSE , quote '"'); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/incomplete_multibyte_delimiter.csv' (DELIMITER 'AB', AUTO_DETECT FALSE , quote '"'); ---- 1 @@ -226,7 +226,7 @@ DROP TABLE abac_tbl # CREATE TABLE abac_tbl (a VARCHAR); # # statement error -# COPY abac_tbl FROM 'data/csv/multidelimiter/unterminated_quote_with_escape_complex.csv' (QUOTE 'ABAC', ESCAPE 'ABAB', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/unterminated_quote_with_escape_complex.csv' (QUOTE 'ABAC', ESCAPE 'ABAB', AUTO_DETECT FALSE); # ---- # line 1 # @@ -238,7 +238,7 @@ DROP TABLE abac_tbl # CREATE TABLE abac_tbl (a VARCHAR); # # statement error -# COPY abac_tbl FROM 'data/csv/multidelimiter/unterminated_quote_escape_complex.csv' (QUOTE 'ABAC', ESCAPE 'ABAC', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/unterminated_quote_escape_complex.csv' (QUOTE 'ABAC', ESCAPE 'ABAC', AUTO_DETECT FALSE); # ---- # line 1 # @@ -250,7 +250,7 @@ DROP TABLE abac_tbl # CREATE TABLE abac_tbl (a VARCHAR); # # statement error -# COPY abac_tbl FROM 'data/csv/multidelimiter/unterminated_escape_complex.csv' (QUOTE 'ABAC', ESCAPE 'ABAB', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/unterminated_escape_complex.csv' (QUOTE 'ABAC', ESCAPE 'ABAB', AUTO_DETECT FALSE); # ---- # line 1 # @@ -262,19 +262,19 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR); # statement error -# COPY abac_tbl FROM 'data/csv/multidelimiter/abac.csv' (QUOTE 
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/abac.csv' (QUOTE 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', AUTO_DETECT FALSE); # # statement error -# COPY abac_tbl FROM 'data/csv/multidelimiter/abac.csv' (ESCAPE 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/abac.csv' (ESCAPE 
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', AUTO_DETECT FALSE); statement error -COPY abac_tbl FROM 'data/csv/multidelimiter/abac.csv' (DELIMITER 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', AUTO_DETECT FALSE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/abac.csv' (DELIMITER 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', AUTO_DETECT FALSE); ---- The delimiter option cannot exceed a size of 4 bytes. 
# query I -# COPY abac_tbl FROM 'data/csv/multidelimiter/abac.csv' (QUOTE 'BLABLABLA', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/abac.csv' (QUOTE 'BLABLABLA', AUTO_DETECT FALSE); # ---- # 1 @@ -286,7 +286,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR, b VARCHAR); query I -COPY abac_tbl FROM 'data/csv/multidelimiter/carriage_feed_newline.csv' (DELIMITER 'BA', AUTO_DETECT FALSE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/carriage_feed_newline.csv' (DELIMITER 'BA', AUTO_DETECT FALSE); ---- 2 @@ -304,7 +304,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR, b VARCHAR); query I -COPY abac_tbl FROM 'data/csv/multidelimiter/windows_newline.csv' (DELIMITER 'BA', AUTO_DETECT FALSE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/windows_newline.csv' (DELIMITER 'BA', AUTO_DETECT FALSE); ---- 2 @@ -322,7 +322,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR); statement error -COPY abac_tbl FROM 'data/csv/multidelimiter/unterminated_quote_multi_line.csv' (DELIMITER 'BA', AUTO_DETECT FALSE, strict_mode TRUE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/unterminated_quote_multi_line.csv' (DELIMITER 'BA', AUTO_DETECT FALSE, strict_mode TRUE); ---- Value with unterminated quote found. @@ -334,7 +334,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR, b VARCHAR); statement error -COPY abac_tbl FROM 'data/csv/multidelimiter/unquote_without_delimiter.csv' (DELIMITER 'BA', AUTO_DETECT FALSE, strict_mode TRUE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/unquote_without_delimiter.csv' (DELIMITER 'BA', AUTO_DETECT FALSE, strict_mode TRUE); ---- Value with unterminated quote found. @@ -346,7 +346,7 @@ DROP TABLE abac_tbl # CREATE TABLE abac_tbl (a VARCHAR, b VARCHAR); # # statement error -# COPY abac_tbl FROM 'data/csv/multidelimiter/escape_non_quote_escape_complex.csv' (DELIMITER 'BA', ESCAPE 'XX', AUTO_DETECT FALSE); +# COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/escape_non_quote_escape_complex.csv' (DELIMITER 'BA', ESCAPE 'XX', AUTO_DETECT FALSE); # # statement ok # DROP TABLE abac_tbl @@ -356,7 +356,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR); query I -COPY abac_tbl FROM 'data/csv/multidelimiter/trailing_delimiter_complex.csv' (DELIMITER 'BA', AUTO_DETECT FALSE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/trailing_delimiter_complex.csv' (DELIMITER 'BA', AUTO_DETECT FALSE); ---- 1 @@ -373,7 +373,7 @@ statement ok CREATE TABLE abac_tbl (a VARCHAR); query I -COPY abac_tbl FROM 'data/csv/multidelimiter/trailing_delimiter.csv' (DELIMITER '|', AUTO_DETECT FALSE); +COPY abac_tbl FROM '{DATA_DIR}/csv/multidelimiter/trailing_delimiter.csv' (DELIMITER '|', AUTO_DETECT FALSE); ---- 1 diff --git a/test/sql/copy/csv/multidelimiter/test_options_inconsistencies.test b/test/sql/copy/csv/multidelimiter/test_options_inconsistencies.test index 20ddececc7c8..986f3a7546e7 100644 --- a/test/sql/copy/csv/multidelimiter/test_options_inconsistencies.test +++ b/test/sql/copy/csv/multidelimiter/test_options_inconsistencies.test @@ -3,16 +3,16 @@ # group: [multidelimiter] statement error -FROM read_csv('data/csv/multidelimiter/aaa_delim.csv', delim = '"\', quote ='"') +FROM read_csv('{DATA_DIR}/csv/multidelimiter/aaa_delim.csv', delim = '"\', quote ='"') ---- QUOTE must not appear in the DELIMITER specification and vice versa statement error -FROM read_csv('data/csv/multidelimiter/aaa_delim.csv', delim = '\\', escape ='\') +FROM read_csv('{DATA_DIR}/csv/multidelimiter/aaa_delim.csv', delim = '\\', escape ='\') ---- ESCAPE must not appear in the 
DELIMITER specification and vice versa statement error -FROM read_csv('data/csv/multidelimiter/aaa_delim.csv', delim = '|#', comment ='#', auto_detect = false, columns = {'a': 'varchar'}) +FROM read_csv('{DATA_DIR}/csv/multidelimiter/aaa_delim.csv', delim = '|#', comment ='#', auto_detect = false, columns = {'a': 'varchar'}) ---- COMMENT must not appear in the DELIMITER specification and vice versa diff --git a/test/sql/copy/csv/null_padding_big.test b/test/sql/copy/csv/null_padding_big.test index e8268ff11194..1726559466c3 100644 --- a/test/sql/copy/csv/null_padding_big.test +++ b/test/sql/copy/csv/null_padding_big.test @@ -10,48 +10,48 @@ statement ok CREATE TABLE test (a VARCHAR, b INTEGER, c INTEGER); query I -INSERT INTO test SELECT * FROM read_csv_auto('data/csv/nullpadding_big_mixed.csv', null_padding=True) +INSERT INTO test SELECT * FROM read_csv_auto('{DATA_DIR}/csv/nullpadding_big_mixed.csv', null_padding=True) ---- 2501 query I -SELECT COUNT(*) FROM read_csv_auto('data/csv/nullpadding_big_mixed.csv', null_padding=True) +SELECT COUNT(*) FROM read_csv_auto('{DATA_DIR}/csv/nullpadding_big_mixed.csv', null_padding=True) ---- 2501 # one line that requires null padding query I -SELECT COUNT(*) FROM read_csv_auto('data/csv/nullpadding_big_mixed.csv', null_padding=False, ignore_errors=True) +SELECT COUNT(*) FROM read_csv_auto('{DATA_DIR}/csv/nullpadding_big_mixed.csv', null_padding=False, ignore_errors=True) ---- 2500 statement error -SELECT COUNT(*) FROM read_csv_auto('data/csv/nullpadding_big_mixed.csv') +SELECT COUNT(*) FROM read_csv_auto('{DATA_DIR}/csv/nullpadding_big_mixed.csv') ---- -Error when sniffing file "data/csv/nullpadding_big_mixed.csv". +Error when sniffing file "{DATA_DIR}/csv/nullpadding_big_mixed.csv". statement ok CREATE TABLE test2 (a VARCHAR, b INTEGER, c INTEGER, d INTEGER); # all nulls, several lines require NULL padding query I -SELECT COUNT(*) FROM read_csv_auto('data/csv/nullpadding_commas.csv', null_padding=True) +SELECT COUNT(*) FROM read_csv_auto('{DATA_DIR}/csv/nullpadding_commas.csv', null_padding=True) ---- 2500 query I -SELECT COUNT(*) FROM read_csv('data/csv/nullpadding_commas.csv', sep=',', columns={'a': INT, 'b': INT, 'c': INT, 'd': INT}, ignore_errors=True, null_padding=False) +SELECT COUNT(*) FROM read_csv('{DATA_DIR}/csv/nullpadding_commas.csv', sep=',', columns={'a': INT, 'b': INT, 'c': INT, 'd': INT}, ignore_errors=True, null_padding=False) ---- 2492 require notwindows statement error -SELECT COUNT(*) FROM read_csv_auto('data/csv/nullpadding_big_mixed.csv', buffer_size=55) +SELECT COUNT(*) FROM read_csv_auto('{DATA_DIR}/csv/nullpadding_big_mixed.csv', buffer_size=55) ---- It was not possible to automatically detect the CSV parsing dialect statement error -SELECT COUNT(*) FROM read_csv_auto('data/csv/nullpadding_big_mixed.csv', buffer_size=55, null_padding=False) +SELECT COUNT(*) FROM read_csv_auto('{DATA_DIR}/csv/nullpadding_big_mixed.csv', buffer_size=55, null_padding=False) ---- It was not possible to automatically detect the CSV parsing dialect diff --git a/test/sql/copy/csv/null_terminator.test b/test/sql/copy/csv/null_terminator.test index be69c2bed4d9..5b39625943c9 100644 --- a/test/sql/copy/csv/null_terminator.test +++ b/test/sql/copy/csv/null_terminator.test @@ -6,21 +6,21 @@ statement ok PRAGMA enable_verification query I -FROM 'data/csv/null_terminator.csv' +FROM '{DATA_DIR}/csv/null_terminator.csv' ---- query I -FROM read_csv('data/csv/null_terminator.csv', header = 0) +FROM read_csv('{DATA_DIR}/csv/null_terminator.csv', header = 0) 
---- a\0b query I -FROM read_csv('data/csv/null_terminator.csv', header = 0, escape = '') +FROM read_csv('{DATA_DIR}/csv/null_terminator.csv', header = 0, escape = '') ---- a\0b query I -FROM read_csv('data/csv/null_terminator.csv', header = 0, delim = '\t', quote = '', escape = '') +FROM read_csv('{DATA_DIR}/csv/null_terminator.csv', header = 0, delim = '\t', quote = '', escape = '') ---- a\0b @@ -28,9 +28,9 @@ statement ok create or replace table t as (from values ('a' || chr(0) || 'b') t(i)); statement ok -copy t to '__TEST_DIR__/csv2tsv.tsv' (header false, delimiter '\t', escape '', quote ''); +copy t to '{TEMP_DIR}/csv2tsv.tsv' (header false, delimiter '\t', escape '', quote ''); query I -FROM read_csv('__TEST_DIR__/csv2tsv.tsv', header = 0) +FROM read_csv('{TEMP_DIR}/csv2tsv.tsv', header = 0) ---- a\0b \ No newline at end of file diff --git a/test/sql/copy/csv/null_value_wrong_type.test b/test/sql/copy/csv/null_value_wrong_type.test index 1c5eb9c6c7d9..37c888217946 100644 --- a/test/sql/copy/csv/null_value_wrong_type.test +++ b/test/sql/copy/csv/null_value_wrong_type.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query IIIIII -describe from 'data/csv/null_mismatch.csv' +describe from '{DATA_DIR}/csv/null_mismatch.csv' ---- id BIGINT YES NULL NULL NULL name VARCHAR YES NULL NULL NULL diff --git a/test/sql/copy/csv/parallel/csv_parallel_buffer_size.test b/test/sql/copy/csv/parallel/csv_parallel_buffer_size.test index 16ae4724d9a4..351da34a97b3 100644 --- a/test/sql/copy/csv/parallel/csv_parallel_buffer_size.test +++ b/test/sql/copy/csv/parallel/csv_parallel_buffer_size.test @@ -7,22 +7,22 @@ statement ok PRAGMA verify_parallelism query III -SELECT sum(a), sum(b), sum(c) FROM read_csv('data/csv/test/multi_column_integer.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER'), auto_detect='true', delim = '|', buffer_size=30) +SELECT sum(a), sum(b), sum(c) FROM read_csv('{DATA_DIR}/csv/test/multi_column_integer.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER'), auto_detect='true', delim = '|', buffer_size=30) ---- 111111111 51866 3195 query I -SELECT sum(a) FROM read_csv('data/csv/test/multi_column_integer.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER'), auto_detect='true', delim = '|', buffer_size=30) +SELECT sum(a) FROM read_csv('{DATA_DIR}/csv/test/multi_column_integer.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER'), auto_detect='true', delim = '|', buffer_size=30) ---- 111111111 query I -SELECT sum(a) FROM read_csv('data/csv/test/multi_column_integer_rn.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER'), auto_detect='true', delim = '|', buffer_size=30) +SELECT sum(a) FROM read_csv('{DATA_DIR}/csv/test/multi_column_integer_rn.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER'), auto_detect='true', delim = '|', buffer_size=30) ---- 111111111 query IIII -select * from read_csv('data/csv/test/multi_column_string.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|', buffer_size=30) +select * from read_csv('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|', buffer_size=30) ---- 1 6370 371 p1 10 214 465 p2 @@ -35,7 +35,7 @@ select * from read_csv('data/csv/test/multi_column_string.csv', COLUMNS=STRUCT_ 100000000 15519 785 p9 query IIII -select * from 
read_csv('data/csv/test/multi_column_string_rn.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|', buffer_size=27) +select * from read_csv('{DATA_DIR}/csv/test/multi_column_string_rn.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|', buffer_size=27) ---- 1 6370 371 p1 10 214 465 p2 @@ -48,33 +48,33 @@ select * from read_csv('data/csv/test/multi_column_string_rn.csv', COLUMNS=STRU 100000000 15519 785 p9 query I -SELECT sum(a) FROM read_csv('data/csv/test/new_line_string_rn.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|') +SELECT sum(a) FROM read_csv('{DATA_DIR}/csv/test/new_line_string_rn.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|') ---- 111 query I -SELECT sum(a) FROM read_csv('data/csv/test/new_line_string_rn.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|', buffer_size=100) +SELECT sum(a) FROM read_csv('{DATA_DIR}/csv/test/new_line_string_rn.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|', buffer_size=100) ---- 111 query I -SELECT sum(a) FROM read_csv('data/csv/test/new_line_string_rn_exc.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|') +SELECT sum(a) FROM read_csv('{DATA_DIR}/csv/test/new_line_string_rn_exc.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|') ---- 111 query I -SELECT sum(a) FROM read_csv('data/csv/test/new_line_string_rn_exc.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|', buffer_size=80) +SELECT sum(a) FROM read_csv('{DATA_DIR}/csv/test/new_line_string_rn_exc.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|', buffer_size=80) ---- 111 query I -SELECT sum(a) FROM read_csv('data/csv/test/new_line_string.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|') +SELECT sum(a) FROM read_csv('{DATA_DIR}/csv/test/new_line_string.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='true', delim = '|') ---- 111 query I -SELECT sum(a) FROM read_csv('data/csv/test/new_line_string.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), quote ='"', escape ='"', comment = '', auto_detect='true', delim = '|', buffer_size=100, new_line = '\r\n', strict_mode = false) +SELECT sum(a) FROM read_csv('{DATA_DIR}/csv/test/new_line_string.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), quote ='"', escape ='"', comment = '', auto_detect='true', delim = '|', buffer_size=100, new_line = '\r\n', strict_mode = false) ---- 111 diff --git a/test/sql/copy/csv/parallel/csv_parallel_new_line.test_slow b/test/sql/copy/csv/parallel/csv_parallel_new_line.test_slow index 74fec200c960..509e51a97d57 100644 --- a/test/sql/copy/csv/parallel/csv_parallel_new_line.test_slow +++ b/test/sql/copy/csv/parallel/csv_parallel_new_line.test_slow @@ -14,7 +14,7 @@ loop i 27 100 # Test read_csv auto 
with \n query IIII -select * from read_csv_auto('data/csv/test/multi_column_string.csv', buffer_size=${i}) +select * from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', buffer_size=${i}) ---- 1 6370 371 p1 10 214 465 p2 @@ -28,7 +28,7 @@ select * from read_csv_auto('data/csv/test/multi_column_string.csv', buffer_size # Test read_csv auto with \r query IIII -select * from read_csv_auto('data/csv/auto/multi_column_string_r.csv', buffer_size=${i}) +select * from read_csv_auto('{DATA_DIR}/csv/auto/multi_column_string_r.csv', buffer_size=${i}) ---- 1 6370 371 p1 10 214 465 p2 @@ -42,12 +42,12 @@ select * from read_csv_auto('data/csv/auto/multi_column_string_r.csv', buffer_si # Test read_csv auto with mix \r and \n statement error -select * from read_csv_auto('data/csv/auto/multi_column_string_mix_r_n.csv', buffer_size=${i}) +select * from read_csv_auto('{DATA_DIR}/csv/auto/multi_column_string_mix_r_n.csv', buffer_size=${i}) ---- * Disable the parser's strict mode (strict_mode=false) to allow reading rows that do not comply with the CSV standard. query IIII -select * from read_csv_auto('data/csv/auto/multi_column_string_mix_r_n.csv', buffer_size=${i}, strict_mode = False) +select * from read_csv_auto('{DATA_DIR}/csv/auto/multi_column_string_mix_r_n.csv', buffer_size=${i}, strict_mode = False) ---- 1 6370 371 p1 10 214 465 p2 @@ -62,7 +62,7 @@ select * from read_csv_auto('data/csv/auto/multi_column_string_mix_r_n.csv', buf # Test read_csv auto with \r\n query IIII -select * from read_csv_auto('data/csv/test/multi_column_string_rn.csv', buffer_size=${i}, header=False) +select * from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string_rn.csv', buffer_size=${i}, header=False) ---- 1 6370 371 p1 10 214 465 p2 @@ -78,12 +78,12 @@ endloop # Test read_csv auto with mix \r, \n and \r\n (This must always run single threaded) statement error -select * from read_csv_auto('data/csv/auto/multi_column_string_mix.csv') +select * from read_csv_auto('{DATA_DIR}/csv/auto/multi_column_string_mix.csv') ---- Disable the parser's strict mode (strict_mode=false) to allow reading rows that do not comply with the CSV standard. 
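For context on the rewrite pattern: the bulk of the hunks in this patch apply the same mechanical substitution, swapping paths that were hard-coded relative to the repository checkout for placeholders — 'data/...' becomes '{DATA_DIR}/...' and '__TEST_DIR__' becomes '{TEMP_DIR}' — which are presumably expanded by the sqllogictest harness before each statement runs. A minimal before/after sketch in sqllogictest form (the file name and row count below are hypothetical, not taken from the patch):

    # before: path resolved against the checkout root
    query I
    SELECT count(*) FROM read_csv_auto('data/csv/example.csv')
    ----
    3

    # after: path resolved via the harness-provided data directory
    query I
    SELECT count(*) FROM read_csv_auto('{DATA_DIR}/csv/example.csv')
    ----
    3

The same placeholder style also shows up in expected output (error messages and reject-table file paths), which is why hunks below rewrite result rows such as the reject_scans listings, not just the read_csv calls.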
query IIII -select * from read_csv_auto('data/csv/auto/multi_column_string_mix.csv', strict_mode=false) +select * from read_csv_auto('{DATA_DIR}/csv/auto/multi_column_string_mix.csv', strict_mode=false) ---- 1 6370 371 p1 10 214 465 p2 @@ -101,7 +101,7 @@ require notwindows # Test read_csv with user defined variable query IIII -select * from read_csv('data/csv/test/multi_column_string.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='false', delim = '|', new_line = '\n') +select * from read_csv('{DATA_DIR}/csv/test/multi_column_string.csv', COLUMNS=STRUCT_PACK(a := 'INTEGER', b := 'INTEGER', c := 'INTEGER', d := 'VARCHAR'), auto_detect='false', delim = '|', new_line = '\n') ---- 1 6370 371 p1 10 214 465 p2 @@ -114,7 +114,7 @@ select * from read_csv('data/csv/test/multi_column_string.csv', COLUMNS=STRUCT_ 100000000 15519 785 p9 query IIII -select * from read_csv_auto('data/csv/test/multi_column_string.csv', new_line = '\n') +select * from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', new_line = '\n') ---- 1 6370 371 p1 10 214 465 p2 @@ -127,5 +127,5 @@ select * from read_csv_auto('data/csv/test/multi_column_string.csv', new_line 100000000 15519 785 p9 statement error -select * from read_csv_auto('data/csv/test/multi_column_string.csv', new_line = 'not_valid') +select * from read_csv_auto('{DATA_DIR}/csv/test/multi_column_string.csv', new_line = 'not_valid') ---- diff --git a/test/sql/copy/csv/parallel/parallel_csv_hive_partitioning.test b/test/sql/copy/csv/parallel/parallel_csv_hive_partitioning.test index 8cd1030d0b23..3ea820f89059 100644 --- a/test/sql/copy/csv/parallel/parallel_csv_hive_partitioning.test +++ b/test/sql/copy/csv/parallel/parallel_csv_hive_partitioning.test @@ -11,19 +11,19 @@ PRAGMA verify_parallelism # filenames could allow you to parse hive partitions manually using SQL query IIII -select id, value, filename.replace('\', '/').split('/')[-2], filename.replace('\', '/').split('/')[-3] from read_csv_auto('data/csv/hive-partitioning/simple/*/*/test.csv', FILENAME=1) order by id +select id, value, filename.replace('\', '/').split('/')[-2], filename.replace('\', '/').split('/')[-3] from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/simple/*/*/test.csv', FILENAME=1) order by id ---- 1 value1 date=2012-01-01 part=a 2 value2 date=2013-01-01 part=b query IIII -select id, value, part, date from read_csv_auto('data/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1) order by id +select id, value, part, date from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1) order by id ---- 1 value1 a 2012-01-01 2 value2 b 2013-01-01 query III -select part, value, date from read_csv_auto('data/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1) order by 1 +select part, value, date from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/simple/*/*/test.csv', HIVE_PARTITIONING=1) order by 1 ---- a value1 2012-01-01 b value2 2013-01-01 diff --git a/test/sql/copy/csv/parallel/parallel_csv_union_by_name.test b/test/sql/copy/csv/parallel/parallel_csv_union_by_name.test index 823e129271a4..baac2e19bb98 100644 --- a/test/sql/copy/csv/parallel/parallel_csv_union_by_name.test +++ b/test/sql/copy/csv/parallel/parallel_csv_union_by_name.test @@ -14,27 +14,27 @@ PRAGMA verify_parallelism query IIII SELECT id, value, a, part -FROM read_csv_auto('data/csv/union-by-name/part=[ab]/*',HIVE_PARTITIONING=TRUE ,UNION_BY_NAME=TRUE) +FROM 
read_csv_auto('{DATA_DIR}/csv/union-by-name/part=[ab]/*',HIVE_PARTITIONING=TRUE ,UNION_BY_NAME=TRUE) ORDER BY id ---- 1 value1 aaa a 2 value2 NULL b query IIII -SELECT k, c, ts, replace(filename, '\', '/') -FROM read_csv_auto('data/csv/union-by-name/ubn[!1-2].csv',FILENAME=TRUE ,UNION_BY_NAME=TRUE) +SELECT k, c, ts, parse_filename(filename) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn[!1-2].csv',FILENAME=TRUE ,UNION_BY_NAME=TRUE) ORDER BY c ---- -6 3 2003-06-30 12:03:10 data/csv/union-by-name/ubn3.csv -6 5 2003-06-30 12:03:10 data/csv/union-by-name/ubn3.csv -6 6 2003-06-30 12:03:10 data/csv/union-by-name/ubn3.csv -NULL 100 Monday data/csv/union-by-name/ubn4.csv -NULL 200 Sunday data/csv/union-by-name/ubn4.csv -NULL 300 Friday data/csv/union-by-name/ubn4.csv +6 3 2003-06-30 12:03:10 ubn3.csv +6 5 2003-06-30 12:03:10 ubn3.csv +6 6 2003-06-30 12:03:10 ubn3.csv +NULL 100 Monday ubn4.csv +NULL 200 Sunday ubn4.csv +NULL 300 Friday ubn4.csv query IIIII SELECT a, b, c, ts, k -FROM read_csv_auto('data/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) ORDER BY a, c, ts ---- NULL NULL 3 2003-06-30 12:03:10 6 @@ -55,7 +55,7 @@ mode unskip query TTTTT SELECT typeof(a), typeof(b), typeof(c), typeof(ts), typeof(k) -FROM read_csv_auto('data/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) LIMIT 1; ---- VARCHAR BIGINT BIGINT VARCHAR BIGINT @@ -65,7 +65,7 @@ mode skip # projection pushdown query II SELECT c, k -FROM read_csv_auto('data/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) ORDER BY c NULLS LAST, k NULLS LAST ---- 3 6 @@ -85,7 +85,7 @@ NULL NULL # projection pushdown query I SELECT ts -FROM read_csv_auto('data/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) ORDER BY ts NULLS LAST ---- 2003-06-30 12:03:10 diff --git a/test/sql/copy/csv/parallel/test_5438.test b/test/sql/copy/csv/parallel/test_5438.test index 33d139591aba..4dd5bca67903 100644 --- a/test/sql/copy/csv/parallel/test_5438.test +++ b/test/sql/copy/csv/parallel/test_5438.test @@ -12,7 +12,7 @@ statement ok PRAGMA verify_parallelism query I -SELECT j->>'duck' FROM read_csv_auto('data/csv/test/5438.csv', delim='', columns={'j': 'JSON'}, auto_detect = false) +SELECT j->>'duck' FROM read_csv_auto('{DATA_DIR}/csv/test/5438.csv', delim='', columns={'j': 'JSON'}, auto_detect = false) ---- 1 2 diff --git a/test/sql/copy/csv/parallel/test_5566.test b/test/sql/copy/csv/parallel/test_5566.test index d0513bda09a1..fbb7bff81a93 100644 --- a/test/sql/copy/csv/parallel/test_5566.test +++ b/test/sql/copy/csv/parallel/test_5566.test @@ -10,7 +10,7 @@ statement ok PRAGMA enable_verification query I -select * from read_csv_auto('data/csv/auto/test_single_column.csv') +select * from read_csv_auto('{DATA_DIR}/csv/auto/test_single_column.csv') ---- 1 2 @@ -19,7 +19,7 @@ select * from read_csv_auto('data/csv/auto/test_single_column.csv') 5 query I -select * from read_csv_auto('data/csv/auto/test_single_column_rn.csv') +select * from read_csv_auto('{DATA_DIR}/csv/auto/test_single_column_rn.csv') ---- 1 2 @@ -28,14 +28,14 @@ select * from read_csv_auto('data/csv/auto/test_single_column_rn.csv') 5 query II -select foo, count(1) cnt from read_csv_auto('data/csv/auto/test_multiple_columns.csv') group by foo order by cnt desc +select foo, count(1) 
cnt from read_csv_auto('{DATA_DIR}/csv/auto/test_multiple_columns.csv') group by foo order by cnt desc ---- 1 102 2 100 3 98 query II -select foo, count(1) cnt from read_csv_auto('data/csv/auto/test_multiple_columns_rn.csv') group by foo order by cnt desc +select foo, count(1) cnt from read_csv_auto('{DATA_DIR}/csv/auto/test_multiple_columns_rn.csv') group by foo order by cnt desc ---- 1 102 2 100 diff --git a/test/sql/copy/csv/parallel/test_7578.test b/test/sql/copy/csv/parallel/test_7578.test index 81fcf1af7631..fe526706d86a 100644 --- a/test/sql/copy/csv/parallel/test_7578.test +++ b/test/sql/copy/csv/parallel/test_7578.test @@ -11,7 +11,7 @@ PRAGMA enable_verification query IIIIIIIIII select * -from read_csv('data/csv/bug_7578.csv', delim='\t', quote = '`', columns={ +from read_csv('{DATA_DIR}/csv/bug_7578.csv', delim='\t', quote = '`', columns={ 'transaction_id': 'VARCHAR', 'team_id': 'INT', 'direction': 'INT', @@ -36,7 +36,7 @@ pragma threads=2 statement error select * -from read_csv('data/csv/bug_7578.csv', delim='\t', columns={ +from read_csv('{DATA_DIR}/csv/bug_7578.csv', delim='\t', columns={ 'transaction_id': 'VARCHAR', 'team_id': 'INT', 'direction': 'INT', diff --git a/test/sql/copy/csv/parallel/test_7789.test_slow b/test/sql/copy/csv/parallel/test_7789.test_slow index da36a21978bc..5dfa3036977e 100644 --- a/test/sql/copy/csv/parallel/test_7789.test_slow +++ b/test/sql/copy/csv/parallel/test_7789.test_slow @@ -7,6 +7,6 @@ PRAGMA enable_verification query I select count(*) -from read_csv_auto('data/csv/CrashStatistics.csv', SAMPLE_SIZE = -1) +from read_csv_auto('{DATA_DIR}/csv/CrashStatistics.csv', SAMPLE_SIZE = -1) ---- 4980 diff --git a/test/sql/copy/csv/parallel/test_multiple_files.test b/test/sql/copy/csv/parallel/test_multiple_files.test index df1442177e3b..fcc972aaf8b3 100644 --- a/test/sql/copy/csv/parallel/test_multiple_files.test +++ b/test/sql/copy/csv/parallel/test_multiple_files.test @@ -10,7 +10,7 @@ statement ok PRAGMA enable_verification query IIII rowsort -select * from read_csv_auto('data/csv/auto/glob/[0-9].csv'); +select * from read_csv_auto('{DATA_DIR}/csv/auto/glob/[0-9].csv'); ---- 0 0 1.0 zero 1 1 1.1 one @@ -69,7 +69,7 @@ select * from read_csv_auto('data/csv/auto/glob/[0-9].csv'); 9 9 1.9 nine query IIII rowsort -select * from read_csv_auto('data/csv/auto/glob/[0-9].csv', buffer_size=100) +select * from read_csv_auto('{DATA_DIR}/csv/auto/glob/[0-9].csv', buffer_size=100) ---- 0 0 1.0 zero 1 1 1.1 one @@ -129,7 +129,7 @@ select * from read_csv_auto('data/csv/auto/glob/[0-9].csv', buffer_size=100) query IIII rowsort -select * from read_csv('data/csv/auto/glob/[0-9].csv', AUTO_DETECT=true) +select * from read_csv('{DATA_DIR}/csv/auto/glob/[0-9].csv', AUTO_DETECT=true) ---- 0 0 1.0 zero 1 1 1.1 one @@ -188,7 +188,7 @@ select * from read_csv('data/csv/auto/glob/[0-9].csv', AUTO_DETECT=true) 9 9 1.9 nine query IIII rowsort -select * from read_csv('data/csv/auto/glob/[0-9].csv', sample_size=-1, new_line = '\r\n', columns={'row_id':'BIGINT','integer':'INTEGER','float':'DOUBLE', 'text':'VARCHAR'}) +select * from read_csv('{DATA_DIR}/csv/auto/glob/[0-9].csv', sample_size=-1, new_line = '\r\n', columns={'row_id':'BIGINT','integer':'INTEGER','float':'DOUBLE', 'text':'VARCHAR'}) ---- 0 0 1.0 zero 1 1 1.1 one diff --git a/test/sql/copy/csv/parallel/test_parallel_csv.test b/test/sql/copy/csv/parallel/test_parallel_csv.test index a7aedae225fe..a13a5a199d88 100644 --- a/test/sql/copy/csv/parallel/test_parallel_csv.test +++ 
b/test/sql/copy/csv/parallel/test_parallel_csv.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query IIIIIIIIIIIIIIIIIIIIIIIIII -FROM read_csv('data/csv/14512_og.csv', buffer_size = 473, strict_mode = false, delim = ',', quote = '"', escape = '"') +FROM read_csv('{DATA_DIR}/csv/14512_og.csv', buffer_size = 473, strict_mode = false, delim = ',', quote = '"', escape = '"') ---- 00000579000098 13.99 EA PINE RIDGE CHENIN VOIGNIER 750.0 ML 1 13 NULL 1 NULL NULL NULL NULL NULL NULL DEFAULT BRAND NULL NULL NULL NULL BEER & WINE NULL NULL 7.25 {"sales_tax":{ "tax_type": "rate_percent", "value" :0.0725}} 00000609082001 3.99 EA MADELAINE MINI MILK CHOCOLATE TURKEY 1.0 OZ 1 13 NULL NULL NULL NULL NULL NULL NULL NULL MADELEINE NULL NULL NULL NULL CANDY NULL NULL 7.25 {"sales_tax":{ "tax_type": "rate_percent", "value" :0.0725}} @@ -14,13 +14,13 @@ FROM read_csv('data/csv/14512_og.csv', buffer_size = 473, strict_mode = false, d query III -select * from read_csv_auto('data/csv/dirty_line.csv', skip = 1) +select * from read_csv_auto('{DATA_DIR}/csv/dirty_line.csv', skip = 1) ---- 1.5 a 3 2.5 b 4 query II -select * from read_csv_auto('data/csv/null_string.csv', nullstr="null") +select * from read_csv_auto('{DATA_DIR}/csv/null_string.csv', nullstr="null") ---- 1 NULL NULL 2 @@ -28,14 +28,14 @@ NULL 2 # We need to add header = false here. Because with vector_size=2 the sniffer will think we have a header, since the # row 1 null has types INTEGER;VARCHAR at that point query II -select * from read_csv_auto('data/csv/null_string.csv', header = false) +select * from read_csv_auto('{DATA_DIR}/csv/null_string.csv', header = false) ---- a b 1 null null 2 query IIIIIIIIII -select * from read_csv_auto('data/csv/aws_locations.csv') +select * from read_csv_auto('{DATA_DIR}/csv/aws_locations.csv') ---- IAD Washington District of Columbia United States US 20 38.94449997 -77.45580292 North America United States, Mexico, & Canada ORD Chicago Illinois United States US 20 41.978611 -87.904722 North America United States, Mexico, & Canada diff --git a/test/sql/copy/csv/parallel/test_parallel_error_messages.test b/test/sql/copy/csv/parallel/test_parallel_error_messages.test index c3b4d3dd6071..0d9143af219a 100644 --- a/test/sql/copy/csv/parallel/test_parallel_error_messages.test +++ b/test/sql/copy/csv/parallel/test_parallel_error_messages.test @@ -17,22 +17,22 @@ statement ok SET preserve_insertion_order=${batched} statement error -SELECT * FROM read_csv('data/csv/missing_column.csv', sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True, auto_detect = false) +SELECT * FROM read_csv('{DATA_DIR}/csv/missing_column.csv', sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True, auto_detect = false) ---- Line: 7 statement error -SELECT * FROM read_csv('data/csv/wrongtype.csv', sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/wrongtype.csv', sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True) ---- Column at position: 0 Set type: INTEGER Sniffed type: VARCHAR statement error -SELECT * FROM read_csv('data/csv/wrongtype.csv', sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True, auto_detect = false) +SELECT * FROM read_csv('{DATA_DIR}/csv/wrongtype.csv', sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True, auto_detect = false) ---- Line: 8 statement error -SELECT * FROM read_csv('data/csv/wrongtype.csv', sep=',', columns={'h1': int, 'h2': varchar}, 
header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/wrongtype.csv', sep=',', columns={'h1': int, 'h2': varchar}, header=True) ---- Column at position: 0 Set type: INTEGER Sniffed type: VARCHAR @@ -41,45 +41,45 @@ mode skip # the first error is on line 10002 statement error -SELECT * FROM read_csv('data/csv/error/mixedtypes.csv', parallel=false, sep=',', columns={'h1': int, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/mixedtypes.csv', parallel=false, sep=',', columns={'h1': int, 'h2': varchar}, header=True) ---- line 10002 # the first error is on line 10002 statement error -SELECT * FROM read_csv('data/csv/error/mixedtypes.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/mixedtypes.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True) ---- line 10002 statement error -SELECT * FROM read_csv('data/csv/error/mixedtypes.csv', parallel=false, sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/mixedtypes.csv', parallel=false, sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True) ---- line 10002 # the first error is on line 10002 statement error -SELECT * FROM read_csv('data/csv/error/mixedtypes_rn.csv', parallel=false, sep=',', columns={'h1': int, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/mixedtypes_rn.csv', parallel=false, sep=',', columns={'h1': int, 'h2': varchar}, header=True) ---- line 10002 # the first error is on line 10001 statement error -SELECT * FROM read_csv('data/csv/error/mixedtypes_rn.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/mixedtypes_rn.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True) ---- line 10002 statement error -SELECT * FROM read_csv('data/csv/error/mixedtypes_rn.csv', parallel=false, sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/mixedtypes_rn.csv', parallel=false, sep=',', buffer_size=100, columns={'h1': int, 'h2': varchar}, header=True) ---- line 10002 statement error -SELECT * FROM read_csv('data/csv/error/quotednewlines.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': varchar, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/quotednewlines.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': varchar, 'h2': varchar}, header=True) ---- not supported for multithreading statement error -SELECT * FROM read_csv('data/csv/error/quotednewlines.csv', parallel=true, sep=',', buffer_size=200, columns={'h1': int, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/quotednewlines.csv', parallel=true, sep=',', buffer_size=200, columns={'h1': int, 'h2': varchar}, header=True) ---- not supported for multithreading @@ -88,55 +88,55 @@ foreach type bool int bigint hugeint float double 'decimal(4,1)' 'decimal(8,1)' # the first error is on line 3001 statement error -SELECT * FROM read_csv('data/csv/error/csv_error.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': ${type}, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/csv_error.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': ${type}, 'h2': varchar}, header=True) ---- line 3002 # the first error is on line 3001 
statement error -SELECT * FROM read_csv('data/csv/error/csv_error.csv', parallel=false, sep=',', buffer_size=100, columns={'h1': ${type}, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/csv_error.csv', parallel=false, sep=',', buffer_size=100, columns={'h1': ${type}, 'h2': varchar}, header=True) ---- line 3002 # the first error is on line 3001 statement error -SELECT * FROM read_csv('data/csv/error/date.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': date, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/date.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': date, 'h2': varchar}, header=True) ---- line 3002 # the first error is on line 3001 statement error -SELECT * FROM read_csv('data/csv/error/date.csv', parallel=false, sep=',', buffer_size=100, columns={'h1': date, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/date.csv', parallel=false, sep=',', buffer_size=100, columns={'h1': date, 'h2': varchar}, header=True) ---- line 3002 # the first error is on line 3002 statement error -SELECT * FROM read_csv('data/csv/error/time.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': time, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/time.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': time, 'h2': varchar}, header=True) ---- line 3002 # the first error is on line 3002 statement error -SELECT * FROM read_csv('data/csv/error/time.csv', parallel=false, sep=',', buffer_size=100, columns={'h1': time, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/time.csv', parallel=false, sep=',', buffer_size=100, columns={'h1': time, 'h2': varchar}, header=True) ---- line 3002 # the first error is on line 3002 statement error -SELECT * FROM read_csv('data/csv/error/timestamp.csv', parallel=true, sep=',', buffer_size=200, columns={'h1': timestamp, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/timestamp.csv', parallel=true, sep=',', buffer_size=200, columns={'h1': timestamp, 'h2': varchar}, header=True) ---- line 3002 # the first error is on line 3000 statement error -SELECT * FROM read_csv('data/csv/error/timestamp.csv', parallel=false, sep=',', columns={'h1': timestamp, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/timestamp.csv', parallel=false, sep=',', columns={'h1': timestamp, 'h2': varchar}, header=True) ---- line 3002 # the first error is on line 3000 statement error -SELECT * FROM read_csv('data/csv/error/timestamp.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': timestamp, 'h2': varchar}, header=True) +SELECT * FROM read_csv('{DATA_DIR}/csv/error/timestamp.csv', parallel=true, sep=',', buffer_size=100, columns={'h1': timestamp, 'h2': varchar}, header=True) ---- line 3002 diff --git a/test/sql/copy/csv/plus_autodetect.test b/test/sql/copy/csv/plus_autodetect.test index 8d13b5f0b0c7..801bc4fde942 100644 --- a/test/sql/copy/csv/plus_autodetect.test +++ b/test/sql/copy/csv/plus_autodetect.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification statement ok -CREATE TABLE phone_numbers AS SELECT * FROM read_csv_auto('data/csv/phonenumbers.csv') +CREATE TABLE phone_numbers AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/phonenumbers.csv') query I SELECT phone FROM phone_numbers; diff --git a/test/sql/copy/csv/pollock/test_field_delimiter.test b/test/sql/copy/csv/pollock/test_field_delimiter.test index b27d8fe738ca..7d9800fa10a4 100644 --- 
a/test/sql/copy/csv/pollock/test_field_delimiter.test +++ b/test/sql/copy/csv/pollock/test_field_delimiter.test @@ -6,6 +6,6 @@ statement ok PRAGMA enable_verification statement ok -FROM read_csv('data/csv/pollock/file_field_delimiter_0x20.csv', delim = ' ', escape = '"', quote='"', header = false, skip=1, +FROM read_csv('{DATA_DIR}/csv/pollock/file_field_delimiter_0x20.csv', delim = ' ', escape = '"', quote='"', header = false, skip=1, columns = {'Date':'VARCHAR','TIME':'VARCHAR','Qty':'VARCHAR','PRODUCTID':'VARCHAR','Price':'VARCHAR' ,'ProductType':'VARCHAR','ProductDescription':'VARCHAR','URL':'VARCHAR','Comments':'VARCHAR'}, auto_detect = false, strict_mode=FALSE, null_padding = true) diff --git a/test/sql/copy/csv/pollock/test_quotation_char.test b/test/sql/copy/csv/pollock/test_quotation_char.test index 39634504ff8e..b1ec09ce93b0 100644 --- a/test/sql/copy/csv/pollock/test_quotation_char.test +++ b/test/sql/copy/csv/pollock/test_quotation_char.test @@ -6,6 +6,6 @@ statement ok PRAGMA enable_verification statement ok -FROM read_csv('data/csv/pollock/file_quotation_char_0x27.csv', delim = ',', escape = '"', quote='''', +FROM read_csv('{DATA_DIR}/csv/pollock/file_quotation_char_0x27.csv', delim = ',', escape = '"', quote='''', columns = {'Date':'VARCHAR','TIME':'VARCHAR','Qty':'VARCHAR','PRODUCTID':'VARCHAR','Price':'VARCHAR' ,'ProductType':'VARCHAR','ProductDescription':'VARCHAR','URL':'VARCHAR','Comments':'VARCHAR'}, auto_detect = false, strict_mode=FALSE, null_padding = true) diff --git a/test/sql/copy/csv/read_csv_variable.test b/test/sql/copy/csv/read_csv_variable.test index 3ed57089259b..90d6ec3b73c1 100644 --- a/test/sql/copy/csv/read_csv_variable.test +++ b/test/sql/copy/csv/read_csv_variable.test @@ -6,15 +6,15 @@ statement ok PRAGMA enable_verification statement ok -CREATE TABLE globbed_files AS FROM glob('data/csv/glob/a?/*.csv'); +CREATE TABLE globbed_files AS FROM glob('{DATA_DIR}/csv/glob/a?/*.csv'); statement ok SET VARIABLE csv_files=(SELECT LIST(file ORDER BY file) FROM globbed_files) query I -SELECT [x.replace('\', '/') for x in getvariable('csv_files')] +SELECT [parse_path(x)[-2:] for x in getvariable('csv_files')] ---- -[data/csv/glob/a1/a1.csv, data/csv/glob/a2/a2.csv, data/csv/glob/a3/b1.csv] +[[a1, a1.csv], [a2, a2.csv], [a3, b1.csv]] # simple globbing query I diff --git a/test/sql/copy/csv/recursive_csv_union_by_name.test b/test/sql/copy/csv/recursive_csv_union_by_name.test index 23249e3655d4..17d03cde0449 100644 --- a/test/sql/copy/csv/recursive_csv_union_by_name.test +++ b/test/sql/copy/csv/recursive_csv_union_by_name.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification statement ok -create view r AS from read_csv_auto('data/csv/hive-partitioning/mismatching_contents/*/*.csv', UNION_BY_NAME=1, HIVE_PARTITIONING=1) +create view r AS from read_csv_auto('{DATA_DIR}/csv/hive-partitioning/mismatching_contents/*/*.csv', UNION_BY_NAME=1, HIVE_PARTITIONING=1) query II WITH RECURSIVE t(i, j) AS diff --git a/test/sql/copy/csv/recursive_read_csv.test b/test/sql/copy/csv/recursive_read_csv.test index 4c0f4426688f..734856d48964 100644 --- a/test/sql/copy/csv/recursive_read_csv.test +++ b/test/sql/copy/csv/recursive_read_csv.test @@ -7,7 +7,7 @@ PRAGMA enable_verification # first create a table from read csv, and use that in a recursive cte statement ok -create table r AS SELECT * FROM read_csv('data/csv/test/date.csv', columns=STRUCT_PACK(d := 'DATE'), header=0, auto_detect=0); +create table r AS SELECT * FROM read_csv('{DATA_DIR}/csv/test/date.csv', 
columns=STRUCT_PACK(d := 'DATE'), header=0, auto_detect=0); query II WITH RECURSIVE t(i) AS @@ -36,7 +36,7 @@ WITH RECURSIVE t(i) AS UNION ALL ( SELECT i+1, d - FROM t, read_csv('data/csv/test/date.csv', columns=STRUCT_PACK(d := 'DATE'), header=0, auto_detect=0) + FROM t, read_csv('{DATA_DIR}/csv/test/date.csv', columns=STRUCT_PACK(d := 'DATE'), header=0, auto_detect=0) WHERE i<5 ) ) @@ -49,7 +49,7 @@ SELECT * FROM t ORDER BY i; 5 2019-06-05 query I -SELECT * FROM read_csv_auto('data/csv/test/date.csv') +SELECT * FROM read_csv_auto('{DATA_DIR}/csv/test/date.csv') ---- 2019-06-05 @@ -62,7 +62,7 @@ WITH RECURSIVE t(i) AS UNION ALL ( SELECT i+1, d - FROM t, read_csv('data/csv/test/date.csv', header=0, auto_detect=1) r(d) + FROM t, read_csv('{DATA_DIR}/csv/test/date.csv', header=0, auto_detect=1) r(d) WHERE i<5 ) ) diff --git a/test/sql/copy/csv/rejects/csv_buffer_size_rejects.test_slow b/test/sql/copy/csv/rejects/csv_buffer_size_rejects.test_slow index 96bfca002ec6..52aaa88ef788 100644 --- a/test/sql/copy/csv/rejects/csv_buffer_size_rejects.test_slow +++ b/test/sql/copy/csv/rejects/csv_buffer_size_rejects.test_slow @@ -15,7 +15,7 @@ loop buffer_size 7 11 query IIIII SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/small_bad.csv', + '{DATA_DIR}/csv/small_bad.csv', buffer_size=${buffer_size}, store_rejects = true, columns = {'column0':'INTEGER', 'column1':'VARCHAR'}); @@ -40,7 +40,7 @@ loop buffer_size 5 10 # Ensure that we can get the schema if we reduce the sample size and ignore errors query IIIII SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, buffer_size=${buffer_size}, store_rejects = true); @@ -50,8 +50,8 @@ BIGINT VARCHAR 11044 11044 2 query IIIIIIIIII rowsort SELECT * EXCLUDE (file_id, scan_id, user_arguments) FROM reject_scans order by #1; ---- -data/csv/error/mismatch/big_bad.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL -data/csv/error/mismatch/big_bad2.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL +{DATA_DIR}/csv/error/mismatch/big_bad.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL +{DATA_DIR}/csv/error/mismatch/big_bad2.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL query IIIIIIII rowsort SELECT * EXCLUDE (file_id, scan_id) FROM reject_errors order by all; @@ -67,4 +67,4 @@ DROP TABLE reject_errors; statement ok DROP TABLE reject_scans; -endloop \ No newline at end of file +endloop diff --git a/test/sql/copy/csv/rejects/csv_incorrect_columns_amount_rejects.test b/test/sql/copy/csv/rejects/csv_incorrect_columns_amount_rejects.test index 7017f7f814f8..c081371b6453 100644 --- a/test/sql/copy/csv/rejects/csv_incorrect_columns_amount_rejects.test +++ b/test/sql/copy/csv/rejects/csv_incorrect_columns_amount_rejects.test @@ -9,7 +9,7 @@ require notwindows statement ok SELECT * FROM read_csv( - 'data/csv/rejects/incorrect_columns/few_columns.csv', + '{DATA_DIR}/csv/rejects/incorrect_columns/few_columns.csv', columns = {'a': 'INTEGER', 'b': 'INTEGER', 'c': 'INTEGER', 'd': 'INTEGER'}, store_rejects=true, auto_detect=false, header = 1); @@ -34,7 +34,7 @@ DROP TABLE reject_scans; statement ok SELECT * FROM read_csv( - 'data/csv/rejects/incorrect_columns/many_columns.csv', + 
'{DATA_DIR}/csv/rejects/incorrect_columns/many_columns.csv', columns = {'a': 'INTEGER', 'b': 'INTEGER', 'c': 'INTEGER', 'd': 'INTEGER'}, store_rejects=true, auto_detect=false, header = 1, strict_mode=True); @@ -56,7 +56,7 @@ DROP TABLE reject_scans; statement ok SELECT * FROM read_csv( - 'data/csv/rejects/incorrect_columns/mix_columns.csv', + '{DATA_DIR}/csv/rejects/incorrect_columns/mix_columns.csv', columns = {'a': 'INTEGER', 'b': 'INTEGER', 'c': 'INTEGER', 'd': 'INTEGER'}, store_rejects=true, auto_detect=false, header = 1, strict_mode=True); @@ -85,7 +85,7 @@ DROP TABLE reject_scans; statement ok SELECT * FROM read_csv( - 'data/csv/rejects/incorrect_columns/small_mix.csv', + '{DATA_DIR}/csv/rejects/incorrect_columns/small_mix.csv', columns = {'a': 'INTEGER', 'b': 'INTEGER', 'c': 'INTEGER', 'd': 'INTEGER'}, store_rejects=true, auto_detect=false, header = 1, strict_mode=True); @@ -106,7 +106,7 @@ DROP TABLE reject_scans; statement ok SELECT * FROM read_csv( - 'data/csv/rejects/incorrect_columns/*.csv', + '{DATA_DIR}/csv/rejects/incorrect_columns/*.csv', columns = {'a': 'INTEGER', 'b': 'INTEGER', 'c': 'INTEGER', 'd': 'INTEGER'}, store_rejects=true, auto_detect=false, header = 1, strict_mode=True); diff --git a/test/sql/copy/csv/rejects/csv_rejects_auto.test b/test/sql/copy/csv/rejects/csv_rejects_auto.test index 0c2ef09e9042..566ab5e5d1b9 100644 --- a/test/sql/copy/csv/rejects/csv_rejects_auto.test +++ b/test/sql/copy/csv/rejects/csv_rejects_auto.test @@ -9,7 +9,7 @@ require notwindows # Ensure that we can get the schema if we reduce the sample size and ignore errors query IIIII SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, store_rejects=true); ---- @@ -32,7 +32,7 @@ DROP TABLE reject_scans; # Test with lots of errors query I SELECT SUM(num) FROM read_csv_auto( - 'data/csv/error/mismatch/half1.csv', + '{DATA_DIR}/csv/error/mismatch/half1.csv', header=true, sample_size=1, store_rejects=true) @@ -53,7 +53,7 @@ DROP TABLE reject_scans; # Test with more errors than STANDARD_VECTOR_SIZE query I SELECT SUM(num) FROM read_csv_auto( - 'data/csv/error/mismatch/half2.csv', + '{DATA_DIR}/csv/error/mismatch/half2.csv', header=true, ignore_errors=true, sample_size=1, diff --git a/test/sql/copy/csv/rejects/csv_rejects_flush_cast.test b/test/sql/copy/csv/rejects/csv_rejects_flush_cast.test index a40ae18bf46b..5f7539d8daec 100644 --- a/test/sql/copy/csv/rejects/csv_rejects_flush_cast.test +++ b/test/sql/copy/csv/rejects/csv_rejects_flush_cast.test @@ -9,7 +9,7 @@ require notwindows query IIIII SELECT first(a), first(b), typeof(first(a)), typeof(first(b)), COUNT(*) FROM read_csv( - 'data/csv/error/flush_cast.csv', + '{DATA_DIR}/csv/error/flush_cast.csv', columns = {'a': 'DATE', 'b': 'VARCHAR'}, store_rejects = true, delim = ',', diff --git a/test/sql/copy/csv/rejects/csv_rejects_flush_message.test b/test/sql/copy/csv/rejects/csv_rejects_flush_message.test index 8cf25b447da2..72d15a52acfc 100644 --- a/test/sql/copy/csv/rejects/csv_rejects_flush_message.test +++ b/test/sql/copy/csv/rejects/csv_rejects_flush_message.test @@ -9,7 +9,7 @@ require notwindows query I SELECT * FROM read_csv( - 'data/csv/rejects/flush.csv', + '{DATA_DIR}/csv/rejects/flush.csv', columns = {'a': 'DECIMAL'}, store_rejects = true); ---- diff --git a/test/sql/copy/csv/rejects/csv_rejects_maximum_line.test b/test/sql/copy/csv/rejects/csv_rejects_maximum_line.test 
index 7e01be673156..baffdaa46db9 100644 --- a/test/sql/copy/csv/rejects/csv_rejects_maximum_line.test +++ b/test/sql/copy/csv/rejects/csv_rejects_maximum_line.test @@ -9,7 +9,7 @@ require notwindows statement ok SELECT * FROM read_csv( - 'data/csv/rejects/maximum_line/max_10.csv', + '{DATA_DIR}/csv/rejects/maximum_line/max_10.csv', columns = {'a': 'VARCHAR', 'b': 'INTEGER'}, store_rejects=true, auto_detect=false, header = 1, max_line_size=10); @@ -30,7 +30,7 @@ loop buffer_size 22 27 statement ok SELECT * FROM read_csv( - 'data/csv/rejects/maximum_line/max_10.csv', + '{DATA_DIR}/csv/rejects/maximum_line/max_10.csv', columns = {'a': 'VARCHAR', 'b': 'INTEGER'}, store_rejects = true, auto_detect=false, header = 1, max_line_size=10, buffer_size=${buffer_size}); @@ -50,7 +50,7 @@ endloop # Test over vector size file statement ok SELECT * FROM read_csv( - 'data/csv/rejects/maximum_line/over_vector.csv', + '{DATA_DIR}/csv/rejects/maximum_line/over_vector.csv', columns = {'a': 'VARCHAR', 'b': 'INTEGER'}, store_rejects = true, auto_detect=false, header = 1, max_line_size=20); @@ -70,7 +70,7 @@ DROP TABLE reject_scans; # Read Multiple Files statement ok SELECT * FROM read_csv( - 'data/csv/rejects/maximum_line/*.csv', + '{DATA_DIR}/csv/rejects/maximum_line/*.csv', columns = {'a': 'VARCHAR', 'b': 'INTEGER'}, store_rejects = true, auto_detect=false, header = 1, max_line_size=10); diff --git a/test/sql/copy/csv/rejects/csv_rejects_read.test b/test/sql/copy/csv/rejects/csv_rejects_read.test index 3fa5e5ea80bb..35cf578e818c 100644 --- a/test/sql/copy/csv/rejects/csv_rejects_read.test +++ b/test/sql/copy/csv/rejects/csv_rejects_read.test @@ -8,7 +8,7 @@ require notwindows query III rowsort SELECT * FROM read_csv( - 'data/csv/error/mismatch/bad.csv', + '{DATA_DIR}/csv/error/mismatch/bad.csv', columns = {'col0': 'INTEGER', 'col1': 'INTEGER', 'col2': 'VARCHAR'}, store_rejects = true, auto_detect=true); ---- @@ -29,7 +29,7 @@ DROP TABLE reject_scans; # Test with multiple columns on the same row query III rowsort SELECT * FROM read_csv( - 'data/csv/error/mismatch/bad2.csv', + '{DATA_DIR}/csv/error/mismatch/bad2.csv', columns = {'col0': 'INTEGER', 'col1': 'INTEGER', 'col2': 'INTEGER'}, store_rejects = true, auto_detect=false); ---- @@ -51,7 +51,7 @@ DROP TABLE reject_scans; # Test with multiple files query III rowsort SELECT * FROM read_csv( - 'data/csv/error/mismatch/bad*.csv', + '{DATA_DIR}/csv/error/mismatch/bad*.csv', columns = {'col0': 'INTEGER', 'col1': 'INTEGER', 'col2': 'VARCHAR'}, store_rejects = true, auto_detect=false); ---- @@ -76,7 +76,7 @@ DROP TABLE reject_scans; query III rowsort SELECT * FROM read_csv( - 'data/csv/error/mismatch/bad*.csv', + '{DATA_DIR}/csv/error/mismatch/bad*.csv', columns = {'col0': 'INTEGER', 'col1': 'INTEGER', 'col2': 'VARCHAR'}, store_rejects = true,rejects_limit=2, ignore_errors=true, auto_detect=false); ---- @@ -100,7 +100,7 @@ DROP TABLE reject_scans; # Try with bigger files query I SELECT SUM(num) FROM read_csv( - 'data/csv/error/mismatch/big_bad.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad.csv', columns = {'num': 'INTEGER', 'str': 'VARCHAR'}, store_rejects = true, auto_detect=false); ---- @@ -120,7 +120,7 @@ DROP TABLE reject_scans; query I SELECT SUM(num) FROM read_csv( - 'data/csv/error/mismatch/big_bad2.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad2.csv', columns = {'num': 'INTEGER', 'str': 'VARCHAR'}, store_rejects = true, auto_detect=false) ---- @@ -142,7 +142,7 @@ DROP TABLE reject_scans; # Test with multiple big files query I SELECT SUM(num) FROM read_csv( - 
'data/csv/error/mismatch/big_*.csv', + '{DATA_DIR}/csv/error/mismatch/big_*.csv', columns = {'num': 'INTEGER', 'str': 'VARCHAR'}, store_rejects = true, auto_detect=false); ---- @@ -166,11 +166,11 @@ DROP TABLE reject_scans; query IIII rowsort SELECT * FROM read_csv( - 'data/csv/error/mismatch/small1.csv', + '{DATA_DIR}/csv/error/mismatch/small1.csv', columns = {'num': 'INTEGER', 'str': 'VARCHAR'}, store_rejects = true) as L JOIN read_csv( - 'data/csv/error/mismatch/small2.csv', + '{DATA_DIR}/csv/error/mismatch/small2.csv', columns = {'num': 'INTEGER', 'str': 'VARCHAR'}, store_rejects = true) as R ON L.num = R.num; @@ -181,8 +181,8 @@ ON L.num = R.num; query IIIIIIIIIII SELECT * EXCLUDE (scan_id, file_id) FROM reject_scans ORDER BY ALL; ---- -data/csv/error/mismatch/small1.csv , (empty) (empty) \n 0 1 {'num': 'INTEGER','str': 'VARCHAR'} NULL NULL store_rejects=true -data/csv/error/mismatch/small2.csv , (empty) (empty) \n 0 1 {'num': 'INTEGER','str': 'VARCHAR'} NULL NULL store_rejects=true +{DATA_DIR}/csv/error/mismatch/small1.csv , (empty) (empty) \n 0 1 {'num': 'INTEGER','str': 'VARCHAR'} NULL NULL store_rejects=true +{DATA_DIR}/csv/error/mismatch/small2.csv , (empty) (empty) \n 0 1 {'num': 'INTEGER','str': 'VARCHAR'} NULL NULL store_rejects=true query IIIIIIII SELECT * EXCLUDE (scan_id, file_id) FROM reject_errors ORDER BY ALL; @@ -203,11 +203,11 @@ DROP TABLE reject_scans; query IIII rowsort SELECT * FROM read_csv( - 'data/csv/error/mismatch/small1.csv', + '{DATA_DIR}/csv/error/mismatch/small1.csv', columns = {'num': 'INTEGER', 'str': 'VARCHAR'}, store_rejects = true) as L JOIN read_csv( - 'data/csv/error/mismatch/small2.csv', + '{DATA_DIR}/csv/error/mismatch/small2.csv', columns = {'num': 'INTEGER', 'str': 'VARCHAR'}, store_rejects = true, rejects_limit=1) as R ON L.num = R.num; @@ -231,7 +231,7 @@ statement ok DROP TABLE reject_scans; query IIIII rowsort -FROM read_csv('data/csv/rejects/dr_who.csv', columns={ 'date': 'DATE', 'datetime': 'TIMESTAMPTZ', 'time': 'TIME', 'timestamp': 'TIMESTAMP', 'time_tz': 'TIMETZ' }, store_rejects=true); +FROM read_csv('{DATA_DIR}/csv/rejects/dr_who.csv', columns={ 'date': 'DATE', 'datetime': 'TIMESTAMPTZ', 'time': 'TIME', 'timestamp': 'TIMESTAMP', 'time_tz': 'TIMETZ' }, store_rejects=true); ---- @@ -253,7 +253,7 @@ DROP TABLE reject_scans; # Test rejects table with comments statement ok -FROM read_csv('data/csv/comments/error.csv', store_rejects = true, comment = '#'); +FROM read_csv('{DATA_DIR}/csv/comments/error.csv', store_rejects = true, comment = '#'); query IIIIIIII SELECT * EXCLUDE (scan_id, file_id) FROM reject_errors ORDER BY column_name; @@ -270,7 +270,7 @@ statement ok DROP TABLE reject_scans; query II -FROM read_csv('data/csv/error.csv', store_rejects=1, strict_mode=True); +FROM read_csv('{DATA_DIR}/csv/error.csv', store_rejects=1, strict_mode=True); ---- true false @@ -284,4 +284,4 @@ statement ok DROP TABLE reject_errors; statement ok -DROP TABLE reject_scans; \ No newline at end of file +DROP TABLE reject_scans; diff --git a/test/sql/copy/csv/rejects/csv_rejects_two_tables.test b/test/sql/copy/csv/rejects/csv_rejects_two_tables.test index 962ab36a3996..c2031198793d 100644 --- a/test/sql/copy/csv/rejects/csv_rejects_two_tables.test +++ b/test/sql/copy/csv/rejects/csv_rejects_two_tables.test @@ -9,7 +9,7 @@ require notwindows # Ensure that we can get the schema if we reduce the sample size and ignore errors query IIIII SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 
'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, store_rejects=true); ---- @@ -19,8 +19,8 @@ BIGINT VARCHAR 11044 11044 2 query IIIIIIIIIIII SELECT * EXCLUDE (scan_id) FROM reject_scans order by all; ---- -0 data/csv/error/mismatch/big_bad.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL sample_size=1, store_rejects=true -1 data/csv/error/mismatch/big_bad2.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL sample_size=1, store_rejects=true +0 {DATA_DIR}/csv/error/mismatch/big_bad.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL sample_size=1, store_rejects=true +1 {DATA_DIR}/csv/error/mismatch/big_bad2.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL sample_size=1, store_rejects=true query IIIIIIIII SELECT * EXCLUDE (scan_id) FROM reject_errors order by all; @@ -33,7 +33,7 @@ SELECT * EXCLUDE (scan_id) FROM reject_errors order by all; # Test giving the name of errors table statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_table = 'rejects_errors_2'); ---- @@ -44,7 +44,7 @@ drop table reject_scans; query IIIII SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_table = 'rejects_errors_2' ); @@ -54,8 +54,8 @@ BIGINT VARCHAR 11044 11044 2 query IIIIIIIIIIII SELECT * EXCLUDE (scan_id) FROM reject_scans order by all; ---- -0 data/csv/error/mismatch/big_bad.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_table='rejects_errors_2', sample_size=1 -1 data/csv/error/mismatch/big_bad2.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_table='rejects_errors_2', sample_size=1 +0 {DATA_DIR}/csv/error/mismatch/big_bad.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_table='rejects_errors_2', sample_size=1 +1 {DATA_DIR}/csv/error/mismatch/big_bad2.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_table='rejects_errors_2', sample_size=1 query IIIIIIIII SELECT * EXCLUDE (scan_id) FROM rejects_errors_2 order by all; @@ -71,7 +71,7 @@ drop table reject_errors; # Test giving the name of scans table query IIIII SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_scan = 'rejects_scan_2'); ---- @@ -80,8 +80,8 @@ BIGINT VARCHAR 11044 11044 2 query IIIIIIIIIIII SELECT * EXCLUDE (scan_id) FROM rejects_scan_2 order by all; ---- -0 data/csv/error/mismatch/big_bad.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_scan='rejects_scan_2', sample_size=1 -1 data/csv/error/mismatch/big_bad2.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_scan='rejects_scan_2', sample_size=1 +0 {DATA_DIR}/csv/error/mismatch/big_bad.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_scan='rejects_scan_2', sample_size=1 +1 
{DATA_DIR}/csv/error/mismatch/big_bad2.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_scan='rejects_scan_2', sample_size=1 query IIIIIIIII SELECT * EXCLUDE (scan_id) FROM reject_errors order by all; @@ -94,7 +94,7 @@ SELECT * EXCLUDE (scan_id) FROM reject_errors order by all; # Test giving the name of both tables query IIIII SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_scan = 'rejects_scan_3', rejects_table = 'rejects_errors_3' @@ -106,8 +106,8 @@ query IIIIIIIIIIII SELECT * EXCLUDE (scan_id) FROM rejects_scan_3 order by all; ---- -0 data/csv/error/mismatch/big_bad.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_scan='rejects_scan_3', rejects_table='rejects_errors_3', sample_size=1 -1 data/csv/error/mismatch/big_bad2.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_scan='rejects_scan_3', rejects_table='rejects_errors_3', sample_size=1 +0 {DATA_DIR}/csv/error/mismatch/big_bad.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_scan='rejects_scan_3', rejects_table='rejects_errors_3', sample_size=1 +1 {DATA_DIR}/csv/error/mismatch/big_bad2.csv , (empty) (empty) \n 0 0 {'column0': 'BIGINT','column1': 'VARCHAR'} NULL NULL rejects_scan='rejects_scan_3', rejects_table='rejects_errors_3', sample_size=1 query IIIIIIIII SELECT * EXCLUDE (scan_id) FROM rejects_errors_3 order by all; @@ -130,7 +130,7 @@ create temporary table t (a integer); statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_table = 't' ); @@ -141,7 +141,7 @@ Reject Error Table name "t" is already in use. Either drop the used name(s), or statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_scan = 't' ); @@ -150,7 +150,7 @@ Reject Scan Table name "t" is already in use. Either drop the used name(s), or g statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_table = 't', rejects_scan = 't' @@ -161,7 +161,7 @@ The names of the rejects scan and rejects error tables can't be the same. 
Use di # Test giving the name of the tables with store_rejects and/or ignore_errors set to false throws statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_scan = 'rejects_scan_3', rejects_table = 'rejects_errors_3', @@ -172,7 +172,7 @@ STORE_REJECTS option is only supported when IGNORE_ERRORS is not manually set to statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, store_rejects = true, ignore_errors = false @@ -182,7 +182,7 @@ STORE_REJECTS option is only supported when IGNORE_ERRORS is not manually set to statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_table = 'rejects_errors_3', ignore_errors = false @@ -192,7 +192,7 @@ STORE_REJECTS option is only supported when IGNORE_ERRORS is not manually set to statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_scan = 'rejects_scan_3', ignore_errors = false @@ -202,7 +202,7 @@ STORE_REJECTS option is only supported when IGNORE_ERRORS is not manually set to statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_scan = 'rejects_scan_3', rejects_table = 'rejects_errors_3', @@ -213,7 +213,7 @@ REJECTS_TABLE option is only supported when store_rejects is not manually set to statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_table = 'rejects_errors_3', store_rejects = false @@ -223,7 +223,7 @@ REJECTS_TABLE option is only supported when store_rejects is not manually set to statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_scan = 'rejects_scan_3', store_rejects = false @@ -234,7 +234,7 @@ REJECTS_SCAN option is only supported when store_rejects is not manually set to # Add a test where both tables have the same name (This should fail, because they both have the same name) statement error SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto( - 'data/csv/error/mismatch/big_bad*.csv', + '{DATA_DIR}/csv/error/mismatch/big_bad*.csv', sample_size=1, rejects_scan = 'same_name_because_why_not', rejects_table = 'same_name_because_why_not', @@ -246,7 +246,7 @@ The names of the rejects scan and rejects error tables can't be the same. Use di # This hopefully doesn't fail because the names don't get registered if they fail. 
 statement ok
 SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*), SUM(column0), MAX(len(column1)) FROM read_csv_auto(
-    'data/csv/error/mismatch/big_bad*.csv',
+    '{DATA_DIR}/csv/error/mismatch/big_bad*.csv',
     sample_size=1,
     rejects_scan = 'same_name_because_why_not',
     rejects_table = 'same_name_because_why_not_2',
diff --git a/test/sql/copy/csv/rejects/csv_unquoted_rejects.test b/test/sql/copy/csv/rejects/csv_unquoted_rejects.test
index c61b961318a9..81bb9298ea57 100644
--- a/test/sql/copy/csv/rejects/csv_unquoted_rejects.test
+++ b/test/sql/copy/csv/rejects/csv_unquoted_rejects.test
@@ -9,7 +9,7 @@ require notwindows
 query II
 SELECT * FROM read_csv(
-    'data/csv/rejects/unquoted/basic.csv',
+    '{DATA_DIR}/csv/rejects/unquoted/basic.csv',
     columns = {'a': 'VARCHAR', 'b': 'INTEGER'}, store_rejects=true, auto_detect=false, header = 1, quote = '"', escape = '"', strict_mode=true);
 ----
@@ -24,7 +24,7 @@ query IIIIIIII rowsort
 SELECT regexp_replace(file_path, '\\', '/', 'g'), line, column_idx, column_name, error_type, csv_line,line_byte_position, byte_position FROM reject_scans inner join reject_errors on (reject_scans.scan_id = reject_errors.scan_id and reject_scans.file_id = reject_errors.file_id);
 ----
-data/csv/rejects/unquoted/basic.csv 5 1 a UNQUOTED VALUE "blaaaaaaaaaaaaaa"bla,4 29 29
+{DATA_DIR}/csv/rejects/unquoted/basic.csv 5 1 a UNQUOTED VALUE "blaaaaaaaaaaaaaa"bla,4 29 29
 statement ok
 DROP TABLE reject_scans;
@@ -34,7 +34,7 @@ DROP TABLE reject_errors;
 query II
 SELECT * FROM read_csv(
-    'data/csv/rejects/unquoted/unquoted_new_line.csv',
+    '{DATA_DIR}/csv/rejects/unquoted/unquoted_new_line.csv',
     columns = {'a': 'VARCHAR', 'b': 'INTEGER'}, store_rejects=true, auto_detect=false, header = 1, quote = '"', escape = '"', strict_mode=true);
 ----
@@ -49,7 +49,7 @@ query IIIIIII rowsort
 SELECT regexp_replace(file_path, '\\', '/', 'g'), line, column_idx, column_name, error_type, line_byte_position,byte_position FROM reject_scans inner join reject_errors on (reject_scans.scan_id = reject_errors.scan_id and reject_scans.file_id = reject_errors.file_id);
 ----
-data/csv/rejects/unquoted/unquoted_new_line.csv 5 1 a UNQUOTED VALUE 29 29
+{DATA_DIR}/csv/rejects/unquoted/unquoted_new_line.csv 5 1 a UNQUOTED VALUE 29 29
 statement ok
 DROP TABLE reject_scans;
@@ -59,7 +59,7 @@ DROP TABLE reject_errors;
 query I
 SELECT * FROM read_csv(
-    'data/csv/rejects/unquoted/unquoted_last_value.csv',
+    '{DATA_DIR}/csv/rejects/unquoted/unquoted_last_value.csv',
     columns = {'a': 'VARCHAR'}, store_rejects=true, auto_detect=false, header = 0, quote = '"', escape = '"', strict_mode = true);
 ----
@@ -72,7 +72,7 @@ query IIIIIIII rowsort
 SELECT regexp_replace(file_path, '\\', '/', 'g'), line, column_idx, column_name, error_type, csv_line,line_byte_position, byte_position FROM reject_scans inner join reject_errors on (reject_scans.scan_id = reject_errors.scan_id and reject_scans.file_id = reject_errors.file_id);
 ----
-data/csv/rejects/unquoted/unquoted_last_value.csv 5 1 a UNQUOTED VALUE "bla 38 38
+{DATA_DIR}/csv/rejects/unquoted/unquoted_last_value.csv 5 1 a UNQUOTED VALUE "bla 38 38
 statement ok
 DROP TABLE reject_scans;
@@ -84,7 +84,7 @@ loop buffer_size 35 40
 query II
 SELECT * FROM read_csv(
-    'data/csv/rejects/unquoted/basic.csv',
+    '{DATA_DIR}/csv/rejects/unquoted/basic.csv',
     columns = {'a': 'VARCHAR', 'b': 'INTEGER'}, buffer_size=${buffer_size}, store_rejects=true, auto_detect=false, header = 1, quote = '"', escape = '"', strict_mode=true);
@@ -101,7 +101,7 @@ query IIIIIIII rowsort
 SELECT regexp_replace(file_path, '\\', '/', 'g'), line, column_idx, column_name, error_type, csv_line,line_byte_position, byte_position FROM reject_scans inner join reject_errors on (reject_scans.scan_id = reject_errors.scan_id and reject_scans.file_id = reject_errors.file_id);
 ----
-data/csv/rejects/unquoted/basic.csv 5 1 a UNQUOTED VALUE "blaaaaaaaaaaaaaa"bla,4 29 29
+{DATA_DIR}/csv/rejects/unquoted/basic.csv 5 1 a UNQUOTED VALUE "blaaaaaaaaaaaaaa"bla,4 29 29
 statement ok
 DROP TABLE reject_scans;
@@ -109,4 +109,4 @@ DROP TABLE reject_scans;
 statement ok
 DROP TABLE reject_errors;
-endloop
\ No newline at end of file
+endloop
diff --git a/test/sql/copy/csv/rejects/test_invalid_parameters.test b/test/sql/copy/csv/rejects/test_invalid_parameters.test
index 61624ccb09a9..7e28960ae82e 100644
--- a/test/sql/copy/csv/rejects/test_invalid_parameters.test
+++ b/test/sql/copy/csv/rejects/test_invalid_parameters.test
@@ -9,7 +9,7 @@ require notwindows
 # Test invalid arguments
 statement error
 SELECT * FROM read_csv(
-    'data/csv/error/mismatch/bad.csv',
+    '{DATA_DIR}/csv/error/mismatch/bad.csv',
     columns = {'col0': 'INTEGER', 'col1': 'INTEGER', 'col2': 'VARCHAR'},
     ignore_errors=false,
     store_rejects=true
@@ -19,7 +19,7 @@ STORE_REJECTS option is only supported when IGNORE_ERRORS is not manually set to
 statement error
 SELECT * FROM read_csv(
-    'data/csv/error/mismatch/bad.csv',
+    '{DATA_DIR}/csv/error/mismatch/bad.csv',
     columns = {'col0': 'INTEGER', 'col1': 'INTEGER', 'col2': 'VARCHAR'},
     ignore_errors=true,
     rejects_table='')
@@ -28,7 +28,7 @@ REJECTS_TABLE option cannot be empty
 statement error
 SELECT * FROM read_csv(
-    'data/csv/error/mismatch/bad.csv',
+    '{DATA_DIR}/csv/error/mismatch/bad.csv',
     columns = {'col0': 'INTEGER', 'col1': 'INTEGER', 'col2': 'VARCHAR'},
     ignore_errors=true,
     rejects_table='csv_rejects_table',
@@ -38,7 +38,7 @@ UNION_BY_NAME is set to true
 statement error
 SELECT * FROM read_csv(
-    'data/csv/error/mismatch/bad.csv',
+    '{DATA_DIR}/csv/error/mismatch/bad.csv',
     columns = {'col0': 'INTEGER', 'col1': 'INTEGER', 'col2': 'VARCHAR'},
     ignore_errors=true,
     rejects_limit=10)
@@ -47,7 +47,7 @@ REJECTS_LIMIT option is only supported when REJECTS_TABLE is set to a table name
 statement error
 SELECT * FROM read_csv(
-    'data/csv/error/mismatch/bad.csv',
+    '{DATA_DIR}/csv/error/mismatch/bad.csv',
     columns = {'col0': 'INTEGER', 'col1': 'INTEGER', 'col2': 'VARCHAR'},
     ignore_errors=true,
     rejects_table='csv_rejects_table',
@@ -58,7 +58,7 @@ REJECTS_LIMIT: cannot be negative
 # Test invalid arguments
 statement error
 SELECT * FROM read_csv_auto(
-    'data/csv/error/mismatch/bad.csv',
+    '{DATA_DIR}/csv/error/mismatch/bad.csv',
     ignore_errors=false,
     rejects_table='csv_rejects_table'
 )
@@ -67,7 +67,7 @@ option is only supported when IGNORE_ERRORS is not manually set to false
 statement error
 SELECT * FROM read_csv_auto(
-    'data/csv/error/mismatch/bad.csv',
+    '{DATA_DIR}/csv/error/mismatch/bad.csv',
     ignore_errors=true,
     rejects_table='')
 ----
@@ -75,7 +75,7 @@ REJECTS_TABLE option cannot be empty
 statement error
 SELECT * FROM read_csv_auto(
-    'data/csv/error/mismatch/bad.csv',
+    '{DATA_DIR}/csv/error/mismatch/bad.csv',
     ignore_errors=true,
     rejects_table='csv_rejects_table',
     union_by_name=true)
@@ -84,7 +84,7 @@ UNION_BY_NAME is set to true
 statement error
 SELECT * FROM read_csv_auto(
-    'data/csv/error/mismatch/bad.csv',
+    '{DATA_DIR}/csv/error/mismatch/bad.csv',
     ignore_errors=true,
     rejects_limit=10)
 ----
@@ -92,7 +92,7 @@ REJECTS_LIMIT option is only supported when REJECTS_TABLE is set to a table name
 statement error
 SELECT * FROM read_csv_auto(
-    'data/csv/error/mismatch/bad.csv',
+    '{DATA_DIR}/csv/error/mismatch/bad.csv',
     ignore_errors=true,
     rejects_table='csv_rejects_table',
     rejects_limit=-1)
@@ -102,7 +102,7 @@ REJECTS_LIMIT: cannot be negative
 query III
 SELECT typeof(first(column0)), typeof(first(column1)), COUNT(*) FROM read_csv_auto(
-    'data/csv/error/mismatch/big_bad*.csv',
+    '{DATA_DIR}/csv/error/mismatch/big_bad*.csv',
     sample_size=3000, rejects_table='csv_rejects_table', ignore_errors=true, header = 0);
diff --git a/test/sql/copy/csv/rejects/test_invalid_utf_rejects.test b/test/sql/copy/csv/rejects/test_invalid_utf_rejects.test
index 24948a67e581..be39a9f29cb0 100644
--- a/test/sql/copy/csv/rejects/test_invalid_utf_rejects.test
+++ b/test/sql/copy/csv/rejects/test_invalid_utf_rejects.test
@@ -8,7 +8,7 @@ require skip_reload
 require notwindows
 statement ok
-from read_csv('data/csv/test/invalid_utf_big.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'},
+from read_csv('{DATA_DIR}/csv/test/invalid_utf_big.csv',columns = {'col1': 'VARCHAR','col2': 'VARCHAR','col3': 'VARCHAR'},
     auto_detect=false, header = 0, delim = ',', store_rejects=true)
 query IIIIIIIII rowsort
diff --git a/test/sql/copy/csv/rejects/test_mixed.test b/test/sql/copy/csv/rejects/test_mixed.test
index 352d5c3dab98..ecbe526b05d1 100644
--- a/test/sql/copy/csv/rejects/test_mixed.test
+++ b/test/sql/copy/csv/rejects/test_mixed.test
@@ -9,7 +9,7 @@ require notwindows
 query III
 SELECT * FROM read_csv(
-    'data/csv/rejects/frankstein/nightmare.csv',
+    '{DATA_DIR}/csv/rejects/frankstein/nightmare.csv',
     columns = {'a': 'INTEGER', 'b': 'INTEGER', 'c': 'VARCHAR'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=20, strict_mode=true);
 ----
diff --git a/test/sql/copy/csv/rejects/test_multiple_errors_same_line.test b/test/sql/copy/csv/rejects/test_multiple_errors_same_line.test
index 6a687c4eb552..256dc22bf20b 100644
--- a/test/sql/copy/csv/rejects/test_multiple_errors_same_line.test
+++ b/test/sql/copy/csv/rejects/test_multiple_errors_same_line.test
@@ -8,7 +8,7 @@ require skip_reload
 require notwindows
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/cast_and_more_col.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/cast_and_more_col.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, strict_mode=True);
 ----
@@ -31,7 +31,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/multiple_cast_implicit.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/multiple_cast_implicit.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, strict_mode=True);
 ----
@@ -51,7 +51,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/multiple_casts_flush.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/multiple_casts_flush.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'tomorrow': 'DATE'},
     store_rejects = true, auto_detect=false, header = 1);
 ----
@@ -71,7 +71,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/multiple_casts_mixed.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/multiple_casts_mixed.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1);
 ----
@@ -92,7 +92,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/cast_and_less_col.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/cast_and_less_col.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1);
 ----
@@ -115,7 +115,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/cast_and_maxline.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/cast_and_maxline.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40);
 ----
@@ -135,7 +135,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/less_col_and_max_line.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/less_col_and_max_line.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40);
 ----
@@ -156,7 +156,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/more_col_and_max_line.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/more_col_and_max_line.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40, strict_mode=True);
 ----
@@ -177,7 +177,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/unquoted_cast.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/unquoted_cast.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40, strict_mode=true);
 ----
@@ -198,7 +198,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/unquoted_less.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/unquoted_less.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40, strict_mode=true);
 ----
@@ -218,7 +218,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/unquoted_maxline.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/unquoted_maxline.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40, strict_mode=true);
 ----
@@ -239,7 +239,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/unquoted_more.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/unquoted_more.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40, strict_mode=true);
 ----
@@ -259,7 +259,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/invalid_utf_cast.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/invalid_utf_cast.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40);
 ----
@@ -278,7 +278,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/invalid_utf_less.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/invalid_utf_less.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40);
 ----
@@ -298,7 +298,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/invalid_utf_max_line.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/invalid_utf_max_line.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40, strict_mode=True);
 ----
@@ -319,7 +319,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/invalid_utf_more.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/invalid_utf_more.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40, strict_mode=True);
 ----
@@ -339,7 +339,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIIII
-FROM read_csv('data/csv/rejects/multiple_errors/invalid_utf_unquoted.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/invalid_utf_unquoted.csv',
     columns = {'name': 'VARCHAR', 'last_name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40, strict_mode=true);
 ----
@@ -359,7 +359,7 @@ statement ok
 DROP TABLE reject_scans;
 query IIII
-FROM read_csv('data/csv/rejects/multiple_errors/multiple_errors.csv',
+FROM read_csv('{DATA_DIR}/csv/rejects/multiple_errors/multiple_errors.csv',
     columns = {'name': 'VARCHAR', 'age': 'INTEGER', 'current_day': 'DATE', 'barks': 'INTEGER'},
     store_rejects = true, auto_detect=false, header = 1, max_line_size=40, strict_mode=true);
 ----
diff --git a/test/sql/copy/csv/relaxed_quotes.test b/test/sql/copy/csv/relaxed_quotes.test
index 6bdfa8ede56b..5f53b81c0159 100644
--- a/test/sql/copy/csv/relaxed_quotes.test
+++ b/test/sql/copy/csv/relaxed_quotes.test
@@ -6,7 +6,7 @@ statement ok
 PRAGMA enable_verification
 query IIII
-from read_csv('data/csv/unescaped_quotes/stops.csv');
+from read_csv('{DATA_DIR}/csv/unescaped_quotes/stops.csv');
 ----
 "de:08115:1303:2:1" "Weil der Stadt Bahnhof" "48.7551432936956" "8.87272294455349"
 "de:08115:1303:2:4" "Weil der Stadt Bahnhof" "48.7542193911232" "8.87191446079767"
@@ -19,7 +19,7 @@ from read_csv('data/csv/unescaped_quotes/stops.csv');
 "de:08317:12007:2:1" "Lahr Schlüssel "Vis-à-Vis Bus"" "48.3411985847104" "7.87932997062448"
 query IIII
-from read_csv('data/csv/unescaped_quotes/stops.csv', strict_mode=false);
+from read_csv('{DATA_DIR}/csv/unescaped_quotes/stops.csv', strict_mode=false);
 ----
 de:08115:1303:2:1 Weil der Stadt Bahnhof 48.7551432936956 8.87272294455349
 de:08115:1303:2:4 Weil der Stadt Bahnhof 48.7542193911232 8.87191446079767
@@ -32,7 +32,7 @@ de:08317:12007:2:1 Lahr Schlüssel "Vis-à-Vis Bus" more text 48.3411985847104 7
 de:08317:12007:2:1 Lahr Schlüssel "Vis-à-Vis Bus" 48.3411985847104 7.87932997062448
 query II
-from read_csv('data/csv/unescaped_quotes/unescaped_quote.csv', escape = '"', strict_mode=false);
+from read_csv('{DATA_DIR}/csv/unescaped_quotes/unescaped_quote.csv', escape = '"', strict_mode=false);
 ----
 1 pedro pdet holanda
 2 pedro pdet holanda
@@ -42,7 +42,7 @@ from read_csv('data/csv/unescaped_quotes/unescaped_quote.csv', escape = '"', str
 6 pedro pdet holanda
 query II
-from read_csv('data/csv/unescaped_quotes/unescaped_quote.csv', strict_mode=false, escape = '', quote = '"', delim = ';');
+from read_csv('{DATA_DIR}/csv/unescaped_quotes/unescaped_quote.csv', strict_mode=false, escape = '', quote = '"', delim = ';');
 ----
 1 pedro "pdet" holanda
 2 pedro "pdet" holanda
@@ -55,7 +55,7 @@ from read_csv('data/csv/unescaped_quotes/unescaped_quote.csv', strict_mode=false
 loop buffer_size 30 35
 query II
-from read_csv('data/csv/unescaped_quotes/unescaped_quote.csv', strict_mode=false, escape = '', buffer_size = ${buffer_size}, header = 0)
+from read_csv('{DATA_DIR}/csv/unescaped_quotes/unescaped_quote.csv', strict_mode=false, escape = '', buffer_size = ${buffer_size}, header = 0)
 ----
 1 pedro "pdet" holanda
 2 pedro "pdet" holanda
@@ -67,7 +67,7 @@ from read_csv('data/csv/unescaped_quotes/unescaped_quote.csv', strict_mode=false
 endloop
 statement ok
-create table t as from read_csv('data/csv/unescaped_quotes/unescaped_quote_new_line.csv', strict_mode=false, header = 0)
+create table t as from read_csv('{DATA_DIR}/csv/unescaped_quotes/unescaped_quote_new_line.csv', strict_mode=false, header = 0)
 query I
 select count(*) from t;
@@ -78,41 +78,41 @@ statement ok
 drop table t;
 statement error
-create table t as from read_csv('data/csv/unescaped_quotes/unescaped_quote_new_line_rn.csv', strict_mode=false, buffer_size = 20, header = 0, delim = ';')
+create table t as from read_csv('{DATA_DIR}/csv/unescaped_quotes/unescaped_quote_new_line_rn.csv', strict_mode=false, buffer_size = 20, header = 0, delim = ';')
 ----
 statement ok
-create table t as from read_csv('data/csv/unescaped_quotes/unescaped_quote_new_line.csv', strict_mode=false, buffer_size = 30, header = 0, parallel = false)
+create table t as from read_csv('{DATA_DIR}/csv/unescaped_quotes/unescaped_quote_new_line.csv', strict_mode=false, buffer_size = 30, header = 0, parallel = false)
 statement ok
 drop table t
 statement ok
-create table t as from read_csv('data/csv/unescaped_quotes/unescaped_quote_new_line_rn.csv', strict_mode=false, buffer_size = 30, header = 0, parallel = false)
+create table t as from read_csv('{DATA_DIR}/csv/unescaped_quotes/unescaped_quote_new_line_rn.csv', strict_mode=false, buffer_size = 30, header = 0, parallel = false)
 query I
-FROM read_csv('data/csv/unescaped_quotes/end_quote.csv', columns = {'a':'varchar'}, header = false, quote = '"', strict_mode = false)
+FROM read_csv('{DATA_DIR}/csv/unescaped_quotes/end_quote.csv', columns = {'a':'varchar'}, header = false, quote = '"', strict_mode = false)
 ----
 Pedro "the" legend
 Pedro "the
 query I
-FROM read_csv('data/csv/unescaped_quotes/end_quote.csv', columns = {'a':'varchar'}, header = false, quote = '"', strict_mode = false)
+FROM read_csv('{DATA_DIR}/csv/unescaped_quotes/end_quote.csv', columns = {'a':'varchar'}, header = false, quote = '"', strict_mode = false)
 ----
 Pedro "the" legend
 Pedro "the
 query I
-FROM read_csv('data/csv/unescaped_quotes/end_quote_2.csv', columns = {'a':'varchar'}, header = false, quote = '"', strict_mode = false)
+FROM read_csv('{DATA_DIR}/csv/unescaped_quotes/end_quote_2.csv', columns = {'a':'varchar'}, header = false, quote = '"', strict_mode = false)
 ----
 Pedro "the" legend
 Pedro "the
 Pedro "the
 query I
-FROM read_csv('data/csv/unescaped_quotes/end_quote_3.csv', columns = {'a':'varchar'}, header = false, quote = '"', strict_mode = false)
+FROM read_csv('{DATA_DIR}/csv/unescaped_quotes/end_quote_3.csv', columns = {'a':'varchar'}, header = false, quote = '"', strict_mode = false)
 ----
 Pedro "the
 Pedro "the" legend
@@ -120,7 +120,7 @@ Pedro "the
 # What happens when we mix everything
 query I
-FROM read_csv('data/csv/unescaped_quotes/some_escaped_some_not.csv', columns = {'a':'varchar'}, header = false, quote = '"', escape = '"', strict_mode = false)
+FROM read_csv('{DATA_DIR}/csv/unescaped_quotes/some_escaped_some_not.csv', columns = {'a':'varchar'}, header = false, quote = '"', escape = '"', strict_mode = false)
 ----
 Pedro "the" legend
 Pedro the legend
@@ -129,7 +129,7 @@ Pedro the
 # It is not possible to read this with escape set.
 query I
-FROM read_csv('data/csv/unescaped_quotes/end_quote_mixed.csv', columns = {'a':'varchar'}, header = false, quote = '"', escape = '', strict_mode = false)
+FROM read_csv('{DATA_DIR}/csv/unescaped_quotes/end_quote_mixed.csv', columns = {'a':'varchar'}, header = false, quote = '"', escape = '', strict_mode = false)
 ----
 Pedro "the
 Pedro ""the"" legend
diff --git a/test/sql/copy/csv/struct.test b/test/sql/copy/csv/struct.test
index 5a181fa5ff50..0e5ebfd0aaf7 100644
--- a/test/sql/copy/csv/struct.test
+++ b/test/sql/copy/csv/struct.test
@@ -6,6 +6,6 @@ statement ok
 PRAGMA enable_verification
 query I
-FROM 'data/csv/struct.csv'
+FROM '{DATA_DIR}/csv/struct.csv'
 ----
 {'a': 1, 'b': 2}
\ No newline at end of file
diff --git a/test/sql/copy/csv/struct_padding.test b/test/sql/copy/csv/struct_padding.test
index c64ed409005a..8939d1763839 100644
--- a/test/sql/copy/csv/struct_padding.test
+++ b/test/sql/copy/csv/struct_padding.test
@@ -6,7 +6,7 @@ statement ok
 PRAGMA enable_verification
 query I
-SELECT * FROM read_csv('data/csv/test/struct_padding.csv', columns={'col': 'STRUCT(val VARCHAR)'}) ORDER BY 1;
+SELECT * FROM read_csv('{DATA_DIR}/csv/test/struct_padding.csv', columns={'col': 'STRUCT(val VARCHAR)'}) ORDER BY 1;
 ----
 {'val': ''}
 {'val': ''}
diff --git a/test/sql/copy/csv/test_11403.test b/test/sql/copy/csv/test_11403.test
index 3a49d55342f0..1b542cd7884c 100644
--- a/test/sql/copy/csv/test_11403.test
+++ b/test/sql/copy/csv/test_11403.test
@@ -9,11 +9,11 @@ PRAGMA enable_verification
 require notwindows
 query I
-SELECT COUNT(*) FROM 'data/csv/quoted_newline.csv'
+SELECT COUNT(*) FROM '{DATA_DIR}/csv/quoted_newline.csv'
 ----
 1
 query I
-SELECT quote FROM sniff_csv('data/csv/quoted_newline.csv')
+SELECT quote FROM sniff_csv('{DATA_DIR}/csv/quoted_newline.csv')
 ----
 "
\ No newline at end of file
diff --git a/test/sql/copy/csv/test_11840.test b/test/sql/copy/csv/test_11840.test
index b84462d7c6f4..56edff2ee986 100644
--- a/test/sql/copy/csv/test_11840.test
+++ b/test/sql/copy/csv/test_11840.test
@@ -6,13 +6,13 @@ statement ok
 PRAGMA enable_verification
 query I
-FROM 'data/csv/date_specificity.csv'
+FROM '{DATA_DIR}/csv/date_specificity.csv'
 ----
 0.00
 12/17/2019
 query I
-select d FROM 'data/csv/special_date.csv'
+select d FROM '{DATA_DIR}/csv/special_date.csv'
 ----
 2000-01-01
 2002-02-02
diff --git a/test/sql/copy/csv/test_12596.test b/test/sql/copy/csv/test_12596.test
index c29fad86e95e..350f9f538ab6 100644
--- a/test/sql/copy/csv/test_12596.test
+++ b/test/sql/copy/csv/test_12596.test
@@ -6,19 +6,19 @@ statement ok
 PRAGMA enable_verification
 query III
-from read_csv('data/csv/bug_12596.csv', skip=1, delim=',', header=false, columns={'c1': 'INTEGER', 'c2': 'INTEGER', 'column2': 'VARCHAR'}, null_padding = true);
+from read_csv('{DATA_DIR}/csv/bug_12596.csv', skip=1, delim=',', header=false, columns={'c1': 'INTEGER', 'c2': 'INTEGER', 'column2': 'VARCHAR'}, null_padding = true);
 ----
 1 2 NULL
 3 4 NULL
 query III
-from read_csv('data/csv/bug_12596.csv', skip=1, delim=',', header=false, columns={'c1': 'INTEGER', 'c2': 'INTEGER', 'column2': 'VARCHAR'}, null_padding = true, parallel = false, auto_detect = false);
+from read_csv('{DATA_DIR}/csv/bug_12596.csv', skip=1, delim=',', header=false, columns={'c1': 'INTEGER', 'c2': 'INTEGER', 'column2': 'VARCHAR'}, null_padding = true, parallel = false, auto_detect = false);
 ----
 1 2 NULL
 3 4 NULL
 query III
-from read_csv('data/csv/bug_12596.csv', skip=1, delim=',', header=false, columns={'c1': 'INTEGER', 'c2': 'INTEGER', 'column2': 'VARCHAR'}, null_padding = true, parallel = false);
+from read_csv('{DATA_DIR}/csv/bug_12596.csv', skip=1, delim=',', header=false, columns={'c1': 'INTEGER', 'c2': 'INTEGER', 'column2': 'VARCHAR'}, null_padding = true, parallel = false);
 ----
 1 2 NULL
 3 4 NULL
\ No newline at end of file
diff --git a/test/sql/copy/csv/test_15211.test b/test/sql/copy/csv/test_15211.test
index fa23693938d0..6215339c5e33 100644
--- a/test/sql/copy/csv/test_15211.test
+++ b/test/sql/copy/csv/test_15211.test
@@ -9,7 +9,7 @@ statement ok
 set threads=1
 query I
-select count(*) FROM read_csv(['data/csv/drug_exposure.csv', 'data/csv/drug_exposure.csv','data/csv/drug_exposure.csv', 'data/csv/drug_exposure.csv','data/csv/drug_exposure.csv', 'data/csv/drug_exposure.csv','data/csv/drug_exposure.csv', 'data/csv/drug_exposure.csv','data/csv/drug_exposure.csv'], buffer_size = 500)
+select count(*) FROM read_csv(['{DATA_DIR}/csv/drug_exposure.csv', '{DATA_DIR}/csv/drug_exposure.csv','{DATA_DIR}/csv/drug_exposure.csv', '{DATA_DIR}/csv/drug_exposure.csv','{DATA_DIR}/csv/drug_exposure.csv', '{DATA_DIR}/csv/drug_exposure.csv','{DATA_DIR}/csv/drug_exposure.csv', '{DATA_DIR}/csv/drug_exposure.csv','{DATA_DIR}/csv/drug_exposure.csv'], buffer_size = 500)
 ----
 37017
diff --git a/test/sql/copy/csv/test_15473.test b/test/sql/copy/csv/test_15473.test
index da9bd85a6a6e..0c6c198b6b41 100644
--- a/test/sql/copy/csv/test_15473.test
+++ b/test/sql/copy/csv/test_15473.test
@@ -6,27 +6,27 @@ statement ok
 PRAGMA enable_verification
 query I
-select columns from sniff_csv('data/csv/15473.csv');
+select columns from sniff_csv('{DATA_DIR}/csv/15473.csv');
 ----
 [{'name': name, 'type': VARCHAR}, {'name': Date, 'type': VARCHAR}, {'name': height, 'type': BIGINT}]
 query I
-select columns from sniff_csv('data/csv/15473_time.csv');
+select columns from sniff_csv('{DATA_DIR}/csv/15473_time.csv');
 ----
 [{'name': a, 'type': VARCHAR}]
 query I
-select columns from sniff_csv('data/csv/15473_timestamp.csv');
+select columns from sniff_csv('{DATA_DIR}/csv/15473_timestamp.csv');
 ----
 [{'name': a, 'type': VARCHAR}]
 query I
-select columns from sniff_csv('data/csv/15473_date_timestamp.csv');
+select columns from sniff_csv('{DATA_DIR}/csv/15473_date_timestamp.csv');
 ----
 [{'name': a, 'type': VARCHAR}]
 query I
-select columns from sniff_csv('data/csv/15473_time_timestamp.csv');
+select columns from sniff_csv('{DATA_DIR}/csv/15473_time_timestamp.csv');
 ----
 [{'name': a, 'type': VARCHAR}]
@@ -39,9 +39,9 @@ statement ok
 insert into t1 values ('1','1','1');
 statement ok
-COPY t1 TO '__TEST_DIR__/date_int.csv' (FORMAT CSV);
+COPY t1 TO '{TEMP_DIR}/date_int.csv' (FORMAT CSV);
 query I
-select columns from sniff_csv('__TEST_DIR__/date_int.csv');
+select columns from sniff_csv('{TEMP_DIR}/date_int.csv');
 ----
 [{'name': a, 'type': VARCHAR}, {'name': b, 'type': VARCHAR}, {'name': c, 'type': VARCHAR}]
diff --git a/test/sql/copy/csv/test_8890.test b/test/sql/copy/csv/test_8890.test
index 73cdcdecf72b..0d9145ac5248 100644
--- a/test/sql/copy/csv/test_8890.test
+++ b/test/sql/copy/csv/test_8890.test
@@ -11,7 +11,7 @@ select
 field2,
 min(datum) as firstseen,
 max(datum) as lastseen
-from read_csv('data/csv/hebere.csv.gz',
+from read_csv('{DATA_DIR}/csv/hebere.csv.gz',
 delim='\t',
 header=0,
 columns={field1: varchar, field2: varchar, datum: varchar},
diff --git a/test/sql/copy/csv/test_9005.test b/test/sql/copy/csv/test_9005.test
index de9a4e4216c7..ca1add98c4b8 100644
--- a/test/sql/copy/csv/test_9005.test
+++ b/test/sql/copy/csv/test_9005.test
@@ -9,8 +9,8 @@ PRAGMA enable_verification
 require notwindows
 query IIII
-SELECT * FROM read_csv_auto('data/csv/bug_9005/teste*.csv',filename=true,union_by_name=True) where filename='data/csv/bug_9005/teste1.csv';
+SELECT * FROM read_csv_auto('{DATA_DIR}/csv/bug_9005/teste*.csv',filename=true,union_by_name=True) where filename='{DATA_DIR}/csv/bug_9005/teste1.csv';
 ----
-1 Ricardo 1.5 data/csv/bug_9005/teste1.csv
-2 Jose 2.0 data/csv/bug_9005/teste1.csv
+1 Ricardo 1.5 {DATA_DIR}/csv/bug_9005/teste1.csv
+2 Jose 2.0 {DATA_DIR}/csv/bug_9005/teste1.csv
diff --git a/test/sql/copy/csv/test_afl.test b/test/sql/copy/csv/test_afl.test
index 5e75f606b2d4..d6d3fb98affc 100644
--- a/test/sql/copy/csv/test_afl.test
+++ b/test/sql/copy/csv/test_afl.test
@@ -6,102 +6,102 @@ statement ok
 PRAGMA enable_verification
 statement maybe
-FROM read_csv('data/csv/afl/1.csv', force_not_null=012%0);
+FROM read_csv('{DATA_DIR}/csv/afl/1.csv', force_not_null=012%0);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/2.csv', max_line_size=-9151315542319464311);
+FROM read_csv('{DATA_DIR}/csv/afl/2.csv', max_line_size=-9151315542319464311);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/3.csv', max_line_size=-1003718790012071149, ignore_errors=true);
+FROM read_csv('{DATA_DIR}/csv/afl/3.csv', max_line_size=-1003718790012071149, ignore_errors=true);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/4.csv', max_line_size=-432344490485710328, all_varchar=false);
+FROM read_csv('{DATA_DIR}/csv/afl/4.csv', max_line_size=-432344490485710328, all_varchar=false);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/5.csv', max_line_size=-68088296696312078);
+FROM read_csv('{DATA_DIR}/csv/afl/5.csv', max_line_size=-68088296696312078);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/6.csv', buffer_size=42);
+FROM read_csv('{DATA_DIR}/csv/afl/6.csv', buffer_size=42);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/7.csv', buffer_size=42);
+FROM read_csv('{DATA_DIR}/csv/afl/7.csv', buffer_size=42);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/8.csv', buffer_size=42);
+FROM read_csv('{DATA_DIR}/csv/afl/8.csv', buffer_size=42);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/9.csv', buffer_size=42);
+FROM read_csv('{DATA_DIR}/csv/afl/9.csv', buffer_size=42);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/10.csv', buffer_size=42);
+FROM read_csv('{DATA_DIR}/csv/afl/10.csv', buffer_size=42);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/11.csv', max_line_size=-7987305294711008699, rejects_limit=42);
+FROM read_csv('{DATA_DIR}/csv/afl/11.csv', max_line_size=-7987305294711008699, rejects_limit=42);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/12.csv', rejects_table='d');
+FROM read_csv('{DATA_DIR}/csv/afl/12.csv', rejects_table='d');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/13.csv', rejects_table='d');
+FROM read_csv('{DATA_DIR}/csv/afl/13.csv', rejects_table='d');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/14.csv', rejects_table='d');
+FROM read_csv('{DATA_DIR}/csv/afl/14.csv', rejects_table='d');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/15.csv', rejects_table='d');
+FROM read_csv('{DATA_DIR}/csv/afl/15.csv', rejects_table='d');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/16.csv', rejects_table='d');
+FROM read_csv('{DATA_DIR}/csv/afl/16.csv', rejects_table='d');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/17.csv', rejects_table='d');
+FROM read_csv('{DATA_DIR}/csv/afl/17.csv', rejects_table='d');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/18.csv', rejects_table='d');
+FROM read_csv('{DATA_DIR}/csv/afl/18.csv', rejects_table='d');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/19.csv', rejects_table='d');
+FROM read_csv('{DATA_DIR}/csv/afl/19.csv', rejects_table='d');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/20.csv', rejects_table='d');
+FROM read_csv('{DATA_DIR}/csv/afl/20.csv', rejects_table='d');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/21.csv', rejects_table='d');
+FROM read_csv('{DATA_DIR}/csv/afl/21.csv', rejects_table='d');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/22.csv', rejects_table='d');
+FROM read_csv('{DATA_DIR}/csv/afl/22.csv', rejects_table='d');
 ----
 statement maybe
-FROM read_csv('data/csv/afl/23.csv', buffer_size=42);
+FROM read_csv('{DATA_DIR}/csv/afl/23.csv', buffer_size=42);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/24.csv', buffer_size=26, delim=';', header=true, allow_quoted_nulls=true, allow_quoted_nulls=true);
+FROM read_csv('{DATA_DIR}/csv/afl/24.csv', buffer_size=26, delim=';', header=true, allow_quoted_nulls=true, allow_quoted_nulls=true);
 ----
 statement maybe
-FROM read_csv('data/csv/afl/25.csv', buffer_size=734771105608237082, max_line_size=-8825501086615982989, allow_quoted_nulls=true);
+FROM read_csv('{DATA_DIR}/csv/afl/25.csv', buffer_size=734771105608237082, max_line_size=-8825501086615982989, allow_quoted_nulls=true);
 ----
diff --git a/test/sql/copy/csv/test_all_quotes.test b/test/sql/copy/csv/test_all_quotes.test
index 911ca000f420..a6b579b72fbd 100644
--- a/test/sql/copy/csv/test_all_quotes.test
+++ b/test/sql/copy/csv/test_all_quotes.test
@@ -6,6 +6,6 @@ statement ok
 PRAGMA enable_verification
 query I
-SELECT quote FROM sniff_csv('data/csv/all_quotes.csv', ignore_errors = 1)
+SELECT quote FROM sniff_csv('{DATA_DIR}/csv/all_quotes.csv', ignore_errors = 1)
 ----
 "
\ No newline at end of file
diff --git a/test/sql/copy/csv/test_allow_quoted_nulls_option.test b/test/sql/copy/csv/test_allow_quoted_nulls_option.test
index a9100c43bf7f..31202d7b18ac 100644
--- a/test/sql/copy/csv/test_allow_quoted_nulls_option.test
+++ b/test/sql/copy/csv/test_allow_quoted_nulls_option.test
@@ -7,21 +7,21 @@ PRAGMA enable_verification
 # allow_quoted_nulls set as true by default
 query II
-SELECT * FROM read_csv_auto('data/csv/null_comparison.csv');
+SELECT * FROM read_csv_auto('{DATA_DIR}/csv/null_comparison.csv');
 ----
 42 NULL
 NULL NULL
 88 a
 query II
-SELECT * FROM read_csv_auto('data/csv/null_comparison.csv', allow_quoted_nulls=False);
+SELECT * FROM read_csv_auto('{DATA_DIR}/csv/null_comparison.csv', allow_quoted_nulls=False);
 ----
 42 (empty)
 NULL NULL
 88 a
 query II
-SELECT * FROM read_csv_auto('data/csv/null_comparison.csv', allow_quoted_nulls=True);
+SELECT * FROM read_csv_auto('{DATA_DIR}/csv/null_comparison.csv', allow_quoted_nulls=True);
 ----
 42 NULL
 NULL NULL
diff --git a/test/sql/copy/csv/test_auto_date.test b/test/sql/copy/csv/test_auto_date.test
index 03e7f27eab53..04433d88ba30 100644
--- a/test/sql/copy/csv/test_auto_date.test
+++ b/test/sql/copy/csv/test_auto_date.test
@@ -10,10 +10,10 @@ CREATE TABLE date_tests (a DATE)
 # -- Working cases (AUTO is Auto-Detected):
 statement ok
-copy date_tests from 'data/csv/auto_date/date_example_1.csv';
+copy date_tests from '{DATA_DIR}/csv/auto_date/date_example_1.csv';
 statement ok
-copy date_tests from 'data/csv/auto_date/date_example_2.csv';
+copy date_tests from '{DATA_DIR}/csv/auto_date/date_example_2.csv';
 query I
 FROM date_tests
@@ -34,12 +34,12 @@ CREATE TABLE date_tests (a DATE)
 # -- Failed cases (AUTO is Set By User)
 # -- file's date format is YYYY-MM-DD
 statement ok
-copy date_tests from 'data/csv/auto_date/date_example_1.csv' WITH (dateformat 'AUTO');
+copy date_tests from '{DATA_DIR}/csv/auto_date/date_example_1.csv' WITH (dateformat 'AUTO');
 # -- file's date format is %m/%d/%y
 statement ok
-copy date_tests from 'data/csv/auto_date/date_example_2.csv' WITH (dateformat 'AUTO');
+copy date_tests from '{DATA_DIR}/csv/auto_date/date_example_2.csv' WITH (dateformat 'AUTO');
 query I
 FROM date_tests
@@ -71,7 +71,7 @@ CREATE TABLE stg_device_metadata_with_dates (
 statement ok
 COPY stg_device_metadata_with_dates
-    FROM 'data/csv/auto_date/device_metadata_1.csv' WITH (
+    FROM '{DATA_DIR}/csv/auto_date/device_metadata_1.csv' WITH (
     delimiter ',',
     skip '1',
    header 'false'
diff --git a/test/sql/copy/csv/test_auto_detection_headers.test b/test/sql/copy/csv/test_auto_detection_headers.test
index b842517ae622..253a0a6319bf 100644
--- a/test/sql/copy/csv/test_auto_detection_headers.test
+++ b/test/sql/copy/csv/test_auto_detection_headers.test
@@ -6,27 +6,27 @@ statement ok
 PRAGMA enable_verification
 query I
-from read_csv('data/csv/headers/undetected_type.csv', delim = ';')
+from read_csv('{DATA_DIR}/csv/headers/undetected_type.csv', delim = ';')
 ----
 68,527.00
 query I
-from 'data/csv/headers/all_varchar.csv'
+from '{DATA_DIR}/csv/headers/all_varchar.csv'
 ----
 Pedro
 query II
-from 'data/csv/headers/single_line.csv'
+from '{DATA_DIR}/csv/headers/single_line.csv'
 ----
 query I
-from 'data/csv/headers/borked_type.csv'
+from '{DATA_DIR}/csv/headers/borked_type.csv'
 ----
 02/01/2019
 08//01/2019
 # This should work even with only one line
 query I
-from 'data/csv/headers/integer.csv'
+from '{DATA_DIR}/csv/headers/integer.csv'
 ----
 32
\ No newline at end of file
diff --git a/test/sql/copy/csv/test_bgzf_read.test b/test/sql/copy/csv/test_bgzf_read.test
index cbd8387882c5..78b91307e799 100644
--- a/test/sql/copy/csv/test_bgzf_read.test
+++ b/test/sql/copy/csv/test_bgzf_read.test
@@ -7,7 +7,7 @@ PRAGMA enable_verification
 statement ok
-CREATE TABLE bgzf AS SELECT * FROM read_csv_auto('data/csv/test/bgzf.gz');
+CREATE TABLE bgzf AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/test/bgzf.gz');
 query I
 SELECT COUNT(*) FROM bgzf;
@@ -15,7 +15,7 @@ SELECT COUNT(*) FROM bgzf;
 7
 statement ok
-CREATE TABLE concat AS SELECT * FROM read_csv_auto('data/csv/test/concat.gz');
+CREATE TABLE concat AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/test/concat.gz');
 query I
 SELECT COUNT(*) FROM concat;
diff --git a/test/sql/copy/csv/test_big_header.test b/test/sql/copy/csv/test_big_header.test
index 0efc5fa6100d..0d5443361114 100644
--- a/test/sql/copy/csv/test_big_header.test
+++ b/test/sql/copy/csv/test_big_header.test
@@ -9,7 +9,7 @@ statement ok
 CREATE TABLE test (foo INTEGER, bar VARCHAR(10), baz VARCHAR(10), bam VARCHAR(10));
 query I
-COPY test FROM 'data/csv/test/big_header.csv' (DELIMITER ' ', HEADER false, SKIP 3);
+COPY test FROM '{DATA_DIR}/csv/test/big_header.csv' (DELIMITER ' ', HEADER false, SKIP 3);
 ----
 3
@@ -19,7 +19,7 @@ SELECT COUNT(bam) FROM test WHERE bam = '!';
 3
 query I
-INSERT INTO test SELECT * FROM read_csv('data/csv/test/big_header.csv', HEADER=FALSE, SKIP=3, DELIM=' ', columns=STRUCT_PACK(foo := 'INTEGER', bar := 'VARCHAR', baz := 'VARCHAR', bam := 'VARCHAR'));
+INSERT INTO test SELECT * FROM read_csv('{DATA_DIR}/csv/test/big_header.csv', HEADER=FALSE, SKIP=3, DELIM=' ', columns=STRUCT_PACK(foo := 'INTEGER', bar := 'VARCHAR', baz := 'VARCHAR', bam := 'VARCHAR'));
 ----
 3
@@ -29,7 +29,7 @@ SELECT COUNT(bam) FROM test WHERE bam = '!';
 6
 query I
-INSERT INTO test SELECT * FROM read_csv_auto('data/csv/test/big_header.csv', HEADER=FALSE, SKIP=3, DELIM=' ');
+INSERT INTO test SELECT * FROM read_csv_auto('{DATA_DIR}/csv/test/big_header.csv', HEADER=FALSE, SKIP=3, DELIM=' ');
 ----
 3
diff --git a/test/sql/copy/csv/test_blob.test b/test/sql/copy/csv/test_blob.test
index 0fba00bcef70..071ba24f0c66 100644
--- a/test/sql/copy/csv/test_blob.test
+++ b/test/sql/copy/csv/test_blob.test
@@ -14,7 +14,7 @@ CREATE TABLE blobs (b BYTEA);
 # We must set auto_detect to 0 since blobs are not supported in auto-detection
 query I
-COPY blobs FROM 'data/csv/test/blob.csv' (HEADER 0, AUTO_DETECT 0);
+COPY blobs FROM '{DATA_DIR}/csv/test/blob.csv' (HEADER 0, AUTO_DETECT 0);
 ----
 1
@@ -26,7 +26,7 @@ SELECT b FROM blobs
 # testing copying to a file and copying them back in
 query I
-COPY blobs TO '__TEST_DIR__/blob.csv';
+COPY blobs TO '{TEMP_DIR}/blob.csv';
 ----
 1
@@ -34,7 +34,7 @@ statement ok
 DELETE FROM blobs
 query I
-COPY blobs FROM '__TEST_DIR__/blob.csv';
+COPY blobs FROM '{TEMP_DIR}/blob.csv';
 ----
 1
@@ -45,7 +45,7 @@ SELECT b FROM blobs
 # now test it with a delimiter that occurs IN the blob
 query I
-COPY blobs TO '__TEST_DIR__/blob.csv' DELIMITER 'A' QUOTE 'B' ESCAPE 'C';
+COPY blobs TO '{TEMP_DIR}/blob.csv' DELIMITER 'A' QUOTE 'B' ESCAPE 'C';
 ----
 1
@@ -53,7 +53,7 @@ statement ok
 DELETE FROM blobs
 query I
-COPY blobs FROM '__TEST_DIR__/blob.csv' (DELIMITER 'A', QUOTE 'B', ESCAPE 'C');
+COPY blobs FROM '{TEMP_DIR}/blob.csv' (DELIMITER 'A', QUOTE 'B', ESCAPE 'C');
 ----
 1
diff --git a/test/sql/copy/csv/test_bug_10273.test b/test/sql/copy/csv/test_bug_10273.test
index bda2a8d35a10..192df3ea9a53 100644
--- a/test/sql/copy/csv/test_bug_10273.test
+++ b/test/sql/copy/csv/test_bug_10273.test
@@ -6,7 +6,7 @@ statement ok
 PRAGMA enable_verification
 query II
-FROM read_csv('data/csv/bug_10273.csv', header=0)
+FROM read_csv('{DATA_DIR}/csv/bug_10273.csv', header=0)
 ----
 这是alice的苹果,所以你不能吃。 This is Alice's apple, so you can't eat it.
 这是alice的苹果,所以你不能吃。 This is "Alice's apple", so you can't eat it.
diff --git a/test/sql/copy/csv/test_bug_9952.test_slow b/test/sql/copy/csv/test_bug_9952.test_slow
index 012e7ab6e8f0..e78a9a40878a 100644
--- a/test/sql/copy/csv/test_bug_9952.test_slow
+++ b/test/sql/copy/csv/test_bug_9952.test_slow
@@ -6,12 +6,12 @@ statement ok
 PRAGMA enable_verification
 statement error
-FROM read_csv('data/csv/num.tsv.gz',quote = '"')
+FROM read_csv('{DATA_DIR}/csv/num.tsv.gz',quote = '"')
 ----
 * Disable the parser's strict mode (strict_mode=false) to allow reading rows that do not comply with the CSV standard.
 statement ok
-FROM read_csv('data/csv/num.tsv.gz',quote = '"', strict_mode = false)
+FROM read_csv('{DATA_DIR}/csv/num.tsv.gz',quote = '"', strict_mode = false)
 statement ok
-FROM read_csv('data/csv/num.tsv.gz')
+FROM read_csv('{DATA_DIR}/csv/num.tsv.gz')
diff --git a/test/sql/copy/csv/test_column_inconsistencies.test b/test/sql/copy/csv/test_column_inconsistencies.test
index 022d65f9404d..c4f28688a9c0 100644
--- a/test/sql/copy/csv/test_column_inconsistencies.test
+++ b/test/sql/copy/csv/test_column_inconsistencies.test
@@ -6,7 +6,7 @@ statement ok
 PRAGMA enable_verification
 query IIII
-FROM read_csv('data/csv/inconsistencies/inconsistent_columns_3.csv')
+FROM read_csv('{DATA_DIR}/csv/inconsistencies/inconsistent_columns_3.csv')
 ----
 1 2 3 4
 1 2 3 4
@@ -15,7 +15,7 @@ FROM read_csv('data/csv/inconsistencies/inconsistent_columns_3.csv')
 1 2 3 4
 query IIII
-FROM read_csv('data/csv/inconsistencies/inconsistent_columns_3.csv', null_padding = true)
+FROM read_csv('{DATA_DIR}/csv/inconsistencies/inconsistent_columns_3.csv', null_padding = true)
 ----
 1 2 3 4
 1 2 3 4
@@ -24,7 +24,7 @@ FROM read_csv('data/csv/inconsistencies/inconsistent_columns_3.csv', null_paddin
 1 2 3 4
 query IIII
-FROM read_csv('data/csv/inconsistencies/inconsistent_columns_3.csv', null_padding = true, ignore_errors = true)
+FROM read_csv('{DATA_DIR}/csv/inconsistencies/inconsistent_columns_3.csv', null_padding = true, ignore_errors = true)
 ----
 1 2 3 4
 1 2 3 4
@@ -34,7 +34,7 @@ FROM read_csv('data/csv/inconsistencies/inconsistent_columns_3.csv', null_paddin
 query I
-FROM read_csv('data/csv/inconsistencies/inconsistent_columns_6.csv')
+FROM read_csv('{DATA_DIR}/csv/inconsistencies/inconsistent_columns_6.csv')
 ----
 1,2,3,4
 2,2,3,4,5
@@ -43,7 +43,7 @@ FROM read_csv('data/csv/inconsistencies/inconsistent_columns_6.csv')
 5,2,3,4
 query IIIII
-FROM read_csv('data/csv/inconsistencies/inconsistent_columns_6.csv', null_padding = true)
+FROM read_csv('{DATA_DIR}/csv/inconsistencies/inconsistent_columns_6.csv', null_padding = true)
 ----
 1 2 3 4 NULL
 2 2 3 4 5
@@ -52,7 +52,7 @@ FROM read_csv('data/csv/inconsistencies/inconsistent_columns_6.csv', null_paddin
 5 2 3 4 NULL
 query IIIII
-FROM read_csv('data/csv/inconsistencies/inconsistent_columns_6.csv', null_padding = true, ignore_errors = true)
+FROM read_csv('{DATA_DIR}/csv/inconsistencies/inconsistent_columns_6.csv', null_padding = true, ignore_errors = true)
 ----
 1 2 3 4 NULL
 2 2 3 4 5
@@ -61,7 +61,7 @@ FROM read_csv('data/csv/inconsistencies/inconsistent_columns_6.csv', null_paddin
 5 2 3 4 NULL
 query IIIII
-FROM read_csv('data/csv/inconsistencies/inconsistent_columns_6.csv', null_padding = true, ignore_errors = true, header = false)
+FROM read_csv('{DATA_DIR}/csv/inconsistencies/inconsistent_columns_6.csv', null_padding = true, ignore_errors = true, header = false)
 ----
 one two three four NULL
 1 2 3 4 NULL
@@ -71,7 +71,7 @@ one two three four NULL
 5 2 3 4 NULL
 query IIII
-FROM read_csv('data/csv/inconsistencies/line_with_spaces.csv')
+FROM read_csv('{DATA_DIR}/csv/inconsistencies/line_with_spaces.csv')
 ----
 1 2 3 4
 1 2 3 4
@@ -80,7 +80,7 @@ FROM read_csv('data/csv/inconsistencies/line_with_spaces.csv')
 # Header gets nullpadded instead of thrown away
 query IIII
-FROM read_csv('data/csv/inconsistencies/line_with_spaces.csv', null_padding = true, header = false)
+FROM read_csv('{DATA_DIR}/csv/inconsistencies/line_with_spaces.csv', null_padding = true, header = false)
 ----
 Line with spaces NULL NULL NULL
 a b c d
@@ -90,7 +90,7 @@ a b c d
 1 2 3 4
 query IIII
-FROM read_csv('data/csv/inconsistencies/line_with_spaces.csv', null_padding = true, ignore_errors = true)
+FROM read_csv('{DATA_DIR}/csv/inconsistencies/line_with_spaces.csv', null_padding = true, ignore_errors = true)
 ----
 a b c d
 1 2 3 4
diff --git a/test/sql/copy/csv/test_comment_midline.test b/test/sql/copy/csv/test_comment_midline.test
index e4a0a3bc51a7..e08d2c07c906 100644
--- a/test/sql/copy/csv/test_comment_midline.test
+++ b/test/sql/copy/csv/test_comment_midline.test
@@ -6,7 +6,7 @@ statement ok
 PRAGMA enable_verification
 query II
-from read_csv('data/csv/comments/14648.csv',
+from read_csv('{DATA_DIR}/csv/comments/14648.csv',
     header=false,
     sep=',',
     null_padding=true,
@@ -19,7 +19,7 @@ a NULL
 x y
 query II
-from read_csv('data/csv/comments/14648.csv',
+from read_csv('{DATA_DIR}/csv/comments/14648.csv',
     header=false,
     sep=',',
    comment='#',
@@ -30,65 +30,65 @@ a b
 x y
 statement error
-from read_csv('data/csv/comments/14648.csv',
+from read_csv('{DATA_DIR}/csv/comments/14648.csv',
     header=false,
     sep=',',
     comment='#'
 );
 ----
-Error when sniffing file "data/csv/comments/14648.csv".
+Error when sniffing file "{DATA_DIR}/csv/comments/14648.csv".
 # Not a comment
 query III
-FROM 'data/csv/comments/14635.csv'
+FROM '{DATA_DIR}/csv/comments/14635.csv'
 ----
 a #b c
 # Ignore errors will have,
 query III
-FROM read_csv('data/csv/comments/14635.csv', ignore_errors = true, columns = {'header1': 'VARCHAR', 'header2': 'VARCHAR', 'header3': 'VARCHAR'}, quote = '', escape = '', delim =',');
+FROM read_csv('{DATA_DIR}/csv/comments/14635.csv', ignore_errors = true, columns = {'header1': 'VARCHAR', 'header2': 'VARCHAR', 'header3': 'VARCHAR'}, quote = '', escape = '', delim =',');
 ----
 a #b c
 # If we only have midline comments, the comment must be explicitly given by the user
 query I
-FROM 'data/csv/comments/only_midline.csv';
+FROM '{DATA_DIR}/csv/comments/only_midline.csv';
 ----
 6;7
 query II
-FROM read_csv('data/csv/comments/only_midline.csv', comment = '#');
+FROM read_csv('{DATA_DIR}/csv/comments/only_midline.csv', comment = '#');
 ----
 1 3
 6 7
 query II
-FROM 'data/csv/comments/mid_line.csv';
+FROM '{DATA_DIR}/csv/comments/mid_line.csv';
 ----
 1 3
 6 7
 query II
-FROM 'data/csv/comments/mid_line_header.csv';
+FROM '{DATA_DIR}/csv/comments/mid_line_header.csv';
 ----
 1 3
 6 7
 query II
-FROM 'data/csv/comments/mid_line_quote.csv';
+FROM '{DATA_DIR}/csv/comments/mid_line_quote.csv';
 ----
 1 3
 6 7#test
 query II
-FROM 'data/csv/comments/mid_line_null.csv';
+FROM '{DATA_DIR}/csv/comments/mid_line_null.csv';
 ----
 1 3
 6 NULL
 query II
-FROM 'data/csv/comments/simple_mid_line.csv';
+FROM '{DATA_DIR}/csv/comments/simple_mid_line.csv';
 ----
 1 3
 6 7
@@ -96,13 +96,13 @@ FROM 'data/csv/comments/simple_mid_line.csv';
 6 7
 query II
-FROM 'data/csv/comments/midline_empty_space.csv';
+FROM '{DATA_DIR}/csv/comments/midline_empty_space.csv';
 ----
 1 3
 6 7
 query II
-FROM read_csv('data/csv/comments/mid_line_invalid.csv', ignore_errors = true, delim = ';', comment = '#', auto_detect = false, columns= {'a':'integer', 'b':'integer'}, strict_mode=True);
+FROM read_csv('{DATA_DIR}/csv/comments/mid_line_invalid.csv', ignore_errors = true, delim = ';', comment = '#', auto_detect = false, columns= {'a':'integer', 'b':'integer'}, strict_mode=True);
 ----
 1 3
 6 7
@@ -110,7 +110,7 @@ FROM read_csv('data/csv/comments/mid_line_invalid.csv', ignore_errors = true, de
 # Lets try over a vector size
 query II
-FROM 'data/csv/comments/midline_big.csv' limit 5;
+FROM '{DATA_DIR}/csv/comments/midline_big.csv' limit 5;
 ----
 1 3
 6 7
@@ -120,11 +120,11 @@ FROM 'data/csv/comments/midline_big.csv' limit 5;
 # Check commented data-points are not in the data
 query II
-FROM 'data/csv/comments/midline_big.csv' where a = 20
+FROM '{DATA_DIR}/csv/comments/midline_big.csv' where a = 20
 ----
 query I
-SELECT count(*) FROM 'data/csv/comments/midline_big.csv'
+SELECT count(*) FROM '{DATA_DIR}/csv/comments/midline_big.csv'
 ----
 1448
@@ -133,7 +133,7 @@ SELECT count(*) FROM 'data/csv/comments/midline_big.csv'
 loop buffer_size 30 35
 query II
-FROM read_csv('data/csv/comments/simple_mid_line.csv', buffer_size = ${buffer_size}) limit 5;
+FROM read_csv('{DATA_DIR}/csv/comments/simple_mid_line.csv', buffer_size = ${buffer_size}) limit 5;
 ----
 1 3
 6 7
diff --git a/test/sql/copy/csv/test_comment_option.test b/test/sql/copy/csv/test_comment_option.test
index 43b90eb4c12b..d9931017a27a 100644
--- a/test/sql/copy/csv/test_comment_option.test
+++ b/test/sql/copy/csv/test_comment_option.test
@@ -7,30 +7,30 @@ PRAGMA enable_verification
 # Test comment and skip option
 query III
-FROM read_csv('data/csv/comments/17226.csv', comment='#', all_varchar=True, skip=0, ignore_errors=True)
+FROM read_csv('{DATA_DIR}/csv/comments/17226.csv', comment='#', all_varchar=True, skip=0, ignore_errors=True)
 ----
 1 2 3
 4 5 6
 # Comment must be different than quote and delimiter options
 statement error
-FROM read_csv('data/csv/comments/mixed_options.csv', delim = ',', comment = ',', auto_detect = false, columns= {'a':'integer'})
+FROM read_csv('{DATA_DIR}/csv/comments/mixed_options.csv', delim = ',', comment = ',', auto_detect = false, columns= {'a':'integer'})
 ----
 COMMENT must not appear in the DELIMITER specification and vice versa
 statement error
-FROM read_csv('data/csv/comments/mixed_options.csv', quote = ',', comment = ',', escape = '', delim = ';', auto_detect = false, columns= {'a':'integer'})
+FROM read_csv('{DATA_DIR}/csv/comments/mixed_options.csv', quote = ',', comment = ',', escape = '', delim = ';', auto_detect = false, columns= {'a':'integer'})
 ----
 COMMENT must not appear in the QUOTE specification and vice versa
 query II
-FROM 'data/csv/comments/simple.csv';
+FROM '{DATA_DIR}/csv/comments/simple.csv';
 ----
 1 3
 6 7
 query I
-FROM 'data/csv/comments/simple_comma.csv';
+FROM '{DATA_DIR}/csv/comments/simple_comma.csv';
 ----
 , I'm a csv file
 a;b
@@ -40,14 +40,14 @@ a;b
 , You better skip me
 query II
-FROM read_csv('data/csv/comments/simple_comma.csv', comment = ',');
+FROM read_csv('{DATA_DIR}/csv/comments/simple_comma.csv', comment = ',');
 ----
 1 3
 6 7
 # Lets try over a vector size
 query II
-FROM 'data/csv/comments/big.csv' limit 5;
+FROM '{DATA_DIR}/csv/comments/big.csv' limit 5;
 ----
 1 3
 6 7
@@ -57,17 +57,17 @@ FROM 'data/csv/comments/big.csv' limit 5;
 # Check commented data-points are not in the data
 query II
-FROM 'data/csv/comments/big.csv' where a = 20
+FROM '{DATA_DIR}/csv/comments/big.csv' where a = 20
 ----
 query I
-SELECT count(*) FROM 'data/csv/comments/big.csv'
+SELECT count(*) FROM '{DATA_DIR}/csv/comments/big.csv'
 ----
 1448
 # Test empty spaces
 query II
-FROM 'data/csv/comments/empty_space.csv';
+FROM '{DATA_DIR}/csv/comments/empty_space.csv';
 ----
 1 3
 1 3
@@ -80,7 +80,7 @@ FROM 'data/csv/comments/empty_space.csv';
 loop buffer_size 30 35
 query II
-FROM read_csv('data/csv/comments/simple.csv', buffer_size = ${buffer_size}) limit 5;
+FROM read_csv('{DATA_DIR}/csv/comments/simple.csv', buffer_size = ${buffer_size}) limit 5;
 ----
 1 3
 6 7
@@ -89,45 +89,45 @@ endloop
 # Test that fully commented lines are ignored by the parameter header but not by skiprows.
 query II
-FROM read_csv('data/csv/comments/simple.csv', skip = 2);
+FROM read_csv('{DATA_DIR}/csv/comments/simple.csv', skip = 2);
 ----
 6 7
 # Test we can detect comments and skip rows at the same time
 query II
-FROM 'data/csv/comments/invalid_rows.csv';
+FROM '{DATA_DIR}/csv/comments/invalid_rows.csv';
 ----
 1 3
 6 7
 query II
-select SkipRows, Comment FROM sniff_csv('data/csv/comments/invalid_rows.csv');
+select SkipRows, Comment FROM sniff_csv('{DATA_DIR}/csv/comments/invalid_rows.csv');
 ----
 2 #
 # Test ignore errors
 statement error
-select count(*) FROM 'data/csv/comments/error.csv';
+select count(*) FROM '{DATA_DIR}/csv/comments/error.csv';
 ----
 Expected Number of Columns: 2 Found: 1
 query I
-select count(*) FROM read_csv('data/csv/comments/error.csv', ignore_errors = true);
+select count(*) FROM read_csv('{DATA_DIR}/csv/comments/error.csv', ignore_errors = true);
 ----
 2726
 query I
-select count(*) FROM read_csv('data/csv/comments/error.csv', ignore_errors = true, comment = '#');
+select count(*) FROM read_csv('{DATA_DIR}/csv/comments/error.csv', ignore_errors = true, comment = '#');
 ----
 2726
 query II
-select comment, columns from sniff_csv('data/csv/comments/error.csv', ignore_errors = true);
+select comment, columns from sniff_csv('{DATA_DIR}/csv/comments/error.csv', ignore_errors = true);
 ----
 # [{'name': a, 'type': BIGINT}, {'name': b, 'type': BIGINT}]
 query II
-select comment, columns from sniff_csv('data/csv/comments/error.csv', ignore_errors = true);
+select comment, columns from sniff_csv('{DATA_DIR}/csv/comments/error.csv', ignore_errors = true);
 ----
 # [{'name': a, 'type': BIGINT}, {'name': b, 'type': BIGINT}]
@@ -135,7 +135,7 @@ select comment, columns from sniff_csv('data/csv/comments/error.csv', ignore_err
 loop i 0 2
 query II
-FROM read_csv('data/csv/comments/simple.csv',skip=${i});
+FROM read_csv('{DATA_DIR}/csv/comments/simple.csv',skip=${i});
 ----
 1 3
 6 7
diff --git a/test/sql/copy/csv/test_compression_flag.test b/test/sql/copy/csv/test_compression_flag.test
index 1669930561f5..dfd699cddf86 100644
--- a/test/sql/copy/csv/test_compression_flag.test
+++ b/test/sql/copy/csv/test_compression_flag.test
@@ -13,7 +13,7 @@ CREATE TABLE lineitem(a INT NOT NULL,
 statement ok
-COPY lineitem FROM 'data/csv/test/test_comp.csv.gzz' (COMPRESSION 'gzip', AUTO_DETECT 1);
+COPY lineitem FROM '{DATA_DIR}/csv/test/test_comp.csv.gzz' (COMPRESSION 'gzip', AUTO_DETECT 1);
 query I
 SELECT COUNT(*) FROM lineitem
@@ -36,7 +36,7 @@ CREATE TABLE lineitem(a INT NOT NULL,
 statement ok
-COPY lineitem FROM 'data/csv/test/test_comp.csv.gz' (COMPRESSION 'infer', AUTO_DETECT 1);
+COPY lineitem FROM '{DATA_DIR}/csv/test/test_comp.csv.gz' (COMPRESSION 'infer', AUTO_DETECT 1);
 query I
 SELECT COUNT(*) FROM lineitem
@@ -53,12 +53,12 @@ statement ok
 DROP TABLE lineitem
 statement error
-COPY lineitem FROM 'data/csv/test/test_comp.csv.gz' COMPRESSION 'none';
+COPY lineitem FROM '{DATA_DIR}/csv/test/test_comp.csv.gz' COMPRESSION 'none';
 ----
 syntax error at or near "COMPRESSION"
 statement ok
-CREATE TABLE lineitem AS SELECT * FROM read_csv_auto('data/csv/test/test_comp.csv.gzz', compression='gzip');
+CREATE TABLE lineitem AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/test/test_comp.csv.gzz', compression='gzip');
 query I
 SELECT COUNT(*) FROM lineitem
@@ -75,7 +75,7 @@ statement ok
 DROP TABLE lineitem
 statement ok
-CREATE TABLE lineitem AS SELECT * FROM read_csv('data/csv/test/test_comp.csv.gzz', compression='gzip', AUTO_DETECT=TRUE);
+CREATE TABLE lineitem AS SELECT * FROM read_csv('{DATA_DIR}/csv/test/test_comp.csv.gzz', compression='gzip', AUTO_DETECT=TRUE);
 query I
 SELECT COUNT(*) FROM lineitem
diff --git a/test/sql/copy/csv/test_copy.test b/test/sql/copy/csv/test_copy.test
index b3d6f8036967..abfd0401eb3a 100644
--- a/test/sql/copy/csv/test_copy.test
+++ b/test/sql/copy/csv/test_copy.test
@@ -12,7 +12,7 @@ statement ok
 CREATE TABLE test (a INTEGER, b INTEGER, c VARCHAR(10));
 query I
-COPY test FROM 'data/csv/test/test.csv';
+COPY test FROM '{DATA_DIR}/csv/test/test.csv';
 ----
 5000
@@ -30,7 +30,7 @@ SELECT * FROM test ORDER BY 1 LIMIT 3;
 # create CSV file from table
 query I
-COPY test TO '__TEST_DIR__/test2.csv';
+COPY test TO '{TEMP_DIR}/test2.csv';
 ----
 5000
@@ -39,7 +39,7 @@ statement ok
 CREATE TABLE test2 (a INTEGER, b INTEGER, c VARCHAR(10));
 query I
-COPY test2 FROM '__TEST_DIR__/test2.csv' ;
+COPY test2 FROM '{TEMP_DIR}/test2.csv' ;
 ----
 5000
@@ -55,13 +55,13 @@ statement ok
 CREATE TABLE test_too_few_rows (a INTEGER, b INTEGER, c VARCHAR, d INTEGER);
 statement error
-COPY test_too_few_rows FROM '__TEST_DIR__/test2.csv' (NULL_PADDING 0);
+COPY test_too_few_rows FROM '{TEMP_DIR}/test2.csv' (NULL_PADDING 0);
 ----
 It was not possible to automatically detect the CSV parsing dialect
 # create CSV file from query
 query I
-COPY (SELECT a,b FROM test WHERE a < 4000) TO '__TEST_DIR__/test3.csv';
+COPY (SELECT a,b FROM test WHERE a < 4000) TO '{TEMP_DIR}/test3.csv';
 ----
 4000
@@ -70,7 +70,7 @@ statement ok
 CREATE TABLE test3 (a INTEGER, b INTEGER);
 query I
-COPY test3 FROM '__TEST_DIR__/test3.csv';
+COPY test3 FROM '{TEMP_DIR}/test3.csv';
 ----
 4000
@@ -83,7 +83,7 @@ SELECT * FROM test3 ORDER BY 1 LIMIT 3;
 # export selected columns from a table to a CSV file
 query I
-COPY test (a,c) TO '__TEST_DIR__/test4.csv' (DELIMITER ',', HEADER false);
+COPY test (a,c) TO '{TEMP_DIR}/test4.csv' (DELIMITER ',', HEADER false);
 ----
 5000
@@ -92,7 +92,7 @@ statement ok
 CREATE TABLE test4 (a INTEGER, b INTEGER, c VARCHAR(10));
 query I
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (DELIM ',');
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (DELIM ',');
 ----
 5000
@@ -105,144 +105,144 @@ SELECT * FROM test4 ORDER BY 1 LIMIT 3;
 # unsupported type for HEADER
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (SEP ',', HEADER 0.2);
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (SEP ',', HEADER 0.2);
 ----
 "HEADER" expected an argument of type BOOLEAN
 # empty delimiter
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (SEP);
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (SEP);
 ----
 "SEP" requires an argument of type VARCHAR
 # number as delimiter
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (SEP 1);
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (SEP 1);
 ----
 "SEP" expected an argument of type VARCHAR
 # multiple format options
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (FORMAT 'csv', FORMAT 'some_other_copy_function');
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (FORMAT 'csv', FORMAT 'some_other_copy_function');
 ----
 duplicate option
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (FORMAT 'some_other_copy_function', FORMAT 'csv');
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (FORMAT 'some_other_copy_function', FORMAT 'csv');
 ----
 duplicate option
 # number as escape string
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (ESCAPE 1);
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (ESCAPE 1);
 ----
 "ESCAPE" expected an argument of type VARCHAR
 # no escape string
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (ESCAPE);
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (ESCAPE);
 ----
 "ESCAPE" requires an argument of type VARCHAR
 # number as quote string
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (QUOTE 1);
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (QUOTE 1);
 ----
 "QUOTE" expected an argument of type VARCHAR
 # no quote string
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (QUOTE);
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (QUOTE);
 ----
 "QUOTE" requires an argument of type VARCHAR
 # no format string
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (FORMAT);
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (FORMAT);
 ----
 Unsupported parameter type for FORMAT: expected e.g. FORMAT 'csv', 'parquet'
 # encoding must not be empty and must have the correct parameter type and value
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (ENCODING);
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (ENCODING);
 ----
 "ENCODING" requires an argument of type VARCHAR
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (ENCODING 42);
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (ENCODING 42);
 ----
 "ENCODING" expected an argument of type VARCHAR
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (ENCODING 'utf-42');
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (ENCODING 'utf-42');
 ----
 The CSV Reader does not support the encoding: "utf-42"
 # don't allow for non-existant copy options
 statement error
-COPY test4 (a,c) FROM '__TEST_DIR__/test4.csv' (MAGIC '42');
+COPY test4 (a,c) FROM '{TEMP_DIR}/test4.csv' (MAGIC '42');
 ----
 Unrecognized option "MAGIC"
 # Try new_line option
 query I
-COPY test TO '__TEST_DIR__/test_crlf.csv' (new_line '\r\n');
+COPY test TO '{TEMP_DIR}/test_crlf.csv' (new_line '\r\n');
 ----
 5000
 query I
-select count(*) from '__TEST_DIR__/test_crlf.csv'
+select count(*) from '{TEMP_DIR}/test_crlf.csv'
 ----
 5000
 # Try CR LF lines
 query I
-COPY test TO '__TEST_DIR__/test_r.csv' (new_line '\r');
+COPY test TO '{TEMP_DIR}/test_r.csv' (new_line '\r');
 ----
 5000
 query I
-select count(*) from '__TEST_DIR__/test_r.csv'
+select count(*) from '{TEMP_DIR}/test_r.csv'
 ----
 5000
 query I
-COPY test TO '__TEST_DIR__/test_n.csv' (new_line '\n');
+COPY test TO '{TEMP_DIR}/test_n.csv' (new_line '\n');
 ----
 5000
 query I
-select count(*) from '__TEST_DIR__/test_n.csv'
+select count(*) from '{TEMP_DIR}/test_n.csv'
 ----
 5000
 query I
-COPY test TO '__TEST_DIR__/test_crlfe.csv' (new_line e'\r\n');
+COPY test TO '{TEMP_DIR}/test_crlfe.csv' (new_line e'\r\n');
 ----
 5000
 query I
-select count(*) from '__TEST_DIR__/test_crlfe.csv'
+select count(*) from '{TEMP_DIR}/test_crlfe.csv'
 ----
 5000
 query I
-COPY test TO '__TEST_DIR__/test_re.csv' (new_line e'\r');
+COPY test TO '{TEMP_DIR}/test_re.csv' (new_line e'\r');
 ----
 5000
 query I
-select count(*) from '__TEST_DIR__/test_re.csv'
+select count(*) from '{TEMP_DIR}/test_re.csv'
 ----
 5000
 query I
-COPY test TO '__TEST_DIR__/test_en.csv' (new_line e'\n');
+COPY test TO '{TEMP_DIR}/test_en.csv' (new_line e'\n');
 ----
 5000
 query I
-select count(*) from '__TEST_DIR__/test_en.csv'
+select count(*) from '{TEMP_DIR}/test_en.csv'
 ----
 5000
@@ -255,25 +255,25 @@ statement ok
 CREATE TABLE test (a INTEGER, b INTEGER, c VARCHAR(10));
 query I
-COPY test FROM 'data/csv/test/test_pipe.csv' (SEPARATOR '|');
+COPY test FROM '{DATA_DIR}/csv/test/test_pipe.csv' (SEPARATOR '|');
 ----
 10
 # throw exception if a line contains too many values
 statement error
-COPY test FROM 'data/csv/test/too_many_values.csv';
+COPY test FROM '{DATA_DIR}/csv/test/too_many_values.csv';
 ----
 It was not possible to automatically detect the CSV parsing dialect
 # test default null string
 query I
-COPY test FROM 'data/csv/test/test_null_csv.csv' DELIMITER '|';
+COPY test FROM '{DATA_DIR}/csv/test/test_null_csv.csv' DELIMITER '|';
 ----
 1
 # test invalid UTF-8
 statement error
-COPY test FROM 'data/csv/test/invalid_utf.csv' DELIMITER '|';
+COPY test FROM '{DATA_DIR}/csv/test/invalid_utf.csv' DELIMITER '|';
 ----
 Invalid unicode (byte sequence mismatch) detected.
@@ -282,7 +282,7 @@ statement ok
 CREATE TABLE empty_table (a INTEGER, b INTEGER, c VARCHAR(10));
 statement error
-COPY empty_table FROM 'data/csv/test/empty.csv' (HEADER 0);
+COPY empty_table FROM '{DATA_DIR}/csv/test/empty.csv' (HEADER 0);
 ----
 It was not possible to automatically detect the CSV parsing dialect
@@ -292,7 +292,7 @@ statement ok
 CREATE TABLE unterminated (a VARCHAR);
 statement error
-COPY unterminated FROM 'data/csv/test/unterminated.csv' (HEADER 0, AUTO_DETECT FALSE, strict_mode TRUE);
+COPY unterminated FROM '{DATA_DIR}/csv/test/unterminated.csv' (HEADER 0, AUTO_DETECT FALSE, strict_mode TRUE);
 ----
 Value with unterminated quote found.
@@ -302,7 +302,7 @@ statement ok
 CREATE TABLE vsize (a INTEGER, b INTEGER, c VARCHAR(10));
 query I
-COPY vsize FROM 'data/csv/test/vsize.csv';
+COPY vsize FROM '{DATA_DIR}/csv/test/vsize.csv';
 ----
 1024
diff --git a/test/sql/copy/csv/test_copy_default.test b/test/sql/copy/csv/test_copy_default.test
index 7067274eb00c..c3b778498ff3 100644
--- a/test/sql/copy/csv/test_copy_default.test
+++ b/test/sql/copy/csv/test_copy_default.test
@@ -11,12 +11,12 @@ statement ok
 CREATE TABLE test (a INTEGER, b VARCHAR DEFAULT('hello'), c INTEGER DEFAULT(3+4));
 query I
-COPY test (a) FROM 'data/csv/test/test_default.csv';
+COPY test (a) FROM '{DATA_DIR}/csv/test/test_default.csv';
 ----
 5000
 query I
-COPY test (c) FROM 'data/csv/test/test_default.csv';
+COPY test (c) FROM '{DATA_DIR}/csv/test/test_default.csv';
 ----
 5000
diff --git a/test/sql/copy/csv/test_copy_gzip.test b/test/sql/copy/csv/test_copy_gzip.test
index d5390d792e75..4494157cc326 100644
--- a/test/sql/copy/csv/test_copy_gzip.test
+++ b/test/sql/copy/csv/test_copy_gzip.test
@@ -25,7 +25,7 @@ CREATE TABLE lineitem(l_orderkey INT NOT NULL,
 l_comment VARCHAR(44) NOT NULL);
 statement ok
-COPY lineitem FROM 'data/csv/lineitem1k.tbl.gz' DELIMITER '|';
+COPY lineitem FROM '{DATA_DIR}/csv/lineitem1k.tbl.gz' DELIMITER '|';
 query I
 SELECT COUNT(*) FROM lineitem
@@ -44,10 +44,10 @@ SELECT l_partkey FROM lineitem WHERE l_orderkey=1 ORDER BY l_linenumber
 # round trip
 statement ok
-COPY lineitem TO '__TEST_DIR__/lineitem1k.csv.gz' (DELIMITER '|', HEADER);
+COPY lineitem TO '{TEMP_DIR}/lineitem1k.csv.gz' (DELIMITER '|', HEADER);
 statement ok
-CREATE TABLE lineitem_rt AS FROM '__TEST_DIR__/lineitem1k.csv.gz';
+CREATE TABLE lineitem_rt AS FROM '{TEMP_DIR}/lineitem1k.csv.gz';
 query I
 SELECT COUNT(*) FROM (FROM lineitem EXCEPT FROM lineitem_rt)
diff --git a/test/sql/copy/csv/test_copy_many_empty_lines.test b/test/sql/copy/csv/test_copy_many_empty_lines.test
index 4ad537a1590e..24ddbde39c1d 100644
--- a/test/sql/copy/csv/test_copy_many_empty_lines.test
+++ b/test/sql/copy/csv/test_copy_many_empty_lines.test
@@ -12,7 +12,7 @@ statement ok
 CREATE TABLE test (a INTEGER);
 query I
-COPY test FROM 'data/csv/test/many_empty_lines.csv' (HEADER 0);
+COPY test FROM '{DATA_DIR}/csv/test/many_empty_lines.csv' (HEADER 0);
 ----
 20000
diff --git a/test/sql/copy/csv/test_copy_null.test b/test/sql/copy/csv/test_copy_null.test
index 41dafc5abc76..c33ec7a5225b 100644
--- a/test/sql/copy/csv/test_copy_null.test
+++ b/test/sql/copy/csv/test_copy_null.test
@@ -14,7 +14,7 @@ CREATE TABLE test_null_option (col_a INTEGER, col_b VARCHAR(10), col_c VARCHAR(1
 # test COPY ... FROM ...
 # implicitly using default NULL value
 query I
-COPY test_null_option FROM 'data/csv/test/test_null_option.csv' (NULL 'NULL');
+COPY test_null_option FROM '{DATA_DIR}/csv/test/test_null_option.csv' (NULL 'NULL');
 ----
 3
@@ -30,7 +30,7 @@ DELETE FROM test_null_option;
 # explicitly using default NULL value
 query I
-COPY test_null_option FROM 'data/csv/test/test_null_option.csv' (NULL '');
+COPY test_null_option FROM '{DATA_DIR}/csv/test/test_null_option.csv' (NULL '');
 ----
 3
@@ -46,7 +46,7 @@ DELETE FROM test_null_option;
 # make sure a quoted null string is not interpreted as a null value
 query I
-COPY test_null_option FROM 'data/csv/test/test_null_option.csv' (NULL 'test');
+COPY test_null_option FROM '{DATA_DIR}/csv/test/test_null_option.csv' (NULL 'test');
 ----
 3
@@ -62,7 +62,7 @@ DELETE FROM test_null_option;
 # setting specific NULL value
 query I
-COPY test_null_option FROM 'data/csv/test/test_null_option.csv' (NULL 'null');
+COPY test_null_option FROM '{DATA_DIR}/csv/test/test_null_option.csv' (NULL 'null');
 ----
 3
@@ -75,29 +75,29 @@ SELECT * FROM test_null_option ORDER BY 1 LIMIT 3;
 # invalid parameter type
 statement error
-COPY test_null_option FROM 'data/csv/test/test_null_option.csv' (NULL null);
+COPY test_null_option FROM '{DATA_DIR}/csv/test/test_null_option.csv' (NULL null);
 ----
 NULL is not supported
 # delimiter must not appear in the NULL specification
 statement error
-COPY test_null_option FROM 'data/csv/test/test_null_option.csv' (NULL 'null,', DELIMITER ',');
+COPY test_null_option FROM '{DATA_DIR}/csv/test/test_null_option.csv' (NULL 'null,', DELIMITER ',');
 ----
 DELIMITER must not appear in the NULL specification and vice versa
 statement error
-COPY test_null_option FROM 'data/csv/test/test_null_option.csv' (DELIMITER 'null', NULL 'null');
+COPY test_null_option FROM '{DATA_DIR}/csv/test/test_null_option.csv' (DELIMITER 'null', NULL 'null');
 ----
 It was not possible to automatically detect the CSV parsing dialect
 statement error
-COPY test_null_option FROM 'data/csv/test/test_null_option.csv' (DELIMITER 'null', NULL 'nu');
+COPY test_null_option FROM '{DATA_DIR}/csv/test/test_null_option.csv' (DELIMITER 'null', NULL 'nu');
 ----
 It was not possible to automatically detect the CSV parsing dialect
 # no parameter type
 statement error
-COPY test_null_option FROM 'data/csv/test/test_null_option.csv' (NULL);
+COPY test_null_option FROM '{DATA_DIR}/csv/test/test_null_option.csv' (NULL);
 ----
 CSV Reader function option null requires a non-empty list of possible null strings (varchar) as input
@@ -106,14 +106,14 @@ statement ok
 CREATE TABLE test_null_option_2 (col_a INTEGER, col_b INTEGER, col_c VARCHAR(10), col_d VARCHAR(10));
 statement error
-COPY test_null_option_2 FROM 'data/csv/test/test_null_option.csv' (NULL 'null');
+COPY test_null_option_2 FROM '{DATA_DIR}/csv/test/test_null_option.csv' (NULL 'null');
 ----
 It was not possible to automatically detect the CSV parsing dialect
 # test COPY ... TO ...
# implicitly using default NULL value query I -COPY test_null_option TO '__TEST_DIR__/test_null_option_2.csv'; +COPY test_null_option TO '{TEMP_DIR}/test_null_option_2.csv'; ---- 3 @@ -121,7 +121,7 @@ statement ok DELETE FROM test_null_option; query I -COPY test_null_option FROM '__TEST_DIR__/test_null_option_2.csv'; +COPY test_null_option FROM '{TEMP_DIR}/test_null_option_2.csv'; ---- 3 @@ -134,7 +134,7 @@ SELECT * FROM test_null_option ORDER BY 1 LIMIT 3; # explicitly using default NULL value query I -COPY test_null_option TO '__TEST_DIR__/test_null_option_3.csv' (NULL ''); +COPY test_null_option TO '{TEMP_DIR}/test_null_option_3.csv' (NULL ''); ---- 3 @@ -142,7 +142,7 @@ statement ok DELETE FROM test_null_option; query I -COPY test_null_option FROM '__TEST_DIR__/test_null_option_3.csv' (NULL ''); +COPY test_null_option FROM '{TEMP_DIR}/test_null_option_3.csv' (NULL ''); ---- 3 @@ -155,7 +155,7 @@ SELECT * FROM test_null_option ORDER BY 1 LIMIT 3; # setting specific NULL value query I -COPY test_null_option TO '__TEST_DIR__/test_null_option_4.csv' (NULL 'null'); +COPY test_null_option TO '{TEMP_DIR}/test_null_option_4.csv' (NULL 'null'); ---- 3 @@ -163,7 +163,7 @@ statement ok DELETE FROM test_null_option; query I -COPY test_null_option FROM '__TEST_DIR__/test_null_option_4.csv' (NULL 'null'); +COPY test_null_option FROM '{TEMP_DIR}/test_null_option_4.csv' (NULL 'null'); ---- 3 diff --git a/test/sql/copy/csv/test_cranlogs.test_slow b/test/sql/copy/csv/test_cranlogs.test_slow index 2f416f9aa320..48268e10f572 100644 --- a/test/sql/copy/csv/test_cranlogs.test_slow +++ b/test/sql/copy/csv/test_cranlogs.test_slow @@ -9,7 +9,7 @@ statement ok CREATE TABLE cranlogs (date date,time string,size int,r_version string,r_arch string,r_os string,package string,version string,country string,ip_id int) query I -COPY cranlogs FROM 'data/csv/real/tmp2013-06-15.csv.gz'; +COPY cranlogs FROM '{DATA_DIR}/csv/real/tmp2013-06-15.csv.gz'; ---- 37459 diff --git a/test/sql/copy/csv/test_csv_column_count_mismatch.test_slow b/test/sql/copy/csv/test_csv_column_count_mismatch.test_slow index 6e5aed350653..7d13cf216b9d 100644 --- a/test/sql/copy/csv/test_csv_column_count_mismatch.test_slow +++ b/test/sql/copy/csv/test_csv_column_count_mismatch.test_slow @@ -7,14 +7,14 @@ pragma enable_verification; # We can read with auto just fine statement ok -select * from read_csv_auto('data/csv/people.csv'); +select * from read_csv_auto('{DATA_DIR}/csv/people.csv'); # Specifying columns, but not specifying the right amount throws an error statement error -select * from read_csv_auto('data/csv/people.csv', columns={'a': 'VARCHAR'}, auto_detect = false, strict_mode=True) +select * from read_csv_auto('{DATA_DIR}/csv/people.csv', columns={'a': 'VARCHAR'}, auto_detect = false, strict_mode=True) ---- Expected Number of Columns: 1 Found: 2 # When we do specify the right amount of columns, everything works statement ok -select * from read_csv_auto('data/csv/people.csv', columns={'a': 'VARCHAR', 'b': 'VARCHAR'}) +select * from read_csv_auto('{DATA_DIR}/csv/people.csv', columns={'a': 'VARCHAR', 'b': 'VARCHAR'}) diff --git a/test/sql/copy/csv/test_csv_error_message_type.test b/test/sql/copy/csv/test_csv_error_message_type.test index 08836e82a904..5c8d4b50bd4b 100644 --- a/test/sql/copy/csv/test_csv_error_message_type.test +++ b/test/sql/copy/csv/test_csv_error_message_type.test @@ -14,27 +14,27 @@ CREATE TABLE venue ( ); statement error -copy venue from 'data/csv/venue_pipe.csv' +copy venue from '{DATA_DIR}/csv/venue_pipe.csv' ---- This type 
was either manually set or derived from an existing table. Select a different type to correctly parse this column. statement error -SELECT * FROM read_csv('data/csv/venue_pipe.csv', types=['SMALLINT','VARCHAR','VARCHAR','VARCHAR','INTEGER']); +SELECT * FROM read_csv('{DATA_DIR}/csv/venue_pipe.csv', types=['SMALLINT','VARCHAR','VARCHAR','VARCHAR','INTEGER']); ---- This type was either manually set or derived from an existing table. Select a different type to correctly parse this column. statement error -SELECT * FROM read_csv('data/csv/venue_pipe.csv', types=['SMALLINT','VARCHAR','VARCHAR','VARCHAR','INTEGER']); +SELECT * FROM read_csv('{DATA_DIR}/csv/venue_pipe.csv', types=['SMALLINT','VARCHAR','VARCHAR','VARCHAR','INTEGER']); ---- This type was either manually set or derived from an existing table. Select a different type to correctly parse this column. statement error -SELECT * FROM read_csv('data/csv/venue_pipe.csv', types={'venueseats':'INTEGER'}); +SELECT * FROM read_csv('{DATA_DIR}/csv/venue_pipe.csv', types={'venueseats':'INTEGER'}); ---- This type was either manually set or derived from an existing table. Select a different type to correctly parse this column. statement error -SELECT * FROM read_csv('data/csv/venue_pipe_big.csv', sample_size = 1); +SELECT * FROM read_csv('{DATA_DIR}/csv/venue_pipe_big.csv', sample_size = 1); ---- This type was auto-detected from the CSV file. @@ -48,7 +48,7 @@ CREATE TABLE venue_2 ( ); statement ok -INSERT INTO venue_2 SELECT * FROM read_csv('data/csv/venue_pipe_big.csv', sample_size = 1); +INSERT INTO venue_2 SELECT * FROM read_csv('{DATA_DIR}/csv/venue_pipe_big.csv', sample_size = 1); query I SELECT COUNT(*) from venue_2 @@ -72,23 +72,23 @@ CREATE TABLE venue_2 ( # Check our possible solutions: #* Override the type for this column manually by setting the type explicitly, e.g., types={'venueseats': 'VARCHAR'} statement ok -INSERT INTO venue_2 SELECT * FROM read_csv('data/csv/venue_pipe_big.csv', sample_size = 1, types={'venueseats': 'VARCHAR'}); +INSERT INTO venue_2 SELECT * FROM read_csv('{DATA_DIR}/csv/venue_pipe_big.csv', sample_size = 1, types={'venueseats': 'VARCHAR'}); #* Set the sample size to a larger value to enable the auto-detection to scan more values, e.g., sample_size=-1 statement ok -INSERT INTO venue_2 SELECT * FROM read_csv('data/csv/venue_pipe_big.csv', sample_size = -1); +INSERT INTO venue_2 SELECT * FROM read_csv('{DATA_DIR}/csv/venue_pipe_big.csv', sample_size = -1); #* Use a COPY statement to automatically derive types from an existing table. statement ok -copy venue_2 from 'data/csv/venue_pipe_big.csv' +copy venue_2 from '{DATA_DIR}/csv/venue_pipe_big.csv' statement error -SELECT * FROM read_csv('data/csv/venue_pipe_big.csv', sample_size = 1); +SELECT * FROM read_csv('{DATA_DIR}/csv/venue_pipe_big.csv', sample_size = 1); ---- This type was auto-detected from the CSV file. 
# This cast happens from the CSV to the table, so there is not much we can easily do statement error -INSERT INTO venue SELECT * FROM read_csv('data/csv/venue_pipe.csv'); +INSERT INTO venue SELECT * FROM read_csv('{DATA_DIR}/csv/venue_pipe.csv'); ---- Could not convert string "\N" to 'INTEGER' \ No newline at end of file diff --git a/test/sql/copy/csv/test_csv_json.test b/test/sql/copy/csv/test_csv_json.test index 5321490408ea..3de1df089515 100644 --- a/test/sql/copy/csv/test_csv_json.test +++ b/test/sql/copy/csv/test_csv_json.test @@ -8,12 +8,12 @@ PRAGMA enable_verification require json statement error -FROM read_csv('data/csv/error/json.csv', columns={'a':'JSON'}) +FROM read_csv('{DATA_DIR}/csv/error/json.csv', columns={'a':'JSON'}) ---- Error when converting column "a". Could not convert string "[not a json]" to 'JSON' query I -FROM read_csv('data/csv/error/json.csv', columns={'a':'JSON'}, ignore_errors = true) +FROM read_csv('{DATA_DIR}/csv/error/json.csv', columns={'a':'JSON'}, ignore_errors = true) ---- {"col_a":0,"col_b":0} {"col_a":1,"col_b":2} @@ -23,7 +23,7 @@ statement ok create table t (a json); statement ok -insert into t FROM read_csv('data/csv/error/json.csv', columns={'a':'JSON'}, ignore_errors = true) +insert into t FROM read_csv('{DATA_DIR}/csv/error/json.csv', columns={'a':'JSON'}, ignore_errors = true) query I FROM t diff --git a/test/sql/copy/csv/test_csv_mixed_casts.test b/test/sql/copy/csv/test_csv_mixed_casts.test index f6d53a68aaab..76bde52fca6d 100644 --- a/test/sql/copy/csv/test_csv_mixed_casts.test +++ b/test/sql/copy/csv/test_csv_mixed_casts.test @@ -8,7 +8,7 @@ PRAGMA enable_verification # Try Date query II FROM read_csv( - 'data/csv/mixed_dates.csv', + '{DATA_DIR}/csv/mixed_dates.csv', auto_detect = false, header = true, columns = { @@ -24,7 +24,7 @@ FROM read_csv( # Try Timestamp query II FROM read_csv( - 'data/csv/mixed_timestamps.csv', + '{DATA_DIR}/csv/mixed_timestamps.csv', auto_detect = false, header = true, columns = { @@ -40,7 +40,7 @@ FROM read_csv( # Try Float query II FROM read_csv( - 'data/csv/mixed_double.csv', + '{DATA_DIR}/csv/mixed_double.csv', auto_detect = false, header = true, columns = { @@ -56,7 +56,7 @@ FROM read_csv( # Try Double query II FROM read_csv( - 'data/csv/mixed_double.csv', + '{DATA_DIR}/csv/mixed_double.csv', auto_detect = false, header = true, columns = { @@ -72,7 +72,7 @@ FROM read_csv( # Try Decimal query II FROM read_csv( - 'data/csv/mixed_double.csv', + '{DATA_DIR}/csv/mixed_double.csv', auto_detect = false, header = true, columns = { @@ -88,7 +88,7 @@ FROM read_csv( query II FROM read_csv( - 'data/csv/mixed_decimal.csv', + '{DATA_DIR}/csv/mixed_decimal.csv', auto_detect = false, header = true, delim = ';', @@ -104,7 +104,7 @@ FROM read_csv( query II FROM read_csv( - 'data/csv/mixed_decimal.csv', + '{DATA_DIR}/csv/mixed_decimal.csv', auto_detect = false, header = true, delim = ';', @@ -123,7 +123,7 @@ FROM read_csv( statement error FROM read_csv( - 'data/csv/mixed_decimal.csv', + '{DATA_DIR}/csv/mixed_decimal.csv', auto_detect = false, header = true, delim = '|', diff --git a/test/sql/copy/csv/test_csv_no_trailing_newline.test b/test/sql/copy/csv/test_csv_no_trailing_newline.test index 1fea79fcd3ba..47d3a86699b6 100644 --- a/test/sql/copy/csv/test_csv_no_trailing_newline.test +++ b/test/sql/copy/csv/test_csv_no_trailing_newline.test @@ -11,12 +11,12 @@ statement ok CREATE TABLE no_newline (a INTEGER, b INTEGER, c VARCHAR(10)); query I -COPY no_newline FROM 'data/csv/test/no_newline.csv'; +COPY no_newline FROM
'{DATA_DIR}/csv/test/no_newline.csv'; ---- 1024 query III -FROM read_csv('data/csv/test/no_newline_unicode.csv', delim= '🦆') limit 5; +FROM read_csv('{DATA_DIR}/csv/test/no_newline_unicode.csv', delim= '🦆') limit 5; ---- 0 0 test 1 1 test diff --git a/test/sql/copy/csv/test_csv_projection_pushdown.test b/test/sql/copy/csv/test_csv_projection_pushdown.test index 803443e5ac8e..d5bdf85f382f 100644 --- a/test/sql/copy/csv/test_csv_projection_pushdown.test +++ b/test/sql/copy/csv/test_csv_projection_pushdown.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query II -select l_returnflag, l_linenumber from read_csv('data/csv/real/lineitem_sample.csv', delim='|', header=False, columns={ +select l_returnflag, l_linenumber from read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', header=False, columns={ 'l_orderkey': 'INT', 'l_partkey': 'INT', 'l_suppkey': 'INT', @@ -37,12 +37,12 @@ R 2 A 3 query I -SELECT COUNT(*) FROM 'data/csv/real/lineitem_sample.csv' +SELECT COUNT(*) FROM '{DATA_DIR}/csv/real/lineitem_sample.csv' ---- 10 statement ok -CREATE VIEW lineitem_csv AS SELECT * FROM read_csv('data/csv/real/lineitem_sample.csv', delim='|', header=False, columns={ +CREATE VIEW lineitem_csv AS SELECT * FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|', header=False, columns={ 'l_orderkey': 'INT', 'l_partkey': 'INT', 'l_suppkey': 'INT', @@ -62,7 +62,7 @@ CREATE VIEW lineitem_csv AS SELECT * FROM read_csv('data/csv/real/lineitem_sampl }) statement ok -CREATE VIEW lineitem_csv_auto AS SELECT * FROM read_csv_auto('data/csv/real/lineitem_sample.csv', header=False) lineitem(l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) +CREATE VIEW lineitem_csv_auto AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/real/lineitem_sample.csv', header=False) lineitem(l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) foreach lineitem lineitem_csv lineitem_csv_auto @@ -117,12 +117,12 @@ endloop # Test Projection over multiple buffers query I -select count(*) from read_csv('data/csv/projection_buffer.csv', quote = '"', escape = '"', buffer_size=35) +select count(*) from read_csv('{DATA_DIR}/csv/projection_buffer.csv', quote = '"', escape = '"', buffer_size=35) ---- 27 query III -select d,b,a from read_csv('data/csv/projection_buffer.csv', quote = '"', escape = '"', buffer_size=35) +select d,b,a from read_csv('{DATA_DIR}/csv/projection_buffer.csv', quote = '"', escape = '"', buffer_size=35) ---- d" b" a" d" b" a" diff --git a/test/sql/copy/csv/test_csv_timestamp_tz.test b/test/sql/copy/csv/test_csv_timestamp_tz.test index 01a53f2b4000..edf87a176cdb 100644 --- a/test/sql/copy/csv/test_csv_timestamp_tz.test +++ b/test/sql/copy/csv/test_csv_timestamp_tz.test @@ -9,7 +9,7 @@ pragma enable_verification statement error copy ( select '2021-05-25 04:55:03.382494 UTC'::timestamp as ts, '2021-05-25 04:55:03.382494 UTC'::timestamptz as tstz -) to '__TEST_DIR__/timestamps.csv' ( timestampformat '%A'); +) to '{TEMP_DIR}/timestamps.csv' ( timestampformat '%A'); ---- No function matches the given name and argument types @@ -22,7 +22,7 @@ SET TimeZone='UTC' # If we set to timestamptz it works query II -FROM read_csv('data/csv/timestamp_timezone.csv', columns = {'time':'timestamptz', 'description':'varchar'}) 
+FROM read_csv('{DATA_DIR}/csv/timestamp_timezone.csv', columns = {'time':'timestamptz', 'description':'varchar'}) ---- 2020-01-01 00:00:00+00 midnight local 2020-01-01 08:00:00+00 midnight in San Francisco @@ -34,7 +34,7 @@ FROM read_csv('data/csv/timestamp_timezone.csv', columns = {'time':'timestamptz' 2020-01-01 03:30:00+00 Canada/Newfoundland query II -FROM read_csv('data/csv/timestamp_timezone.csv', auto_type_candidates = ['BOOLEAN', 'BIGINT', 'DOUBLE', 'TIME', 'DATE', 'TIMESTAMP','TIMESTAMPTZ', 'VARCHAR']) +FROM read_csv('{DATA_DIR}/csv/timestamp_timezone.csv', auto_type_candidates = ['BOOLEAN', 'BIGINT', 'DOUBLE', 'TIME', 'DATE', 'TIMESTAMP','TIMESTAMPTZ', 'VARCHAR']) ---- 2020-01-01 00:00:00+00 midnight local 2020-01-01 08:00:00+00 midnight in San Francisco @@ -47,7 +47,7 @@ FROM read_csv('data/csv/timestamp_timezone.csv', auto_type_candidates = ['BOOLEA query II -FROM read_csv('data/csv/timestamp_timezone.csv', auto_type_candidates = ['BOOLEAN', 'BIGINT', 'DOUBLE', 'TIME', 'DATE', 'TIMESTAMPTZ', 'VARCHAR']) +FROM read_csv('{DATA_DIR}/csv/timestamp_timezone.csv', auto_type_candidates = ['BOOLEAN', 'BIGINT', 'DOUBLE', 'TIME', 'DATE', 'TIMESTAMPTZ', 'VARCHAR']) ---- 2020-01-01 00:00:00+00 midnight local 2020-01-01 08:00:00+00 midnight in San Francisco @@ -59,12 +59,12 @@ FROM read_csv('data/csv/timestamp_timezone.csv', auto_type_candidates = ['BOOLEA 2020-01-01 03:30:00+00 Canada/Newfoundland query I -SELECT columns FROM sniff_csv('data/csv/timestamp_timezone.csv') +SELECT columns FROM sniff_csv('{DATA_DIR}/csv/timestamp_timezone.csv') ---- [{'name': time, 'type': TIMESTAMP WITH TIME ZONE}, {'name': description, 'type': VARCHAR}] query II -FROM 'data/csv/timestamp_timezone.csv' +FROM '{DATA_DIR}/csv/timestamp_timezone.csv' ---- 2020-01-01 00:00:00+00 midnight local 2020-01-01 08:00:00+00 midnight in San Francisco @@ -77,12 +77,12 @@ FROM 'data/csv/timestamp_timezone.csv' query I -SELECT columns FROM sniff_csv('data/csv/timestamp_with_tz.csv') +SELECT columns FROM sniff_csv('{DATA_DIR}/csv/timestamp_with_tz.csv') ---- [{'name': id, 'type': BIGINT}, {'name': timestamps, 'type': TIMESTAMP WITH TIME ZONE}] query II -FROM 'data/csv/timestamp_with_tz.csv' +FROM '{DATA_DIR}/csv/timestamp_with_tz.csv' ---- 1 2021-05-25 04:55:03.382494+00 2 2021-05-25 09:55:03.382494+00 @@ -92,10 +92,10 @@ statement ok create table t as SELECT '1; 2020-01-01 00:00:00+00' as ts from range (10000) statement ok -copy t to '__TEST_DIR__/timetz.csv' +copy t to '{TEMP_DIR}/timetz.csv' query I -SELECT columns FROM sniff_csv('__TEST_DIR__/timetz.csv') +SELECT columns FROM sniff_csv('{TEMP_DIR}/timetz.csv') ---- [{'name': column0, 'type': BIGINT}, {'name': column1, 'type': TIMESTAMP WITH TIME ZONE}] @@ -103,10 +103,10 @@ statement ok insert into t values ('2; thisisastring') statement ok -copy t to '__TEST_DIR__/timetz_2.csv' +copy t to '{TEMP_DIR}/timetz_2.csv' query I -SELECT columns FROM sniff_csv('__TEST_DIR__/timetz_2.csv') +SELECT columns FROM sniff_csv('{TEMP_DIR}/timetz_2.csv') ---- [{'name': column0, 'type': BIGINT}, {'name': column1, 'type': VARCHAR}] diff --git a/test/sql/copy/csv/test_date.test b/test/sql/copy/csv/test_date.test index 9a5ceb8461a5..fa9b30ba6a9c 100644 --- a/test/sql/copy/csv/test_date.test +++ b/test/sql/copy/csv/test_date.test @@ -9,7 +9,7 @@ statement ok CREATE TABLE date_test(d date); query I -COPY date_test FROM 'data/csv/test/date.csv'; +COPY date_test FROM '{DATA_DIR}/csv/test/date.csv'; ---- 1 @@ -19,41 +19,41 @@ SELECT cast(d as string) FROM date_test; 2019-06-05 query TT -Select * 
from read_csv('data/csv/custom_date.csv', header=true, dateformat='%m/%d/%Y, %-I:%-M %p', types = ['BIGINT', 'DATE'] ); +Select * from read_csv('{DATA_DIR}/csv/custom_date.csv', header=true, dateformat='%m/%d/%Y, %-I:%-M %p', types = ['BIGINT', 'DATE'] ); ---- 123 2023-01-02 124 2023-12-02 query TT -Select * from read_csv('data/csv/custom_date.csv', header=true, dateformat='%m/%d/%Y, %-I:%-M %p'); +Select * from read_csv('{DATA_DIR}/csv/custom_date.csv', header=true, dateformat='%m/%d/%Y, %-I:%-M %p'); ---- 123 2023-01-02 124 2023-12-02 query TT -Select * from read_csv('data/csv/custom_date.csv', header=true, timestampformat='%m/%d/%Y, %-I:%-M %p', types = ['BIGINT', 'TIMESTAMP'] ); +Select * from read_csv('{DATA_DIR}/csv/custom_date.csv', header=true, timestampformat='%m/%d/%Y, %-I:%-M %p', types = ['BIGINT', 'TIMESTAMP'] ); ---- 123 2023-01-02 12:33:00 124 2023-12-02 11:57:00 query TT -Select * from read_csv('data/csv/custom_date.csv', header=true, timestampformat='%m/%d/%Y, %-I:%-M %p'); +Select * from read_csv('{DATA_DIR}/csv/custom_date.csv', header=true, timestampformat='%m/%d/%Y, %-I:%-M %p'); ---- 123 2023-01-02 12:33:00 124 2023-12-02 11:57:00 # Check that less than two numbers and @ separator does not become a date query III -select typeof(#1),typeof(#2),typeof(#3) FROM 'data/csv/versions.csv' limit 1 +select typeof(#1),typeof(#2),typeof(#3) FROM '{DATA_DIR}/csv/versions.csv' limit 1 ---- VARCHAR DATE VARCHAR query I -select typeof(#1) FROM read_csv('data/csv/dates_special_format.csv', dateformat = '%b %-d, %Y', columns = {'Date': 'DATE'}) limit 1; +select typeof(#1) FROM read_csv('{DATA_DIR}/csv/dates_special_format.csv', dateformat = '%b %-d, %Y', columns = {'Date': 'DATE'}) limit 1; ---- DATE query I -select typeof(#1) FROM read_csv('data/csv/dates_special_format.csv', dateformat = '%b %-d, %Y') limit 1; +select typeof(#1) FROM read_csv('{DATA_DIR}/csv/dates_special_format.csv', dateformat = '%b %-d, %Y') limit 1; ---- DATE \ No newline at end of file diff --git a/test/sql/copy/csv/test_date_sniffer.test b/test/sql/copy/csv/test_date_sniffer.test index 71925b708062..18b1a5fa5f5a 100644 --- a/test/sql/copy/csv/test_date_sniffer.test +++ b/test/sql/copy/csv/test_date_sniffer.test @@ -6,17 +6,17 @@ statement ok PRAGMA enable_verification query III -FROM 'data/csv/bad_date.csv' +FROM '{DATA_DIR}/csv/bad_date.csv' ---- 2016-10-03 vst_OSBS_2016 41_10_1 query I -SELECT columns FROM sniff_csv('data/csv/bad_date.csv') +SELECT columns FROM sniff_csv('{DATA_DIR}/csv/bad_date.csv') ---- [{'name': date, 'type': DATE}, {'name': eventID, 'type': VARCHAR}, {'name': subplotID, 'type': VARCHAR}] query II -FROM 'data/csv/bad_date_2.csv' +FROM '{DATA_DIR}/csv/bad_date_2.csv' ---- 2023-12-03 2.2.2 2023-04-06 2.2.0 @@ -25,33 +25,33 @@ FROM 'data/csv/bad_date_2.csv' 2022-11-09 2.2.0 query I -SELECT columns[1].type FROM sniff_csv('data/csv/bad_date_2.csv') +SELECT columns[1].type FROM sniff_csv('{DATA_DIR}/csv/bad_date_2.csv') ---- DATE query I -SELECT columns FROM sniff_csv('data/csv/bad_date_2.csv') +SELECT columns FROM sniff_csv('{DATA_DIR}/csv/bad_date_2.csv') ---- [{'name': date, 'type': DATE}, {'name': specification_version, 'type': VARCHAR}] query II -FROM 'data/csv/conflict_timestamp.csv' +FROM '{DATA_DIR}/csv/conflict_timestamp.csv' ---- 2020-01-01 01:02:03 01-01-2020 01:02:03 query II -SELECT columns[1].type, columns[2].type FROM sniff_csv('data/csv/conflict_timestamp.csv') +SELECT columns[1].type, columns[2].type FROM sniff_csv('{DATA_DIR}/csv/conflict_timestamp.csv') ---- TIMESTAMP 
VARCHAR query IIIII -FROM 'data/csv/bad_date_timestamp_mix.csv' +FROM '{DATA_DIR}/csv/bad_date_timestamp_mix.csv' ---- 2016-10-03 vst_OSBS_2016 41_10_1 2020-01-01 01:02:03 01-01-2020 01:02:03 query I -SELECT columns FROM sniff_csv('data/csv/bad_date_timestamp_mix.csv') +SELECT columns FROM sniff_csv('{DATA_DIR}/csv/bad_date_timestamp_mix.csv') ---- [{'name': date, 'type': DATE}, {'name': eventID, 'type': VARCHAR}, {'name': subplotID, 'type': VARCHAR}, {'name': ts, 'type': TIMESTAMP}, {'name': t2_2, 'type': VARCHAR}] diff --git a/test/sql/copy/csv/test_dateformat.test b/test/sql/copy/csv/test_dateformat.test index a14ad53f3155..8876c823a594 100644 --- a/test/sql/copy/csv/test_dateformat.test +++ b/test/sql/copy/csv/test_dateformat.test @@ -10,12 +10,12 @@ CREATE TABLE dates (d DATE); # base date format does not work here statement error -COPY dates FROM 'data/csv/test/dateformat.csv' (AUTO_DETECT 0, HEADER 0) +COPY dates FROM '{DATA_DIR}/csv/test/dateformat.csv' (AUTO_DETECT 0, HEADER 0) ---- Line: 1 statement ok -COPY dates FROM 'data/csv/test/dateformat.csv' (HEADER 0, DATEFORMAT '%d/%m/%Y') +COPY dates FROM '{DATA_DIR}/csv/test/dateformat.csv' (HEADER 0, DATEFORMAT '%d/%m/%Y') query I SELECT * FROM dates @@ -24,7 +24,7 @@ SELECT * FROM dates # if we reverse the date format, we get a different result statement ok -COPY dates FROM 'data/csv/test/dateformat.csv' (HEADER 0, DATEFORMAT '%m/%d/%Y') +COPY dates FROM '{DATA_DIR}/csv/test/dateformat.csv' (HEADER 0, DATEFORMAT '%m/%d/%Y') query I SELECT * FROM dates ORDER BY d @@ -37,10 +37,10 @@ statement ok CREATE TABLE new_dates (d DATE); statement ok -COPY dates TO '__TEST_DIR__/dateformat.csv' (HEADER 0, DATEFORMAT '%d/%m/%Y') +COPY dates TO '{TEMP_DIR}/dateformat.csv' (HEADER 0, DATEFORMAT '%d/%m/%Y') statement ok -COPY new_dates FROM '__TEST_DIR__/dateformat.csv' (HEADER 0, DATEFORMAT '%d/%m/%Y') +COPY new_dates FROM '{TEMP_DIR}/dateformat.csv' (HEADER 0, DATEFORMAT '%d/%m/%Y') query I SELECT * FROM new_dates ORDER BY 1 @@ -54,7 +54,7 @@ CREATE TABLE timestamps(t TIMESTAMP); # timestamp format statement ok -COPY timestamps FROM 'data/csv/test/timestampformat.csv' (HEADER 0, DELIMITER '|', TIMESTAMPFORMAT '%a %d, %B %Y, %I:%M:%S %p') +COPY timestamps FROM '{DATA_DIR}/csv/test/timestampformat.csv' (HEADER 0, DELIMITER '|', TIMESTAMPFORMAT '%a %d, %B %Y, %I:%M:%S %p') query I SELECT * FROM timestamps @@ -66,10 +66,10 @@ statement ok CREATE TABLE new_timestamps (t TIMESTAMP); statement ok -COPY timestamps TO '__TEST_DIR__/timestampformat.csv' (HEADER 0, TIMESTAMPFORMAT '%a %d, %B %Y, %I:%M:%S %p') +COPY timestamps TO '{TEMP_DIR}/timestampformat.csv' (HEADER 0, TIMESTAMPFORMAT '%a %d, %B %Y, %I:%M:%S %p') statement ok -COPY new_timestamps FROM '__TEST_DIR__/timestampformat.csv' (HEADER 0, TIMESTAMPFORMAT '%a %d, %B %Y, %I:%M:%S %p') +COPY new_timestamps FROM '{TEMP_DIR}/timestampformat.csv' (HEADER 0, TIMESTAMPFORMAT '%a %d, %B %Y, %I:%M:%S %p') query I SELECT * FROM new_timestamps ORDER BY 1 @@ -81,10 +81,10 @@ DELETE FROM new_timestamps # test iso format in copy statement ok -COPY timestamps TO '__TEST_DIR__/timestampformat.csv' (HEADER 0, TIMESTAMPFORMAT ISO) +COPY timestamps TO '{TEMP_DIR}/timestampformat.csv' (HEADER 0, TIMESTAMPFORMAT ISO) statement ok -COPY new_timestamps FROM '__TEST_DIR__/timestampformat.csv' (HEADER 0) +COPY new_timestamps FROM '{TEMP_DIR}/timestampformat.csv' (HEADER 0) query I SELECT * FROM new_timestamps ORDER BY 1 @@ -93,20 +93,20 @@ SELECT * FROM new_timestamps ORDER BY 1 # incorrect date/timestamp format results 
in an error statement error -COPY dates FROM 'data/csv/test/dateformat.csv' (HEADER 0, DATEFORMAT '%') +COPY dates FROM '{DATA_DIR}/csv/test/dateformat.csv' (HEADER 0, DATEFORMAT '%') ---- Could not parse DATEFORMAT: Trailing format character % statement error -COPY timestamps FROM 'data/csv/test/timestampformat.csv' (HEADER 0, DELIMITER '|', TIMESTAMPFORMAT '%') +COPY timestamps FROM '{DATA_DIR}/csv/test/timestampformat.csv' (HEADER 0, DELIMITER '|', TIMESTAMPFORMAT '%') ---- query I -select columns FROM sniff_csv('data/csv/dateformat/working.csv', header=true,dateformat='%d-%b-%Y'); +select columns FROM sniff_csv('{DATA_DIR}/csv/dateformat/working.csv', header=true,dateformat='%d-%b-%Y'); ---- [{'name': ACCESSION_NUMBER, 'type': VARCHAR}, {'name': FILING_DATE, 'type': DATE}] query I -select columns FROM sniff_csv('data/csv/dateformat/not_working.csv', header=true,dateformat='%d-%b-%Y'); +select columns FROM sniff_csv('{DATA_DIR}/csv/dateformat/not_working.csv', header=true,dateformat='%d-%b-%Y'); ---- [{'name': ACCESSION_NUMBER, 'type': VARCHAR}, {'name': FILING_DATE, 'type': DATE}] \ No newline at end of file diff --git a/test/sql/copy/csv/test_decimal.test b/test/sql/copy/csv/test_decimal.test index 5e08c0e1f495..60f967ccc27f 100644 --- a/test/sql/copy/csv/test_decimal.test +++ b/test/sql/copy/csv/test_decimal.test @@ -7,26 +7,26 @@ PRAGMA enable_verification query I -FROM 'data/csv/decimal.csv' +FROM '{DATA_DIR}/csv/decimal.csv' ---- 3.521525712040701 query I -select typeof (col_a) FROM 'data/csv/decimal.csv' +select typeof (col_a) FROM '{DATA_DIR}/csv/decimal.csv' ---- DOUBLE # By default, our DECIMAL is (18,3) query I SELECT * FROM read_csv( - 'data/csv/decimal.csv', + '{DATA_DIR}/csv/decimal.csv', columns = {'col_a': 'DECIMAL'}); ---- 3.522 query I SELECT typeof(col_a) FROM read_csv( - 'data/csv/decimal.csv', + '{DATA_DIR}/csv/decimal.csv', columns = {'col_a': 'DECIMAL'}); ---- DECIMAL(18,3) @@ -34,21 +34,21 @@ DECIMAL(18,3) # We can define our decimal as (18,15) query I SELECT * FROM read_csv( - 'data/csv/decimal.csv', + '{DATA_DIR}/csv/decimal.csv', columns = {'col_a': 'DECIMAL(18,15)'}); ---- 3.521525712040701 query I SELECT typeof(col_a) FROM read_csv( - 'data/csv/decimal.csv', + '{DATA_DIR}/csv/decimal.csv', columns = {'col_a': 'DECIMAL(18,15)'}); ---- DECIMAL(18,15) query I SELECT typeof(col_a) FROM read_csv( - 'data/csv/decimal.csv', + '{DATA_DIR}/csv/decimal.csv', auto_type_candidates=['NULL', 'DECIMAL', 'VARCHAR']); ---- DECIMAL(18,3) @@ -56,21 +56,21 @@ DECIMAL(18,3) # If multiple DECIMAL types are defined, it defaults to the last one listed here query I SELECT typeof(col_a) FROM read_csv( - 'data/csv/decimal.csv', + '{DATA_DIR}/csv/decimal.csv', auto_type_candidates=['NULL', 'DECIMAL(18,3)','DECIMAL(18,15)', 'VARCHAR']); ---- DECIMAL(18,15) query I SELECT typeof(col_a) FROM read_csv( - 'data/csv/decimal.csv', + '{DATA_DIR}/csv/decimal.csv', auto_type_candidates=['NULL','DECIMAL(18,15)', 'DECIMAL(18,3)', 'VARCHAR']); ---- DECIMAL(18,3) query I SELECT typeof(col_a) FROM read_csv( - 'data/csv/decimal.csv', + '{DATA_DIR}/csv/decimal.csv', auto_type_candidates=['NULL', 'DECIMAL(18,15)', 'VARCHAR']); ---- DECIMAL(18,15) diff --git a/test/sql/copy/csv/test_double_sniffer.test b/test/sql/copy/csv/test_double_sniffer.test index ee96952c3208..96aaf011c0f5 100644 --- a/test/sql/copy/csv/test_double_sniffer.test +++ b/test/sql/copy/csv/test_double_sniffer.test @@ -6,30 +6,30 @@ statement ok PRAGMA enable_verification query I -SELECT typeof(number) FROM
read_csv('data/csv/double_trouble.csv', decimal_separator=',') limit 1; +SELECT typeof(number) FROM read_csv('{DATA_DIR}/csv/double_trouble.csv', decimal_separator=',') limit 1; ---- DOUBLE # Let's try sniffing float query I -SELECT typeof(number) FROM read_csv('data/csv/double_trouble.csv', decimal_separator=',', auto_type_candidates = ['FLOAT']) limit 1; +SELECT typeof(number) FROM read_csv('{DATA_DIR}/csv/double_trouble.csv', decimal_separator=',', auto_type_candidates = ['FLOAT']) limit 1; ---- FLOAT query I -SELECT typeof(number) FROM read_csv('data/csv/double_trouble.csv', decimal_separator=',', auto_type_candidates = ['FLOAT', 'DOUBLE']) limit 1; +SELECT typeof(number) FROM read_csv('{DATA_DIR}/csv/double_trouble.csv', decimal_separator=',', auto_type_candidates = ['FLOAT', 'DOUBLE']) limit 1; ---- FLOAT query I -SELECT typeof(number) FROM read_csv('data/csv/double_trouble.csv', decimal_separator=',', auto_type_candidates = ['DOUBLE', 'FLOAT']) limit 1; +SELECT typeof(number) FROM read_csv('{DATA_DIR}/csv/double_trouble.csv', decimal_separator=',', auto_type_candidates = ['DOUBLE', 'FLOAT']) limit 1; ---- FLOAT # Let's try sniffing DECIMAL query I -SELECT typeof(number) FROM read_csv('data/csv/double_trouble.csv', decimal_separator=',', auto_type_candidates = ['DECIMAL']) limit 1; +SELECT typeof(number) FROM read_csv('{DATA_DIR}/csv/double_trouble.csv', decimal_separator=',', auto_type_candidates = ['DECIMAL']) limit 1; ---- DECIMAL(18,3) diff --git a/test/sql/copy/csv/test_empty_header.test b/test/sql/copy/csv/test_empty_header.test index 294165b4fae2..7342dc78da91 100644 --- a/test/sql/copy/csv/test_empty_header.test +++ b/test/sql/copy/csv/test_empty_header.test @@ -9,7 +9,7 @@ loop i 1 5 query IIIII select columns[1].name,columns[2].name,columns[3].name,columns[4].name,columns[5].name -from sniff_csv('data/csv/headers/empty_${i}.csv') +from sniff_csv('{DATA_DIR}/csv/headers/empty_${i}.csv') ---- a b c column3 e diff --git a/test/sql/copy/csv/test_empty_quote.test b/test/sql/copy/csv/test_empty_quote.test index 6f6c3b075cd1..995a8c89359a 100644 --- a/test/sql/copy/csv/test_empty_quote.test +++ b/test/sql/copy/csv/test_empty_quote.test @@ -11,7 +11,7 @@ CREATE TABLE no_quote(a VARCHAR, b VARCHAR); # empty quote query I -COPY no_quote FROM 'data/csv/no_quote.csv' ( QUOTE '', ESCAPE '', DELIM '|'); +COPY no_quote FROM '{DATA_DIR}/csv/no_quote.csv' ( QUOTE '', ESCAPE '', DELIM '|'); ---- 3 @@ -24,7 +24,7 @@ h'ow do you do" # empty quote in read_csv function query TT -SELECT * FROM read_csv('data/csv/no_quote.csv', auto_detect=1, quote=''); +SELECT * FROM read_csv('{DATA_DIR}/csv/no_quote.csv', auto_detect=1, quote=''); ---- "hello world h'ow do you do" diff --git a/test/sql/copy/csv/test_encodings.test_slow b/test/sql/copy/csv/test_encodings.test_slow index ca56b9034ad8..f0db0d837404 100644 --- a/test/sql/copy/csv/test_encodings.test_slow +++ b/test/sql/copy/csv/test_encodings.test_slow @@ -6,34 +6,34 @@ statement ok PRAGMA enable_verification statement ok -FROM read_csv('data/csv/test/test.csv', encoding = 'utf-8') +FROM read_csv('{DATA_DIR}/csv/test/test.csv', encoding = 'utf-8') statement ok -FROM read_csv('data/csv/test/test.csv', encoding = 'utf-16') +FROM read_csv('{DATA_DIR}/csv/test/test.csv', encoding = 'utf-16') statement ok -FROM read_csv('data/csv/test/test.csv', encoding = 'latin-1') +FROM read_csv('{DATA_DIR}/csv/test/test.csv', encoding = 'latin-1') statement ok create table test (a INTEGER, b INTEGER, c VARCHAR(10)); statement ok -COPY test FROM 'data/csv/test/test.csv'
(encoding 'utf-8'); +COPY test FROM '{DATA_DIR}/csv/test/test.csv' (encoding 'utf-8'); # We error for unsupported encodings, sorry goku, but soon. statement error -FROM read_csv('data/csv/test/test.csv', encoding = 'Shift-JIS') +FROM read_csv('{DATA_DIR}/csv/test/test.csv', encoding = 'Shift-JIS') ---- The CSV Reader does not support the encoding: "Shift-JIS" # Writing not supported statement error -COPY test TO 'data/csv/test/test.csv' (encoding 'utf-16'); +COPY test TO '{DATA_DIR}/csv/test/test.csv' (encoding 'utf-16'); ---- Option "encoding" is not supported for writing - only for reading statement error -FROM read_csv('data/csv/encodings/latin1.csv') +FROM read_csv('{DATA_DIR}/csv/encodings/latin1.csv') ---- Invalid unicode (byte sequence mismatch) detected. @@ -44,7 +44,7 @@ statement ok create table test (a varchar, b integer) statement ok -COPY test FROM 'data/csv/encodings/latin1.csv' (encoding 'latin-1'); +COPY test FROM '{DATA_DIR}/csv/encodings/latin1.csv' (encoding 'latin-1'); query II FROM test @@ -63,7 +63,7 @@ André Lefèvre 36 Léonard Dubois 29 query II -FROM read_csv('data/csv/encodings/latin1.csv', encoding = 'latin-1') +FROM read_csv('{DATA_DIR}/csv/encodings/latin1.csv', encoding = 'latin-1') ---- José Álvarez 30 Müller 25 @@ -79,12 +79,12 @@ André Lefèvre 36 Léonard Dubois 29 statement error -FROM read_csv('data/csv/encodings/utf16.csv') +FROM read_csv('{DATA_DIR}/csv/encodings/utf16.csv') ---- Make sure you are using the correct file encoding. If not, set it (e.g., encoding = 'utf-16'). query II -FROM read_csv('data/csv/encodings/utf16.csv', encoding = 'utf-16') +FROM read_csv('{DATA_DIR}/csv/encodings/utf16.csv', encoding = 'utf-16') ---- José Álvarez 30 Müller 25 @@ -108,17 +108,17 @@ Sigurður Jónsson 55 Krystýna Novák 23 statement error -FROM read_csv('data/csv/encodings/shift_jis.csv', encoding = 'utf-16') +FROM read_csv('{DATA_DIR}/csv/encodings/shift_jis.csv', encoding = 'utf-16') ---- File is not utf-16 encoded statement error -FROM read_csv('data/csv/encodings/shift_jis.csv', encoding = 'latin-1') +FROM read_csv('{DATA_DIR}/csv/encodings/shift_jis.csv', encoding = 'latin-1') ---- File is not latin-1 encoded query II -FROM read_csv('data/csv/encodings/only_utf16.csv', encoding = 'utf-16', header = 0) +FROM read_csv('{DATA_DIR}/csv/encodings/only_utf16.csv', encoding = 'utf-16', header = 0) ---- ŁŁŁŁŁŁŁ ŁŁŁŁŁŁŁŁ Ł ŁŁŁ ŁŁŁŁŁŁŁŁŁ Ł @@ -130,7 +130,7 @@ FROM read_csv('data/csv/encodings/only_utf16.csv', encoding = 'utf-16', header = loop buffer_size 30 50 query II -FROM read_csv('data/csv/encodings/latin1.csv', encoding = 'latin-1', buffer_size = ${buffer_size}) +FROM read_csv('{DATA_DIR}/csv/encodings/latin1.csv', encoding = 'latin-1', buffer_size = ${buffer_size}) ---- José Álvarez 30 Müller 25 @@ -146,7 +146,7 @@ André Lefèvre 36 Léonard Dubois 29 query II -FROM read_csv('data/csv/encodings/latin1.csv.gz', encoding = 'latin-1', buffer_size = ${buffer_size}) +FROM read_csv('{DATA_DIR}/csv/encodings/latin1.csv.gz', encoding = 'latin-1', buffer_size = ${buffer_size}) ---- José Álvarez 30 Müller 25 @@ -162,7 +162,7 @@ André Lefèvre 36 Léonard Dubois 29 query II -FROM read_csv('data/csv/encodings/utf16.csv', encoding = 'utf-16', buffer_size = ${buffer_size}) +FROM read_csv('{DATA_DIR}/csv/encodings/utf16.csv', encoding = 'utf-16', buffer_size = ${buffer_size}) ---- José Álvarez 30 Müller 25 @@ -186,7 +186,7 @@ Sigurður Jónsson 55 Krystýna Novák 23 query II -FROM read_csv('data/csv/encodings/utf16.csv.gz', encoding = 'utf-16', buffer_size = ${buffer_size}) +FROM
read_csv('{DATA_DIR}/csv/encodings/utf16.csv.gz', encoding = 'utf-16', buffer_size = ${buffer_size}) ---- José Álvarez 30 Müller 25 @@ -214,7 +214,7 @@ endloop loop buffer_size 40 60 query II -FROM read_csv('data/csv/encodings/only_utf16.csv', encoding = 'utf-16', buffer_size = ${buffer_size}, header = 0) +FROM read_csv('{DATA_DIR}/csv/encodings/only_utf16.csv', encoding = 'utf-16', buffer_size = ${buffer_size}, header = 0) ---- ŁŁŁŁŁŁŁ ŁŁŁŁŁŁŁŁ Ł ŁŁŁ ŁŁŁŁŁŁŁŁŁ Ł @@ -224,7 +224,7 @@ FROM read_csv('data/csv/encodings/only_utf16.csv', encoding = 'utf-16', buffer_s ŁŁŁŁ ŁŁŁŁŁŁ ŁŁ query II -FROM read_csv('data/csv/encodings/only_latin1.csv', encoding = 'latin-1', buffer_size = ${buffer_size}, header = 0) +FROM read_csv('{DATA_DIR}/csv/encodings/only_latin1.csv', encoding = 'latin-1', buffer_size = ${buffer_size}, header = 0) ---- ýýýýýýý ýýýýýýýý ý ýýý ýýýýýýýýý ý @@ -236,7 +236,7 @@ FROM read_csv('data/csv/encodings/only_latin1.csv', encoding = 'latin-1', buffer endloop query I -FROM read_csv('data/csv/encodings/all_latin1.csv', encoding = 'latin-1', header = 0, quote = '', auto_detect = false, columns = {'a':'varchar'}, delim = '') +FROM read_csv('{DATA_DIR}/csv/encodings/all_latin1.csv', encoding = 'latin-1', header = 0, quote = '', auto_detect = false, columns = {'a':'varchar'}, delim = '') ---- ! """" @@ -431,6 +431,6 @@ z query I -SELECT count (*) FROM read_csv('data/csv/encodings/all_utf16.csv', encoding = 'utf-16', header = 0, quote = '', auto_detect = false, columns = {'a':'varchar'}, delim = '') +SELECT count (*) FROM read_csv('{DATA_DIR}/csv/encodings/all_utf16.csv', encoding = 'utf-16', header = 0, quote = '', auto_detect = false, columns = {'a':'varchar'}, delim = '') ---- 63489 \ No newline at end of file diff --git a/test/sql/copy/csv/test_enum_csv.test b/test/sql/copy/csv/test_enum_csv.test index dbe7d2f38a33..7da4f34c106b 100644 --- a/test/sql/copy/csv/test_enum_csv.test +++ b/test/sql/copy/csv/test_enum_csv.test @@ -9,7 +9,7 @@ statement ok CREATE TYPE mood AS ENUM ('happy', 'sad', 'angry') query I -FROM read_csv('data/csv/enum_type.csv', types=[mood]) +FROM read_csv('{DATA_DIR}/csv/enum_type.csv', types=[mood]) ---- sad happy diff --git a/test/sql/copy/csv/test_escape_long_value.test b/test/sql/copy/csv/test_escape_long_value.test index fe940adb84ac..287392b4943a 100644 --- a/test/sql/copy/csv/test_escape_long_value.test +++ b/test/sql/copy/csv/test_escape_long_value.test @@ -9,7 +9,7 @@ PRAGMA enable_verification loop buffer_size 28 50 statement ok -CREATE TABLE T AS FROM read_csv('data/csv/big_escape.csv', buffer_size = ${buffer_size}, quote = '"', escape = '"', delim = ';', columns = {'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, auto_detect = false) +CREATE TABLE T AS FROM read_csv('{DATA_DIR}/csv/big_escape.csv', buffer_size = ${buffer_size}, quote = '"', escape = '"', delim = ';', columns = {'a':'INTEGER','b':'INTEGER', 'c':'VARCHAR'}, auto_detect = false) query I select count(*) from T; @@ -27,7 +27,7 @@ statement ok CREATE TABLE long_escaped_value (a INTEGER, b INTEGER, c VARCHAR); query I -COPY long_escaped_value FROM 'data/csv/test/long_escaped_value.csv' (DELIMITER '🦆', AUTO_DETECT FALSE, QUOTE '"', ESCAPE '"'); +COPY long_escaped_value FROM '{DATA_DIR}/csv/test/long_escaped_value.csv' (DELIMITER '🦆', AUTO_DETECT FALSE, QUOTE '"', ESCAPE '"'); ---- 1 @@ -42,7 +42,7 @@ statement ok CREATE TABLE long_escaped_value_unicode (a INTEGER, b INTEGER, c VARCHAR); query I -COPY long_escaped_value_unicode FROM 'data/csv/test/long_escaped_value_unicode.csv'; +COPY 
long_escaped_value_unicode FROM '{DATA_DIR}/csv/test/long_escaped_value_unicode.csv'; ---- 1 diff --git a/test/sql/copy/csv/test_extra_delimiters_rfc.test b/test/sql/copy/csv/test_extra_delimiters_rfc.test index e90a120b6099..b8d227ed2b83 100644 --- a/test/sql/copy/csv/test_extra_delimiters_rfc.test +++ b/test/sql/copy/csv/test_extra_delimiters_rfc.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query III -FROM read_csv('data/csv/extra_delimiters.csv', strict_mode = false, columns={'a':'VARCHAR','b':'VARCHAR','c':'VARCHAR'}, auto_detect = false, delim = ',', header = true) +FROM read_csv('{DATA_DIR}/csv/extra_delimiters.csv', strict_mode = false, columns={'a':'VARCHAR','b':'VARCHAR','c':'VARCHAR'}, auto_detect = false, delim = ',', header = true) ---- 1 2 3 1 2 3 diff --git a/test/sql/copy/csv/test_filename_filter.test b/test/sql/copy/csv/test_filename_filter.test index 07305ce20738..3b29a809ce17 100644 --- a/test/sql/copy/csv/test_filename_filter.test +++ b/test/sql/copy/csv/test_filename_filter.test @@ -6,32 +6,32 @@ statement ok PRAGMA enable_verification query IIII -SELECT column1, column2, column3, filename.replace('\', '/') FROM read_csv('data/csv/filename_filter/*.csv', filename=true); +SELECT column1, column2, column3, parse_filename(filename) FROM read_csv('{DATA_DIR}/csv/filename_filter/*.csv', filename=true); ---- -1 2 3 data/csv/filename_filter/a.csv -4 5 6 data/csv/filename_filter/b.csv -1 NULL 3 data/csv/filename_filter/c.csv -1 1 3 data/csv/filename_filter/d.csv -2 NULL 2 data/csv/filename_filter/d.csv -3 3 100 data/csv/filename_filter/d.csv +1 2 3 a.csv +4 5 6 b.csv +1 NULL 3 c.csv +1 1 3 d.csv +2 NULL 2 d.csv +3 3 100 d.csv query IIII -SELECT column1, column2, column3, filename.replace('\', '/') FROM read_csv(['data/csv/filename_filter/a.csv','data/csv/filename_filter/b.csv','data/csv/filename_filter/c.csv','data/csv/filename_filter/d.csv'], filename=true) WHERE filename like '%d.csv'; +SELECT column1, column2, column3, parse_filename(filename) FROM read_csv(['{DATA_DIR}/csv/filename_filter/a.csv','{DATA_DIR}/csv/filename_filter/b.csv','{DATA_DIR}/csv/filename_filter/c.csv','{DATA_DIR}/csv/filename_filter/d.csv'], filename=true) WHERE filename like '%d.csv'; ---- -1 1 3 data/csv/filename_filter/d.csv -2 NULL 2 data/csv/filename_filter/d.csv -3 3 100 data/csv/filename_filter/d.csv +1 1 3 d.csv +2 NULL 2 d.csv +3 3 100 d.csv query IIII -SELECT column1, column2, column3, filename.replace('\', '/') FROM read_csv(['data/csv/filename_filter/a.csv','data/csv/filename_filter/b.csv','data/csv/filename_filter/c.csv','data/csv/filename_filter/d.csv'], filename=true, union_by_name=true) WHERE filename like '%d.csv'; +SELECT column1, column2, column3, parse_filename(filename) FROM read_csv(['{DATA_DIR}/csv/filename_filter/a.csv','{DATA_DIR}/csv/filename_filter/b.csv','{DATA_DIR}/csv/filename_filter/c.csv','{DATA_DIR}/csv/filename_filter/d.csv'], filename=true, union_by_name=true) WHERE filename like '%d.csv'; ---- -1 1 3 data/csv/filename_filter/d.csv -2 NULL 2 data/csv/filename_filter/d.csv -3 3 100 data/csv/filename_filter/d.csv +1 1 3 d.csv +2 NULL 2 d.csv +3 3 100 d.csv query IIII -SELECT column1, column2, column3, filename.replace('\', '/') FROM read_csv('data/csv/filename_filter/*.csv', filename=true, union_by_name=true) WHERE filename like '%d.csv'; +SELECT column1, column2, column3, parse_filename(filename) FROM read_csv('{DATA_DIR}/csv/filename_filter/*.csv', filename=true, union_by_name=true) WHERE filename like '%d.csv'; ---- -1 1 3 
data/csv/filename_filter/d.csv -2 NULL 2 data/csv/filename_filter/d.csv -3 3 100 data/csv/filename_filter/d.csv \ No newline at end of file +1 1 3 d.csv +2 NULL 2 d.csv +3 3 100 d.csv diff --git a/test/sql/copy/csv/test_force_not_null.test b/test/sql/copy/csv/test_force_not_null.test index c1e34eff0879..092cb1366fb6 100644 --- a/test/sql/copy/csv/test_force_not_null.test +++ b/test/sql/copy/csv/test_force_not_null.test @@ -11,7 +11,7 @@ CREATE TABLE test (col_a INTEGER, col_b VARCHAR(10), col_c VARCHAR(10)); # test if null value is correctly converted into string query I -COPY test FROM 'data/csv/test/force_not_null.csv' (FORCE_NOT_NULL (col_b), NULL 'test', HEADER 0,allow_quoted_nulls false ); +COPY test FROM '{DATA_DIR}/csv/test/force_not_null.csv' (FORCE_NOT_NULL (col_b), NULL 'test', HEADER 0,allow_quoted_nulls false ); ---- 3 @@ -27,7 +27,7 @@ DELETE FROM test; # test if null value is correctly converted into string if explicit columns are used query I -COPY test (col_a, col_b, col_c) FROM 'data/csv/test/force_not_null.csv' (FORCE_NOT_NULL (col_b), NULL 'test', HEADER 0, allow_quoted_nulls false); +COPY test (col_a, col_b, col_c) FROM '{DATA_DIR}/csv/test/force_not_null.csv' (FORCE_NOT_NULL (col_b), NULL 'test', HEADER 0, allow_quoted_nulls false); ---- 3 @@ -40,40 +40,40 @@ SELECT * FROM test ORDER BY 1; # FORCE_NOT_NULL is only supported in COPY ... FROM ... statement error -COPY test TO 'data/csv/test/force_not_null.csv' (FORCE_NOT_NULL (col_b), NULL 'test', HEADER 0); +COPY test TO '{DATA_DIR}/csv/test/force_not_null.csv' (FORCE_NOT_NULL (col_b), NULL 'test', HEADER 0); ---- Option "FORCE_NOT_NULL" is not supported for writing - only for reading # FORCE_NOT_NULL must not be empty and must have the correct parameter type statement error -COPY test FROM 'data/csv/test/force_not_null.csv' (FORCE_NOT_NULL, NULL 'test'); +COPY test FROM '{DATA_DIR}/csv/test/force_not_null.csv' (FORCE_NOT_NULL, NULL 'test'); ---- "force_not_null" expects a column list or * as parameter statement error -COPY test FROM 'data/csv/test/force_not_null.csv' (FORCE_NOT_NULL 42, NULL 'test'); +COPY test FROM '{DATA_DIR}/csv/test/force_not_null.csv' (FORCE_NOT_NULL 42, NULL 'test'); ---- "force_not_null" expected to find 42, but it was not found in the table # test using a column in FORCE_NOT_NULL that is not set as output, but that is a column of the table statement error -COPY test (col_b, col_a) FROM 'data/csv/test/force_not_null_reordered.csv' (FORCE_NOT_NULL (col_c, col_b)); +COPY test (col_b, col_a) FROM '{DATA_DIR}/csv/test/force_not_null_reordered.csv' (FORCE_NOT_NULL (col_c, col_b)); ---- "force_not_null" expected to find col_c, but it was not found in the table # test using a column in FORCE_NOT_NULL that is not a column of the table statement error -COPY test FROM 'data/csv/test/force_not_null_reordered.csv' (FORCE_NOT_NULL (col_c, col_d)); +COPY test FROM '{DATA_DIR}/csv/test/force_not_null_reordered.csv' (FORCE_NOT_NULL (col_c, col_d)); ---- "force_not_null" expected to find col_d, but it was not found in the table # FORCE_NOT_NULL fails on integer columns with NULL values, but only if there are null values query I -COPY test FROM 'data/csv/test/force_not_null.csv' (FORCE_NOT_NULL (col_a), HEADER 0); +COPY test FROM '{DATA_DIR}/csv/test/force_not_null.csv' (FORCE_NOT_NULL (col_a), HEADER 0); ---- 3 statement error -COPY test FROM 'data/csv/test/force_not_null_inull.csv' (FORCE_NOT_NULL (col_a), HEADER 0); +COPY test FROM '{DATA_DIR}/csv/test/force_not_null_inull.csv' (FORCE_NOT_NULL (col_a), HEADER 
0); ---- Error when converting column "col_a". diff --git a/test/sql/copy/csv/test_force_quote.test b/test/sql/copy/csv/test_force_quote.test index e6227a802b44..046fd69ed028 100644 --- a/test/sql/copy/csv/test_force_quote.test +++ b/test/sql/copy/csv/test_force_quote.test @@ -11,19 +11,19 @@ statement ok CREATE TABLE test (col_a INTEGER, col_b VARCHAR(10), col_c VARCHAR(10)); query I -COPY test FROM 'data/csv/test/force_quote.csv' (HEADER 0); +COPY test FROM '{DATA_DIR}/csv/test/force_quote.csv' (HEADER 0); ---- 3 # test FORCE_QUOTE * query I -COPY test TO '__TEST_DIR__/test_star.csv' (FORCE_QUOTE *, HEADER 0); +COPY test TO '{TEMP_DIR}/test_star.csv' (FORCE_QUOTE *, HEADER 0); ---- 3 # test FORCE_QUOTE with specific columns and non-default quote character and non-default null character query I -COPY test TO '__TEST_DIR__/test_chosen_columns.csv' (FORCE_QUOTE (col_a, col_c), QUOTE 't', NULL 'ea'); +COPY test TO '{TEMP_DIR}/test_chosen_columns.csv' (FORCE_QUOTE (col_a, col_c), QUOTE 't', NULL 'ea'); ---- 3 @@ -32,7 +32,7 @@ statement ok CREATE TABLE test2 (col_a INTEGER, col_b VARCHAR(10), col_c VARCHAR(10)); query I -COPY test2 FROM '__TEST_DIR__/test_chosen_columns.csv' (QUOTE 't', NULL 'ea'); +COPY test2 FROM '{TEMP_DIR}/test_chosen_columns.csv' (QUOTE 't', NULL 'ea'); ---- 3 @@ -45,7 +45,7 @@ SELECT * FROM test2 # test FORCE_QUOTE with reordered columns query I -COPY test (col_b, col_c, col_a) TO '__TEST_DIR__/test_reorder.csv' (FORCE_QUOTE (col_c, col_b), NULL 'test'); +COPY test (col_b, col_c, col_a) TO '{TEMP_DIR}/test_reorder.csv' (FORCE_QUOTE (col_c, col_b), NULL 'test'); ---- 3 @@ -53,7 +53,7 @@ statement ok CREATE TABLE test3 (col_a INTEGER, col_b VARCHAR(10), col_c VARCHAR(10)); query I -COPY test3(col_b, col_c, col_a) FROM '__TEST_DIR__/test_reorder.csv' (NULL 'test'); +COPY test3(col_b, col_c, col_a) FROM '{TEMP_DIR}/test_reorder.csv' (NULL 'test'); ---- 3 @@ -67,30 +67,30 @@ SELECT * FROM test2 # test using a column in FORCE_QUOTE that is not set as output, but that is a column of the table statement error -COPY test (col_b, col_a) TO '__TEST_DIR__/test_reorder.csv' (FORCE_QUOTE (col_c, col_b)); +COPY test (col_b, col_a) TO '{TEMP_DIR}/test_reorder.csv' (FORCE_QUOTE (col_c, col_b)); ---- "force_quote" expected to find col_c, but it was not found in the table # test using a column in FORCE_QUOTE that is not a column of the table statement error -COPY test TO '__TEST_DIR__/test_reorder.csv' (FORCE_QUOTE (col_c, col_d)); +COPY test TO '{TEMP_DIR}/test_reorder.csv' (FORCE_QUOTE (col_c, col_d)); ---- "force_quote" expected to find col_d, but it was not found in the table # FORCE_QUOTE is only supported in COPY ... TO ... 
statement error -COPY test FROM '__TEST_DIR__/test_reorder.csv' (FORCE_QUOTE (col_c, col_d)); +COPY test FROM '{TEMP_DIR}/test_reorder.csv' (FORCE_QUOTE (col_c, col_d)); ---- Option "FORCE_QUOTE" is not supported for reading - only for writing # FORCE_QUOTE must not be empty and must have the correct parameter type statement error -COPY test TO '__TEST_DIR__/test_reorder.csv' (FORCE_QUOTE); +COPY test TO '{TEMP_DIR}/test_reorder.csv' (FORCE_QUOTE); ---- "force_quote" expects a column list or * as parameter statement error -COPY test TO '__TEST_DIR__/test_reorder.csv' (FORCE_QUOTE 42); +COPY test TO '{TEMP_DIR}/test_reorder.csv' (FORCE_QUOTE 42); ---- "force_quote" expected to find 42, but it was not found in the table diff --git a/test/sql/copy/csv/test_glob_type.test b/test/sql/copy/csv/test_glob_type.test index bf8e0e0f273a..de13e00f469c 100644 --- a/test/sql/copy/csv/test_glob_type.test +++ b/test/sql/copy/csv/test_glob_type.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query I -SELECT typeof(bar) FROM 'data/csv/17451/*.csv' limit 1 +SELECT typeof(bar) FROM '{DATA_DIR}/csv/17451/*.csv' limit 1 ---- DATE @@ -14,30 +14,30 @@ statement ok CREATE TABLE T AS SELECT 'bar,baz', UNION ALL SELECT ',baz' from range (0,100000) statement ok -COPY T TO '__TEST_DIR__/t.csv' (QUOTE '', HEADER 0) +COPY T TO '{TEMP_DIR}/t.csv' (QUOTE '', HEADER 0) # If we can't be absolutely sure by reading the whole file, we need to push it as a varchar still query I -SELECT typeof(bar) FROM read_csv(['data/csv/17451/1.csv', '__TEST_DIR__/t.csv']) limit 1 +SELECT typeof(bar) FROM read_csv(['{DATA_DIR}/csv/17451/1.csv', '{TEMP_DIR}/t.csv']) limit 1 ---- VARCHAR statement error -SELECT typeof(bar) FROM read_csv(['data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/2.csv','data/csv/17451/extra/3.csv'], files_to_sniff = 0) limit 1 +SELECT typeof(bar) FROM read_csv(['{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/2.csv','{DATA_DIR}/csv/17451/extra/3.csv'], files_to_sniff = 0) limit 1 ---- Unsupported parameter for files_to_sniff: value must be -1 for all files or higher than one. 
query I -SELECT typeof(bar) FROM read_csv(['data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/2.csv','data/csv/17451/extra/3.csv'], files_to_sniff = 5) limit 1 +SELECT typeof(bar) FROM read_csv(['{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/2.csv','{DATA_DIR}/csv/17451/extra/3.csv'], files_to_sniff = 5) limit 1 ---- DATE statement error -FROM read_csv(['data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/2.csv','data/csv/17451/extra/3.csv'], files_to_sniff = 5) +FROM read_csv(['{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/2.csv','{DATA_DIR}/csv/17451/extra/3.csv'], files_to_sniff = 5) ---- Consider setting files_to_sniff to a higher value (e.g., files_to_sniff = -1) query I -SELECT typeof(bar) FROM read_csv(['data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/1.csv','data/csv/17451/2.csv','data/csv/17451/extra/3.csv'], files_to_sniff = -1) limit 1 +SELECT typeof(bar) FROM read_csv(['{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/1.csv','{DATA_DIR}/csv/17451/2.csv','{DATA_DIR}/csv/17451/extra/3.csv'], files_to_sniff = -1) limit 1 ---- VARCHAR \ No newline at end of file diff --git a/test/sql/copy/csv/test_greek_utf8.test b/test/sql/copy/csv/test_greek_utf8.test index 0945d3a1a043..2bd53d9a9366 100644 --- a/test/sql/copy/csv/test_greek_utf8.test +++ b/test/sql/copy/csv/test_greek_utf8.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification statement ok -CREATE TABLE greek_utf8 AS SELECT i, nfc_normalize(j) j, k FROM read_csv('data/csv/real/greek_utf8.csv', columns=STRUCT_PACK(i := 'INTEGER', j := 'VARCHAR', k := 'INTEGER'), delim='|') +CREATE TABLE greek_utf8 AS SELECT i, nfc_normalize(j) j, k FROM read_csv('{DATA_DIR}/csv/real/greek_utf8.csv', columns=STRUCT_PACK(i := 'INTEGER', j := 'VARCHAR', k := 'INTEGER'), delim='|') query ITI SELECT * FROM greek_utf8 ORDER BY 1; @@ -21,7 +21,7 @@ SELECT * FROM greek_utf8 ORDER BY 1; 607808 poverty‪ 1 query I -COPY greek_utf8 TO '__TEST_DIR__/greek_utf8.csv' DELIMITER ' ' HEADER; +COPY greek_utf8 TO '{TEMP_DIR}/greek_utf8.csv' DELIMITER ' ' HEADER; ---- 8 @@ -35,7 +35,7 @@ SELECT * FROM 
greek_utf8; # now copy back into the table query I -COPY greek_utf8 FROM '__TEST_DIR__/greek_utf8.csv' DELIMITER ' ' HEADER; +COPY greek_utf8 FROM '{TEMP_DIR}/greek_utf8.csv' DELIMITER ' ' HEADER; ---- 8 diff --git a/test/sql/copy/csv/test_header_only.test b/test/sql/copy/csv/test_header_only.test index 52aa6ac13d15..a22fdff47d0e 100644 --- a/test/sql/copy/csv/test_header_only.test +++ b/test/sql/copy/csv/test_header_only.test @@ -6,44 +6,44 @@ statement ok PRAGMA enable_verification query I -SELECT columns from sniff_csv('data/csv/header_only.csv', header=True, ignore_errors=True) +SELECT columns from sniff_csv('{DATA_DIR}/csv/header_only.csv', header=True, ignore_errors=True) ---- [{'name': abs_file_name, 'type': VARCHAR}] query I -SELECT abs_file_name FROM read_csv('data/csv/header_only.csv', header=True, ignore_errors=True) +SELECT abs_file_name FROM read_csv('{DATA_DIR}/csv/header_only.csv', header=True, ignore_errors=True) ---- query I SELECT REGEXP_MATCHES(abs_file_name, 'foo') - FROM ( SELECT abs_file_name FROM read_csv('data/csv/header_only.csv', header=True, ignore_errors=True)) + FROM ( SELECT abs_file_name FROM read_csv('{DATA_DIR}/csv/header_only.csv', header=True, ignore_errors=True)) ---- query I SELECT REGEXP_MATCHES(abs_file_name, 'foo') - FROM ( SELECT abs_file_name FROM read_csv(['data/csv/header_only.csv','data/csv/header_only.csv'], header=True, ignore_errors=True)) + FROM ( SELECT abs_file_name FROM read_csv(['{DATA_DIR}/csv/header_only.csv','{DATA_DIR}/csv/header_only.csv'], header=True, ignore_errors=True)) ---- statement error SELECT REGEXP_MATCHES(abs_file_name, 'foo') - FROM ( SELECT abs_file_name FROM read_csv(['data/csv/header_only.csv','data/csv/bool.csv','data/csv/header_only.csv'], header=True, ignore_errors=True)) + FROM ( SELECT abs_file_name FROM read_csv(['{DATA_DIR}/csv/header_only.csv','{DATA_DIR}/csv/bool.csv','{DATA_DIR}/csv/header_only.csv'], header=True, ignore_errors=True)) ---- No function matches the given name and argument types 'regexp_matches(BOOLEAN, STRING_LITERAL)'. You might need to add explicit type casts. 
# Try to replace column type query I -SELECT columns from sniff_csv('data/csv/header_only.csv', types = ['INTEGER']) +SELECT columns from sniff_csv('{DATA_DIR}/csv/header_only.csv', types = ['INTEGER']) ---- [{'name': abs_file_name, 'type': INTEGER}] query I -SELECT columns from sniff_csv('data/csv/header_only.csv', types = {'abs_file_name':'INTEGER'}) +SELECT columns from sniff_csv('{DATA_DIR}/csv/header_only.csv', types = {'abs_file_name':'INTEGER'}) ---- [{'name': abs_file_name, 'type': INTEGER}] statement ok -CREATE TABLE T AS from read_csv('data/csv/header_only.csv',types = ['INTEGER']) +CREATE TABLE T AS from read_csv('{DATA_DIR}/csv/header_only.csv',types = ['INTEGER']) query IIIIII DESCRIBE T @@ -51,7 +51,7 @@ DESCRIBE T abs_file_name INTEGER YES NULL NULL NULL statement ok -CREATE OR REPLACE TABLE T AS from read_csv('data/csv/header_only.csv', types = {'abs_file_name':'INTEGER'}) +CREATE OR REPLACE TABLE T AS from read_csv('{DATA_DIR}/csv/header_only.csv', types = {'abs_file_name':'INTEGER'}) query IIIIII DESCRIBE T @@ -60,23 +60,23 @@ abs_file_name INTEGER YES NULL NULL NULL # Now with two columns query I -SELECT columns from sniff_csv('data/csv/header_only_2.csv', types = ['INTEGER']) +SELECT columns from sniff_csv('{DATA_DIR}/csv/header_only_2.csv', types = ['INTEGER']) ---- [{'name': foo, 'type': INTEGER}, {'name': bar, 'type': VARCHAR}] query I -SELECT columns from sniff_csv('data/csv/header_only_2.csv', types = {'foo':'INTEGER'}) +SELECT columns from sniff_csv('{DATA_DIR}/csv/header_only_2.csv', types = {'foo':'INTEGER'}) ---- [{'name': foo, 'type': INTEGER}, {'name': bar, 'type': VARCHAR}] # This is clearly wrong query I -SELECT columns from sniff_csv('data/csv/header_only_2.csv', types = {'bar':'INTEGER'}) +SELECT columns from sniff_csv('{DATA_DIR}/csv/header_only_2.csv', types = {'bar':'INTEGER'}) ---- [{'name': foo, 'type': VARCHAR}, {'name': bar, 'type': INTEGER}] statement ok -CREATE OR REPLACE TABLE T AS from read_csv('data/csv/header_only_2.csv', types = ['INTEGER']) +CREATE OR REPLACE TABLE T AS from read_csv('{DATA_DIR}/csv/header_only_2.csv', types = ['INTEGER']) query IIIIII DESCRIBE T @@ -85,7 +85,7 @@ foo INTEGER YES NULL NULL NULL bar VARCHAR YES NULL NULL NULL statement ok -CREATE OR REPLACE TABLE T AS from read_csv('data/csv/header_only_2.csv', types = {'foo':'INTEGER'}) +CREATE OR REPLACE TABLE T AS from read_csv('{DATA_DIR}/csv/header_only_2.csv', types = {'foo':'INTEGER'}) query IIIIII DESCRIBE T @@ -94,7 +94,7 @@ foo INTEGER YES NULL NULL NULL bar VARCHAR YES NULL NULL NULL statement ok -CREATE OR REPLACE TABLE T AS from read_csv('data/csv/header_only_2.csv', types = {'bar':'INTEGER'}) +CREATE OR REPLACE TABLE T AS from read_csv('{DATA_DIR}/csv/header_only_2.csv', types = {'bar':'INTEGER'}) query IIIIII DESCRIBE T diff --git a/test/sql/copy/csv/test_headers_12089.test b/test/sql/copy/csv/test_headers_12089.test index ec3c904a6db1..1e9bea70bbfb 100644 --- a/test/sql/copy/csv/test_headers_12089.test +++ b/test/sql/copy/csv/test_headers_12089.test @@ -6,12 +6,12 @@ statement ok PRAGMA enable_verification query I -select columns FROM sniff_csv('data/csv/headers/escaped_quote.csv'); +select columns FROM sniff_csv('{DATA_DIR}/csv/headers/escaped_quote.csv'); ---- [{'name': Name, 'type': VARCHAR}, {'name': 'Escaped\'\'Quote', 'type': BIGINT}] query I -select columns FROM sniff_csv('data/csv/headers/unescaped_quote.csv'); +select columns FROM sniff_csv('{DATA_DIR}/csv/headers/unescaped_quote.csv'); ---- [{'name': Name, 'type': VARCHAR}, {'name': 
'Unescaped\'Quote', 'type': BIGINT}] \ No newline at end of file diff --git a/test/sql/copy/csv/test_hits_problematic.test b/test/sql/copy/csv/test_hits_problematic.test index 0ab2e170ba2e..dd2919500fc1 100644 --- a/test/sql/copy/csv/test_hits_problematic.test +++ b/test/sql/copy/csv/test_hits_problematic.test @@ -3,17 +3,17 @@ # group: [csv] statement error -FROM read_csv('data/csv/hits_problematic.csv', auto_detect=false, delim=',', quote='"', escape='\', new_line='\n', skip=0, comment='', header=false, columns={'column000': 'BIGINT', 'column001': 'BIGINT', 'column002': 'VARCHAR', 'column003': 'BIGINT', 'column004': 'TIMESTAMP', 'column005': 'DATE', 'column006': 'BIGINT', 'column007': 'BIGINT', 'column008': 'BIGINT', 'column009': 'BIGINT', 'column010': 'BIGINT', 'column011': 'BIGINT', 'column012': 'BIGINT', 'column013': 'VARCHAR', 'column014': 'VARCHAR', 'column015': 'BIGINT', 'column016': 'BIGINT', 'column017': 'BIGINT', 'column018': 'BIGINT', 'column019': 'BIGINT', 'column020': 'BIGINT', 'column021': 'BIGINT', 'column022': 'BIGINT', 'column023': 'BIGINT', 'column024': 'BIGINT', 'column025': 'DOUBLE', 'column026': 'BIGINT', 'column027': 'BIGINT', 'column028': 'BIGINT', 'column029': 'VARCHAR', 'column030': 'BIGINT', 'column031': 'BIGINT', 'column032': 'BIGINT', 'column033': 'BIGINT', 'column034': 'VARCHAR', 'column035': 'VARCHAR', 'column036': 'BIGINT', 'column037': 'BIGINT', 'column038': 'BIGINT', 'column039': 'VARCHAR', 'column040': 'BIGINT', 'column041': 'BIGINT', 'column042': 'BIGINT', 'column043': 'BIGINT', 'column044': 'BIGINT', 'column045': 'TIMESTAMP', 'column046': 'BIGINT', 'column047': 'BIGINT', 'column048': 'BIGINT', 'column049': 'BIGINT', 'column050': 'VARCHAR', 'column051': 'BIGINT', 'column052': 'BIGINT', 'column053': 'BIGINT', 'column054': 'BIGINT', 'column055': 'BIGINT', 'column056': 'VARCHAR', 'column057': 'BIGINT', 'column058': 'BIGINT', 'column059': 'BIGINT', 'column060': 'BIGINT', 'column061': 'BIGINT', 'column062': 'BIGINT', 'column063': 'VARCHAR', 'column064': 'TIMESTAMP', 'column065': 'BIGINT', 'column066': 'BIGINT', 'column067': 'BIGINT', 'column068': 'BIGINT', 'column069': 'BIGINT', 'column070': 'BIGINT', 'column071': 'BIGINT', 'column072': 'BIGINT', 'column073': 'BIGINT', 'column074': 'VARCHAR', 'column075': 'VARCHAR', 'column076': 'VARCHAR', 'column077': 'VARCHAR', 'column078': 'BIGINT', 'column079': 'BIGINT', 'column080': 'BIGINT', 'column081': 'BIGINT', 'column082': 'BIGINT', 'column083': 'BIGINT', 'column084': 'BIGINT', 'column085': 'BIGINT', 'column086': 'VARCHAR', 'column087': 'BIGINT', 'column088': 'VARCHAR', 'column089': 'VARCHAR', 'column090': 'BIGINT', 'column091': 'VARCHAR', 'column092': 'VARCHAR', 'column093': 'VARCHAR', 'column094': 'VARCHAR', 'column095': 'VARCHAR', 'column096': 'VARCHAR', 'column097': 'VARCHAR', 'column098': 'VARCHAR', 'column099': 'VARCHAR', 'column100': 'VARCHAR', 'column101': 'BIGINT', 'column102': 'BIGINT', 'column103': 'BIGINT', 'column104': 'BIGINT'}, dateformat='%Y-%m-%d'); +FROM read_csv('{DATA_DIR}/csv/hits_problematic.csv', auto_detect=false, delim=',', quote='"', escape='\', new_line='\n', skip=0, comment='', header=false, columns={'column000': 'BIGINT', 'column001': 'BIGINT', 'column002': 'VARCHAR', 'column003': 'BIGINT', 'column004': 'TIMESTAMP', 'column005': 'DATE', 'column006': 'BIGINT', 'column007': 'BIGINT', 'column008': 'BIGINT', 'column009': 'BIGINT', 'column010': 'BIGINT', 'column011': 'BIGINT', 'column012': 'BIGINT', 'column013': 'VARCHAR', 'column014': 'VARCHAR', 'column015': 'BIGINT', 'column016': 'BIGINT', 
'column017': 'BIGINT', 'column018': 'BIGINT', 'column019': 'BIGINT', 'column020': 'BIGINT', 'column021': 'BIGINT', 'column022': 'BIGINT', 'column023': 'BIGINT', 'column024': 'BIGINT', 'column025': 'DOUBLE', 'column026': 'BIGINT', 'column027': 'BIGINT', 'column028': 'BIGINT', 'column029': 'VARCHAR', 'column030': 'BIGINT', 'column031': 'BIGINT', 'column032': 'BIGINT', 'column033': 'BIGINT', 'column034': 'VARCHAR', 'column035': 'VARCHAR', 'column036': 'BIGINT', 'column037': 'BIGINT', 'column038': 'BIGINT', 'column039': 'VARCHAR', 'column040': 'BIGINT', 'column041': 'BIGINT', 'column042': 'BIGINT', 'column043': 'BIGINT', 'column044': 'BIGINT', 'column045': 'TIMESTAMP', 'column046': 'BIGINT', 'column047': 'BIGINT', 'column048': 'BIGINT', 'column049': 'BIGINT', 'column050': 'VARCHAR', 'column051': 'BIGINT', 'column052': 'BIGINT', 'column053': 'BIGINT', 'column054': 'BIGINT', 'column055': 'BIGINT', 'column056': 'VARCHAR', 'column057': 'BIGINT', 'column058': 'BIGINT', 'column059': 'BIGINT', 'column060': 'BIGINT', 'column061': 'BIGINT', 'column062': 'BIGINT', 'column063': 'VARCHAR', 'column064': 'TIMESTAMP', 'column065': 'BIGINT', 'column066': 'BIGINT', 'column067': 'BIGINT', 'column068': 'BIGINT', 'column069': 'BIGINT', 'column070': 'BIGINT', 'column071': 'BIGINT', 'column072': 'BIGINT', 'column073': 'BIGINT', 'column074': 'VARCHAR', 'column075': 'VARCHAR', 'column076': 'VARCHAR', 'column077': 'VARCHAR', 'column078': 'BIGINT', 'column079': 'BIGINT', 'column080': 'BIGINT', 'column081': 'BIGINT', 'column082': 'BIGINT', 'column083': 'BIGINT', 'column084': 'BIGINT', 'column085': 'BIGINT', 'column086': 'VARCHAR', 'column087': 'BIGINT', 'column088': 'VARCHAR', 'column089': 'VARCHAR', 'column090': 'BIGINT', 'column091': 'VARCHAR', 'column092': 'VARCHAR', 'column093': 'VARCHAR', 'column094': 'VARCHAR', 'column095': 'VARCHAR', 'column096': 'VARCHAR', 'column097': 'VARCHAR', 'column098': 'VARCHAR', 'column099': 'VARCHAR', 'column100': 'VARCHAR', 'column101': 'BIGINT', 'column102': 'BIGINT', 'column103': 'BIGINT', 'column104': 'BIGINT'}, dateformat='%Y-%m-%d'); ---- * Disable the parser's strict mode (strict_mode=false) to allow reading rows that do not comply with the CSV standard. 
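# The same scan is repeated below with strict_mode = False, so the rows that do
# not comply with the CSV standard are accepted instead of raising an error.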
statement ok -FROM read_csv('data/csv/hits_problematic.csv', auto_detect=false, delim=',', quote='"', escape='\', new_line='\n', skip=0, comment='', header=false, columns={'column000': 'BIGINT', 'column001': 'BIGINT', 'column002': 'VARCHAR', 'column003': 'BIGINT', 'column004': 'TIMESTAMP', 'column005': 'DATE', 'column006': 'BIGINT', 'column007': 'BIGINT', 'column008': 'BIGINT', 'column009': 'BIGINT', 'column010': 'BIGINT', 'column011': 'BIGINT', 'column012': 'BIGINT', 'column013': 'VARCHAR', 'column014': 'VARCHAR', 'column015': 'BIGINT', 'column016': 'BIGINT', 'column017': 'BIGINT', 'column018': 'BIGINT', 'column019': 'BIGINT', 'column020': 'BIGINT', 'column021': 'BIGINT', 'column022': 'BIGINT', 'column023': 'BIGINT', 'column024': 'BIGINT', 'column025': 'DOUBLE', 'column026': 'BIGINT', 'column027': 'BIGINT', 'column028': 'BIGINT', 'column029': 'VARCHAR', 'column030': 'BIGINT', 'column031': 'BIGINT', 'column032': 'BIGINT', 'column033': 'BIGINT', 'column034': 'VARCHAR', 'column035': 'VARCHAR', 'column036': 'BIGINT', 'column037': 'BIGINT', 'column038': 'BIGINT', 'column039': 'VARCHAR', 'column040': 'BIGINT', 'column041': 'BIGINT', 'column042': 'BIGINT', 'column043': 'BIGINT', 'column044': 'BIGINT', 'column045': 'TIMESTAMP', 'column046': 'BIGINT', 'column047': 'BIGINT', 'column048': 'BIGINT', 'column049': 'BIGINT', 'column050': 'VARCHAR', 'column051': 'BIGINT', 'column052': 'BIGINT', 'column053': 'BIGINT', 'column054': 'BIGINT', 'column055': 'BIGINT', 'column056': 'VARCHAR', 'column057': 'BIGINT', 'column058': 'BIGINT', 'column059': 'BIGINT', 'column060': 'BIGINT', 'column061': 'BIGINT', 'column062': 'BIGINT', 'column063': 'VARCHAR', 'column064': 'TIMESTAMP', 'column065': 'BIGINT', 'column066': 'BIGINT', 'column067': 'BIGINT', 'column068': 'BIGINT', 'column069': 'BIGINT', 'column070': 'BIGINT', 'column071': 'BIGINT', 'column072': 'BIGINT', 'column073': 'BIGINT', 'column074': 'VARCHAR', 'column075': 'VARCHAR', 'column076': 'VARCHAR', 'column077': 'VARCHAR', 'column078': 'BIGINT', 'column079': 'BIGINT', 'column080': 'BIGINT', 'column081': 'BIGINT', 'column082': 'BIGINT', 'column083': 'BIGINT', 'column084': 'BIGINT', 'column085': 'BIGINT', 'column086': 'VARCHAR', 'column087': 'BIGINT', 'column088': 'VARCHAR', 'column089': 'VARCHAR', 'column090': 'BIGINT', 'column091': 'VARCHAR', 'column092': 'VARCHAR', 'column093': 'VARCHAR', 'column094': 'VARCHAR', 'column095': 'VARCHAR', 'column096': 'VARCHAR', 'column097': 'VARCHAR', 'column098': 'VARCHAR', 'column099': 'VARCHAR', 'column100': 'VARCHAR', 'column101': 'BIGINT', 'column102': 'BIGINT', 'column103': 'BIGINT', 'column104': 'BIGINT'}, dateformat='%Y-%m-%d', strict_mode = False); +FROM read_csv('{DATA_DIR}/csv/hits_problematic.csv', auto_detect=false, delim=',', quote='"', escape='\', new_line='\n', skip=0, comment='', header=false, columns={'column000': 'BIGINT', 'column001': 'BIGINT', 'column002': 'VARCHAR', 'column003': 'BIGINT', 'column004': 'TIMESTAMP', 'column005': 'DATE', 'column006': 'BIGINT', 'column007': 'BIGINT', 'column008': 'BIGINT', 'column009': 'BIGINT', 'column010': 'BIGINT', 'column011': 'BIGINT', 'column012': 'BIGINT', 'column013': 'VARCHAR', 'column014': 'VARCHAR', 'column015': 'BIGINT', 'column016': 'BIGINT', 'column017': 'BIGINT', 'column018': 'BIGINT', 'column019': 'BIGINT', 'column020': 'BIGINT', 'column021': 'BIGINT', 'column022': 'BIGINT', 'column023': 'BIGINT', 'column024': 'BIGINT', 'column025': 'DOUBLE', 'column026': 'BIGINT', 'column027': 'BIGINT', 'column028': 'BIGINT', 'column029': 'VARCHAR', 'column030': 'BIGINT', 
'column031': 'BIGINT', 'column032': 'BIGINT', 'column033': 'BIGINT', 'column034': 'VARCHAR', 'column035': 'VARCHAR', 'column036': 'BIGINT', 'column037': 'BIGINT', 'column038': 'BIGINT', 'column039': 'VARCHAR', 'column040': 'BIGINT', 'column041': 'BIGINT', 'column042': 'BIGINT', 'column043': 'BIGINT', 'column044': 'BIGINT', 'column045': 'TIMESTAMP', 'column046': 'BIGINT', 'column047': 'BIGINT', 'column048': 'BIGINT', 'column049': 'BIGINT', 'column050': 'VARCHAR', 'column051': 'BIGINT', 'column052': 'BIGINT', 'column053': 'BIGINT', 'column054': 'BIGINT', 'column055': 'BIGINT', 'column056': 'VARCHAR', 'column057': 'BIGINT', 'column058': 'BIGINT', 'column059': 'BIGINT', 'column060': 'BIGINT', 'column061': 'BIGINT', 'column062': 'BIGINT', 'column063': 'VARCHAR', 'column064': 'TIMESTAMP', 'column065': 'BIGINT', 'column066': 'BIGINT', 'column067': 'BIGINT', 'column068': 'BIGINT', 'column069': 'BIGINT', 'column070': 'BIGINT', 'column071': 'BIGINT', 'column072': 'BIGINT', 'column073': 'BIGINT', 'column074': 'VARCHAR', 'column075': 'VARCHAR', 'column076': 'VARCHAR', 'column077': 'VARCHAR', 'column078': 'BIGINT', 'column079': 'BIGINT', 'column080': 'BIGINT', 'column081': 'BIGINT', 'column082': 'BIGINT', 'column083': 'BIGINT', 'column084': 'BIGINT', 'column085': 'BIGINT', 'column086': 'VARCHAR', 'column087': 'BIGINT', 'column088': 'VARCHAR', 'column089': 'VARCHAR', 'column090': 'BIGINT', 'column091': 'VARCHAR', 'column092': 'VARCHAR', 'column093': 'VARCHAR', 'column094': 'VARCHAR', 'column095': 'VARCHAR', 'column096': 'VARCHAR', 'column097': 'VARCHAR', 'column098': 'VARCHAR', 'column099': 'VARCHAR', 'column100': 'VARCHAR', 'column101': 'BIGINT', 'column102': 'BIGINT', 'column103': 'BIGINT', 'column104': 'BIGINT'}, dateformat='%Y-%m-%d', strict_mode = False); statement error -FROM read_csv('data/csv/hits_problematic.csv', auto_detect=false, delim=',', quote='"', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column000': 'BIGINT', 'column001': 'BIGINT', 'column002': 'VARCHAR', 'column003': 'BIGINT', 'column004': 'TIMESTAMP', 'column005': 'DATE', 'column006': 'BIGINT', 'column007': 'BIGINT', 'column008': 'BIGINT', 'column009': 'BIGINT', 'column010': 'BIGINT', 'column011': 'BIGINT', 'column012': 'BIGINT', 'column013': 'VARCHAR', 'column014': 'VARCHAR', 'column015': 'BIGINT', 'column016': 'BIGINT', 'column017': 'BIGINT', 'column018': 'BIGINT', 'column019': 'BIGINT', 'column020': 'BIGINT', 'column021': 'BIGINT', 'column022': 'BIGINT', 'column023': 'BIGINT', 'column024': 'BIGINT', 'column025': 'DOUBLE', 'column026': 'BIGINT', 'column027': 'BIGINT', 'column028': 'BIGINT', 'column029': 'VARCHAR', 'column030': 'BIGINT', 'column031': 'BIGINT', 'column032': 'BIGINT', 'column033': 'BIGINT', 'column034': 'VARCHAR', 'column035': 'VARCHAR', 'column036': 'BIGINT', 'column037': 'BIGINT', 'column038': 'BIGINT', 'column039': 'VARCHAR', 'column040': 'BIGINT', 'column041': 'BIGINT', 'column042': 'BIGINT', 'column043': 'BIGINT', 'column044': 'BIGINT', 'column045': 'TIMESTAMP', 'column046': 'BIGINT', 'column047': 'BIGINT', 'column048': 'BIGINT', 'column049': 'BIGINT', 'column050': 'VARCHAR', 'column051': 'BIGINT', 'column052': 'BIGINT', 'column053': 'BIGINT', 'column054': 'BIGINT', 'column055': 'BIGINT', 'column056': 'VARCHAR', 'column057': 'BIGINT', 'column058': 'BIGINT', 'column059': 'BIGINT', 'column060': 'BIGINT', 'column061': 'BIGINT', 'column062': 'BIGINT', 'column063': 'VARCHAR', 'column064': 'TIMESTAMP', 'column065': 'BIGINT', 'column066': 'BIGINT', 'column067': 'BIGINT', 'column068': 'BIGINT', 
'column069': 'BIGINT', 'column070': 'BIGINT', 'column071': 'BIGINT', 'column072': 'BIGINT', 'column073': 'BIGINT', 'column074': 'VARCHAR', 'column075': 'VARCHAR', 'column076': 'VARCHAR', 'column077': 'VARCHAR', 'column078': 'BIGINT', 'column079': 'BIGINT', 'column080': 'BIGINT', 'column081': 'BIGINT', 'column082': 'BIGINT', 'column083': 'BIGINT', 'column084': 'BIGINT', 'column085': 'BIGINT', 'column086': 'VARCHAR', 'column087': 'BIGINT', 'column088': 'VARCHAR', 'column089': 'VARCHAR', 'column090': 'BIGINT', 'column091': 'VARCHAR', 'column092': 'VARCHAR', 'column093': 'VARCHAR', 'column094': 'VARCHAR', 'column095': 'VARCHAR', 'column096': 'VARCHAR', 'column097': 'VARCHAR', 'column098': 'VARCHAR', 'column099': 'VARCHAR', 'column100': 'VARCHAR', 'column101': 'BIGINT', 'column102': 'BIGINT', 'column103': 'BIGINT', 'column104': 'BIGINT'}, dateformat='%Y-%m-%d'); +FROM read_csv('{DATA_DIR}/csv/hits_problematic.csv', auto_detect=false, delim=',', quote='"', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column000': 'BIGINT', 'column001': 'BIGINT', 'column002': 'VARCHAR', 'column003': 'BIGINT', 'column004': 'TIMESTAMP', 'column005': 'DATE', 'column006': 'BIGINT', 'column007': 'BIGINT', 'column008': 'BIGINT', 'column009': 'BIGINT', 'column010': 'BIGINT', 'column011': 'BIGINT', 'column012': 'BIGINT', 'column013': 'VARCHAR', 'column014': 'VARCHAR', 'column015': 'BIGINT', 'column016': 'BIGINT', 'column017': 'BIGINT', 'column018': 'BIGINT', 'column019': 'BIGINT', 'column020': 'BIGINT', 'column021': 'BIGINT', 'column022': 'BIGINT', 'column023': 'BIGINT', 'column024': 'BIGINT', 'column025': 'DOUBLE', 'column026': 'BIGINT', 'column027': 'BIGINT', 'column028': 'BIGINT', 'column029': 'VARCHAR', 'column030': 'BIGINT', 'column031': 'BIGINT', 'column032': 'BIGINT', 'column033': 'BIGINT', 'column034': 'VARCHAR', 'column035': 'VARCHAR', 'column036': 'BIGINT', 'column037': 'BIGINT', 'column038': 'BIGINT', 'column039': 'VARCHAR', 'column040': 'BIGINT', 'column041': 'BIGINT', 'column042': 'BIGINT', 'column043': 'BIGINT', 'column044': 'BIGINT', 'column045': 'TIMESTAMP', 'column046': 'BIGINT', 'column047': 'BIGINT', 'column048': 'BIGINT', 'column049': 'BIGINT', 'column050': 'VARCHAR', 'column051': 'BIGINT', 'column052': 'BIGINT', 'column053': 'BIGINT', 'column054': 'BIGINT', 'column055': 'BIGINT', 'column056': 'VARCHAR', 'column057': 'BIGINT', 'column058': 'BIGINT', 'column059': 'BIGINT', 'column060': 'BIGINT', 'column061': 'BIGINT', 'column062': 'BIGINT', 'column063': 'VARCHAR', 'column064': 'TIMESTAMP', 'column065': 'BIGINT', 'column066': 'BIGINT', 'column067': 'BIGINT', 'column068': 'BIGINT', 'column069': 'BIGINT', 'column070': 'BIGINT', 'column071': 'BIGINT', 'column072': 'BIGINT', 'column073': 'BIGINT', 'column074': 'VARCHAR', 'column075': 'VARCHAR', 'column076': 'VARCHAR', 'column077': 'VARCHAR', 'column078': 'BIGINT', 'column079': 'BIGINT', 'column080': 'BIGINT', 'column081': 'BIGINT', 'column082': 'BIGINT', 'column083': 'BIGINT', 'column084': 'BIGINT', 'column085': 'BIGINT', 'column086': 'VARCHAR', 'column087': 'BIGINT', 'column088': 'VARCHAR', 'column089': 'VARCHAR', 'column090': 'BIGINT', 'column091': 'VARCHAR', 'column092': 'VARCHAR', 'column093': 'VARCHAR', 'column094': 'VARCHAR', 'column095': 'VARCHAR', 'column096': 'VARCHAR', 'column097': 'VARCHAR', 'column098': 'VARCHAR', 'column099': 'VARCHAR', 'column100': 'VARCHAR', 'column101': 'BIGINT', 'column102': 'BIGINT', 'column103': 'BIGINT', 'column104': 'BIGINT'}, dateformat='%Y-%m-%d'); ---- * Disable the parser's strict mode 
(strict_mode=false) to allow reading rows that do not comply with the CSV standard. statement ok -FROM read_csv('data/csv/hits_problematic.csv', auto_detect=false, delim=',', quote='"', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column000': 'BIGINT', 'column001': 'BIGINT', 'column002': 'VARCHAR', 'column003': 'BIGINT', 'column004': 'TIMESTAMP', 'column005': 'DATE', 'column006': 'BIGINT', 'column007': 'BIGINT', 'column008': 'BIGINT', 'column009': 'BIGINT', 'column010': 'BIGINT', 'column011': 'BIGINT', 'column012': 'BIGINT', 'column013': 'VARCHAR', 'column014': 'VARCHAR', 'column015': 'BIGINT', 'column016': 'BIGINT', 'column017': 'BIGINT', 'column018': 'BIGINT', 'column019': 'BIGINT', 'column020': 'BIGINT', 'column021': 'BIGINT', 'column022': 'BIGINT', 'column023': 'BIGINT', 'column024': 'BIGINT', 'column025': 'DOUBLE', 'column026': 'BIGINT', 'column027': 'BIGINT', 'column028': 'BIGINT', 'column029': 'VARCHAR', 'column030': 'BIGINT', 'column031': 'BIGINT', 'column032': 'BIGINT', 'column033': 'BIGINT', 'column034': 'VARCHAR', 'column035': 'VARCHAR', 'column036': 'BIGINT', 'column037': 'BIGINT', 'column038': 'BIGINT', 'column039': 'VARCHAR', 'column040': 'BIGINT', 'column041': 'BIGINT', 'column042': 'BIGINT', 'column043': 'BIGINT', 'column044': 'BIGINT', 'column045': 'TIMESTAMP', 'column046': 'BIGINT', 'column047': 'BIGINT', 'column048': 'BIGINT', 'column049': 'BIGINT', 'column050': 'VARCHAR', 'column051': 'BIGINT', 'column052': 'BIGINT', 'column053': 'BIGINT', 'column054': 'BIGINT', 'column055': 'BIGINT', 'column056': 'VARCHAR', 'column057': 'BIGINT', 'column058': 'BIGINT', 'column059': 'BIGINT', 'column060': 'BIGINT', 'column061': 'BIGINT', 'column062': 'BIGINT', 'column063': 'VARCHAR', 'column064': 'TIMESTAMP', 'column065': 'BIGINT', 'column066': 'BIGINT', 'column067': 'BIGINT', 'column068': 'BIGINT', 'column069': 'BIGINT', 'column070': 'BIGINT', 'column071': 'BIGINT', 'column072': 'BIGINT', 'column073': 'BIGINT', 'column074': 'VARCHAR', 'column075': 'VARCHAR', 'column076': 'VARCHAR', 'column077': 'VARCHAR', 'column078': 'BIGINT', 'column079': 'BIGINT', 'column080': 'BIGINT', 'column081': 'BIGINT', 'column082': 'BIGINT', 'column083': 'BIGINT', 'column084': 'BIGINT', 'column085': 'BIGINT', 'column086': 'VARCHAR', 'column087': 'BIGINT', 'column088': 'VARCHAR', 'column089': 'VARCHAR', 'column090': 'BIGINT', 'column091': 'VARCHAR', 'column092': 'VARCHAR', 'column093': 'VARCHAR', 'column094': 'VARCHAR', 'column095': 'VARCHAR', 'column096': 'VARCHAR', 'column097': 'VARCHAR', 'column098': 'VARCHAR', 'column099': 'VARCHAR', 'column100': 'VARCHAR', 'column101': 'BIGINT', 'column102': 'BIGINT', 'column103': 'BIGINT', 'column104': 'BIGINT'}, dateformat='%Y-%m-%d', strict_mode = False); \ No newline at end of file +FROM read_csv('{DATA_DIR}/csv/hits_problematic.csv', auto_detect=false, delim=',', quote='"', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column000': 'BIGINT', 'column001': 'BIGINT', 'column002': 'VARCHAR', 'column003': 'BIGINT', 'column004': 'TIMESTAMP', 'column005': 'DATE', 'column006': 'BIGINT', 'column007': 'BIGINT', 'column008': 'BIGINT', 'column009': 'BIGINT', 'column010': 'BIGINT', 'column011': 'BIGINT', 'column012': 'BIGINT', 'column013': 'VARCHAR', 'column014': 'VARCHAR', 'column015': 'BIGINT', 'column016': 'BIGINT', 'column017': 'BIGINT', 'column018': 'BIGINT', 'column019': 'BIGINT', 'column020': 'BIGINT', 'column021': 'BIGINT', 'column022': 'BIGINT', 'column023': 'BIGINT', 'column024': 'BIGINT', 'column025': 'DOUBLE', 
'column026': 'BIGINT', 'column027': 'BIGINT', 'column028': 'BIGINT', 'column029': 'VARCHAR', 'column030': 'BIGINT', 'column031': 'BIGINT', 'column032': 'BIGINT', 'column033': 'BIGINT', 'column034': 'VARCHAR', 'column035': 'VARCHAR', 'column036': 'BIGINT', 'column037': 'BIGINT', 'column038': 'BIGINT', 'column039': 'VARCHAR', 'column040': 'BIGINT', 'column041': 'BIGINT', 'column042': 'BIGINT', 'column043': 'BIGINT', 'column044': 'BIGINT', 'column045': 'TIMESTAMP', 'column046': 'BIGINT', 'column047': 'BIGINT', 'column048': 'BIGINT', 'column049': 'BIGINT', 'column050': 'VARCHAR', 'column051': 'BIGINT', 'column052': 'BIGINT', 'column053': 'BIGINT', 'column054': 'BIGINT', 'column055': 'BIGINT', 'column056': 'VARCHAR', 'column057': 'BIGINT', 'column058': 'BIGINT', 'column059': 'BIGINT', 'column060': 'BIGINT', 'column061': 'BIGINT', 'column062': 'BIGINT', 'column063': 'VARCHAR', 'column064': 'TIMESTAMP', 'column065': 'BIGINT', 'column066': 'BIGINT', 'column067': 'BIGINT', 'column068': 'BIGINT', 'column069': 'BIGINT', 'column070': 'BIGINT', 'column071': 'BIGINT', 'column072': 'BIGINT', 'column073': 'BIGINT', 'column074': 'VARCHAR', 'column075': 'VARCHAR', 'column076': 'VARCHAR', 'column077': 'VARCHAR', 'column078': 'BIGINT', 'column079': 'BIGINT', 'column080': 'BIGINT', 'column081': 'BIGINT', 'column082': 'BIGINT', 'column083': 'BIGINT', 'column084': 'BIGINT', 'column085': 'BIGINT', 'column086': 'VARCHAR', 'column087': 'BIGINT', 'column088': 'VARCHAR', 'column089': 'VARCHAR', 'column090': 'BIGINT', 'column091': 'VARCHAR', 'column092': 'VARCHAR', 'column093': 'VARCHAR', 'column094': 'VARCHAR', 'column095': 'VARCHAR', 'column096': 'VARCHAR', 'column097': 'VARCHAR', 'column098': 'VARCHAR', 'column099': 'VARCHAR', 'column100': 'VARCHAR', 'column101': 'BIGINT', 'column102': 'BIGINT', 'column103': 'BIGINT', 'column104': 'BIGINT'}, dateformat='%Y-%m-%d', strict_mode = False); \ No newline at end of file diff --git a/test/sql/copy/csv/test_ignore_errors.test b/test/sql/copy/csv/test_ignore_errors.test index 23d4c794f30d..f28bc198e246 100644 --- a/test/sql/copy/csv/test_ignore_errors.test +++ b/test/sql/copy/csv/test_ignore_errors.test @@ -9,10 +9,10 @@ statement ok CREATE TABLE integers(i INTEGER, j INTEGER); statement ok -COPY integers FROM 'data/csv/test/error_too_little.csv' (HEADER, IGNORE_ERRORS, NULL_PADDING FALSE) +COPY integers FROM '{DATA_DIR}/csv/test/error_too_little.csv' (HEADER, IGNORE_ERRORS, NULL_PADDING FALSE) statement error -COPY integers FROM 'data/csv/test/error_too_little.csv' (HEADER, NULL_PADDING FALSE) +COPY integers FROM '{DATA_DIR}/csv/test/error_too_little.csv' (HEADER, NULL_PADDING FALSE) ---- It was not possible to automatically detect the CSV parsing dialect @@ -30,10 +30,10 @@ statement ok DELETE FROM integers; statement ok -INSERT INTO integers SELECT * FROM read_csv('data/csv/test/error_too_little.csv', columns={'i': 'INTEGER', 'j': 'INTEGER'}, ignore_errors=1, null_padding=0) +INSERT INTO integers SELECT * FROM read_csv('{DATA_DIR}/csv/test/error_too_little.csv', columns={'i': 'INTEGER', 'j': 'INTEGER'}, ignore_errors=1, null_padding=0) statement error -INSERT INTO integers SELECT * FROM read_csv('data/csv/test/error_too_little.csv', columns={'i': 'INTEGER'}, null_padding=0) +INSERT INTO integers SELECT * FROM read_csv('{DATA_DIR}/csv/test/error_too_little.csv', columns={'i': 'INTEGER'}, null_padding=0) ---- table integers has 2 columns but 1 values were supplied @@ -51,10 +51,10 @@ statement ok DELETE FROM integers; statement ok -COPY integers FROM 
'data/csv/test/error_too_little_single.csv' (HEADER, IGNORE_ERRORS, NULL_PADDING 0) +COPY integers FROM '{DATA_DIR}/csv/test/error_too_little_single.csv' (HEADER, IGNORE_ERRORS, NULL_PADDING 0) statement error -COPY integers FROM 'data/csv/test/error_too_little_single.csv' (HEADER, NULL_PADDING 0) +COPY integers FROM '{DATA_DIR}/csv/test/error_too_little_single.csv' (HEADER, NULL_PADDING 0) ---- It was not possible to automatically detect the CSV parsing dialect @@ -68,10 +68,10 @@ statement ok DELETE FROM integers; statement ok -COPY integers FROM 'data/csv/test/error_too_many.csv' (HEADER, IGNORE_ERRORS, SAMPLE_SIZE -1, strict_mode TRUE) +COPY integers FROM '{DATA_DIR}/csv/test/error_too_many.csv' (HEADER, IGNORE_ERRORS, SAMPLE_SIZE -1, strict_mode TRUE) statement error -COPY integers FROM 'data/csv/test/error_too_many.csv' (HEADER) +COPY integers FROM '{DATA_DIR}/csv/test/error_too_many.csv' (HEADER) ---- It was not possible to automatically detect the CSV parsing dialect @@ -89,10 +89,10 @@ statement ok DELETE FROM integers; statement ok -COPY integers FROM 'data/csv/test/error_invalid_type.csv' (HEADER, IGNORE_ERRORS) +COPY integers FROM '{DATA_DIR}/csv/test/error_invalid_type.csv' (HEADER, IGNORE_ERRORS) statement error -COPY integers FROM 'data/csv/test/error_invalid_type.csv' (HEADER) +COPY integers FROM '{DATA_DIR}/csv/test/error_invalid_type.csv' (HEADER) ---- This type was either manually set or derived from an existing table. Select a different type to correctly parse this column. @@ -110,7 +110,7 @@ statement ok CREATE TABLE nullable_type (col_a INTEGER, col_b VARCHAR(10), col_c VARCHAR(10), col_d VARCHAR(10)); statement ok -COPY nullable_type FROM 'data/csv/test/test_incompatible_type_with_nullable.csv' +COPY nullable_type FROM '{DATA_DIR}/csv/test/test_incompatible_type_with_nullable.csv' query IIII SELECT * FROM nullable_type @@ -119,7 +119,7 @@ SELECT * FROM nullable_type 2 NULL test null query IIIIIIIIIIII -FROM read_csv('data/csv/titanic.csv', ignore_errors=1) limit 10 +FROM read_csv('{DATA_DIR}/csv/titanic.csv', ignore_errors=1) limit 10 ---- 1 0 3 Braund, Mr. Owen Harris male 22.0 1 0 A/5 21171 7.25 NULL S 2 1 1 Cumings, Mrs. John Bradley (Florence Briggs Thayer) female 38.0 1 0 PC 17599 71.2833 C85 C @@ -134,16 +134,16 @@ FROM read_csv('data/csv/titanic.csv', ignore_errors=1) limit 10 # If we can't parse even one row, we can't sniff it. 
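# Note on the two records below: columns pins the schema to a single VARCHAR
# column, which no dialect can satisfy for this file, while types only
# overrides the type of a sniffed column, so dialect detection still succeeds.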
statement error -SELECT * FROM read_csv('data/csv/test_ignore_errors.csv', columns = {'Order ref ID': 'VARCHAR'}, delim = ',', ignore_errors=true); +SELECT * FROM read_csv('{DATA_DIR}/csv/test_ignore_errors.csv', columns = {'Order ref ID': 'VARCHAR'}, delim = ',', ignore_errors=true); ---- It was not possible to automatically detect the CSV parsing dialect query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII -SELECT * FROM read_csv('data/csv/test_ignore_errors.csv', types = {'Order ref ID': 'VARCHAR'}, ignore_errors=true); +SELECT * FROM read_csv('{DATA_DIR}/csv/test_ignore_errors.csv', types = {'Order ref ID': 'VARCHAR'}, ignore_errors=true); ---- 40243121-bechamelfoodsin@gmail_com_item_01_2024.csv 523944955 163178211923806 163178211923806 swiggy Completed AMD_VASTRAPUR_JUNOS NFD - - AMD_VASTRAPUR_JUNOS_swiggy_JP 675029 NFD NFD NFD - 2024-01-02 20:46:54 2235571 474092 Exotica Pizza exotica pizza 1 539.0 26.95 aggregator 10.0 575.95 Medium | 1mm" Thin Crust 1797632 | 1876675 None Ahmedabad JP 1.0 0.0 27.45 10.0 0.0 27.45 10.0 Cooked 996331 5.0 0.0 549.0 query IIIIIIIIIIIIIIIIIIIIIIIII -FROM read_csv('data/csv/rejects_sniffer.csv',ignore_errors = true) +FROM read_csv('{DATA_DIR}/csv/rejects_sniffer.csv',ignore_errors = true) ---- Dummy Naam DUMMYREKENINGNUMMER 1234567890 Dummy Transactiestatus USD 123,45 DUMMYBATCHNAAM 2023-01-01 2023-01-01 USD DUMMYEIGENREKENING Dummy Eigen Rekening Naam Dummy Batchtype Dummy Batchstatus dummy_file.xml DUMMYCHECKSUMSHA1 DUMMYCHECKSUMSHA2 false diff --git a/test/sql/copy/csv/test_ignore_errors_end_of_chunk.test b/test/sql/copy/csv/test_ignore_errors_end_of_chunk.test index 4141b62be90c..4d86b6a30160 100644 --- a/test/sql/copy/csv/test_ignore_errors_end_of_chunk.test +++ b/test/sql/copy/csv/test_ignore_errors_end_of_chunk.test @@ -8,10 +8,10 @@ statement ok CREATE TABLE integers(i INTEGER, j INTEGER); statement ok -COPY integers FROM 'data/csv/test/error_too_little_end_of_filled_chunk.csv' (HEADER, IGNORE_ERRORS, NULL_PADDING 0, AUTO_DETECT 0) +COPY integers FROM '{DATA_DIR}/csv/test/error_too_little_end_of_filled_chunk.csv' (HEADER, IGNORE_ERRORS, NULL_PADDING 0, AUTO_DETECT 0) statement error -COPY integers FROM 'data/csv/test/error_too_little_end_of_filled_chunk.csv' (HEADER, NULL_PADDING 0, AUTO_DETECT 0) +COPY integers FROM '{DATA_DIR}/csv/test/error_too_little_end_of_filled_chunk.csv' (HEADER, NULL_PADDING 0, AUTO_DETECT 0) ---- Expected Number of Columns: 2 Found: 1 @@ -36,10 +36,10 @@ statement ok CREATE TABLE integers(i INTEGER, j INTEGER); statement ok -COPY integers FROM 'data/csv/test/error_too_little_end_of_filled_chunk.csv' (HEADER, IGNORE_ERRORS, NULL_PADDING 0) +COPY integers FROM '{DATA_DIR}/csv/test/error_too_little_end_of_filled_chunk.csv' (HEADER, IGNORE_ERRORS, NULL_PADDING 0) statement error -COPY integers FROM 'data/csv/test/error_too_little_end_of_filled_chunk.csv' (HEADER, NULL_PADDING 0) +COPY integers FROM '{DATA_DIR}/csv/test/error_too_little_end_of_filled_chunk.csv' (HEADER, NULL_PADDING 0) ---- It was not possible to automatically detect the CSV parsing dialect diff --git a/test/sql/copy/csv/test_ignore_mid_null_line.test b/test/sql/copy/csv/test_ignore_mid_null_line.test index 4a70387afc37..b5b292f0ec74 100644 --- a/test/sql/copy/csv/test_ignore_mid_null_line.test +++ b/test/sql/copy/csv/test_ignore_mid_null_line.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query III -FROM read_csv('data/csv/error/mid_null.csv', delim = ';', +FROM read_csv('{DATA_DIR}/csv/error/mid_null.csv', delim = ';', columns = 
{'a':'integer','b':'integer','c':'integer'}, auto_detect = false, header = true, ignore_errors = true, strict_mode=True) ---- 1 2 3 diff --git a/test/sql/copy/csv/test_imdb.test b/test/sql/copy/csv/test_imdb.test index 2ec666073073..1243afdeb424 100644 --- a/test/sql/copy/csv/test_imdb.test +++ b/test/sql/copy/csv/test_imdb.test @@ -10,7 +10,7 @@ statement ok CREATE TABLE movie_info (id integer NOT NULL PRIMARY KEY, movie_id integer NOT NULL, info_type_id integer NOT NULL, info text NOT NULL, note text); query I -COPY movie_info FROM 'data/csv/real/imdb_movie_info_escaped.csv' DELIMITER ',' ESCAPE '\'; +COPY movie_info FROM '{DATA_DIR}/csv/real/imdb_movie_info_escaped.csv' DELIMITER ',' ESCAPE '\'; ---- 201 diff --git a/test/sql/copy/csv/test_insert_into_types.test b/test/sql/copy/csv/test_insert_into_types.test index 2a250b0e50f9..05f475ae96e6 100644 --- a/test/sql/copy/csv/test_insert_into_types.test +++ b/test/sql/copy/csv/test_insert_into_types.test @@ -17,7 +17,7 @@ CREATE TABLE users ( statement ok INSERT INTO users SELECT * -FROM read_csv('data/csv/glob/f_*.csv', ignore_errors=true, null_padding=true); +FROM read_csv('{DATA_DIR}/csv/glob/f_*.csv', ignore_errors=true, null_padding=true); query III select * from users order by all; @@ -45,7 +45,7 @@ CREATE TABLE users ( statement ok INSERT INTO users SELECT * -FROM read_csv('data/csv/file_error.csv', ignore_errors=true, null_padding=true); +FROM read_csv('{DATA_DIR}/csv/file_error.csv', ignore_errors=true, null_padding=true); query III select * from users; @@ -62,7 +62,7 @@ CREATE TABLE proj ( statement ok INSERT INTO proj SELECT id -FROM read_csv('data/csv/file_error.csv', ignore_errors=true, null_padding=true); +FROM read_csv('{DATA_DIR}/csv/file_error.csv', ignore_errors=true, null_padding=true); query I select * from proj; @@ -73,7 +73,7 @@ select * from proj; statement error INSERT INTO proj SELECT id -FROM read_csv('data/csv/file_error.csv'); +FROM read_csv('{DATA_DIR}/csv/file_error.csv'); ---- Error when converting column "id". 
Could not convert string "3r" to 'INTEGER' @@ -89,7 +89,7 @@ CREATE TABLE proj ( statement ok INSERT INTO proj SELECT name, id -FROM read_csv('data/csv/file_error.csv', ignore_errors=true, null_padding=true); +FROM read_csv('{DATA_DIR}/csv/file_error.csv', ignore_errors=true, null_padding=true); query II select * from proj; @@ -100,7 +100,7 @@ eve 2 statement error INSERT INTO proj SELECT name, id -FROM read_csv('data/csv/file_error.csv'); +FROM read_csv('{DATA_DIR}/csv/file_error.csv'); ---- Could not convert string "3r" to 'INTEGER' @@ -116,7 +116,7 @@ CREATE TABLE proj ( statement ok INSERT INTO proj SELECT email, id -FROM read_csv('data/csv/file_error.csv', ignore_errors=true, null_padding=true); +FROM read_csv('{DATA_DIR}/csv/file_error.csv', ignore_errors=true, null_padding=true); query II select * from proj; @@ -127,7 +127,7 @@ NULL 2 statement error INSERT INTO proj SELECT name, id -FROM read_csv('data/csv/file_error.csv'); +FROM read_csv('{DATA_DIR}/csv/file_error.csv'); ---- Could not convert string "3r" to 'INTEGER' @@ -144,7 +144,7 @@ CREATE TABLE proj ( statement error INSERT INTO proj SELECT name, id::INTEGER -FROM read_csv('data/csv/file_error.csv'); +FROM read_csv('{DATA_DIR}/csv/file_error.csv'); ---- Could not convert string '3r' to INT32 @@ -161,7 +161,7 @@ CREATE TABLE proj ( statement error INSERT INTO proj SELECT 'Pedro', id -FROM read_csv('data/csv/file_error.csv'); +FROM read_csv('{DATA_DIR}/csv/file_error.csv'); ---- Could not convert string '3r' to INT32 @@ -177,14 +177,14 @@ insert into ppl values ('alice'), ('bob'), ('pedro') statement error INSERT INTO proj SELECT ppl.name,id -FROM read_csv('data/csv/file_error.csv') T inner join ppl on (ppl.name = T.name); +FROM read_csv('{DATA_DIR}/csv/file_error.csv') T inner join ppl on (ppl.name = T.name); ---- Could not convert string '3r' to INT32 statement error INSERT INTO proj SELECT T.name,id -FROM read_csv('data/csv/file_error.csv') T inner join ppl on (ppl.name = T.name); +FROM read_csv('{DATA_DIR}/csv/file_error.csv') T inner join ppl on (ppl.name = T.name); ---- Could not convert string '3r' to INT32 @@ -202,7 +202,7 @@ CREATE TABLE users ( statement ok INSERT INTO users SELECT * -FROM read_csv('data/csv/glob/f_*.csv', ignore_errors=true, null_padding=true); +FROM read_csv('{DATA_DIR}/csv/glob/f_*.csv', ignore_errors=true, null_padding=true); query III select * from users order by all; @@ -228,7 +228,7 @@ CREATE TABLE proj ( statement ok INSERT INTO proj SELECT email, id -FROM read_csv('data/csv/glob/f_*.csv', ignore_errors=true, null_padding=true); +FROM read_csv('{DATA_DIR}/csv/glob/f_*.csv', ignore_errors=true, null_padding=true); query II select * from proj order by all; @@ -252,7 +252,7 @@ CREATE TABLE users_age ( statement ok INSERT INTO users_age SELECT * -FROM read_csv('data/csv/union-by-name/type_mismatch/f_*.csv', ignore_errors=true, null_padding=true, union_by_name=true); +FROM read_csv('{DATA_DIR}/csv/union-by-name/type_mismatch/f_*.csv', ignore_errors=true, null_padding=true, union_by_name=true); query IIII select * from users_age; @@ -267,7 +267,7 @@ statement ok create table timestamps(ts timestamp, dt date); statement ok -insert into timestamps select ts,ts from read_csv('data/csv/timestamp.csv'); +insert into timestamps select ts,ts from read_csv('{DATA_DIR}/csv/timestamp.csv'); query II from timestamps; diff --git a/test/sql/copy/csv/test_issue3562_assertion.test b/test/sql/copy/csv/test_issue3562_assertion.test index d4adefedb80b..50b7458e7e2d 100644 --- 
a/test/sql/copy/csv/test_issue3562_assertion.test +++ b/test/sql/copy/csv/test_issue3562_assertion.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification statement ok -create table test as SELECT * FROM read_csv('data/csv/test/issue3562_assertion.csv.gz', columns={'OBJECTID': 'DECIMAL(18,3)', 'URL': 'TEXT', 'NAME': 'TEXT', 'the_geom':'TEXT', 'LINE': 'TEXT'}); +create table test as SELECT * FROM read_csv('{DATA_DIR}/csv/test/issue3562_assertion.csv.gz', columns={'OBJECTID': 'DECIMAL(18,3)', 'URL': 'TEXT', 'NAME': 'TEXT', 'the_geom':'TEXT', 'LINE': 'TEXT'}); query II select objectid, name from test ORDER BY objectid limit 10 diff --git a/test/sql/copy/csv/test_issue5077.test b/test/sql/copy/csv/test_issue5077.test index f34bd9030242..c5cb636d0f2a 100644 --- a/test/sql/copy/csv/test_issue5077.test +++ b/test/sql/copy/csv/test_issue5077.test @@ -6,17 +6,17 @@ statement ok PRAGMA enable_verification query III -select * from 'data/csv/issue5077_aligned.csv' +select * from '{DATA_DIR}/csv/issue5077_aligned.csv' ---- d e false query III -select * from read_csv('data/csv/issue5077.csv', header=0) +select * from read_csv('{DATA_DIR}/csv/issue5077.csv', header=0) ---- c d e query IIIIIIIIIIIIIIII -select * from 'data/csv/lineitem-carriage.csv'; +select * from '{DATA_DIR}/csv/lineitem-carriage.csv'; ---- 1 1552 93 1 17 24710.35 0.04 0.02 N O 1996-03-13 1996-02-12 1996-03-22 DELIVER IN PERSON TRUCK egular courts above the 1 674 75 2 36 56688.12 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL ly final dependencies: slyly bold diff --git a/test/sql/copy/csv/test_lineitem.test b/test/sql/copy/csv/test_lineitem.test index db6d0dca6e21..18708294e5c6 100644 --- a/test/sql/copy/csv/test_lineitem.test +++ b/test/sql/copy/csv/test_lineitem.test @@ -9,7 +9,7 @@ statement ok CREATE TABLE lineitem(l_orderkey INT NOT NULL, l_partkey INT NOT NULL, l_suppkey INT NOT NULL, l_linenumber INT NOT NULL, l_quantity INTEGER NOT NULL, l_extendedprice DECIMAL(15,2) NOT NULL, l_discount DECIMAL(15,2) NOT NULL, l_tax DECIMAL(15,2) NOT NULL, l_returnflag VARCHAR(1) NOT NULL, l_linestatus VARCHAR(1) NOT NULL, l_shipdate DATE NOT NULL, l_commitdate DATE NOT NULL, l_receiptdate DATE NOT NULL, l_shipinstruct VARCHAR(25) NOT NULL, l_shipmode VARCHAR(10) NOT NULL, l_comment VARCHAR(44) NOT NULL); query I -COPY lineitem FROM 'data/csv/real/lineitem_sample.csv' DELIMITER '|' +COPY lineitem FROM '{DATA_DIR}/csv/real/lineitem_sample.csv' DELIMITER '|' ---- 10 @@ -25,7 +25,7 @@ SELECT l_partkey, l_comment FROM lineitem WHERE l_orderkey=1 ORDER BY l_linenumb # test COPY ... TO ... 
with HEADER query I -COPY lineitem TO '__TEST_DIR__/lineitem.csv' (DELIMITER ' ', HEADER); +COPY lineitem TO '{TEMP_DIR}/lineitem.csv' (DELIMITER ' ', HEADER); ---- 10 @@ -39,7 +39,7 @@ SELECT * FROM lineitem; # now copy back into the table query I -COPY lineitem FROM '__TEST_DIR__/lineitem.csv' DELIMITER ' ' HEADER; +COPY lineitem FROM '{TEMP_DIR}/lineitem.csv' DELIMITER ' ' HEADER; ---- 10 diff --git a/test/sql/copy/csv/test_linesize.test b/test/sql/copy/csv/test_linesize.test index 4f2484c63b0b..02f520a0829f 100644 --- a/test/sql/copy/csv/test_linesize.test +++ b/test/sql/copy/csv/test_linesize.test @@ -6,6 +6,6 @@ statement ok pragma enable_verification; statement error -FROM read_csv('data/csv/15473.csv', max_line_size = 10) +FROM read_csv('{DATA_DIR}/csv/15473.csv', max_line_size = 10) ---- Possible Solution: Change the maximum length size, e.g., max_line_size=19 diff --git a/test/sql/copy/csv/test_long_line.test b/test/sql/copy/csv/test_long_line.test index 85b702c6b0d3..607f1d51c457 100644 --- a/test/sql/copy/csv/test_long_line.test +++ b/test/sql/copy/csv/test_long_line.test @@ -11,7 +11,7 @@ statement ok CREATE TABLE test (a INTEGER, b VARCHAR, c INTEGER); query I -COPY test FROM 'data/csv/test/test_long_line.csv'; +COPY test FROM '{DATA_DIR}/csv/test/test_long_line.csv'; ---- 2 diff --git a/test/sql/copy/csv/test_many_columns.test b/test/sql/copy/csv/test_many_columns.test index 4647d34af223..57ac0ebd498a 100644 --- a/test/sql/copy/csv/test_many_columns.test +++ b/test/sql/copy/csv/test_many_columns.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification statement ok -CREATE TABLE t AS SELECT * FROM read_csv_auto('data/csv/manycolumns.csv'); +CREATE TABLE t AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/manycolumns.csv'); statement ok SHOW t diff --git a/test/sql/copy/csv/test_mismatch_schemas.test b/test/sql/copy/csv/test_mismatch_schemas.test index 57b73edc6348..2a5eb80268ca 100644 --- a/test/sql/copy/csv/test_mismatch_schemas.test +++ b/test/sql/copy/csv/test_mismatch_schemas.test @@ -7,8 +7,8 @@ PRAGMA enable_verification # We will use the header from the first file (the one identified in the binder) as our schema. If other files have extra columns, these columns will be ignored query III -SELECT * FROM read_csv(['data/csv/multiple_files/more_columns/file_1.csv','data/csv/multiple_files/more_columns/file_2.csv', -'data/csv/multiple_files/more_columns/file_3.csv','data/csv/multiple_files/more_columns/file_4.csv']) +SELECT * FROM read_csv(['{DATA_DIR}/csv/multiple_files/more_columns/file_1.csv','{DATA_DIR}/csv/multiple_files/more_columns/file_2.csv', +'{DATA_DIR}/csv/multiple_files/more_columns/file_3.csv','{DATA_DIR}/csv/multiple_files/more_columns/file_4.csv']) ORDER BY ALL; ---- 1 2 3 @@ -22,30 +22,30 @@ ORDER BY ALL; # If there is a mismatch, all files must have a header, or error. 
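# file_no_header.csv below is exactly that case: it adds columns but carries no
# header row to match against, so the read fails and reports the offending file.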
statement error -SELECT * FROM read_csv(['data/csv/multiple_files/more_columns/file_1.csv','data/csv/multiple_files/more_columns/file_2.csv', -'data/csv/multiple_files/more_columns/file_3.csv','data/csv/multiple_files/more_columns/file_no_header.csv']) +SELECT * FROM read_csv(['{DATA_DIR}/csv/multiple_files/more_columns/file_1.csv','{DATA_DIR}/csv/multiple_files/more_columns/file_2.csv', +'{DATA_DIR}/csv/multiple_files/more_columns/file_3.csv','{DATA_DIR}/csv/multiple_files/more_columns/file_no_header.csv']) ORDER BY ALL; ---- -Current file: data/csv/multiple_files/more_columns/file_no_header.csv +Current file: {DATA_DIR}/csv/multiple_files/more_columns/file_no_header.csv # What if we add file 5, has 3 columns but with a name mismatch statement error -SELECT * FROM read_csv(['data/csv/multiple_files/more_columns/file_1.csv','data/csv/multiple_files/more_columns/file_2.csv', -'data/csv/multiple_files/more_columns/file_3.csv','data/csv/multiple_files/more_columns/file_5.csv']) +SELECT * FROM read_csv(['{DATA_DIR}/csv/multiple_files/more_columns/file_1.csv','{DATA_DIR}/csv/multiple_files/more_columns/file_2.csv', +'{DATA_DIR}/csv/multiple_files/more_columns/file_3.csv','{DATA_DIR}/csv/multiple_files/more_columns/file_5.csv']) ORDER BY ALL; ---- Column with name: "c" is missing # file_1 missing column from file_2 that is not ok statement error -SELECT * FROM read_csv(['data/csv/multiple_files/more_columns/file_2.csv','data/csv/multiple_files/more_columns/file_1.csv', -'data/csv/multiple_files/more_columns/file_3.csv','data/csv/multiple_files/more_columns/file_4.csv']) +SELECT * FROM read_csv(['{DATA_DIR}/csv/multiple_files/more_columns/file_2.csv','{DATA_DIR}/csv/multiple_files/more_columns/file_1.csv', +'{DATA_DIR}/csv/multiple_files/more_columns/file_3.csv','{DATA_DIR}/csv/multiple_files/more_columns/file_4.csv']) ---- Column with name: "d" is missing # Test files with different order query III -SELECT * FROM read_csv(['data/csv/multiple_files/different_order/file_1.csv','data/csv/multiple_files/different_order/file_2.csv']) +SELECT * FROM read_csv(['{DATA_DIR}/csv/multiple_files/different_order/file_1.csv','{DATA_DIR}/csv/multiple_files/different_order/file_2.csv']) ORDER BY ALL; ---- 1 2 3 @@ -55,7 +55,7 @@ ORDER BY ALL; # Test that minimal sniffer properly detects types of new columns query III -SELECT * FROM read_csv(['data/csv/multiple_files/more_columns/file_1.csv','data/csv/multiple_files/more_columns/file_6.csv']) +SELECT * FROM read_csv(['{DATA_DIR}/csv/multiple_files/more_columns/file_1.csv','{DATA_DIR}/csv/multiple_files/more_columns/file_6.csv']) ORDER BY ALL; ---- 1 2 3 @@ -68,13 +68,13 @@ ORDER BY ALL; 1 2 3 statement error -SELECT * FROM read_csv(['data/csv/multiple_files/more_columns/file_6.csv','data/csv/multiple_files/more_columns/file_1.csv']) +SELECT * FROM read_csv(['{DATA_DIR}/csv/multiple_files/more_columns/file_6.csv','{DATA_DIR}/csv/multiple_files/more_columns/file_1.csv']) ORDER BY ALL; ---- Column with name: "d" is missing query IIII -SELECT * FROM read_csv(['data/csv/multiple_files/more_columns/file_6.csv','data/csv/multiple_files/more_columns/file_6.csv']) +SELECT * FROM read_csv(['{DATA_DIR}/csv/multiple_files/more_columns/file_6.csv','{DATA_DIR}/csv/multiple_files/more_columns/file_6.csv']) ORDER BY ALL; ---- 1 2 3 4.0 @@ -88,4 +88,4 @@ ORDER BY ALL; 1 2 3 4.0 1 2 3 4.0 1 2 3 4.9 -1 2 3 4.9 \ No newline at end of file +1 2 3 4.9 diff --git a/test/sql/copy/csv/test_missing_row.test b/test/sql/copy/csv/test_missing_row.test index 32f8b8fcfbf5..e0108b2b61e3 100644 
--- a/test/sql/copy/csv/test_missing_row.test +++ b/test/sql/copy/csv/test_missing_row.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query IIIIIIII -FROM read_csv('data/csv/customer.4.csv', auto_detect=false, delim='|', quote='"', escape='"', new_line='\n', skip=0, header=false, columns = {'c_custkey': 'BIGINT', 'c_name': 'VARCHAR', 'c_address': 'VARCHAR', 'c_nationkey': 'INTEGER', 'c_phone': 'VARCHAR', 'c_acctbal': 'DECIMAL(15, 2)', 'c_mktsegment': 'VARCHAR', 'c_comment': 'VARCHAR'}, parallel=true, buffer_size = 300); +FROM read_csv('{DATA_DIR}/csv/customer.4.csv', auto_detect=false, delim='|', quote='"', escape='"', new_line='\n', skip=0, header=false, columns = {'c_custkey': 'BIGINT', 'c_name': 'VARCHAR', 'c_address': 'VARCHAR', 'c_nationkey': 'INTEGER', 'c_phone': 'VARCHAR', 'c_acctbal': 'DECIMAL(15, 2)', 'c_mktsegment': 'VARCHAR', 'c_comment': 'VARCHAR'}, parallel=true, buffer_size = 300); ---- 750001 Customer#000750001 ocIz 2S9MsEyfkL 6 16-182-876-9496 4522.76 FURNITURE dolites alongside of the furiously pending theodolites affix closely idly bold instruction 750002 Customer#000750002 Y9eOW Ena8pVx 15 25-241-686-3974 1969.87 BUILDING ide of the slyly express hockey players. slyly ironic dependencies boost furiou diff --git a/test/sql/copy/csv/test_mixed_line_endings.test b/test/sql/copy/csv/test_mixed_line_endings.test index 8c57a1918df9..9d4efe23e519 100644 --- a/test/sql/copy/csv/test_mixed_line_endings.test +++ b/test/sql/copy/csv/test_mixed_line_endings.test @@ -11,6 +11,6 @@ statement ok CREATE TABLE test (a INTEGER, b VARCHAR, c INTEGER); query I -insert into test select * from read_csv_auto('data/csv/test/mixed_line_endings.csv', strict_mode=false); +insert into test select * from read_csv_auto('{DATA_DIR}/csv/test/mixed_line_endings.csv', strict_mode=false); ---- 3 diff --git a/test/sql/copy/csv/test_mixed_new_line.test b/test/sql/copy/csv/test_mixed_new_line.test index 5175ce3a58bc..95b6be0f21e1 100644 --- a/test/sql/copy/csv/test_mixed_new_line.test +++ b/test/sql/copy/csv/test_mixed_new_line.test @@ -8,7 +8,7 @@ statement ok pragma enable_verification; query III -from read_csv('data/csv/mixed_new_line.csv', strict_mode=false) +from read_csv('{DATA_DIR}/csv/mixed_new_line.csv', strict_mode=false) ---- 1 2 3 4 5 6 @@ -17,7 +17,7 @@ from read_csv('data/csv/mixed_new_line.csv', strict_mode=false) foreach newline '\r' '\n' '\r\n' query III -from read_csv('data/csv/mixed_new_line.csv', new_line = ${newline}, strict_mode = false) +from read_csv('{DATA_DIR}/csv/mixed_new_line.csv', new_line = ${newline}, strict_mode = false) ---- 1 2 3 4 5 6 @@ -27,66 +27,66 @@ from read_csv('data/csv/mixed_new_line.csv', new_line = ${newline}, strict_mode endloop statement error -from read_csv('data/csv/mixed_new_line.csv', columns = {'a':'integer', 'b':'integer', 'c':'integer'}, new_line = '\r\n', header = false, strict_mode=true, delim = ',') +from read_csv('{DATA_DIR}/csv/mixed_new_line.csv', columns = {'a':'integer', 'b':'integer', 'c':'integer'}, new_line = '\r\n', header = false, strict_mode=true, delim = ',') ---- -Error when sniffing file "data/csv/mixed_new_line.csv". +Error when sniffing file "{DATA_DIR}/csv/mixed_new_line.csv". 
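# With auto_detect = false the sniffer is bypassed entirely, so the same
# mismatch surfaces from the parser's state machine instead of as a sniffing
# error.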
statement error -from read_csv('data/csv/mixed_new_line.csv', columns = {'a':'integer', 'b':'integer', 'c':'integer'}, new_line = '\r\n', header = false, strict_mode=true, delim = ',', auto_detect= false) +from read_csv('{DATA_DIR}/csv/mixed_new_line.csv', columns = {'a':'integer', 'b':'integer', 'c':'integer'}, new_line = '\r\n', header = false, strict_mode=true, delim = ',', auto_detect= false) ---- The CSV Parser state machine reached an invalid state. query I -select count(*) from read_csv('data/csv/mixed_new_line_2.csv', new_line ='\r\n', strict_mode = False, columns = {'a':'varchar', 'b':'varchar'}, delim = ',', ignore_errors = true, header = false) +select count(*) from read_csv('{DATA_DIR}/csv/mixed_new_line_2.csv', new_line ='\r\n', strict_mode = False, columns = {'a':'varchar', 'b':'varchar'}, delim = ',', ignore_errors = true, header = false) ---- 3 statement error -select count(*) from read_csv('data/csv/mixed_new_line_2.csv', new_line ='\r\n', strict_mode = False, columns = {'a':'varchar', 'b':'varchar'}, auto_detect = false, delim = ',') +select count(*) from read_csv('{DATA_DIR}/csv/mixed_new_line_2.csv', new_line ='\r\n', strict_mode = False, columns = {'a':'varchar', 'b':'varchar'}, auto_detect = false, delim = ',') ---- Expected Number of Columns: 2 Found: 1 statement error -SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('data/csv/one_r_two.csv',new_line ='\r\n', strict_mode = TRUE, HEADER = FALSE) +SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('{DATA_DIR}/csv/one_r_two.csv',new_line ='\r\n', strict_mode = TRUE, HEADER = FALSE) ---- Disable the parser's strict mode (strict_mode=false) to allow reading rows that do not comply with the CSV standard. statement error -SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('data/csv/one_r_two.csv',new_line ='\n', strict_mode = TRUE, HEADER = FALSE) +SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('{DATA_DIR}/csv/one_r_two.csv',new_line ='\n', strict_mode = TRUE, HEADER = FALSE) ---- Disable the parser's strict mode (strict_mode=false) to allow reading rows that do not comply with the CSV standard. query I -SELECT column0 FROM read_csv('data/csv/one_r_two.csv',new_line ='\r', strict_mode = TRUE, HEADER = FALSE) +SELECT column0 FROM read_csv('{DATA_DIR}/csv/one_r_two.csv',new_line ='\r', strict_mode = TRUE, HEADER = FALSE) ---- one two statement error -SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('data/csv/one_n_two.csv',new_line ='\r', strict_mode = TRUE, HEADER = FALSE) +SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('{DATA_DIR}/csv/one_n_two.csv',new_line ='\r', strict_mode = TRUE, HEADER = FALSE) ---- Disable the parser's strict mode (strict_mode=false) to allow reading rows that do not comply with the CSV standard. statement error -SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('data/csv/one_n_two.csv',new_line ='\r\n', strict_mode = TRUE, HEADER = FALSE) +SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('{DATA_DIR}/csv/one_n_two.csv',new_line ='\r\n', strict_mode = TRUE, HEADER = FALSE) ---- Disable the parser's strict mode (strict_mode=false) to allow reading rows that do not comply with the CSV standard. 
query I -SELECT column0 FROM read_csv('data/csv/one_n_two.csv',new_line ='\n', strict_mode = TRUE, HEADER = FALSE) +SELECT column0 FROM read_csv('{DATA_DIR}/csv/one_n_two.csv',new_line ='\n', strict_mode = TRUE, HEADER = FALSE) ---- one two statement error -SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('data/csv/one_r_n_two.csv',new_line ='\r', strict_mode = TRUE, HEADER = FALSE) +SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('{DATA_DIR}/csv/one_r_n_two.csv',new_line ='\r', strict_mode = TRUE, HEADER = FALSE) ---- Disable the parser's strict mode (strict_mode=false) to allow reading rows that do not comply with the CSV standard. statement error -SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('data/csv/one_r_n_two.csv',new_line ='\n', strict_mode = TRUE, HEADER = FALSE) +SELECT column0 like '%one%' and column0 like '%two%' as success FROM read_csv('{DATA_DIR}/csv/one_r_n_two.csv',new_line ='\n', strict_mode = TRUE, HEADER = FALSE) ---- Disable the parser's strict mode (strict_mode=false) to allow reading rows that do not comply with the CSV standard. diff --git a/test/sql/copy/csv/test_ncvoter.test b/test/sql/copy/csv/test_ncvoter.test index d85c051ecc2f..6cdc87bb8832 100644 --- a/test/sql/copy/csv/test_ncvoter.test +++ b/test/sql/copy/csv/test_ncvoter.test @@ -9,7 +9,7 @@ statement ok CREATE TABLE IF NOT EXISTS ncvoters(county_id INTEGER, county_desc STRING, voter_reg_num STRING,status_cd STRING, voter_status_desc STRING, reason_cd STRING, voter_status_reason_desc STRING, absent_ind STRING, name_prefx_cd STRING,last_name STRING, first_name STRING, midl_name STRING, name_sufx_cd STRING, full_name_rep STRING,full_name_mail STRING, house_num STRING, half_code STRING, street_dir STRING, street_name STRING, street_type_cd STRING, street_sufx_cd STRING, unit_designator STRING, unit_num STRING, res_city_desc STRING,state_cd STRING, zip_code STRING, res_street_address STRING, res_city_state_zip STRING, mail_addr1 STRING, mail_addr2 STRING, mail_addr3 STRING, mail_addr4 STRING, mail_city STRING, mail_state STRING, mail_zipcode STRING, mail_city_state_zip STRING, area_cd STRING, phone_num STRING, full_phone_number STRING, drivers_lic STRING, race_code STRING, race_desc STRING, ethnic_code STRING, ethnic_desc STRING, party_cd STRING, party_desc STRING, sex_code STRING, sex STRING, birth_age STRING, birth_place STRING, registr_dt STRING, precinct_abbrv STRING, precinct_desc STRING,municipality_abbrv STRING, municipality_desc STRING, ward_abbrv STRING, ward_desc STRING, cong_dist_abbrv STRING, cong_dist_desc STRING, super_court_abbrv STRING, super_court_desc STRING, judic_dist_abbrv STRING, judic_dist_desc STRING, nc_senate_abbrv STRING, nc_senate_desc STRING, nc_house_abbrv STRING, nc_house_desc STRING,county_commiss_abbrv STRING, county_commiss_desc STRING, township_abbrv STRING, township_desc STRING,school_dist_abbrv STRING, school_dist_desc STRING, fire_dist_abbrv STRING, fire_dist_desc STRING, water_dist_abbrv STRING, water_dist_desc STRING, sewer_dist_abbrv STRING, sewer_dist_desc STRING, sanit_dist_abbrv STRING, sanit_dist_desc STRING, rescue_dist_abbrv STRING, rescue_dist_desc STRING, munic_dist_abbrv STRING, munic_dist_desc STRING, dist_1_abbrv STRING, dist_1_desc STRING, dist_2_abbrv STRING, dist_2_desc STRING, confidential_ind STRING, age STRING, ncid STRING, vtd_abbrv STRING, vtd_desc STRING); query I -COPY ncvoters FROM 'data/csv/real/ncvoter.csv' DELIMITER ' '; +COPY ncvoters FROM 
'{DATA_DIR}/csv/real/ncvoter.csv' DELIMITER ' '; ---- 10 @@ -28,7 +28,7 @@ SELECT county_id, county_desc, vtd_desc, name_prefx_cd FROM ncvoters; 1 ALAMANCE 064 NULL query I -COPY ncvoters TO '__TEST_DIR__/ncvoter.csv' DELIMITER 'A' HEADER; +COPY ncvoters TO '{TEMP_DIR}/ncvoter.csv' DELIMITER 'A' HEADER; ---- 10 @@ -42,7 +42,7 @@ SELECT * FROM ncvoters; # now copy back into the table query I -COPY ncvoters FROM '__TEST_DIR__/ncvoter.csv' DELIMITER 'A' HEADER; +COPY ncvoters FROM '{TEMP_DIR}/ncvoter.csv' DELIMITER 'A' HEADER; ---- 10 diff --git a/test/sql/copy/csv/test_nfc.test b/test/sql/copy/csv/test_nfc.test index 11a56c0f6760..ecade4e6b4a6 100644 --- a/test/sql/copy/csv/test_nfc.test +++ b/test/sql/copy/csv/test_nfc.test @@ -9,7 +9,7 @@ statement ok CREATE TABLE nfcstrings (s STRING); query I -COPY nfcstrings FROM 'data/csv/test/nfc.csv' (HEADER 0); +COPY nfcstrings FROM '{DATA_DIR}/csv/test/nfc.csv' (HEADER 0); ---- 2 diff --git a/test/sql/copy/csv/test_nfc_suite.test b/test/sql/copy/csv/test_nfc_suite.test index 48d4ac3fdb08..72b00e8400a5 100644 --- a/test/sql/copy/csv/test_nfc_suite.test +++ b/test/sql/copy/csv/test_nfc_suite.test @@ -10,7 +10,7 @@ statement ok CREATE TABLE nfcstrings (source STRING, nfc STRING, nfd STRING); query I -COPY nfcstrings FROM 'data/csv/real/nfc_normalization.csv' (DELIMITER '|', HEADER 0); +COPY nfcstrings FROM '{DATA_DIR}/csv/real/nfc_normalization.csv' (DELIMITER '|', HEADER 0); ---- 18819 @@ -36,7 +36,7 @@ statement ok CREATE TABLE nfcstrings (source STRING, nfc STRING, nfd STRING); query I -COPY nfcstrings FROM 'data/csv/real/nfc_normalization_rn.csv' (DELIMITER '|', HEADER 0); +COPY nfcstrings FROM '{DATA_DIR}/csv/real/nfc_normalization_rn.csv' (DELIMITER '|', HEADER 0); ---- 18819 diff --git a/test/sql/copy/csv/test_non_unicode_header.test b/test/sql/copy/csv/test_non_unicode_header.test index 924ba0fe82f0..d7b76906ecc4 100644 --- a/test/sql/copy/csv/test_non_unicode_header.test +++ b/test/sql/copy/csv/test_non_unicode_header.test @@ -11,7 +11,7 @@ statement ok drop table if exists reject_errors; query IIIIIII -FROM read_csv('data/csv/error/banklist.csv', store_rejects=true) order by all limit 5 +FROM read_csv('{DATA_DIR}/csv/error/banklist.csv', store_rejects=true) order by all limit 5 ---- 1st American State Bank of Minnesota Hancock MN 15448 Community Development Bank, FSB 5-Feb-10 10183 1st Centennial Bank Redlands CA 33025 First California Bank 23-Jan-09 10030 diff --git a/test/sql/copy/csv/test_null_padding_projection.test b/test/sql/copy/csv/test_null_padding_projection.test index 7d91778dfba7..32b534a17809 100644 --- a/test/sql/copy/csv/test_null_padding_projection.test +++ b/test/sql/copy/csv/test_null_padding_projection.test @@ -7,14 +7,14 @@ PRAGMA enable_verification # Test simple null_padding, this will fail because we have a row with more columns than defined statement error -from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER'}, auto_detect = false, strict_mode=True) ---- Expected Number of Columns: 4 Found: 5 # Create a view statement ok -CREATE VIEW np AS from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +CREATE VIEW np AS from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER'}, auto_detect = false, ignore_errors = true, strict_mode=True); # With ignore_errors this should work, with the
last row being ignored @@ -45,34 +45,34 @@ NULL NULL # Now let's try with options that give a const value query IIIII -from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER'}, auto_detect => false, ignore_errors => true, filename => true, strict_mode=True); ---- -10 100 1000 NULL data/csv/nullpadding.csv -10 100 1000 10000 data/csv/nullpadding.csv -10 NULL NULL NULL data/csv/nullpadding.csv -10 100 NULL NULL data/csv/nullpadding.csv +10 100 1000 NULL {DATA_DIR}/csv/nullpadding.csv +10 100 1000 10000 {DATA_DIR}/csv/nullpadding.csv +10 NULL NULL NULL {DATA_DIR}/csv/nullpadding.csv +10 100 NULL NULL {DATA_DIR}/csv/nullpadding.csv query II -select a, filename from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +select a, filename from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER'}, auto_detect => false, ignore_errors => true, filename => true, strict_mode=True); ---- -10 data/csv/nullpadding.csv -10 data/csv/nullpadding.csv -10 data/csv/nullpadding.csv -10 data/csv/nullpadding.csv +10 {DATA_DIR}/csv/nullpadding.csv +10 {DATA_DIR}/csv/nullpadding.csv +10 {DATA_DIR}/csv/nullpadding.csv +10 {DATA_DIR}/csv/nullpadding.csv query I -select filename from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +select filename from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER'}, auto_detect => false, ignore_errors => true, filename => true, strict_mode=True); ---- -data/csv/nullpadding.csv -data/csv/nullpadding.csv -data/csv/nullpadding.csv -data/csv/nullpadding.csv +{DATA_DIR}/csv/nullpadding.csv +{DATA_DIR}/csv/nullpadding.csv +{DATA_DIR}/csv/nullpadding.csv +{DATA_DIR}/csv/nullpadding.csv query I -select a from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +select a from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER'}, auto_detect = false, ignore_errors => true, filename => true, strict_mode=True); ---- 10 @@ -83,7 +83,7 @@ select a from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ # Let's try some filters query IIII -select * from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +select * from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER'}, auto_detect := false, ignore_errors := true, strict_mode=True) where b = 100; ---- @@ -93,23 +93,23 @@ where b = 100; query IIIII -select * from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +select * from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER'}, auto_detect = false, ignore_errors = true, filename = true, strict_mode=True) where a = 10 and d = 10000; ---- -10 100 1000 10000 data/csv/nullpadding.csv +10 100 1000 10000 {DATA_DIR}/csv/nullpadding.csv # Try All Columns query IIIIII -select * from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +select * from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER', 'e': 'INTEGER'}, auto_detect = false, filename = true) where a = 10 and d = 10000; ---- -10 
100 1000 10000 NULL data/csv/nullpadding.csv -10 100 1000 10000 100000 data/csv/nullpadding.csv +10 100 1000 10000 NULL {DATA_DIR}/csv/nullpadding.csv +10 100 1000 10000 100000 {DATA_DIR}/csv/nullpadding.csv query IIIII -select * from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +select * from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER', 'e': 'INTEGER'}, auto_detect := false) ---- 10 100 1000 NULL NULL @@ -120,7 +120,7 @@ select * from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ # Try more columns query IIIIII -select * from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +select * from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER', 'e': 'INTEGER', 'f': 'INTEGER'}, auto_detect = false) ---- 10 100 1000 NULL NULL NULL @@ -131,7 +131,7 @@ select * from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ # Moreee query IIIIIII -select * from read_csv('data/csv/nullpadding.csv',null_padding=true, columns={ +select * from read_csv('{DATA_DIR}/csv/nullpadding.csv',null_padding=true, columns={ 'a': 'INTEGER','b': 'INTEGER','c': 'INTEGER','d': 'INTEGER', 'e': 'INTEGER', 'f': 'INTEGER', 'g': 'INTEGER'}, auto_detect = false) ---- 10 100 1000 NULL NULL NULL NULL @@ -148,7 +148,7 @@ set threads =1 statement ok create view T as SELECT SETTLEMENTDATE, DUID, I, filename, UNIT, CAST(LOWER6SEC AS DOUBLE) AS LOWER6SEC, CAST(LOWER6SECFLAGS AS DOUBLE) AS LOWER6SECFLAGS, CAST(LOWER60SECFLAGS AS DOUBLE) AS LOWER60SECFLAGS, CAST(RAISE5MINACTUALAVAILABILITY AS DOUBLE) AS RAISE5MINACTUALAVAILABILITY, CAST(INTERVENTION AS DOUBLE) AS INTERVENTION, CAST(LOWER5MINFLAGS AS DOUBLE) AS LOWER5MINFLAGS, CAST(RAISEREGAVAILABILITY AS DOUBLE) AS RAISEREGAVAILABILITY, CAST(LOWERREGACTUALAVAILABILITY AS DOUBLE) AS LOWERREGACTUALAVAILABILITY, CAST(VIOLATION60SECDEGREE AS DOUBLE) AS VIOLATION60SECDEGREE, CAST(LOWER60SEC AS DOUBLE) AS LOWER60SEC, CAST(MARGINAL5MINVALUE AS DOUBLE) AS MARGINAL5MINVALUE, CAST(RAISE60SEC AS DOUBLE) AS RAISE60SEC, CAST(RAMPUPRATE AS DOUBLE) AS RAMPUPRATE, CAST(TOTALCLEARED AS DOUBLE) AS TOTALCLEARED, CAST(VIOLATION5MINDEGREE AS DOUBLE) AS VIOLATION5MINDEGREE, CAST(LOWER6SECACTUALAVAILABILITY AS DOUBLE) AS LOWER6SECACTUALAVAILABILITY, CAST(AGCSTATUS AS DOUBLE) AS AGCSTATUS, CAST(RAISE60SECFLAGS AS DOUBLE) AS RAISE60SECFLAGS, CAST("VERSION" AS DOUBLE) AS "VERSION", CAST(RAISE5MINFLAGS AS DOUBLE) AS RAISE5MINFLAGS, CAST(LOWER60SECACTUALAVAILABILITY AS DOUBLE) AS LOWER60SECACTUALAVAILABILITY, CAST(RAMPDOWNRATE AS DOUBLE) AS RAMPDOWNRATE, CAST(RAISE6SECFLAGS AS DOUBLE) AS RAISE6SECFLAGS, CAST(RAISE60SECACTUALAVAILABILITY AS DOUBLE) AS RAISE60SECACTUALAVAILABILITY, CAST(VIOLATIONDEGREE AS DOUBLE) AS VIOLATIONDEGREE, CAST(RAISE5MIN AS DOUBLE) AS RAISE5MIN, CAST(MARGINALVALUE AS DOUBLE) AS MARGINALVALUE, CAST(LOWERREGFLAGS AS DOUBLE) AS LOWERREGFLAGS, CAST(RAISEREG AS DOUBLE) AS RAISEREG, CAST(LOWERREGENABLEMENTMIN AS DOUBLE) AS LOWERREGENABLEMENTMIN, CAST(LOWERREGENABLEMENTMAX AS DOUBLE) AS LOWERREGENABLEMENTMAX, CAST(DISPATCHMODE AS DOUBLE) AS DISPATCHMODE, CAST(VIOLATION6SECDEGREE AS DOUBLE) AS VIOLATION6SECDEGREE, CAST(LOWERREG AS DOUBLE) AS LOWERREG, CAST(LOWERREGAVAILABILITY AS DOUBLE) AS LOWERREGAVAILABILITY, CAST(RAISEREGACTUALAVAILABILITY AS DOUBLE) AS RAISEREGACTUALAVAILABILITY, CAST(RAISEREGFLAGS AS DOUBLE) AS RAISEREGFLAGS, CAST(MARGINAL60SECVALUE AS DOUBLE) 
AS MARGINAL60SECVALUE, CAST(LOWER5MINACTUALAVAILABILITY AS DOUBLE) AS LOWER5MINACTUALAVAILABILITY, CAST(RAISEREGENABLEMENTMAX AS DOUBLE) AS RAISEREGENABLEMENTMAX, CAST(INITIALMW AS DOUBLE) AS INITIALMW, CAST(AVAILABILITY AS DOUBLE) AS AVAILABILITY, CAST(RUNNO AS DOUBLE) AS RUNNO, CAST(RAISE6SECACTUALAVAILABILITY AS DOUBLE) AS RAISE6SECACTUALAVAILABILITY, CAST(MARGINAL6SECVALUE AS DOUBLE) AS MARGINAL6SECVALUE, CAST(RAISE6SEC AS DOUBLE) AS RAISE6SEC, CAST(XX AS DOUBLE) AS XX, CAST(RAISEREGENABLEMENTMIN AS DOUBLE) AS RAISEREGENABLEMENTMIN, CAST(LOWER5MIN AS DOUBLE) AS LOWER5MIN FROM (SELECT * -FROM read_csv(main.list_value('data/csv/public_daily_sample.csv','data/csv/public_daily_sample.csv','data/csv/public_daily_sample.csv') +FROM read_csv(main.list_value('{DATA_DIR}/csv/public_daily_sample.csv','{DATA_DIR}/csv/public_daily_sample.csv','{DATA_DIR}/csv/public_daily_sample.csv') , ("Skip" = 1), ("header" = 0), (all_varchar = 1), ("columns" = main.struct_pack(I := 'VARCHAR', UNIT := 'VARCHAR', XX := 'VARCHAR', "VERSION" := 'VARCHAR', SETTLEMENTDATE := 'VARCHAR', RUNNO := 'VARCHAR', DUID := 'VARCHAR', INTERVENTION := 'VARCHAR', DISPATCHMODE := 'VARCHAR', AGCSTATUS := 'VARCHAR', INITIALMW := 'VARCHAR', TOTALCLEARED := 'VARCHAR', RAMPDOWNRATE := 'VARCHAR', RAMPUPRATE := 'VARCHAR', LOWER5MIN := 'VARCHAR', LOWER60SEC := 'VARCHAR', LOWER6SEC := 'VARCHAR', RAISE5MIN := 'VARCHAR', RAISE60SEC := 'VARCHAR', RAISE6SEC := 'VARCHAR', MARGINAL5MINVALUE := 'VARCHAR', MARGINAL60SECVALUE := 'VARCHAR', MARGINAL6SECVALUE := 'VARCHAR', MARGINALVALUE := 'VARCHAR', VIOLATION5MINDEGREE := 'VARCHAR', VIOLATION60SECDEGREE := 'VARCHAR', VIOLATION6SECDEGREE := 'VARCHAR', VIOLATIONDEGREE := 'VARCHAR', LOWERREG := 'VARCHAR', RAISEREG := 'VARCHAR', AVAILABILITY := 'VARCHAR', RAISE6SECFLAGS := 'VARCHAR', RAISE60SECFLAGS := 'VARCHAR', RAISE5MINFLAGS := 'VARCHAR', RAISEREGFLAGS := 'VARCHAR', LOWER6SECFLAGS := 'VARCHAR', LOWER60SECFLAGS := 'VARCHAR', LOWER5MINFLAGS := 'VARCHAR', LOWERREGFLAGS := 'VARCHAR', RAISEREGAVAILABILITY := 'VARCHAR', RAISEREGENABLEMENTMAX := 'VARCHAR', RAISEREGENABLEMENTMIN := 'VARCHAR', LOWERREGAVAILABILITY := 'VARCHAR', LOWERREGENABLEMENTMAX := 'VARCHAR', LOWERREGENABLEMENTMIN := 'VARCHAR', RAISE6SECACTUALAVAILABILITY := 'VARCHAR', RAISE60SECACTUALAVAILABILITY := 'VARCHAR', RAISE5MINACTUALAVAILABILITY := 'VARCHAR', RAISEREGACTUALAVAILABILITY := 'VARCHAR', LOWER6SECACTUALAVAILABILITY := 'VARCHAR', LOWER60SECACTUALAVAILABILITY := 'VARCHAR', LOWER5MINACTUALAVAILABILITY := 'VARCHAR', LOWERREGACTUALAVAILABILITY := 'VARCHAR')), (filename = 1), (null_padding = CAST('t' AS BOOLEAN)), (ignore_errors = 1), (auto_detect = CAST('f' AS BOOLEAN))) WHERE ((I = 'D') AND (UNIT = 'DUNIT'))); statement ok @@ -159,4 +159,4 @@ SELECT * EXCLUDE (SETTLEMENTDATE, XX, filename, I), CAST(SETTLEMENTDATE AS TIMES query I select count(*) from T_2 ---- -8349 \ No newline at end of file +8349 diff --git a/test/sql/copy/csv/test_null_padding_union.test b/test/sql/copy/csv/test_null_padding_union.test index 916a88ace757..1b3886bc1c3d 100644 --- a/test/sql/copy/csv/test_null_padding_union.test +++ b/test/sql/copy/csv/test_null_padding_union.test @@ -11,7 +11,7 @@ SET default_null_order='nulls_first'; statement ok create view v as SELECT a, b, c, d -FROM read_csv('data/csv/union-by-name/null_padding/*.csv', UNION_BY_NAME=TRUE, sample_size = 1, null_padding = 1) +FROM read_csv('{DATA_DIR}/csv/union-by-name/null_padding/*.csv', UNION_BY_NAME=TRUE, sample_size = 1, null_padding = 1) ORDER BY a,b,c,d query IIII diff --git 
a/test/sql/copy/csv/test_ontime.test b/test/sql/copy/csv/test_ontime.test index 5ec698db64bb..d62d590fad4f 100644 --- a/test/sql/copy/csv/test_ontime.test +++ b/test/sql/copy/csv/test_ontime.test @@ -9,7 +9,7 @@ statement ok CREATE TABLE ontime(year SMALLINT, quarter SMALLINT, month SMALLINT, dayofmonth SMALLINT, dayofweek SMALLINT, flightdate DATE, uniquecarrier CHAR(7), airlineid DECIMAL(8,2), carrier CHAR(2), tailnum VARCHAR(50), flightnum VARCHAR(10), originairportid INTEGER, originairportseqid INTEGER, origincitymarketid INTEGER, origin CHAR(5), origincityname VARCHAR(100), originstate CHAR(2), originstatefips VARCHAR(10), originstatename VARCHAR(100), originwac DECIMAL(8,2), destairportid INTEGER, destairportseqid INTEGER, destcitymarketid INTEGER, dest CHAR(5), destcityname VARCHAR(100), deststate CHAR(2), deststatefips VARCHAR(10), deststatename VARCHAR(100), destwac DECIMAL(8,2), crsdeptime DECIMAL(8,2), deptime DECIMAL(8,2), depdelay DECIMAL(8,2), depdelayminutes DECIMAL(8,2), depdel15 DECIMAL(8,2), departuredelaygroups DECIMAL(8,2), deptimeblk VARCHAR(20), taxiout DECIMAL(8,2), wheelsoff DECIMAL(8,2), wheelson DECIMAL(8,2), taxiin DECIMAL(8,2), crsarrtime DECIMAL(8,2), arrtime DECIMAL(8,2), arrdelay DECIMAL(8,2), arrdelayminutes DECIMAL(8,2), arrdel15 DECIMAL(8,2), arrivaldelaygroups DECIMAL(8,2), arrtimeblk VARCHAR(20), cancelled SMALLINT, cancellationcode CHAR(1), diverted SMALLINT, crselapsedtime DECIMAL(8,2), actualelapsedtime DECIMAL(8,2), airtime DECIMAL(8,2), flights DECIMAL(8,2), distance DECIMAL(8,2), distancegroup SMALLINT, carrierdelay DECIMAL(8,2), weatherdelay DECIMAL(8,2), nasdelay DECIMAL(8,2), securitydelay DECIMAL(8,2), lateaircraftdelay DECIMAL(8,2), firstdeptime VARCHAR(10), totaladdgtime VARCHAR(10), longestaddgtime VARCHAR(10), divairportlandings VARCHAR(10), divreacheddest VARCHAR(10), divactualelapsedtime VARCHAR(10), divarrdelay VARCHAR(10), divdistance VARCHAR(10), div1airport VARCHAR(10), div1aiportid INTEGER, div1airportseqid INTEGER, div1wheelson VARCHAR(10), div1totalgtime VARCHAR(10), div1longestgtime VARCHAR(10), div1wheelsoff VARCHAR(10), div1tailnum VARCHAR(10), div2airport VARCHAR(10), div2airportid INTEGER, div2airportseqid INTEGER, div2wheelson VARCHAR(10), div2totalgtime VARCHAR(10), div2longestgtime VARCHAR(10), div2wheelsoff VARCHAR(10), div2tailnum VARCHAR(10), div3airport VARCHAR(10), div3airportid INTEGER, div3airportseqid INTEGER, div3wheelson VARCHAR(10), div3totalgtime VARCHAR(10), div3longestgtime VARCHAR(10), div3wheelsoff VARCHAR(10), div3tailnum VARCHAR(10), div4airport VARCHAR(10), div4airportid INTEGER, div4airportseqid INTEGER, div4wheelson VARCHAR(10), div4totalgtime VARCHAR(10), div4longestgtime VARCHAR(10), div4wheelsoff VARCHAR(10), div4tailnum VARCHAR(10), div5airport VARCHAR(10), div5airportid INTEGER, div5airportseqid INTEGER, div5wheelson VARCHAR(10), div5totalgtime VARCHAR(10), div5longestgtime VARCHAR(10), div5wheelsoff VARCHAR(10), div5tailnum VARCHAR(10)); query I -COPY ontime FROM 'data/csv/real/ontime_sample.csv' DELIMITER ',' HEADER; +COPY ontime FROM '{DATA_DIR}/csv/real/ontime_sample.csv' DELIMITER ',' HEADER; ---- 9 @@ -27,7 +27,7 @@ SELECT year, uniquecarrier, origin, origincityname, div5longestgtime FROM ontime 1988 AA JFK New York, NY NULL query I -COPY ontime TO '__TEST_DIR__/ontime.csv' DELIMITER ',' HEADER; +COPY ontime TO '{TEMP_DIR}/ontime.csv' DELIMITER ',' HEADER; ---- 9 @@ -35,7 +35,7 @@ statement ok DELETE FROM ontime; query I -COPY ontime FROM '__TEST_DIR__/ontime.csv' DELIMITER ',' HEADER; +COPY 
ontime FROM '{TEMP_DIR}/ontime.csv' DELIMITER ',' HEADER; ---- 9 diff --git a/test/sql/copy/csv/test_parallel_nullpadding.test b/test/sql/copy/csv/test_parallel_nullpadding.test index 0eb400a31404..0de4bb2aa0d7 100644 --- a/test/sql/copy/csv/test_parallel_nullpadding.test +++ b/test/sql/copy/csv/test_parallel_nullpadding.test @@ -7,32 +7,32 @@ PRAGMA enable_verification # These will work, because we will only fail if the quoted new line happens between buffers or thread boundaries query I -select count(*) from read_csv('data/csv/evil_nullpadding.csv', delim = ';', quote = '"', null_padding = True, header = 0) +select count(*) from read_csv('{DATA_DIR}/csv/evil_nullpadding.csv', delim = ';', quote = '"', null_padding = True, header = 0) ---- 3 query I -select count(*) from read_csv('data/csv/evil_nullpadding_2.csv', delim = ';', quote = '"', null_padding = True, header = 0) +select count(*) from read_csv('{DATA_DIR}/csv/evil_nullpadding_2.csv', delim = ';', quote = '"', null_padding = True, header = 0) ---- 3 # If we force the buffer to break in the middle of the quotes, we can't read this. statement error -select * from read_csv('data/csv/evil_nullpadding.csv', delim = ';', quote = '"', null_padding = True, header = 0, buffer_size = 30) +select * from read_csv('{DATA_DIR}/csv/evil_nullpadding.csv', delim = ';', quote = '"', null_padding = True, header = 0, buffer_size = 30) ---- The parallel scanner does not support null_padding in conjunction with quoted new lines. Please disable the parallel csv reader with parallel=false statement error -select count(*) from read_csv('data/csv/evil_nullpadding.csv', delim = ';', quote = '"', null_padding = True, header = 0, buffer_size = 27) +select count(*) from read_csv('{DATA_DIR}/csv/evil_nullpadding.csv', delim = ';', quote = '"', null_padding = True, header = 0, buffer_size = 27) ---- The parallel scanner does not support null_padding in conjunction with quoted new lines. Please disable the parallel csv reader with parallel=false statement error -select count(*) from read_csv('data/csv/evil_nullpadding.csv', delim = ';', quote = '"', null_padding = True, header = 0, buffer_size = 30) +select count(*) from read_csv('{DATA_DIR}/csv/evil_nullpadding.csv', delim = ';', quote = '"', null_padding = True, header = 0, buffer_size = 30) ---- The parallel scanner does not support null_padding in conjunction with quoted new lines. Please disable the parallel csv reader with parallel=false statement error -select * from read_csv('data/csv/evil_nullpadding_2.csv', delim = ';', quote = '"', null_padding = True, header = 0, buffer_size = 26) +select * from read_csv('{DATA_DIR}/csv/evil_nullpadding_2.csv', delim = ';', quote = '"', null_padding = True, header = 0, buffer_size = 26) ---- The parallel scanner does not support null_padding in conjunction with quoted new lines.
Please disable the parallel csv reader with parallel=false diff --git a/test/sql/copy/csv/test_quote_default.test b/test/sql/copy/csv/test_quote_default.test index e67a614cb14c..28d5eb78de57 100644 --- a/test/sql/copy/csv/test_quote_default.test +++ b/test/sql/copy/csv/test_quote_default.test @@ -6,11 +6,11 @@ statement ok PRAGMA enable_verification query II -from read_csv('data/csv/test_default_option.csv', columns = {'a':'varchar', 'b':'integer'}, auto_detect = false, header = true) where b = 1 +from read_csv('{DATA_DIR}/csv/test_default_option.csv', columns = {'a':'varchar', 'b':'integer'}, auto_detect = false, header = true) where b = 1 ---- x,y 1 query II -from read_csv('data/csv/test_default_option_2.csv', columns = {'a':'varchar', 'b':'integer'}, auto_detect = false, header = true, delim = '|') where b = 1 +from read_csv('{DATA_DIR}/csv/test_default_option_2.csv', columns = {'a':'varchar', 'b':'integer'}, auto_detect = false, header = true, delim = '|') where b = 1 ---- x|y 1 \ No newline at end of file diff --git a/test/sql/copy/csv/test_quoted_newline.test b/test/sql/copy/csv/test_quoted_newline.test index e054922fa949..ab4c2059fc63 100644 --- a/test/sql/copy/csv/test_quoted_newline.test +++ b/test/sql/copy/csv/test_quoted_newline.test @@ -10,7 +10,7 @@ statement ok CREATE TABLE test (a VARCHAR, b INTEGER); query I -COPY test FROM 'data/csv/test/quoted_newline.csv' (DELIMITER ',', AUTO_DETECT 0); +COPY test FROM '{DATA_DIR}/csv/test/quoted_newline.csv' (DELIMITER ',', AUTO_DETECT 0); ---- 2 diff --git a/test/sql/copy/csv/test_read_csv.test b/test/sql/copy/csv/test_read_csv.test index ade7fe0429fc..bbb25e9cbea3 100644 --- a/test/sql/copy/csv/test_read_csv.test +++ b/test/sql/copy/csv/test_read_csv.test @@ -6,18 +6,18 @@ statement ok PRAGMA enable_verification query II -select delimiter, quote FROM sniff_csv('data/csv/stats_3_muta_10_21.csv') +select delimiter, quote FROM sniff_csv('{DATA_DIR}/csv/stats_3_muta_10_21.csv') ---- , " query III -FROM read_csv('data/csv/comments/comment_skip.csv', comment = '#', delim = ',', skip=1, auto_detect = false, header = 1, columns = {'x':'varchar','y':'varchar','z':'varchar'}) +FROM read_csv('{DATA_DIR}/csv/comments/comment_skip.csv', comment = '#', delim = ',', skip=1, auto_detect = false, header = 1, columns = {'x':'varchar','y':'varchar','z':'varchar'}) ---- 1 2 3 4 5 6 query II -FROM read_csv('data/csv/multi_quote.csv', null_padding = true) +FROM read_csv('{DATA_DIR}/csv/multi_quote.csv', null_padding = true) ---- 2019-01-01 text 2019-02-01 text @@ -28,34 +28,34 @@ FROM read_csv('data/csv/multi_quote.csv', null_padding = true) 2019-09-01 'text' query II -FROM read_csv('data/csv/bad_escape.csv') +FROM read_csv('{DATA_DIR}/csv/bad_escape.csv') ---- 332 Surname, Firstname 123 foo ("foo") query I -FROM read_csv('data/csv/quoted_values_delimited.csv') +FROM read_csv('{DATA_DIR}/csv/quoted_values_delimited.csv') ---- value1;value2;value3 value1;value2;value3 value1;value2;value3 query I -FROM read_csv('data/csv/quoted_values_delimited.csv', ignore_errors = true) +FROM read_csv('{DATA_DIR}/csv/quoted_values_delimited.csv', ignore_errors = true) ---- value1;value2;value3 value1;value2;value3 value1;value2;value3 query III -FROM read_csv('data/csv/quoted_values_delimited.csv', quote = '') +FROM read_csv('{DATA_DIR}/csv/quoted_values_delimited.csv', quote = '') ---- "value1 value2 value3" "value1 value2 value3" "value1 value2 value3" query I -FROM read_csv_auto('data/csv/test/dateformat.csv') +FROM read_csv_auto('{DATA_DIR}/csv/test/dateformat.csv') 
---- 2019-06-05 @@ -65,7 +65,7 @@ CREATE TABLE dates (d DATE); # base date format does not work here statement ok -INSERT INTO dates SELECT * FROM read_csv('data/csv/test/dateformat.csv', columns=STRUCT_PACK(d := 'DATE'), header=0) +INSERT INTO dates SELECT * FROM read_csv('{DATA_DIR}/csv/test/dateformat.csv', columns=STRUCT_PACK(d := 'DATE'), header=0) query I SELECT * FROM dates @@ -74,7 +74,7 @@ SELECT * FROM dates # dateformat should also work with auto format statement ok -INSERT INTO dates SELECT * FROM read_csv_auto('data/csv/test/dateformat.csv', dateformat='%m/%d/%Y') +INSERT INTO dates SELECT * FROM read_csv_auto('{DATA_DIR}/csv/test/dateformat.csv', dateformat='%m/%d/%Y') query I SELECT * FROM dates ORDER BY 1 @@ -85,7 +85,7 @@ SELECT * FROM dates ORDER BY 1 # we can also do this for timestamps # as long as we make the date format fail statement ok -CREATE TABLE timestamps AS SELECT * FROM read_csv_auto('data/csv/test/dateformat.csv', timestampformat='%m/%d/%Y', columns=STRUCT_PACK(d := 'TIMESTAMP')) +CREATE TABLE timestamps AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/test/dateformat.csv', timestampformat='%m/%d/%Y', columns=STRUCT_PACK(d := 'TIMESTAMP')) query I SELECT * FROM timestamps @@ -94,7 +94,7 @@ SELECT * FROM timestamps # create a view using the read_csv function statement ok -CREATE VIEW lineitem AS SELECT * FROM read_csv('data/csv/real/lineitem_sample.csv', sep='|', columns=STRUCT_PACK(l_orderkey := 'INT', l_partkey := 'INT', l_suppkey := 'INT', l_linenumber := 'INT', l_quantity := 'INTEGER', l_extendedprice := 'DOUBLE', l_discount := 'DOUBLE', l_tax := 'DOUBLE', l_returnflag := 'VARCHAR', l_linestatus := 'VARCHAR', l_shipdate := 'DATE', l_commitdate := 'DATE', l_receiptdate := 'DATE', l_shipinstruct := 'VARCHAR', l_shipmode := 'VARCHAR', l_comment := 'VARCHAR')); +CREATE VIEW lineitem AS SELECT * FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', sep='|', columns=STRUCT_PACK(l_orderkey := 'INT', l_partkey := 'INT', l_suppkey := 'INT', l_linenumber := 'INT', l_quantity := 'INTEGER', l_extendedprice := 'DOUBLE', l_discount := 'DOUBLE', l_tax := 'DOUBLE', l_returnflag := 'VARCHAR', l_linestatus := 'VARCHAR', l_shipdate := 'DATE', l_commitdate := 'DATE', l_receiptdate := 'DATE', l_shipinstruct := 'VARCHAR', l_shipmode := 'VARCHAR', l_comment := 'VARCHAR')); # each of these will read the CSV again through the view query I @@ -115,7 +115,7 @@ SELECT l_partkey, RTRIM(l_comment) FROM lineitem WHERE l_orderkey=1 ORDER BY l_l # test incorrect usage of read_csv function # wrong argument type statement error -SELECT * FROM read_csv('data/csv/real/lineitem_sample.csv', sep='|', columns=STRUCT_PACK(l_orderkey := 5)) +SELECT * FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', sep='|', columns=STRUCT_PACK(l_orderkey := 5)) ---- read_csv requires a type specification as string diff --git a/test/sql/copy/csv/test_replacement_scan_alias.test b/test/sql/copy/csv/test_replacement_scan_alias.test index 495bdbba94ca..db0b9775ad8a 100644 --- a/test/sql/copy/csv/test_replacement_scan_alias.test +++ b/test/sql/copy/csv/test_replacement_scan_alias.test @@ -8,24 +8,24 @@ PRAGMA enable_verification # implicit alias is equal to the file name statement ok -select * from 'data/csv/test/dateformat.csv'; +select * from '{DATA_DIR}/csv/test/dateformat.csv'; statement ok -select * from 'data/csv/test/dateformat.csv', 'data/csv/test/dateformat_2.csv'; +select * from '{DATA_DIR}/csv/test/dateformat.csv', '{DATA_DIR}/csv/test/dateformat_2.csv'; # explicit alias statement ok -select 
mytbl.column0 from 'data/csv/test/dateformat.csv'mytbl; +select mytbl.column0 from '{DATA_DIR}/csv/test/dateformat.csv'mytbl; statement ok -select mytbl.mycol from 'data/csv/test/dateformat.csv' mytbl(mycol); +select mytbl.mycol from '{DATA_DIR}/csv/test/dateformat.csv' mytbl(mycol); require parquet statement ok -select blob.ids from 'data/parquet-testing/blob.parquet' +select blob.ids from '{DATA_DIR}/parquet-testing/blob.parquet' require json statement ok -select example_n.id from 'data/json/example_n.ndjson' +select example_n.id from '{DATA_DIR}/json/example_n.ndjson' diff --git a/test/sql/copy/csv/test_segfault.test b/test/sql/copy/csv/test_segfault.test index ff29a3454206..1fb26a16807f 100644 --- a/test/sql/copy/csv/test_segfault.test +++ b/test/sql/copy/csv/test_segfault.test @@ -9,7 +9,7 @@ loop i 0 39 # We don't really care if these pass or fail, as long as they don't segfault statement maybe -from 'data/csv/fuzzing/{i}.csv' +from '{DATA_DIR}/csv/fuzzing/{i}.csv' ---- endloop diff --git a/test/sql/copy/csv/test_skip_bom.test b/test/sql/copy/csv/test_skip_bom.test index 5be0232a5a3d..47084f06d075 100644 --- a/test/sql/copy/csv/test_skip_bom.test +++ b/test/sql/copy/csv/test_skip_bom.test @@ -7,7 +7,7 @@ PRAGMA enable_verification # read csv with UTF-8 BOM statement ok -CREATE TABLE people AS SELECT * FROM read_csv('data/csv/people.csv', columns=STRUCT_PACK(a := 'VARCHAR', b := 'VARCHAR'), sep=',', auto_detect='false'); +CREATE TABLE people AS SELECT * FROM read_csv('{DATA_DIR}/csv/people.csv', columns=STRUCT_PACK(a := 'VARCHAR', b := 'VARCHAR'), sep=',', auto_detect='false'); query TT SELECT * FROM people @@ -20,7 +20,7 @@ Cassandra Brandow South # read csv auto with UTF-8 BOM statement ok -CREATE TABLE people2 AS SELECT * FROM read_csv_auto('data/csv/people.csv'); +CREATE TABLE people2 AS SELECT * FROM read_csv_auto('{DATA_DIR}/csv/people.csv'); query TT SELECT * FROM people2 diff --git a/test/sql/copy/csv/test_skip_header.test b/test/sql/copy/csv/test_skip_header.test index c89961bb6102..a2173136f7a4 100644 --- a/test/sql/copy/csv/test_skip_header.test +++ b/test/sql/copy/csv/test_skip_header.test @@ -6,14 +6,14 @@ statement ok PRAGMA enable_verification query III -FROM 'data/csv/skip_header.csv' +FROM '{DATA_DIR}/csv/skip_header.csv' ---- 1 2 3 4 5 6 7 8 9 query III -FROM read_csv_auto('data/csv/skip_header.csv', columns={'a': 'INT32', 'b': 'INT32', 'c': 'INT32'}); +FROM read_csv_auto('{DATA_DIR}/csv/skip_header.csv', columns={'a': 'INT32', 'b': 'INT32', 'c': 'INT32'}); ---- 1 2 3 4 5 6 diff --git a/test/sql/copy/csv/test_sniff_csv.test b/test/sql/copy/csv/test_sniff_csv.test index 14d5b1c0bc61..2aebf8801ea3 100644 --- a/test/sql/copy/csv/test_sniff_csv.test +++ b/test/sql/copy/csv/test_sniff_csv.test @@ -10,82 +10,82 @@ require notwindows # This has a row with an extra column, so with ignore_errors we give preference to that query II -SELECT quote, escape from sniff_csv('data/csv/16857.csv', ignore_errors = true); +SELECT quote, escape from sniff_csv('{DATA_DIR}/csv/16857.csv', ignore_errors = true); ---- " " query II -SELECT quote, escape from sniff_csv('data/csv/16857.csv'); +SELECT quote, escape from sniff_csv('{DATA_DIR}/csv/16857.csv'); ---- " " query III -SELECT escape,quote, delimiter from sniff_csv('data/csv/later_quotes.csv'); +SELECT escape,quote, delimiter from sniff_csv('{DATA_DIR}/csv/later_quotes.csv'); ---- " " , query I -SELECT Prompt FROM sniff_csv('data/csv/real/lineitem_sample.csv'); +SELECT Prompt FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv'); 
---- -FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d'); +FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d'); query IIIIIIIIIIIIIIII -FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='"', escape='"', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d') limit 1; +FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='"', escape='"', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d') limit 1; ---- 1 15519 785 1 17 24386.67 0.04 0.02 N O 1996-03-13 1996-02-12 1996-03-22 DELIVER IN PERSON TRUCK egular courts above the query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv'); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv'); ---- -| (empty) (empty) \n (empty) 0 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 
'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d'); +| (empty) (empty) \n (empty) 0 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d'); # Test Invalid Path statement error -FROM sniff_csv('data/csv/real/non_ecziste.csv'); +FROM sniff_csv('{DATA_DIR}/csv/real/non_ecziste.csv'); ---- -No files found that match the pattern "data/csv/real/non_ecziste.csv" +No files found that match the pattern "{DATA_DIR}/csv/real/non_ecziste.csv" # Test different sample sizes query IIIIIIIIIIII -FROM sniff_csv('data/csv/error/mismatch/big_bad.csv', sample_size=1); +FROM sniff_csv('{DATA_DIR}/csv/error/mismatch/big_bad.csv', sample_size=1); ---- -, (empty) (empty) \n (empty) 0 0 [{'name': column0, 'type': BIGINT}, {'name': column1, 'type': VARCHAR}] NULL NULL sample_size=1 FROM read_csv('data/csv/error/mismatch/big_bad.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column0': 'BIGINT', 'column1': 'VARCHAR'}, sample_size=1); +, (empty) (empty) \n (empty) 0 0 [{'name': column0, 'type': BIGINT}, {'name': column1, 'type': VARCHAR}] NULL NULL sample_size=1 FROM read_csv('{DATA_DIR}/csv/error/mismatch/big_bad.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column0': 'BIGINT', 'column1': 'VARCHAR'}, sample_size=1); statement error -FROM read_csv('data/csv/error/mismatch/big_bad.csv', auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=0, header=0, columns={'column0': 'BIGINT', 'column1': 'VARCHAR'}, sample_size=1); +FROM read_csv('{DATA_DIR}/csv/error/mismatch/big_bad.csv', auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=0, header=0, columns={'column0': 'BIGINT', 'column1': 'VARCHAR'}, sample_size=1); ---- Conversion Error: CSV Error on Line: 2176 query IIIIIIIIIIII -FROM sniff_csv('data/csv/error/mismatch/big_bad.csv', sample_size=10000); +FROM sniff_csv('{DATA_DIR}/csv/error/mismatch/big_bad.csv', sample_size=10000); ---- -, (empty) (empty) \n (empty) 0 1 [{'name': 1, 'type': VARCHAR}, {'name': A, 'type': VARCHAR}] NULL NULL sample_size=10000 FROM read_csv('data/csv/error/mismatch/big_bad.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'1': 'VARCHAR', 'A': 'VARCHAR'}, sample_size=10000); +, (empty) (empty) \n (empty) 0 1 [{'name': 1, 'type': VARCHAR}, {'name': A, 'type': 
VARCHAR}] NULL NULL sample_size=10000 FROM read_csv('{DATA_DIR}/csv/error/mismatch/big_bad.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'1': 'VARCHAR', 'A': 'VARCHAR'}, sample_size=10000); query IIIIIIIIIIII -FROM sniff_csv('data/csv/error/mismatch/big_bad.csv', sample_size=-1); +FROM sniff_csv('{DATA_DIR}/csv/error/mismatch/big_bad.csv', sample_size=-1); ---- -, (empty) (empty) \n (empty) 0 1 [{'name': 1, 'type': VARCHAR}, {'name': A, 'type': VARCHAR}] NULL NULL sample_size=-1 FROM read_csv('data/csv/error/mismatch/big_bad.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'1': 'VARCHAR', 'A': 'VARCHAR'}, sample_size=-1); +, (empty) (empty) \n (empty) 0 1 [{'name': 1, 'type': VARCHAR}, {'name': A, 'type': VARCHAR}] NULL NULL sample_size=-1 FROM read_csv('{DATA_DIR}/csv/error/mismatch/big_bad.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'1': 'VARCHAR', 'A': 'VARCHAR'}, sample_size=-1); # Test with defined time and timestamp query IIIIIIIIIIII -FROM sniff_csv('data/csv/test/dateformat.csv') +FROM sniff_csv('{DATA_DIR}/csv/test/dateformat.csv') ---- -, (empty) (empty) \n (empty) 0 0 [{'name': column0, 'type': DATE}] %d/%m/%Y NULL NULL FROM read_csv('data/csv/test/dateformat.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column0': 'DATE'}, dateformat='%d/%m/%Y'); +, (empty) (empty) \n (empty) 0 0 [{'name': column0, 'type': DATE}] %d/%m/%Y NULL NULL FROM read_csv('{DATA_DIR}/csv/test/dateformat.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column0': 'DATE'}, dateformat='%d/%m/%Y'); query I -FROM read_csv('data/csv/test/dateformat.csv', auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=0, header=false, columns={'column0': 'DATE'}, dateformat='%d/%m/%Y'); +FROM read_csv('{DATA_DIR}/csv/test/dateformat.csv', auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=0, header=false, columns={'column0': 'DATE'}, dateformat='%d/%m/%Y'); ---- 2019-06-05 query IIIIIIIIIIII -FROM sniff_csv('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv') +FROM sniff_csv('{DATA_DIR}/csv/auto/time_date_timestamp_yyyy.mm.dd.csv') ---- -, (empty) (empty) \n (empty) 0 1 [{'name': a, 'type': BIGINT}, {'name': b, 'type': VARCHAR}, {'name': t, 'type': TIME}, {'name': d, 'type': DATE}, {'name': ts, 'type': TIMESTAMP}] %Y.%m.%d %Y.%m.%d %H:%M:%S NULL FROM read_csv('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, dateformat='%Y.%m.%d', timestampformat='%Y.%m.%d %H:%M:%S'); +, (empty) (empty) \n (empty) 0 1 [{'name': a, 'type': BIGINT}, {'name': b, 'type': VARCHAR}, {'name': t, 'type': TIME}, {'name': d, 'type': DATE}, {'name': ts, 'type': TIMESTAMP}] %Y.%m.%d %Y.%m.%d %H:%M:%S NULL FROM read_csv('{DATA_DIR}/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, dateformat='%Y.%m.%d', timestampformat='%Y.%m.%d %H:%M:%S'); query IIIII -FROM read_csv('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', 
auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=0, header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, dateformat='%Y.%m.%d', timestampformat='%Y.%m.%d %H:%M:%S'); +FROM read_csv('{DATA_DIR}/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=0, header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, dateformat='%Y.%m.%d', timestampformat='%Y.%m.%d %H:%M:%S'); ---- 123 TEST2 12:12:12 2000-01-01 2000-01-01 12:12:00 345 TEST2 14:15:30 2002-02-02 2002-02-02 14:15:00 @@ -93,35 +93,35 @@ FROM read_csv('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=fa # Test with dirty rows query IIIIIIIIIIII -FROM sniff_csv('data/csv/inconsistent_cells.csv') +FROM sniff_csv('{DATA_DIR}/csv/inconsistent_cells.csv') ---- -, (empty) (empty) \n (empty) 5 0 [{'name': column0, 'type': BIGINT}, {'name': column1, 'type': BIGINT}, {'name': column2, 'type': BIGINT}, {'name': column3, 'type': BIGINT}, {'name': column4, 'type': BIGINT}] NULL NULL NULL FROM read_csv('data/csv/inconsistent_cells.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=5, comment='', header=false, columns={'column0': 'BIGINT', 'column1': 'BIGINT', 'column2': 'BIGINT', 'column3': 'BIGINT', 'column4': 'BIGINT'}); +, (empty) (empty) \n (empty) 5 0 [{'name': column0, 'type': BIGINT}, {'name': column1, 'type': BIGINT}, {'name': column2, 'type': BIGINT}, {'name': column3, 'type': BIGINT}, {'name': column4, 'type': BIGINT}] NULL NULL NULL FROM read_csv('{DATA_DIR}/csv/inconsistent_cells.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=5, comment='', header=false, columns={'column0': 'BIGINT', 'column1': 'BIGINT', 'column2': 'BIGINT', 'column3': 'BIGINT', 'column4': 'BIGINT'}); query IIIII -FROM read_csv('data/csv/inconsistent_cells.csv', auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=5, header=false, columns={'column0': 'BIGINT', 'column1': 'BIGINT', 'column2': 'BIGINT', 'column3': 'BIGINT', 'column4': 'BIGINT'}); +FROM read_csv('{DATA_DIR}/csv/inconsistent_cells.csv', auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=5, header=false, columns={'column0': 'BIGINT', 'column1': 'BIGINT', 'column2': 'BIGINT', 'column3': 'BIGINT', 'column4': 'BIGINT'}); ---- 1 2 3 4 5 1 2 3 4 5 # Test Header and quote ' query IIIIIIIIIIII -FROM sniff_csv('data/csv/timings.csv') +FROM sniff_csv('{DATA_DIR}/csv/timings.csv') ---- -| (empty) (empty) \n (empty) 0 1 [{'name': tool, 'type': VARCHAR}, {'name': sf, 'type': BIGINT}, {'name': day, 'type': DATE}, {'name': batch_type, 'type': VARCHAR}, {'name': q, 'type': VARCHAR}, {'name': parameters, 'type': VARCHAR}, {'name': time, 'type': DOUBLE}] %Y-%m-%d NULL NULL FROM read_csv('data/csv/timings.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'tool': 'VARCHAR', 'sf': 'BIGINT', 'day': 'DATE', 'batch_type': 'VARCHAR', 'q': 'VARCHAR', 'parameters': 'VARCHAR', 'time': 'DOUBLE'}, dateformat='%Y-%m-%d'); +| (empty) (empty) \n (empty) 0 1 [{'name': tool, 'type': VARCHAR}, {'name': sf, 'type': BIGINT}, {'name': day, 'type': DATE}, {'name': batch_type, 'type': VARCHAR}, {'name': q, 'type': VARCHAR}, {'name': parameters, 'type': VARCHAR}, {'name': time, 'type': DOUBLE}] %Y-%m-%d NULL NULL FROM read_csv('{DATA_DIR}/csv/timings.csv', auto_detect=false, delim='|', quote='', escape='', 
new_line='\n', skip=0, comment='', header=true, columns={'tool': 'VARCHAR', 'sf': 'BIGINT', 'day': 'DATE', 'batch_type': 'VARCHAR', 'q': 'VARCHAR', 'parameters': 'VARCHAR', 'time': 'DOUBLE'}, dateformat='%Y-%m-%d'); query IIIIIII -FROM read_csv('data/csv/timings.csv', auto_detect=false, delim='|', quote='"', escape='\', new_line='\n', skip=0, header=true, columns={'tool': 'VARCHAR', 'sf': 'BIGINT', 'day': 'DATE', 'batch_type': 'VARCHAR', 'q': 'VARCHAR', 'parameters': 'VARCHAR', 'time': 'DOUBLE'}, dateformat='%Y-%m-%d') order by all limit 1; +FROM read_csv('{DATA_DIR}/csv/timings.csv', auto_detect=false, delim='|', quote='"', escape='\', new_line='\n', skip=0, header=true, columns={'tool': 'VARCHAR', 'sf': 'BIGINT', 'day': 'DATE', 'batch_type': 'VARCHAR', 'q': 'VARCHAR', 'parameters': 'VARCHAR', 'time': 'DOUBLE'}, dateformat='%Y-%m-%d') order by all limit 1; ---- Umbra 100 2012-11-29 power 1 {"datetime": "2010-02-26T03:51:21.000+00:00"} 0.05473947525024414 # Test backslash option query IIIIIIIIIIII -FROM sniff_csv('data/csv/auto/backslash_escape.csv') +FROM sniff_csv('{DATA_DIR}/csv/auto/backslash_escape.csv') ---- -| " \ \n (empty) 0 0 [{'name': column0, 'type': BIGINT}, {'name': column1, 'type': VARCHAR}, {'name': column2, 'type': VARCHAR}] NULL NULL NULL FROM read_csv('data/csv/auto/backslash_escape.csv', auto_detect=false, delim='|', quote='"', escape='\', new_line='\n', skip=0, comment='', header=false, columns={'column0': 'BIGINT', 'column1': 'VARCHAR', 'column2': 'VARCHAR'}); +| " \ \n (empty) 0 0 [{'name': column0, 'type': BIGINT}, {'name': column1, 'type': VARCHAR}, {'name': column2, 'type': VARCHAR}] NULL NULL NULL FROM read_csv('{DATA_DIR}/csv/auto/backslash_escape.csv', auto_detect=false, delim='|', quote='"', escape='\', new_line='\n', skip=0, comment='', header=false, columns={'column0': 'BIGINT', 'column1': 'VARCHAR', 'column2': 'VARCHAR'}); query III -FROM read_csv('data/csv/auto/backslash_escape.csv', auto_detect=false, delim='|', quote='"', escape='\', new_line='\n', skip=0, header=false, columns={'column0': 'BIGINT', 'column1': 'VARCHAR', 'column2': 'VARCHAR'}); +FROM read_csv('{DATA_DIR}/csv/auto/backslash_escape.csv', auto_detect=false, delim='|', quote='"', escape='\', new_line='\n', skip=0, header=false, columns={'column0': 'BIGINT', 'column1': 'VARCHAR', 'column2': 'VARCHAR'}); ---- 123 TEST7 text1 345 TEST7 text"2" @@ -129,13 +129,13 @@ FROM read_csv('data/csv/auto/backslash_escape.csv', auto_detect=false, delim='|' # Add tests with the comment option query IIIIIIIIIIII -FROM sniff_csv('data/csv/comments/simple.csv'); +FROM sniff_csv('{DATA_DIR}/csv/comments/simple.csv'); ---- -; (empty) (empty) \n # 0 1 [{'name': a, 'type': BIGINT}, {'name': b, 'type': BIGINT}] NULL NULL NULL FROM read_csv('data/csv/comments/simple.csv', auto_detect=false, delim=';', quote='', escape='', new_line='\n', skip=0, comment='#', header=true, columns={'a': 'BIGINT', 'b': 'BIGINT'}); +; (empty) (empty) \n # 0 1 [{'name': a, 'type': BIGINT}, {'name': b, 'type': BIGINT}] NULL NULL NULL FROM read_csv('{DATA_DIR}/csv/comments/simple.csv', auto_detect=false, delim=';', quote='', escape='', new_line='\n', skip=0, comment='#', header=true, columns={'a': 'BIGINT', 'b': 'BIGINT'}); # Test Prompt query II -FROM read_csv('data/csv/comments/simple.csv', auto_detect=false, delim=';', quote='', escape='', new_line='\n', skip=0, comment='#', header=true, columns={'a': 'BIGINT', 'b': 'BIGINT'}); +FROM read_csv('{DATA_DIR}/csv/comments/simple.csv', auto_detect=false, delim=';', quote='', escape='', 
new_line='\n', skip=0, comment='#', header=true, columns={'a': 'BIGINT', 'b': 'BIGINT'}); ---- 1 3 6 7 \ No newline at end of file diff --git a/test/sql/copy/csv/test_sniff_csv_options.test b/test/sql/copy/csv/test_sniff_csv_options.test index cefbe6b04caf..579e17dfc12d 100644 --- a/test/sql/copy/csv/test_sniff_csv_options.test +++ b/test/sql/copy/csv/test_sniff_csv_options.test @@ -10,7 +10,7 @@ require notwindows # Test user giving wrong option statement error -FROM sniff_csv('data/csv/real/lineitem_sample.csv', delim = ','); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim = ','); ---- It was not possible to automatically detect the CSV parsing dialect @@ -18,163 +18,163 @@ It was not possible to automatically detect the CSV parsing dialect # delimiter query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', delim='|'); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', delim='|'); ---- -| (empty) (empty) \n (empty) 0 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL delim='|' FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d', delim='|'); +| (empty) (empty) \n (empty) 0 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL delim='|' FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d', delim='|'); # quote query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', quote='"'); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', quote='"'); ---- -| " (empty) \n (empty) 0 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': 
BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL quote='"' FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d', quote='"'); +| " (empty) \n (empty) 0 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL quote='"' FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d', quote='"'); # escape query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', escape='"'); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', escape='"'); ---- -| " " \n (empty) 0 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL escape='"' FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='"', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, 
dateformat='%Y-%m-%d', escape='"'); +| " " \n (empty) 0 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL escape='"' FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='"', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d', escape='"'); # column names query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', escape='"'); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', escape='"'); ---- -| " " \n (empty) 0 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL escape='"' FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='"', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d', escape='"'); +| " " \n (empty) 0 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL escape='"' FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='"', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 
'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d', escape='"'); # column names and types query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', names=['c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9', 'c10', 'c11', 'c12', 'c13', 'c14', 'c15', 'c16']); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', names=['c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9', 'c10', 'c11', 'c12', 'c13', 'c14', 'c15', 'c16']); ---- -| (empty) (empty) \n (empty) 0 0 [{'name': c1, 'type': BIGINT}, {'name': c2, 'type': BIGINT}, {'name': c3, 'type': BIGINT}, {'name': c4, 'type': BIGINT}, {'name': c5, 'type': BIGINT}, {'name': c6, 'type': DOUBLE}, {'name': c7, 'type': DOUBLE}, {'name': c8, 'type': DOUBLE}, {'name': c9, 'type': VARCHAR}, {'name': c10, 'type': VARCHAR}, {'name': c11, 'type': DATE}, {'name': c12, 'type': DATE}, {'name': c13, 'type': DATE}, {'name': c14, 'type': VARCHAR}, {'name': c15, 'type': VARCHAR}, {'name': c16, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, dateformat='%Y-%m-%d'); +| (empty) (empty) \n (empty) 0 0 [{'name': c1, 'type': BIGINT}, {'name': c2, 'type': BIGINT}, {'name': c3, 'type': BIGINT}, {'name': c4, 'type': BIGINT}, {'name': c5, 'type': BIGINT}, {'name': c6, 'type': DOUBLE}, {'name': c7, 'type': DOUBLE}, {'name': c8, 'type': DOUBLE}, {'name': c9, 'type': VARCHAR}, {'name': c10, 'type': VARCHAR}, {'name': c11, 'type': DATE}, {'name': c12, 'type': DATE}, {'name': c13, 'type': DATE}, {'name': c14, 'type': VARCHAR}, {'name': c15, 'type': VARCHAR}, {'name': c16, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, dateformat='%Y-%m-%d'); query IIIIIIIIIIIIIIII -FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='"', escape='"', new_line='\n', skip=0, header=false, columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, dateformat='%Y-%m-%d') limit 1; +FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='"', escape='"', new_line='\n', skip=0, header=false, columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, dateformat='%Y-%m-%d') limit 1; ---- 1 15519 785 1 17 24386.67 0.04 0.02 N O 1996-03-13 1996-02-12 1996-03-22 DELIVER IN PERSON TRUCK egular 
courts above the query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}); ---- -| (empty) (empty) \n (empty) 0 0 [{'name': c1, 'type': BIGINT}, {'name': c2, 'type': BIGINT}, {'name': c3, 'type': BIGINT}, {'name': c4, 'type': BIGINT}, {'name': c5, 'type': BIGINT}, {'name': c6, 'type': DOUBLE}, {'name': c7, 'type': DOUBLE}, {'name': c8, 'type': DOUBLE}, {'name': c9, 'type': VARCHAR}, {'name': c10, 'type': VARCHAR}, {'name': c11, 'type': DATE}, {'name': c12, 'type': DATE}, {'name': c13, 'type': DATE}, {'name': c14, 'type': VARCHAR}, {'name': c15, 'type': VARCHAR}, {'name': c16, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, dateformat='%Y-%m-%d'); +| (empty) (empty) \n (empty) 0 0 [{'name': c1, 'type': BIGINT}, {'name': c2, 'type': BIGINT}, {'name': c3, 'type': BIGINT}, {'name': c4, 'type': BIGINT}, {'name': c5, 'type': BIGINT}, {'name': c6, 'type': DOUBLE}, {'name': c7, 'type': DOUBLE}, {'name': c8, 'type': DOUBLE}, {'name': c9, 'type': VARCHAR}, {'name': c10, 'type': VARCHAR}, {'name': c11, 'type': DATE}, {'name': c12, 'type': DATE}, {'name': c13, 'type': DATE}, {'name': c14, 'type': VARCHAR}, {'name': c15, 'type': VARCHAR}, {'name': c16, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, dateformat='%Y-%m-%d'); # skip rows query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', skip=1); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', skip=1); ---- -| (empty) (empty) \n (empty) 1 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL skip=1 FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, 
delim='|', quote='', escape='', new_line='\n', comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d', skip=1); +| (empty) (empty) \n (empty) 1 0 [{'name': column00, 'type': BIGINT}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL skip=1 FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', comment='', header=false, columns={'column00': 'BIGINT', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d', skip=1); # header exists query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', header=true); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', header=true); ---- -| (empty) (empty) \n (empty) 0 1 [{'name': 1, 'type': BIGINT}, {'name': 15519, 'type': BIGINT}, {'name': 785, 'type': BIGINT}, {'name': 1_1, 'type': BIGINT}, {'name': 17, 'type': BIGINT}, {'name': 24386.670000, 'type': DOUBLE}, {'name': 0.040000, 'type': DOUBLE}, {'name': 0.020000, 'type': DOUBLE}, {'name': N, 'type': VARCHAR}, {'name': O, 'type': VARCHAR}, {'name': 1996-03-13, 'type': DATE}, {'name': 1996-02-12, 'type': DATE}, {'name': 1996-03-22, 'type': DATE}, {'name': DELIVER IN PERSON, 'type': VARCHAR}, {'name': TRUCK, 'type': VARCHAR}, {'name': egular courts above the, 'type': VARCHAR}] %Y-%m-%d NULL header=true FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', columns={'1': 'BIGINT', '15519': 'BIGINT', '785': 'BIGINT', '1_1': 'BIGINT', '17': 'BIGINT', '24386.670000': 'DOUBLE', '0.040000': 'DOUBLE', '0.020000': 'DOUBLE', 'N': 'VARCHAR', 'O': 'VARCHAR', '1996-03-13': 'DATE', '1996-02-12': 'DATE', '1996-03-22': 'DATE', 'DELIVER IN PERSON': 'VARCHAR', 'TRUCK': 'VARCHAR', 'egular courts above the': 'VARCHAR'}, dateformat='%Y-%m-%d', header=true); +| (empty) (empty) \n (empty) 0 1 [{'name': 1, 'type': BIGINT}, {'name': 15519, 'type': BIGINT}, {'name': 785, 'type': BIGINT}, {'name': 1_1, 'type': BIGINT}, {'name': 17, 'type': BIGINT}, {'name': 24386.670000, 'type': DOUBLE}, {'name': 0.040000, 'type': DOUBLE}, {'name': 0.020000, 'type': DOUBLE}, {'name': N, 'type': VARCHAR}, {'name': O, 'type': VARCHAR}, {'name': 1996-03-13, 'type': DATE}, {'name': 1996-02-12, 'type': DATE}, {'name': 1996-03-22, 'type': DATE}, {'name': DELIVER IN PERSON, 'type': VARCHAR}, {'name': TRUCK, 'type': VARCHAR}, {'name': egular courts above the, 
'type': VARCHAR}] %Y-%m-%d NULL header=true FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', columns={'1': 'BIGINT', '15519': 'BIGINT', '785': 'BIGINT', '1_1': 'BIGINT', '17': 'BIGINT', '24386.670000': 'DOUBLE', '0.040000': 'DOUBLE', '0.020000': 'DOUBLE', 'N': 'VARCHAR', 'O': 'VARCHAR', '1996-03-13': 'DATE', '1996-02-12': 'DATE', '1996-03-22': 'DATE', 'DELIVER IN PERSON': 'VARCHAR', 'TRUCK': 'VARCHAR', 'egular courts above the': 'VARCHAR'}, dateformat='%Y-%m-%d', header=true); # timestampformat query IIIIIIIIIIII -FROM sniff_csv('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', dateformat='%Y.%m.%d') +FROM sniff_csv('{DATA_DIR}/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', dateformat='%Y.%m.%d') ---- -, (empty) (empty) \n (empty) 0 1 [{'name': a, 'type': BIGINT}, {'name': b, 'type': VARCHAR}, {'name': t, 'type': TIME}, {'name': d, 'type': DATE}, {'name': ts, 'type': TIMESTAMP}] %Y.%m.%d %Y.%m.%d %H:%M:%S dateformat='%Y.%m.%d' FROM read_csv('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, timestampformat='%Y.%m.%d %H:%M:%S', dateformat='%Y.%m.%d'); +, (empty) (empty) \n (empty) 0 1 [{'name': a, 'type': BIGINT}, {'name': b, 'type': VARCHAR}, {'name': t, 'type': TIME}, {'name': d, 'type': DATE}, {'name': ts, 'type': TIMESTAMP}] %Y.%m.%d %Y.%m.%d %H:%M:%S dateformat='%Y.%m.%d' FROM read_csv('{DATA_DIR}/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, timestampformat='%Y.%m.%d %H:%M:%S', dateformat='%Y.%m.%d'); # dateformat query IIIIIIIIIIII -FROM sniff_csv('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', timestampformat='%Y.%m.%d %H:%M:%S') +FROM sniff_csv('{DATA_DIR}/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', timestampformat='%Y.%m.%d %H:%M:%S') ---- -, (empty) (empty) \n (empty) 0 1 [{'name': a, 'type': BIGINT}, {'name': b, 'type': VARCHAR}, {'name': t, 'type': TIME}, {'name': d, 'type': DATE}, {'name': ts, 'type': TIMESTAMP}] %Y.%m.%d %Y.%m.%d %H:%M:%S timestampformat='%Y.%m.%d %H:%M:%S' FROM read_csv('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, dateformat='%Y.%m.%d', timestampformat='%Y.%m.%d %H:%M:%S'); +, (empty) (empty) \n (empty) 0 1 [{'name': a, 'type': BIGINT}, {'name': b, 'type': VARCHAR}, {'name': t, 'type': TIME}, {'name': d, 'type': DATE}, {'name': ts, 'type': TIMESTAMP}] %Y.%m.%d %Y.%m.%d %H:%M:%S timestampformat='%Y.%m.%d %H:%M:%S' FROM read_csv('{DATA_DIR}/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, dateformat='%Y.%m.%d', timestampformat='%Y.%m.%d %H:%M:%S'); # Test a combination query IIIIIIIIIIII -FROM sniff_csv('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', dateformat='%Y.%m.%d', timestampformat='%Y.%m.%d %H:%M:%S') +FROM sniff_csv('{DATA_DIR}/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', dateformat='%Y.%m.%d', 
timestampformat='%Y.%m.%d %H:%M:%S') ---- -, (empty) (empty) \n (empty) 0 1 [{'name': a, 'type': BIGINT}, {'name': b, 'type': VARCHAR}, {'name': t, 'type': TIME}, {'name': d, 'type': DATE}, {'name': ts, 'type': TIMESTAMP}] %Y.%m.%d %Y.%m.%d %H:%M:%S dateformat='%Y.%m.%d', timestampformat='%Y.%m.%d %H:%M:%S' FROM read_csv('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, dateformat='%Y.%m.%d', timestampformat='%Y.%m.%d %H:%M:%S'); +, (empty) (empty) \n (empty) 0 1 [{'name': a, 'type': BIGINT}, {'name': b, 'type': VARCHAR}, {'name': t, 'type': TIME}, {'name': d, 'type': DATE}, {'name': ts, 'type': TIMESTAMP}] %Y.%m.%d %Y.%m.%d %H:%M:%S dateformat='%Y.%m.%d', timestampformat='%Y.%m.%d %H:%M:%S' FROM read_csv('{DATA_DIR}/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=false, delim=',', quote='', escape='', new_line='\n', skip=0, comment='', header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, dateformat='%Y.%m.%d', timestampformat='%Y.%m.%d %H:%M:%S'); query IIIII -FROM read_csv('data/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=0, header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, timestampformat='%Y.%m.%d %H:%M:%S', dateformat='%Y.%m.%d') order by all limit 1; +FROM read_csv('{DATA_DIR}/csv/auto/time_date_timestamp_yyyy.mm.dd.csv', auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=0, header=true, columns={'a': 'BIGINT', 'b': 'VARCHAR', 't': 'TIME', 'd': 'DATE', 'ts': 'TIMESTAMP'}, timestampformat='%Y.%m.%d %H:%M:%S', dateformat='%Y.%m.%d') order by all limit 1; ---- 123 TEST2 12:12:12 2000-01-01 2000-01-01 12:12:00 # test auto_detect=false (that's illegal sir) statement error -FROM sniff_csv('data/csv/real/lineitem_sample.csv', columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, auto_detect = false); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, auto_detect = false); ---- sniff_csv function does not accept auto_detect variable set to false query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, auto_detect = true); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, auto_detect = true); ---- -| (empty) (empty) \n (empty) 0 0 [{'name': c1, 'type': BIGINT}, 
{'name': c2, 'type': BIGINT}, {'name': c3, 'type': BIGINT}, {'name': c4, 'type': BIGINT}, {'name': c5, 'type': BIGINT}, {'name': c6, 'type': DOUBLE}, {'name': c7, 'type': DOUBLE}, {'name': c8, 'type': DOUBLE}, {'name': c9, 'type': VARCHAR}, {'name': c10, 'type': VARCHAR}, {'name': c11, 'type': DATE}, {'name': c12, 'type': DATE}, {'name': c13, 'type': DATE}, {'name': c14, 'type': VARCHAR}, {'name': c15, 'type': VARCHAR}, {'name': c16, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, dateformat='%Y-%m-%d'); +| (empty) (empty) \n (empty) 0 0 [{'name': c1, 'type': BIGINT}, {'name': c2, 'type': BIGINT}, {'name': c3, 'type': BIGINT}, {'name': c4, 'type': BIGINT}, {'name': c5, 'type': BIGINT}, {'name': c6, 'type': DOUBLE}, {'name': c7, 'type': DOUBLE}, {'name': c8, 'type': DOUBLE}, {'name': c9, 'type': VARCHAR}, {'name': c10, 'type': VARCHAR}, {'name': c11, 'type': DATE}, {'name': c12, 'type': DATE}, {'name': c13, 'type': DATE}, {'name': c14, 'type': VARCHAR}, {'name': c15, 'type': VARCHAR}, {'name': c16, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'c1': 'BIGINT', 'c2': 'BIGINT', 'c3': 'BIGINT', 'c4': 'BIGINT', 'c5': 'BIGINT', 'c6': 'DOUBLE', 'c7': 'DOUBLE', 'c8': 'DOUBLE', 'c9': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, dateformat='%Y-%m-%d'); # auto_type_candidates query IIIIIIIIIIII -FROM sniff_csv('data/csv/autotypecandidates.csv', auto_type_candidates=['SMALLINT','BIGINT', 'DOUBLE', 'FLOAT','VARCHAR']); +FROM sniff_csv('{DATA_DIR}/csv/autotypecandidates.csv', auto_type_candidates=['SMALLINT','BIGINT', 'DOUBLE', 'FLOAT','VARCHAR']); ---- -| (empty) (empty) \n (empty) 0 0 [{'name': column0, 'type': SMALLINT}, {'name': column1, 'type': FLOAT}, {'name': column2, 'type': VARCHAR}] NULL NULL NULL FROM read_csv('data/csv/autotypecandidates.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column0': 'SMALLINT', 'column1': 'FLOAT', 'column2': 'VARCHAR'}); +| (empty) (empty) \n (empty) 0 0 [{'name': column0, 'type': SMALLINT}, {'name': column1, 'type': FLOAT}, {'name': column2, 'type': VARCHAR}] NULL NULL NULL FROM read_csv('{DATA_DIR}/csv/autotypecandidates.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column0': 'SMALLINT', 'column1': 'FLOAT', 'column2': 'VARCHAR'}); query III -FROM read_csv('data/csv/autotypecandidates.csv', auto_detect=false, delim='|', quote='"', escape='"', new_line='\n', skip=0, header=false, columns={'column0': 'SMALLINT', 'column1': 'FLOAT', 'column2': 'VARCHAR'}); +FROM read_csv('{DATA_DIR}/csv/autotypecandidates.csv', auto_detect=false, delim='|', quote='"', escape='"', new_line='\n', skip=0, header=false, columns={'column0': 'SMALLINT', 'column1': 'FLOAT', 'column2': 'VARCHAR'}); ---- 1 1.1 bla # don't accept globs statement error -FROM sniff_csv('data/csv/hive-partitioning/simple/*/*/test.csv'); 
+FROM sniff_csv('{DATA_DIR}/csv/hive-partitioning/simple/*/*/test.csv'); ---- Not implemented Error: sniff_csv does not operate on more than one file yet # don't accept made-up options statement error -FROM sniff_csv('data/csv/autotypecandidates.csv', oop = True); +FROM sniff_csv('{DATA_DIR}/csv/autotypecandidates.csv', oop = True); ---- Invalid named parameter "oop" for function sniff_csv # Ignore multi-partitioning options (maybe even error on them?) query IIIIIIIIIIII -FROM sniff_csv('data/csv/autotypecandidates.csv', HIVE_PARTITIONING=1); +FROM sniff_csv('{DATA_DIR}/csv/autotypecandidates.csv', HIVE_PARTITIONING=1); ---- -| (empty) (empty) \n (empty) 0 0 [{'name': column0, 'type': BIGINT}, {'name': column1, 'type': DOUBLE}, {'name': column2, 'type': VARCHAR}] NULL NULL NULL FROM read_csv('data/csv/autotypecandidates.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column0': 'BIGINT', 'column1': 'DOUBLE', 'column2': 'VARCHAR'}); +| (empty) (empty) \n (empty) 0 0 [{'name': column0, 'type': BIGINT}, {'name': column1, 'type': DOUBLE}, {'name': column2, 'type': VARCHAR}] NULL NULL NULL FROM read_csv('{DATA_DIR}/csv/autotypecandidates.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column0': 'BIGINT', 'column1': 'DOUBLE', 'column2': 'VARCHAR'}); query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', types=['INTEGER','BIGINT','BIGINT','BIGINT','BIGINT', 'DOUBLE','DOUBLE','DOUBLE','VARCHAR', 'VARCHAR','DATE', 'DATE', 'DATE', 'VARCHAR', 'VARCHAR', 'VARCHAR']); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', types=['INTEGER','BIGINT','BIGINT','BIGINT','BIGINT', 'DOUBLE','DOUBLE','DOUBLE','VARCHAR', 'VARCHAR','DATE', 'DATE', 'DATE', 'VARCHAR', 'VARCHAR', 'VARCHAR']); ---- -| (empty) (empty) \n (empty) 0 0 [{'name': column00, 'type': INTEGER}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'INTEGER', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d'); +| (empty) (empty) \n (empty) 0 0 [{'name': column00, 'type': INTEGER}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type':
VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'INTEGER', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d'); query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', dtypes=['INTEGER','BIGINT','BIGINT','BIGINT','BIGINT', 'DOUBLE','DOUBLE','DOUBLE','VARCHAR', 'VARCHAR','DATE', 'DATE', 'DATE', 'VARCHAR', 'VARCHAR', 'VARCHAR']); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', dtypes=['INTEGER','BIGINT','BIGINT','BIGINT','BIGINT', 'DOUBLE','DOUBLE','DOUBLE','VARCHAR', 'VARCHAR','DATE', 'DATE', 'DATE', 'VARCHAR', 'VARCHAR', 'VARCHAR']); ---- -| (empty) (empty) \n (empty) 0 0 [{'name': column00, 'type': INTEGER}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'INTEGER', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d'); +| (empty) (empty) \n (empty) 0 0 [{'name': column00, 'type': INTEGER}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'INTEGER', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d'); query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', 
column_types=['INTEGER','BIGINT','BIGINT','BIGINT','BIGINT', 'DOUBLE','DOUBLE','DOUBLE','VARCHAR', 'VARCHAR','DATE', 'DATE', 'DATE', 'VARCHAR', 'VARCHAR', 'VARCHAR']); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', column_types=['INTEGER','BIGINT','BIGINT','BIGINT','BIGINT', 'DOUBLE','DOUBLE','DOUBLE','VARCHAR', 'VARCHAR','DATE', 'DATE', 'DATE', 'VARCHAR', 'VARCHAR', 'VARCHAR']); ---- -| (empty) (empty) \n (empty) 0 0 [{'name': column00, 'type': INTEGER}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'INTEGER', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d'); +| (empty) (empty) \n (empty) 0 0 [{'name': column00, 'type': INTEGER}, {'name': column01, 'type': BIGINT}, {'name': column02, 'type': BIGINT}, {'name': column03, 'type': BIGINT}, {'name': column04, 'type': BIGINT}, {'name': column05, 'type': DOUBLE}, {'name': column06, 'type': DOUBLE}, {'name': column07, 'type': DOUBLE}, {'name': column08, 'type': VARCHAR}, {'name': column09, 'type': VARCHAR}, {'name': column10, 'type': DATE}, {'name': column11, 'type': DATE}, {'name': column12, 'type': DATE}, {'name': column13, 'type': VARCHAR}, {'name': column14, 'type': VARCHAR}, {'name': column15, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'column00': 'INTEGER', 'column01': 'BIGINT', 'column02': 'BIGINT', 'column03': 'BIGINT', 'column04': 'BIGINT', 'column05': 'DOUBLE', 'column06': 'DOUBLE', 'column07': 'DOUBLE', 'column08': 'VARCHAR', 'column09': 'VARCHAR', 'column10': 'DATE', 'column11': 'DATE', 'column12': 'DATE', 'column13': 'VARCHAR', 'column14': 'VARCHAR', 'column15': 'VARCHAR'}, dateformat='%Y-%m-%d'); query IIIIIIIIIIII -FROM sniff_csv('data/csv/real/lineitem_sample.csv', names=['c01','c02','c03','c04','c5', 'c06','c07','c08','c09', 'c10','c11', 'c12', 'c13', 'c14', 'c15', 'c16']); +FROM sniff_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', names=['c01','c02','c03','c04','c5', 'c06','c07','c08','c09', 'c10','c11', 'c12', 'c13', 'c14', 'c15', 'c16']); ---- -| (empty) (empty) \n (empty) 0 0 [{'name': c01, 'type': BIGINT}, {'name': c02, 'type': BIGINT}, {'name': c03, 'type': BIGINT}, {'name': c04, 'type': BIGINT}, {'name': c5, 'type': BIGINT}, {'name': c06, 'type': DOUBLE}, {'name': c07, 'type': DOUBLE}, {'name': c08, 'type': DOUBLE}, {'name': c09, 'type': VARCHAR}, {'name': c10, 'type': VARCHAR}, {'name': c11, 'type': DATE}, {'name': c12, 'type': DATE}, {'name': c13, 'type': DATE}, {'name': c14, 'type': 
VARCHAR}, {'name': c15, 'type': VARCHAR}, {'name': c16, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('data/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'c01': 'BIGINT', 'c02': 'BIGINT', 'c03': 'BIGINT', 'c04': 'BIGINT', 'c5': 'BIGINT', 'c06': 'DOUBLE', 'c07': 'DOUBLE', 'c08': 'DOUBLE', 'c09': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, dateformat='%Y-%m-%d'); +| (empty) (empty) \n (empty) 0 0 [{'name': c01, 'type': BIGINT}, {'name': c02, 'type': BIGINT}, {'name': c03, 'type': BIGINT}, {'name': c04, 'type': BIGINT}, {'name': c5, 'type': BIGINT}, {'name': c06, 'type': DOUBLE}, {'name': c07, 'type': DOUBLE}, {'name': c08, 'type': DOUBLE}, {'name': c09, 'type': VARCHAR}, {'name': c10, 'type': VARCHAR}, {'name': c11, 'type': DATE}, {'name': c12, 'type': DATE}, {'name': c13, 'type': DATE}, {'name': c14, 'type': VARCHAR}, {'name': c15, 'type': VARCHAR}, {'name': c16, 'type': VARCHAR}] %Y-%m-%d NULL NULL FROM read_csv('{DATA_DIR}/csv/real/lineitem_sample.csv', auto_detect=false, delim='|', quote='', escape='', new_line='\n', skip=0, comment='', header=false, columns={'c01': 'BIGINT', 'c02': 'BIGINT', 'c03': 'BIGINT', 'c04': 'BIGINT', 'c5': 'BIGINT', 'c06': 'DOUBLE', 'c07': 'DOUBLE', 'c08': 'DOUBLE', 'c09': 'VARCHAR', 'c10': 'VARCHAR', 'c11': 'DATE', 'c12': 'DATE', 'c13': 'DATE', 'c14': 'VARCHAR', 'c15': 'VARCHAR', 'c16': 'VARCHAR'}, dateformat='%Y-%m-%d'); # Test that rejects returns correct values query I -SELECT delimiter FROM sniff_csv('data/csv/ignore_errors.csv'); +SELECT delimiter FROM sniff_csv('{DATA_DIR}/csv/ignore_errors.csv'); ---- ; query I -SELECT delimiter FROM sniff_csv('data/csv/ignore_errors.csv', ignore_errors=true); +SELECT delimiter FROM sniff_csv('{DATA_DIR}/csv/ignore_errors.csv', ignore_errors=true); ---- ; query I -SELECT delimiter FROM sniff_csv('data/csv/ignore_errors.csv', store_rejects=true); +SELECT delimiter FROM sniff_csv('{DATA_DIR}/csv/ignore_errors.csv', store_rejects=true); ---- ; query I -SELECT delimiter FROM sniff_csv('data/csv/ignore_errors.csv', rejects_scan='a_1'); +SELECT delimiter FROM sniff_csv('{DATA_DIR}/csv/ignore_errors.csv', rejects_scan='a_1'); ---- ; query I -SELECT delimiter FROM sniff_csv('data/csv/ignore_errors.csv', rejects_table='a_2'); +SELECT delimiter FROM sniff_csv('{DATA_DIR}/csv/ignore_errors.csv', rejects_table='a_2'); ---- ; \ No newline at end of file diff --git a/test/sql/copy/csv/test_sniffer_hang.test b/test/sql/copy/csv/test_sniffer_hang.test index cc4e171c62d8..34cbc30bb559 100644 --- a/test/sql/copy/csv/test_sniffer_hang.test +++ b/test/sql/copy/csv/test_sniffer_hang.test @@ -6,6 +6,6 @@ statement ok PRAGMA enable_verification statement error -FROM read_csv('data/csv/bad_csv_file_2047.csv', sample_size = -1) +FROM read_csv('{DATA_DIR}/csv/bad_csv_file_2047.csv', sample_size = -1) ---- Error when sniffing file \ No newline at end of file diff --git a/test/sql/copy/csv/test_sniffer_tab_delimiter.test b/test/sql/copy/csv/test_sniffer_tab_delimiter.test index 6a7f74c11042..ea0524260783 100644 --- a/test/sql/copy/csv/test_sniffer_tab_delimiter.test +++ b/test/sql/copy/csv/test_sniffer_tab_delimiter.test @@ -6,40 +6,40 @@ statement ok PRAGMA enable_verification query I -select columns from sniff_csv('data/csv/test_apple_financial.csv.gz', header = 1, skip=3, delim = '\t') +select columns from sniff_csv('{DATA_DIR}/csv/test_apple_financial.csv.gz',
header = 1, skip=3, delim = '\t') ---- [{'name': Transaction Date, 'type': DATE}, {'name': Settlement Date, 'type': DATE}, {'name': Apple Identifier, 'type': BIGINT}, {'name': SKU, 'type': VARCHAR}, {'name': Title, 'type': VARCHAR}, {'name': Developer Name, 'type': VARCHAR}, {'name': Product Type Identifier, 'type': VARCHAR}, {'name': Country of Sale, 'type': VARCHAR}, {'name': Quantity, 'type': BIGINT}, {'name': Partner Share, 'type': DOUBLE}, {'name': Extended Partner Share, 'type': DOUBLE}, {'name': Partner Share Currency, 'type': VARCHAR}, {'name': Customer Price, 'type': DOUBLE}, {'name': Customer Currency, 'type': VARCHAR}, {'name': Sale or Return, 'type': VARCHAR}, {'name': Promo Code, 'type': VARCHAR}, {'name': Order Type, 'type': VARCHAR}, {'name': Region, 'type': VARCHAR}] query I -select columns from sniff_csv('data/csv/test_apple_financial.csv.gz', skip=3, delim = '\t') +select columns from sniff_csv('{DATA_DIR}/csv/test_apple_financial.csv.gz', skip=3, delim = '\t') ---- [{'name': Transaction Date, 'type': DATE}, {'name': Settlement Date, 'type': DATE}, {'name': Apple Identifier, 'type': BIGINT}, {'name': SKU, 'type': VARCHAR}, {'name': Title, 'type': VARCHAR}, {'name': Developer Name, 'type': VARCHAR}, {'name': Product Type Identifier, 'type': VARCHAR}, {'name': Country of Sale, 'type': VARCHAR}, {'name': Quantity, 'type': BIGINT}, {'name': Partner Share, 'type': DOUBLE}, {'name': Extended Partner Share, 'type': DOUBLE}, {'name': Partner Share Currency, 'type': VARCHAR}, {'name': Customer Price, 'type': DOUBLE}, {'name': Customer Currency, 'type': VARCHAR}, {'name': Sale or Return, 'type': VARCHAR}, {'name': Promo Code, 'type': VARCHAR}, {'name': Order Type, 'type': VARCHAR}, {'name': Region, 'type': VARCHAR}] query I -select columns from sniff_csv('data/csv/test_apple_financial.csv.gz', skip=3) +select columns from sniff_csv('{DATA_DIR}/csv/test_apple_financial.csv.gz', skip=3) ---- [{'name': Transaction Date, 'type': DATE}, {'name': Settlement Date, 'type': DATE}, {'name': Apple Identifier, 'type': BIGINT}, {'name': SKU, 'type': VARCHAR}, {'name': Title, 'type': VARCHAR}, {'name': Developer Name, 'type': VARCHAR}, {'name': Product Type Identifier, 'type': VARCHAR}, {'name': Country of Sale, 'type': VARCHAR}, {'name': Quantity, 'type': BIGINT}, {'name': Partner Share, 'type': DOUBLE}, {'name': Extended Partner Share, 'type': DOUBLE}, {'name': Partner Share Currency, 'type': VARCHAR}, {'name': Customer Price, 'type': DOUBLE}, {'name': Customer Currency, 'type': VARCHAR}, {'name': Sale or Return, 'type': VARCHAR}, {'name': Promo Code, 'type': VARCHAR}, {'name': Order Type, 'type': VARCHAR}, {'name': Region, 'type': VARCHAR}] query I -select columns from sniff_csv('data/csv/test_apple_financial.csv.gz') +select columns from sniff_csv('{DATA_DIR}/csv/test_apple_financial.csv.gz') ---- [{'name': Transaction Date, 'type': DATE}, {'name': Settlement Date, 'type': DATE}, {'name': Apple Identifier, 'type': BIGINT}, {'name': SKU, 'type': VARCHAR}, {'name': Title, 'type': VARCHAR}, {'name': Developer Name, 'type': VARCHAR}, {'name': Product Type Identifier, 'type': VARCHAR}, {'name': Country of Sale, 'type': VARCHAR}, {'name': Quantity, 'type': BIGINT}, {'name': Partner Share, 'type': DOUBLE}, {'name': Extended Partner Share, 'type': DOUBLE}, {'name': Partner Share Currency, 'type': VARCHAR}, {'name': Customer Price, 'type': DOUBLE}, {'name': Customer Currency, 'type': VARCHAR}, {'name': Sale or Return, 'type': VARCHAR}, {'name': Promo Code, 'type': VARCHAR}, {'name': Order Type, 
'type': VARCHAR}, {'name': Region, 'type': VARCHAR}] query IIIIIIIIIIIIIIIIII -FROM read_csv('data/csv/test_apple_financial.csv.gz', header = 1, skip=3) +FROM read_csv('{DATA_DIR}/csv/test_apple_financial.csv.gz', header = 1, skip=3) ---- 2024-06-19 2024-06-22 1435055555 monthly_full_subscription Monthly Subscription NULL XXX XX 1 99.99 99.99 XXD 99.99 XXD S NULL NULL NULL 2024-06-14 2024-06-14 1435055555 monthly_full_subscription Monthly Subscription NULL XXX XX 2 99.99 99.99 XXD 99.99 XXD S NULL NULL NULL query IIIIIIIIIIIIIIIIII -FROM read_csv('data/csv/test_apple_financial.csv.gz', skip=3) +FROM read_csv('{DATA_DIR}/csv/test_apple_financial.csv.gz', skip=3) ---- 2024-06-19 2024-06-22 1435055555 monthly_full_subscription Monthly Subscription NULL XXX XX 1 99.99 99.99 XXD 99.99 XXD S NULL NULL NULL 2024-06-14 2024-06-14 1435055555 monthly_full_subscription Monthly Subscription NULL XXX XX 2 99.99 99.99 XXD 99.99 XXD S NULL NULL NULL query IIIIIIIIIIIIIIIIII -FROM read_csv('data/csv/test_apple_financial.csv.gz') +FROM read_csv('{DATA_DIR}/csv/test_apple_financial.csv.gz') ---- 2024-06-19 2024-06-22 1435055555 monthly_full_subscription Monthly Subscription NULL XXX XX 1 99.99 99.99 XXD 99.99 XXD S NULL NULL NULL 2024-06-14 2024-06-14 1435055555 monthly_full_subscription Monthly Subscription NULL XXX XX 2 99.99 99.99 XXD 99.99 XXD S NULL NULL NULL diff --git a/test/sql/copy/csv/test_soccer_kaggle.test b/test/sql/copy/csv/test_soccer_kaggle.test index fc2e588089f1..d847b6e8b69b 100644 --- a/test/sql/copy/csv/test_soccer_kaggle.test +++ b/test/sql/copy/csv/test_soccer_kaggle.test @@ -6,6 +6,6 @@ statement ok PRAGMA enable_verification query I -select count(*) from 'data/csv/soccer_kaggle.csv' +select count(*) from '{DATA_DIR}/csv/soccer_kaggle.csv' ---- 14 diff --git a/test/sql/copy/csv/test_thijs_unquoted_file.test b/test/sql/copy/csv/test_thijs_unquoted_file.test index 64a2fc222192..6ff2eab851fa 100644 --- a/test/sql/copy/csv/test_thijs_unquoted_file.test +++ b/test/sql/copy/csv/test_thijs_unquoted_file.test @@ -7,12 +7,12 @@ PRAGMA enable_verification statement error -from read_csv('data/csv/thijs_unquoted.csv', quote='"', sep='|', escape='"', columns={'a':'varchar', 'b': 'varchar', 'c': 'integer'}, auto_detect=false); +from read_csv('{DATA_DIR}/csv/thijs_unquoted.csv', quote='"', sep='|', escape='"', columns={'a':'varchar', 'b': 'varchar', 'c': 'integer'}, auto_detect=false); ---- * Disable the parser's strict mode (strict_mode=false) to allow reading rows that do not comply with the CSV standard. 
query III -from read_csv('data/csv/thijs_unquoted.csv', quote='"', sep='|', escape='"', columns={'a':'varchar', 'b': 'varchar', 'c': 'integer'}, auto_detect=false, strict_mode = False); +from read_csv('{DATA_DIR}/csv/thijs_unquoted.csv', quote='"', sep='|', escape='"', columns={'a':'varchar', 'b': 'varchar', 'c': 'integer'}, auto_detect=false, strict_mode = False); ---- HYDRONIC GESELLSCHAFT FÜR WASSERTECHNIK MBH 2011 ANTON SONNENSCHUTZSYSTEME GESELLSCHAFT MIT BESCHRÄNKTER HAFTUN 2012 @@ -20,12 +20,12 @@ ENERGYS MAINTENANCE S 2015 SYSTEMAT BELGIUM S 2013 query II -SELECT DELIMITER, QUOTE FROM sniff_csv('data/csv/rabo-anon.csv.gz', strict_mode=FALSE) +SELECT DELIMITER, QUOTE FROM sniff_csv('{DATA_DIR}/csv/rabo-anon.csv.gz', strict_mode=FALSE) ---- , " statement ok -CREATE TABLE T AS FROM read_csv('data/csv/rabo-anon.csv.gz', strict_mode=FALSE); +CREATE TABLE T AS FROM read_csv('{DATA_DIR}/csv/rabo-anon.csv.gz', strict_mode=FALSE); query I select count(*) from T diff --git a/test/sql/copy/csv/test_thousands_separator.test b/test/sql/copy/csv/test_thousands_separator.test index 47bd4242fc97..64a6aee144af 100644 --- a/test/sql/copy/csv/test_thousands_separator.test +++ b/test/sql/copy/csv/test_thousands_separator.test @@ -7,39 +7,39 @@ PRAGMA enable_verification # Test NULL statement ok -FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = NULL, delim = NULL) +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', thousands = NULL, delim = NULL) # Test Empty statement ok -FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = '', delim = NULL) +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', thousands = '', delim = NULL) # Test more than one char statement error -FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = 'bla', delim = NULL) +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', thousands = 'bla', delim = NULL) ---- Unsupported parameter for THOUSANDS: should be max one character # Must be different from decimal separator statement error -FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', decimal_separator = ',') +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', thousands = ',', decimal_separator = ',') ---- THOUSANDS must not appear in the DECIMAL_SEPARATOR specification and vice versa statement error -FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = '.') +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', thousands = '.') ---- THOUSANDS must not appear in the DECIMAL_SEPARATOR specification and vice versa # Test sniffer prompt query I SELECT COUNT(*) > 0 AS has_match -FROM sniff_csv('data/csv/thousands_separator/simple.csv', thousands = ',') +FROM sniff_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', thousands = ',') WHERE prompt LIKE '%thousands='',''%'; ---- TRUE query I -FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim = ';', columns = {'a':'double'}, header = False, auto_detect = false) +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', thousands = ',', delim = ';', columns = {'a':'double'}, header = False, auto_detect = false) ---- 100000.0 300000.2 @@ -48,7 +48,7 @@ FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim 9999999999.2 query I -FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim = ';', columns = {'a':'float'}, header = False, auto_detect = false) +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', 
thousands = ',', delim = ';', columns = {'a':'float'}, header = False, auto_detect = false) ---- 100000.0 300000.2 @@ -57,7 +57,7 @@ FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim 9999999999.2 query I -FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim = ';', columns = {'a':'decimal(32,3)'}, header = False, auto_detect = false) +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', thousands = ',', delim = ';', columns = {'a':'decimal(32,3)'}, header = False, auto_detect = false) ---- 100000.0 300000.2 @@ -67,7 +67,7 @@ FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim # Now try with auto-detect query I -FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim = ';', header = False) +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', thousands = ',', delim = ';', header = False) ---- 100000.0 300000.2 @@ -76,7 +76,7 @@ FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim 9999999999.2 query I -FROM read_csv('data/csv/thousands_separator/simple_quoted.csv', thousands = ',', header = False) +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple_quoted.csv', thousands = ',', header = False) ---- 100000.0 300000.2 @@ -86,7 +86,7 @@ FROM read_csv('data/csv/thousands_separator/simple_quoted.csv', thousands = ',', # Check auto-detect doesn't reject this as float or decimal query I -FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim = ';', header = False, columns = {'a':'float'}) +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', thousands = ',', delim = ';', header = False, columns = {'a':'float'}) ---- 100000.0 300000.2 @@ -95,7 +95,7 @@ FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim 9999999999.2 query I -FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim = ';', header = False, columns = {'a':'decimal(32,3)'}) +FROM read_csv('{DATA_DIR}/csv/thousands_separator/simple.csv', thousands = ',', delim = ';', header = False, columns = {'a':'decimal(32,3)'}) ---- 100000.0 300000.2 @@ -104,7 +104,7 @@ FROM read_csv('data/csv/thousands_separator/simple.csv', thousands = ',', delim 9999999999.2 query III -FROM read_csv('data/csv/thousands_separator/multi_column.csv', thousands = ',') +FROM read_csv('{DATA_DIR}/csv/thousands_separator/multi_column.csv', thousands = ',') ---- rick 100000.0 amsterdam lorenzo 300000.2 amsterdam @@ -114,7 +114,7 @@ patrick 9999999999.2 amsterdam query III -FROM read_csv('data/csv/thousands_separator/multi_column_quote.csv', thousands = ',') +FROM read_csv('{DATA_DIR}/csv/thousands_separator/multi_column_quote.csv', thousands = ',') ---- rick 100000.0 amsterdam lorenzo 300000.2 amsterdam @@ -127,7 +127,7 @@ statement ok CREATE TABLE T (name varchar, money double, city varchar); statement ok -COPY T FROM 'data/csv/thousands_separator/multi_column_quote.csv' (THOUSANDS ',') ; +COPY T FROM '{DATA_DIR}/csv/thousands_separator/multi_column_quote.csv' (THOUSANDS ',') ; query III FROM T; @@ -140,7 +140,7 @@ patrick 9999999999.2 amsterdam # Test with nonsensical thousands separator - Pandas just removes them, so we do the same.
query I -FROM read_csv('data/csv/thousands_separator/thousands_broken.csv', thousands = ',') +FROM read_csv('{DATA_DIR}/csv/thousands_separator/thousands_broken.csv', thousands = ',') ---- 100000.0 300000.2 @@ -149,7 +149,7 @@ FROM read_csv('data/csv/thousands_separator/thousands_broken.csv', thousands = ' # Test an integer query I -FROM read_csv('data/csv/thousands_separator/integers.csv', thousands = ',') +FROM read_csv('{DATA_DIR}/csv/thousands_separator/integers.csv', thousands = ',') ---- 100000000000 140000000000 @@ -157,6 +157,6 @@ FROM read_csv('data/csv/thousands_separator/integers.csv', thousands = ',') 900 query I -select columns from sniff_csv('data/csv/thousands_separator/integers.csv', thousands = ',') +select columns from sniff_csv('{DATA_DIR}/csv/thousands_separator/integers.csv', thousands = ',') ---- [{'name': bigvalues, 'type': BIGINT}] diff --git a/test/sql/copy/csv/test_time.test b/test/sql/copy/csv/test_time.test index f1676cb61535..5d9ff4d1a7c8 100644 --- a/test/sql/copy/csv/test_time.test +++ b/test/sql/copy/csv/test_time.test @@ -7,23 +7,23 @@ PRAGMA enable_verification query I -from read_csv('data/csv/auto/invalid_time.csv', header = 0) +from read_csv('{DATA_DIR}/csv/auto/invalid_time.csv', header = 0) ---- 12:0 query I -from 'data/csv/auto/time.csv' +from '{DATA_DIR}/csv/auto/time.csv' ---- 05:40:00 21:30:00 17:45:00 query IIIII -from 'data/csv/auto/various_time_formats.csv' +from '{DATA_DIR}/csv/auto/various_time_formats.csv' ---- 12: 12:0 12:00:00 12:0: 12:00: query I -select columns from sniff_csv('data/csv/auto/various_time_formats.csv') +select columns from sniff_csv('{DATA_DIR}/csv/auto/various_time_formats.csv') ---- [{'name': column0, 'type': VARCHAR}, {'name': column1, 'type': VARCHAR}, {'name': column2, 'type': TIME}, {'name': column3, 'type': VARCHAR}, {'name': column4, 'type': VARCHAR}] diff --git a/test/sql/copy/csv/test_timestamp_offset.test b/test/sql/copy/csv/test_timestamp_offset.test index fe513f27d2bb..8fb4ae8c04dd 100644 --- a/test/sql/copy/csv/test_timestamp_offset.test +++ b/test/sql/copy/csv/test_timestamp_offset.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query II -SELECT col1, typeof(col1) FROM 'data/csv/test/timestampoffset.csv' t1(col1); +SELECT col1, typeof(col1) FROM '{DATA_DIR}/csv/test/timestampoffset.csv' t1(col1); ---- 2020-12-30 00:25:58.745232+00 TIMESTAMP WITH TIME ZONE 2020-12-30 01:25:58.745232+00 TIMESTAMP WITH TIME ZONE diff --git a/test/sql/copy/csv/test_timestamptz_12926.test b/test/sql/copy/csv/test_timestamptz_12926.test index 95aeb63cef76..7c79a639c40c 100644 --- a/test/sql/copy/csv/test_timestamptz_12926.test +++ b/test/sql/copy/csv/test_timestamptz_12926.test @@ -6,12 +6,12 @@ statement ok PRAGMA enable_verification statement error -FROM read_csv('data/csv/timestamp_tz.csv', dtypes = [TIMESTAMPTZ]); +FROM read_csv('{DATA_DIR}/csv/timestamp_tz.csv', dtypes = [TIMESTAMPTZ]); ---- Could not convert string "1/1/2020" to 'TIMESTAMP WITH TIME ZONE' query I -FROM read_csv('data/csv/timestamp_tz.csv', dtypes = [TIMESTAMPTZ], timestampformat = '%d/%m/%Y'); +FROM read_csv('{DATA_DIR}/csv/timestamp_tz.csv', dtypes = [TIMESTAMPTZ], timestampformat = '%d/%m/%Y'); ---- 2020-01-01 00:00:00+00 2020-01-01 00:00:00+00 @@ -22,12 +22,12 @@ statement ok CREATE TABLE test (column0 timestamptz); statement error -INSERT INTO test SELECT * FROM 'data/csv/timestamp_tz.csv'; +INSERT INTO test SELECT * FROM '{DATA_DIR}/csv/timestamp_tz.csv'; ---- Could not convert string "1/1/2020" to 'TIMESTAMP WITH TIME ZONE' statement ok 
-INSERT INTO test SELECT * FROM read_csv('data/csv/timestamp_tz.csv', dtypes = [TIMESTAMPTZ], timestampformat = '%d/%m/%Y'); +INSERT INTO test SELECT * FROM read_csv('{DATA_DIR}/csv/timestamp_tz.csv', dtypes = [TIMESTAMPTZ], timestampformat = '%d/%m/%Y'); query I FROM test diff --git a/test/sql/copy/csv/test_union_by_name.test b/test/sql/copy/csv/test_union_by_name.test index 9cd8cca8875f..2cd29ced44bf 100644 --- a/test/sql/copy/csv/test_union_by_name.test +++ b/test/sql/copy/csv/test_union_by_name.test @@ -33,18 +33,18 @@ INSERT INTO ubn3 VALUES (100,101), (102, 103); # Write them to temporary files statement ok -COPY ubn1 TO '__TEST_DIR__/ubn1.csv' WITH ( DELIMITER ','); +COPY ubn1 TO '{TEMP_DIR}/ubn1.csv' WITH ( DELIMITER ','); statement ok -COPY ubn2 TO '__TEST_DIR__/ubn2.csv' WITH (DELIMITER ','); +COPY ubn2 TO '{TEMP_DIR}/ubn2.csv' WITH (DELIMITER ','); statement ok -COPY ubn3 TO '__TEST_DIR__/ubn3.csv' WITH (DELIMITER ','); +COPY ubn3 TO '{TEMP_DIR}/ubn3.csv' WITH (DELIMITER ','); # Read from them (the order matters) query I -SELECT * FROM read_csv_auto(['__TEST_DIR__/ubn1.csv', '__TEST_DIR__/ubn2.csv', '__TEST_DIR__/ubn3.csv']); +SELECT * FROM read_csv_auto(['{TEMP_DIR}/ubn1.csv', '{TEMP_DIR}/ubn2.csv', '{TEMP_DIR}/ubn3.csv']); ---- 1 2 @@ -56,7 +56,7 @@ SELECT * FROM read_csv_auto(['__TEST_DIR__/ubn1.csv', '__TEST_DIR__/ubn2.csv', ' query III SELECT a, b, c -FROM read_csv_auto(['__TEST_DIR__/ubn1.csv', '__TEST_DIR__/ubn2.csv', '__TEST_DIR__/ubn3.csv'], UNION_BY_NAME=TRUE) +FROM read_csv_auto(['{TEMP_DIR}/ubn1.csv', '{TEMP_DIR}/ubn2.csv', '{TEMP_DIR}/ubn3.csv'], UNION_BY_NAME=TRUE) ORDER BY a; ---- 1 NULL NULL @@ -68,8 +68,8 @@ ORDER BY a; 9223372036854775807 NULL NULL query IIII -SELECT a, b, c, replace(replace(filename, '__TEST_DIR__', ''), '\', '/')[2:] -FROM read_csv_auto(['__TEST_DIR__/ubn1.csv', '__TEST_DIR__/ubn2.csv', '__TEST_DIR__/ubn3.csv'], UNION_BY_NAME=TRUE) +SELECT a, b, c, replace(replace(filename, '{TEMP_DIR}', ''), '\', '/')[2:] +FROM read_csv_auto(['{TEMP_DIR}/ubn1.csv', '{TEMP_DIR}/ubn2.csv', '{TEMP_DIR}/ubn3.csv'], UNION_BY_NAME=TRUE) ORDER BY a; ---- 1 NULL NULL ubn1.csv @@ -82,26 +82,26 @@ ORDER BY a; query IIII SELECT COUNT(a), COUNT(b), COUNT(c), COUNT(filename) -FROM read_csv_auto(['__TEST_DIR__/ubn1.csv', '__TEST_DIR__/ubn2.csv', '__TEST_DIR__/ubn3.csv'], UNION_BY_NAME=TRUE) +FROM read_csv_auto(['{TEMP_DIR}/ubn1.csv', '{TEMP_DIR}/ubn2.csv', '{TEMP_DIR}/ubn3.csv'], UNION_BY_NAME=TRUE) ---- 7 2 2 7 query TTT SELECT typeof(a), typeof(b), typeof(c) -FROM read_csv_auto(['__TEST_DIR__/ubn1.csv', '__TEST_DIR__/ubn2.csv', '__TEST_DIR__/ubn3.csv'], UNION_BY_NAME=TRUE) +FROM read_csv_auto(['{TEMP_DIR}/ubn1.csv', '{TEMP_DIR}/ubn2.csv', '{TEMP_DIR}/ubn3.csv'], UNION_BY_NAME=TRUE) LIMIT 1; ---- BIGINT BIGINT BIGINT # Test timestamp type statement error -SELECT * FROM read_csv_auto(['data/csv/union-by-name/ubn1.csv', 'data/csv/union-by-name/ubn2.csv', 'data/csv/union-by-name/ubn3.csv', 'data/csv/union-by-name/ubn4.csv']) +SELECT * FROM read_csv_auto(['{DATA_DIR}/csv/union-by-name/ubn1.csv', '{DATA_DIR}/csv/union-by-name/ubn2.csv', '{DATA_DIR}/csv/union-by-name/ubn3.csv', '{DATA_DIR}/csv/union-by-name/ubn4.csv']) ---- is missing query IIIII SELECT a, b, c, ts, k -FROM read_csv_auto('data/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) ORDER BY a, c, ts ---- NULL NULL 3 2003-06-30 12:03:10 6 @@ -120,7 +120,7 @@ test 88 NULL 2020-12-30 01:25:58.745232+01 NULL query TTTTT SELECT typeof(a), 
typeof(b), typeof(c), typeof(ts), typeof(k) -FROM read_csv_auto('data/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) LIMIT 1; ---- VARCHAR BIGINT BIGINT VARCHAR BIGINT @@ -128,7 +128,7 @@ VARCHAR BIGINT BIGINT VARCHAR BIGINT # projection pushdown query II SELECT c, k -FROM read_csv_auto('data/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) ORDER BY c NULLS LAST, k NULLS LAST ---- 3 6 @@ -148,7 +148,7 @@ NULL NULL # projection pushdown query I SELECT ts -FROM read_csv_auto('data/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn*.csv',UNION_BY_NAME=TRUE) ORDER BY ts NULLS LAST ---- 2003-06-30 12:03:10 @@ -166,27 +166,27 @@ NULL NULL query TTT -SELECT typeof(a), typeof(b), typeof(ts) FROM read_csv_auto('data/csv/union-by-name/ubn2.csv') LIMIT 1; +SELECT typeof(a), typeof(b), typeof(ts) FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn2.csv') LIMIT 1; ---- VARCHAR BIGINT TIMESTAMP WITH TIME ZONE # Test glob pattern [12] and union_by_name with filename options query IIIII -SELECT a, b, c, ts, replace(filename, '\', '/') -FROM read_csv_auto('data/csv/union-by-name/ubn[12].csv',FILENAME=TRUE ,UNION_BY_NAME=TRUE) +SELECT a, b, c, ts, parse_filename(filename) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn[12].csv',FILENAME=TRUE ,UNION_BY_NAME=TRUE) ORDER BY a, c, ts ---- -1 2 3 NULL data/csv/union-by-name/ubn1.csv -3 4 5 NULL data/csv/union-by-name/ubn1.csv -34fd321 91 NULL 2020-12-30 02:25:58.745232+00 data/csv/union-by-name/ubn2.csv -4 5 6 NULL data/csv/union-by-name/ubn1.csv -8cb123cb8 90 NULL 2020-12-30 01:25:58.745232+00 data/csv/union-by-name/ubn2.csv -fg5391jn4 92 NULL 2020-12-30 03:25:58.745232+00 data/csv/union-by-name/ubn2.csv -test 88 NULL 2020-12-30 00:25:58.745232+00 data/csv/union-by-name/ubn2.csv +1 2 3 NULL ubn1.csv +3 4 5 NULL ubn1.csv +34fd321 91 NULL 2020-12-30 02:25:58.745232+00 ubn2.csv +4 5 6 NULL ubn1.csv +8cb123cb8 90 NULL 2020-12-30 01:25:58.745232+00 ubn2.csv +fg5391jn4 92 NULL 2020-12-30 03:25:58.745232+00 ubn2.csv +test 88 NULL 2020-12-30 00:25:58.745232+00 ubn2.csv query TTTT SELECT typeof(a), typeof(b), typeof(c), typeof(ts) -FROM read_csv_auto('data/csv/union-by-name/ubn[12].csv',UNION_BY_NAME=TRUE) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn[12].csv',UNION_BY_NAME=TRUE) LIMIT 1 ---- VARCHAR BIGINT BIGINT TIMESTAMP WITH TIME ZONE @@ -195,33 +195,33 @@ VARCHAR BIGINT BIGINT TIMESTAMP WITH TIME ZONE # Test ubn[23] timestamp type casting query T SELECT typeof(ts) -FROM read_csv_auto('data/csv/union-by-name/ubn[23].csv',UNION_BY_NAME=TRUE) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn[23].csv',UNION_BY_NAME=TRUE) LIMIT 1 ---- TIMESTAMP WITH TIME ZONE # Test glob pattern [!1-2] and union_by_name with filename options query IIII -SELECT k, c, ts, replace(filename, '\', '/') -FROM read_csv_auto('data/csv/union-by-name/ubn[!1-2].csv',FILENAME=TRUE ,UNION_BY_NAME=TRUE) +SELECT k, c, ts, parse_filename(filename) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/ubn[!1-2].csv',FILENAME=TRUE ,UNION_BY_NAME=TRUE) ORDER BY c ---- -6 3 2003-06-30 12:03:10 data/csv/union-by-name/ubn3.csv -6 5 2003-06-30 12:03:10 data/csv/union-by-name/ubn3.csv -6 6 2003-06-30 12:03:10 data/csv/union-by-name/ubn3.csv -NULL 100 Monday data/csv/union-by-name/ubn4.csv -NULL 200 Sunday data/csv/union-by-name/ubn4.csv -NULL 300 Friday data/csv/union-by-name/ubn4.csv +6 
3 2003-06-30 12:03:10 ubn3.csv +6 5 2003-06-30 12:03:10 ubn3.csv +6 6 2003-06-30 12:03:10 ubn3.csv +NULL 100 Monday ubn4.csv +NULL 200 Sunday ubn4.csv +NULL 300 Friday ubn4.csv # Test hive_partition with union_by_name statement error -SELECT * FROM read_csv_auto('data/csv/union-by-name/part=[ab]/*',HIVE_PARTITIONING=TRUE, null_padding=0) +SELECT * FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/part=[ab]/*',HIVE_PARTITIONING=TRUE, null_padding=0) ---- If you are trying to read files with different schemas, try setting union_by_name=True query IIII SELECT id, value, a, part -FROM read_csv_auto('data/csv/union-by-name/part=[ab]/*',HIVE_PARTITIONING=TRUE ,UNION_BY_NAME=TRUE) +FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/part=[ab]/*',HIVE_PARTITIONING=TRUE ,UNION_BY_NAME=TRUE) ORDER BY id ---- 1 value1 aaa a @@ -229,12 +229,12 @@ ORDER BY id # Test hive_partition with union_by_name statement error -SELECT * FROM read_csv_auto('data/csv/union-by-name/*[!a]/*',HIVE_PARTITIONING=TRUE ,UNION_BY_NAME=TRUE) +SELECT * FROM read_csv_auto('{DATA_DIR}/csv/union-by-name/*[!a]/*',HIVE_PARTITIONING=TRUE ,UNION_BY_NAME=TRUE) ---- # Test one thread per file CSV union by name where file only had header. query IIII -from read_csv('data/csv/union_by_name_2/*.csv', union_by_name=True, parallel = false); +from read_csv('{DATA_DIR}/csv/union_by_name_2/*.csv', union_by_name=True, parallel = false); ---- 1 1 1 NULL diff --git a/test/sql/copy/csv/test_union_by_name_types.test b/test/sql/copy/csv/test_union_by_name_types.test index b5c5b83609f3..9fd348cee488 100644 --- a/test/sql/copy/csv/test_union_by_name_types.test +++ b/test/sql/copy/csv/test_union_by_name_types.test @@ -7,13 +7,13 @@ PRAGMA enable_verification query IIIIIIIIIIIII SELECT typeof(#1), typeof(#2), typeof(#3), typeof(#4), typeof(#5), typeof(#6), typeof(#7), typeof(#8), typeof(#9), typeof(#10), typeof(#11), typeof(#12), typeof(#13) -FROM read_csv(['data/csv/union-by-name/gabor/Post/*.csv', 'data/csv/union-by-name/gabor/Comment/*.csv'], union_by_name=true) +FROM read_csv(['{DATA_DIR}/csv/union-by-name/gabor/Post/*.csv', '{DATA_DIR}/csv/union-by-name/gabor/Comment/*.csv'], union_by_name=true) LIMIT 1; ---- TIMESTAMP WITH TIME ZONE BIGINT VARCHAR VARCHAR VARCHAR VARCHAR VARCHAR BIGINT BIGINT BIGINT BIGINT BIGINT BIGINT query IIIIIIIIIIIII -FROM read_csv(['data/csv/union-by-name/gabor/Post/*.csv', 'data/csv/union-by-name/gabor/Comment/*.csv'], union_by_name=true) +FROM read_csv(['{DATA_DIR}/csv/union-by-name/gabor/Post/*.csv', '{DATA_DIR}/csv/union-by-name/gabor/Comment/*.csv'], union_by_name=true) ORDER BY ALL LIMIT 1; ---- diff --git a/test/sql/copy/csv/test_unquoted_csv.test_slow b/test/sql/copy/csv/test_unquoted_csv.test_slow index b5f53723247a..f6b0780f6f20 100644 --- a/test/sql/copy/csv/test_unquoted_csv.test_slow +++ b/test/sql/copy/csv/test_unquoted_csv.test_slow @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification statement ok -FROM read_csv('data/csv/click_mini.tsv.gz') +FROM read_csv('{DATA_DIR}/csv/click_mini.tsv.gz') statement ok -FROM read_csv('data/csv/click_mini.tsv.gz', quote = '') +FROM read_csv('{DATA_DIR}/csv/click_mini.tsv.gz', quote = '') diff --git a/test/sql/copy/csv/test_validator.test b/test/sql/copy/csv/test_validator.test index e44d97b23014..fa316f38359a 100644 --- a/test/sql/copy/csv/test_validator.test +++ b/test/sql/copy/csv/test_validator.test @@ -9,15 +9,15 @@ PRAGMA enable_verification require notwindows statement error -FROM read_csv('data/csv/evil_nullpadding.csv', buffer_size=20, quote = '"') +FROM 
read_csv('{DATA_DIR}/csv/evil_nullpadding.csv', buffer_size=20, quote = '"') ---- The Parallel CSV Reader currently does not support a full read on this file. statement ok -FROM read_csv('data/csv/evil_nullpadding.csv', buffer_size=20) +FROM read_csv('{DATA_DIR}/csv/evil_nullpadding.csv', buffer_size=20) query I -FROM read_csv('data/csv/validator/single_column.csv', header = 0) +FROM read_csv('{DATA_DIR}/csv/validator/single_column.csv', header = 0) ---- 123 123 @@ -31,31 +31,31 @@ one 123 statement error -FROM read_csv('data/csv/validator/single_column.csv', header = 0, columns = {'a': 'integer'}, auto_detect = false) +FROM read_csv('{DATA_DIR}/csv/validator/single_column.csv', header = 0, columns = {'a': 'integer'}, auto_detect = false) ---- Error when converting column "a". Could not convert string "one" to 'INTEGER' statement error -FROM read_csv('data/csv/validator/single_column.csv', header = 0, columns = {'a': 'integer'}, auto_detect = false, buffer_size = 11) +FROM read_csv('{DATA_DIR}/csv/validator/single_column.csv', header = 0, columns = {'a': 'integer'}, auto_detect = false, buffer_size = 11) ---- Error when converting column "a". Could not convert string "one" to 'INTEGER' statement error -FROM read_csv('data/csv/validator/single_column.csv', header = 0, columns = {'a': 'integer'}, auto_detect = false, buffer_size = 11, parallel = false) +FROM read_csv('{DATA_DIR}/csv/validator/single_column.csv', header = 0, columns = {'a': 'integer'}, auto_detect = false, buffer_size = 11, parallel = false) ---- Error when converting column "a". Could not convert string "one" to 'INTEGER' statement ok -FROM read_csv('data/csv/validator/quoted_new_value.csv') +FROM read_csv('{DATA_DIR}/csv/validator/quoted_new_value.csv') statement ok -FROM read_csv('data/csv/validator/quoted_new_value.csv', columns = {'band': 'varchar', 'album': 'varchar', 'release': 'varchar'}, quote = '''', delim = ';', header = 0) +FROM read_csv('{DATA_DIR}/csv/validator/quoted_new_value.csv', columns = {'band': 'varchar', 'album': 'varchar', 'release': 'varchar'}, quote = '''', delim = ';', header = 0) statement ok -FROM read_csv('data/csv/validator/quoted_new_value.csv', columns = {'band': 'varchar', 'album': 'varchar', 'release': 'varchar'}, quote = '''', delim = ';', header = 0, buffer_size = 48) +FROM read_csv('{DATA_DIR}/csv/validator/quoted_new_value.csv', columns = {'band': 'varchar', 'album': 'varchar', 'release': 'varchar'}, quote = '''', delim = ';', header = 0, buffer_size = 48) statement ok -FROM read_csv('data/csv/validator/single_column_quoted_newline.csv', columns = {'Raffaella Carrà': 'varchar'}, quote = '"', buffer_size = 24) +FROM read_csv('{DATA_DIR}/csv/validator/single_column_quoted_newline.csv', columns = {'Raffaella Carrà': 'varchar'}, quote = '"', buffer_size = 24) statement ok -FROM read_csv('data/csv/validator/single_column_notquoted_newline.csv', columns = {'Raffaella Carrà': 'varchar'}, quote = '"', buffer_size = 22) \ No newline at end of file +FROM read_csv('{DATA_DIR}/csv/validator/single_column_notquoted_newline.csv', columns = {'Raffaella Carrà': 'varchar'}, quote = '"', buffer_size = 22) \ No newline at end of file diff --git a/test/sql/copy/csv/test_web_page.test b/test/sql/copy/csv/test_web_page.test index 6081ce27ca81..26815e4c5989 100644 --- a/test/sql/copy/csv/test_web_page.test +++ b/test/sql/copy/csv/test_web_page.test @@ -9,7 +9,7 @@ statement ok CREATE TABLE web_page(wp_web_page_sk integer not null, wp_web_page_id char(16) not null, wp_rec_start_date date, wp_rec_end_date date, 
wp_creation_date_sk integer, wp_access_date_sk integer, wp_autogen_flag char(1), wp_customer_sk integer, wp_url varchar(100), wp_type char(50), wp_char_count integer, wp_link_count integer, wp_image_count integer, wp_max_ad_count integer, primary key (wp_web_page_sk)); query I -COPY web_page FROM 'data/csv/real/web_page.csv' DELIMITER '|'; +COPY web_page FROM '{DATA_DIR}/csv/real/web_page.csv' DELIMITER '|'; ---- 60 @@ -21,7 +21,7 @@ SELECT * FROM web_page ORDER BY wp_web_page_sk LIMIT 3; 3 AAAAAAAACAAAAAAA 2000-09-03 NULL 2450814 2452611 N NULL http://www.foo.com feedback 1564 4 3 4 query I -COPY web_page TO '__TEST_DIR__/web_page.csv' DELIMITER ' ' HEADER; +COPY web_page TO '{TEMP_DIR}/web_page.csv' DELIMITER ' ' HEADER; ---- 60 @@ -35,7 +35,7 @@ SELECT * FROM web_page; # now copy back into the table query I -COPY web_page FROM '__TEST_DIR__/web_page.csv' DELIMITER ' ' HEADER; +COPY web_page FROM '{TEMP_DIR}/web_page.csv' DELIMITER ' ' HEADER; ---- 60 diff --git a/test/sql/copy/csv/test_windows_newline.test b/test/sql/copy/csv/test_windows_newline.test index 18d5bbb9942a..04bfa6143eb3 100644 --- a/test/sql/copy/csv/test_windows_newline.test +++ b/test/sql/copy/csv/test_windows_newline.test @@ -11,7 +11,7 @@ statement ok CREATE TABLE test (a INTEGER, b VARCHAR, c INTEGER); query I -COPY test FROM 'data/csv/test/windows_newline.csv'; +COPY test FROM '{DATA_DIR}/csv/test/windows_newline.csv'; ---- 20000 @@ -25,7 +25,7 @@ DELETE FROM test; # now do the same with a multi-byte quote that is not actually used statement error -COPY test FROM 'data/csv/test/windows_newline.csv' (QUOTE 'BLABLABLA', AUTO_DETECT FALSE); +COPY test FROM '{DATA_DIR}/csv/test/windows_newline.csv' (QUOTE 'BLABLABLA', AUTO_DETECT FALSE); ---- The quote option cannot exceed a size of 1 byte. 
@@ -38,7 +38,7 @@ statement ok CREATE TABLE test (a INTEGER); query I -COPY test FROM 'data/csv/test/windows_newline_empty.csv' (HEADER 0); +COPY test FROM '{DATA_DIR}/csv/test/windows_newline_empty.csv' (HEADER 0); ---- 20000 diff --git a/test/sql/copy/csv/test_wrong_newline_delimiter.test b/test/sql/copy/csv/test_wrong_newline_delimiter.test index 96f99eb28a3b..96cd3382cac6 100644 --- a/test/sql/copy/csv/test_wrong_newline_delimiter.test +++ b/test/sql/copy/csv/test_wrong_newline_delimiter.test @@ -6,21 +6,21 @@ statement ok PRAGMA enable_verification statement error -FROM read_csv('data/csv/timestamp.csv', columns = {'a': 'BIGINT'}, new_line= '\r', strict_mode=false) +FROM read_csv('{DATA_DIR}/csv/timestamp.csv', columns = {'a': 'BIGINT'}, new_line= '\r', strict_mode=false) ---- new_line = \r (Set By User) statement error -FROM read_csv('data/csv/timestamp.csv', columns = {'a': 'BIGINT'}, new_line= '\n', strict_mode=false) +FROM read_csv('{DATA_DIR}/csv/timestamp.csv', columns = {'a': 'BIGINT'}, new_line= '\n', strict_mode=false) ---- new_line = \n (Set By User) statement error -FROM read_csv('data/csv/timestamp.csv', columns = {'a': 'BIGINT'}, new_line= '\r\n', auto_detect = false, strict_mode=false) +FROM read_csv('{DATA_DIR}/csv/timestamp.csv', columns = {'a': 'BIGINT'}, new_line= '\r\n', auto_detect = false, strict_mode=false) ---- new_line = \r\n (Set By User) statement error -FROM read_csv('data/csv/timestamp.csv', columns = {'a': 'BIGINT'}, new_line= '\n\r', strict_mode=false) +FROM read_csv('{DATA_DIR}/csv/timestamp.csv', columns = {'a': 'BIGINT'}, new_line= '\n\r', strict_mode=false) ---- This is not accepted as a newline: \n\r \ No newline at end of file diff --git a/test/sql/copy/csv/timestamp_with_tz.test b/test/sql/copy/csv/timestamp_with_tz.test index d6c5cdd4398e..5103591bee8f 100644 --- a/test/sql/copy/csv/timestamp_with_tz.test +++ b/test/sql/copy/csv/timestamp_with_tz.test @@ -10,7 +10,7 @@ CREATE TABLE tbl(id int, ts timestamp); # this fails without ICU loaded statement error -COPY tbl FROM 'data/csv/timestamp_with_tz.csv' (HEADER) +COPY tbl FROM '{DATA_DIR}/csv/timestamp_with_tz.csv' (HEADER) ---- Error when converting column "ts". 
Could not convert string "2021-05-25 04:55:03.382494 EST" to 'TIMESTAMP' @@ -28,7 +28,7 @@ statement ok CREATE TABLE tbl_tz(id int, ts timestamptz); statement ok -COPY tbl_tz FROM 'data/csv/timestamp_with_tz.csv' (HEADER) +COPY tbl_tz FROM '{DATA_DIR}/csv/timestamp_with_tz.csv' (HEADER) statement ok SET TimeZone='UTC' @@ -40,7 +40,7 @@ SELECT * FROM tbl_tz 2 2021-05-25 09:55:03.382494+00 query III -FROM read_csv('data/csv/17705.csv') +FROM read_csv('{DATA_DIR}/csv/17705.csv') ---- Night 38615452 2022/01/27 11:04:57 PM Night 38615452 2022/01/27 11:04:57 PM diff --git a/test/sql/copy/csv/unicode_filename.test b/test/sql/copy/csv/unicode_filename.test index 48a3bc463dcf..4a1d4ae9d9f7 100644 --- a/test/sql/copy/csv/unicode_filename.test +++ b/test/sql/copy/csv/unicode_filename.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query II -SELECT * FROM 'data/csv/issue2628_中文.csv' +SELECT * FROM '{DATA_DIR}/csv/issue2628_中文.csv' ---- 1 1 2 2 @@ -14,7 +14,7 @@ SELECT * FROM 'data/csv/issue2628_中文.csv' # unicode glob query II -SELECT * FROM 'data/csv/*中文.csv' +SELECT * FROM '{DATA_DIR}/csv/*中文.csv' ---- 1 1 2 2 @@ -22,7 +22,7 @@ SELECT * FROM 'data/csv/*中文.csv' # unicode directory query II -SELECT * FROM 'data/csv/中文/*.csv' ORDER BY 1 +SELECT * FROM '{DATA_DIR}/csv/中文/*.csv' ORDER BY 1 ---- 1 1 2 2 @@ -30,7 +30,7 @@ SELECT * FROM 'data/csv/中文/*.csv' ORDER BY 1 4 4 query II -SELECT * FROM 'data/csv/中*/*.csv' ORDER BY 1 +SELECT * FROM '{DATA_DIR}/csv/中*/*.csv' ORDER BY 1 ---- 1 1 2 2 diff --git a/test/sql/copy/csv/unquoted_escape/basic.test b/test/sql/copy/csv/unquoted_escape/basic.test index f1fa644751d2..853e99417d3c 100644 --- a/test/sql/copy/csv/unquoted_escape/basic.test +++ b/test/sql/copy/csv/unquoted_escape/basic.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query IT -SELECT * FROM read_csv('data/csv/unquoted_escape/plain.csv', escape = '\', sep = ',', strict_mode = false, nullstr = '\N'); +SELECT * FROM read_csv('{DATA_DIR}/csv/unquoted_escape/plain.csv', escape = '\', sep = ',', strict_mode = false, nullstr = '\N'); ---- 0 \ 1 , @@ -36,7 +36,7 @@ loop buffer_size 10 25 # replace CRLF with LF to pass the test on Windows query I -SELECT bool_and(b = replace(s, E'\r\n', E'\n'))::int FROM special_char JOIN read_csv('data/csv/unquoted_escape/basic.tsv', quote = '', escape = '\', sep = '\t', strict_mode = false) t (i, s, j) ON i = a; +SELECT bool_and(b = replace(s, E'\r\n', E'\n'))::int FROM special_char JOIN read_csv('{DATA_DIR}/csv/unquoted_escape/basic.tsv', quote = '', escape = '\', sep = '\t', strict_mode = false) t (i, s, j) ON i = a; ---- 1 diff --git a/test/sql/copy/csv/unquoted_escape/identical.test b/test/sql/copy/csv/unquoted_escape/identical.test index 5e1ae94dc85f..03aae9ab35dc 100644 --- a/test/sql/copy/csv/unquoted_escape/identical.test +++ b/test/sql/copy/csv/unquoted_escape/identical.test @@ -6,7 +6,7 @@ statement ok PRAGMA enable_verification query TT -SELECT concat('#', columns(*), '#') FROM read_csv('data/csv/unquoted_escape/identical.csv', quote = '"', escape = '"', sep = ',', strict_mode = false); +SELECT concat('#', columns(*), '#') FROM read_csv('{DATA_DIR}/csv/unquoted_escape/identical.csv', quote = '"', escape = '"', sep = ',', strict_mode = false); ---- ## #a""b# #c""d# ## diff --git a/test/sql/copy/csv/unquoted_escape/mixed.test b/test/sql/copy/csv/unquoted_escape/mixed.test index 5b83d49d48b5..4ddbc03f9dda 100644 --- a/test/sql/copy/csv/unquoted_escape/mixed.test +++ b/test/sql/copy/csv/unquoted_escape/mixed.test @@ -11,6 +11,6 @@ SELECT 
hamming(replace(string_agg(w, '|' ORDER BY y), E'\r\n', E'\n'), E'\\|,|"|\n'), hamming(string_agg(z, '|' ORDER BY y), '"|"a"|"b|c"'), bool_and(x = concat(w, '"', w))::int -FROM read_csv('data/csv/unquoted_escape/mixed.csv', quote = '"', escape = '\', sep = ',', strict_mode = false); +FROM read_csv('{DATA_DIR}/csv/unquoted_escape/mixed.csv', quote = '"', escape = '\', sep = ',', strict_mode = false); ---- 0 0 1 \ No newline at end of file diff --git a/test/sql/copy/csv/zstd_crash.test b/test/sql/copy/csv/zstd_crash.test index f2b957f2aedf..bd6e2a050771 100644 --- a/test/sql/copy/csv/zstd_crash.test +++ b/test/sql/copy/csv/zstd_crash.test @@ -9,7 +9,7 @@ require no_extension_autoloading "EXPECTED: zstd requires the parquet extension, # zstd requires the parquet extension statement error -CREATE TABLE test_zst AS SELECT * FROM read_csv('data/csv/broken/test.csv.zst', AUTO_DETECT=TRUE); +CREATE TABLE test_zst AS SELECT * FROM read_csv('{DATA_DIR}/csv/broken/test.csv.zst', AUTO_DETECT=TRUE); ---- Attempting to open a compressed file, but the compression type is not supported @@ -18,27 +18,27 @@ CREATE TABLE test_zst(a INTEGER, b INTEGER, c INTEGER, d VARCHAR, e VARCHAR); # what if we try to load this with random other compressions statement error -COPY test_zst FROM 'data/csv/broken/test.csv.zst' (COMPRESSION ZSTD); +COPY test_zst FROM '{DATA_DIR}/csv/broken/test.csv.zst' (COMPRESSION ZSTD); ---- Attempting to open a compressed file, but the compression type is not supported statement error -COPY test_zst FROM 'data/csv/broken/test.csv.zst' (COMPRESSION GZIP); +COPY test_zst FROM '{DATA_DIR}/csv/broken/test.csv.zst' (COMPRESSION GZIP); ---- -Input is not a GZIP stream: data/csv/broken/test.csv.zst +Input is not a GZIP stream: {DATA_DIR}/csv/broken/test.csv.zst statement error -COPY test_zst FROM 'data/csv/broken/test.csv.zst' (COMPRESSION NONE); +COPY test_zst FROM '{DATA_DIR}/csv/broken/test.csv.zst' (COMPRESSION NONE); ---- * Check you are using the correct file compression, otherwise set it (e.g., compression = 'zstd') statement error -COPY test_zst FROM 'data/csv/broken/test.csv.zst' (COMPRESSION INFER); +COPY test_zst FROM '{DATA_DIR}/csv/broken/test.csv.zst' (COMPRESSION INFER); ---- Attempting to open a compressed file, but the compression type is not supported statement error -COPY test_zst FROM 'data/csv/broken/test.csv.zst' (COMPRESSION UNKNOWN); +COPY test_zst FROM '{DATA_DIR}/csv/broken/test.csv.zst' (COMPRESSION UNKNOWN); ---- Unrecognized file compression type "UNKNOWN" @@ -46,20 +46,20 @@ Unrecognized file compression type "UNKNOWN" require parquet statement ok -COPY test_zst FROM 'data/csv/broken/test.csv.zst' (COMPRESSION ZSTD, HEADER); +COPY test_zst FROM '{DATA_DIR}/csv/broken/test.csv.zst' (COMPRESSION ZSTD, HEADER); statement ok -COPY test_zst FROM 'data/csv/broken/test.csv.zst' (COMPRESSION ZSTD, AUTO_DETECT 1); +COPY test_zst FROM '{DATA_DIR}/csv/broken/test.csv.zst' (COMPRESSION ZSTD, AUTO_DETECT 1); # what if we try to load a gzip file with zstd statement error -COPY test_zst FROM 'data/csv/lineitem1k.tbl.gz' (COMPRESSION ZSTD); +COPY test_zst FROM '{DATA_DIR}/csv/lineitem1k.tbl.gz' (COMPRESSION ZSTD); ---- Unknown frame descriptor # we can read/write a ZSTD file also without the extension if we specify the compression type statement ok -COPY test_zst TO '__TEST_DIR__/noext.csv' (COMPRESSION ZSTD); +COPY test_zst TO '{TEMP_DIR}/noext.csv' (COMPRESSION ZSTD); statement ok -COPY test_zst FROM '__TEST_DIR__/noext.csv' (COMPRESSION ZSTD); +COPY test_zst FROM 
'{TEMP_DIR}/noext.csv' (COMPRESSION ZSTD); diff --git a/test/sql/copy/csv/zstd_fs.test b/test/sql/copy/csv/zstd_fs.test index 08f3dc205435..c23d1f5cddf1 100644 --- a/test/sql/copy/csv/zstd_fs.test +++ b/test/sql/copy/csv/zstd_fs.test @@ -31,7 +31,7 @@ CREATE TABLE lineitem(l_orderkey INT NOT NULL, l_comment VARCHAR(44) NOT NULL); statement ok -COPY lineitem FROM 'data/csv/zstd/lineitem1k.tbl.zst' DELIMITER '|'; +COPY lineitem FROM '{DATA_DIR}/csv/zstd/lineitem1k.tbl.zst' DELIMITER '|'; query I SELECT COUNT(*) FROM lineitem @@ -53,7 +53,7 @@ statement ok CREATE TABLE IF NOT EXISTS ncvoters(county_id INTEGER, county_desc STRING, voter_reg_num STRING,status_cd STRING, voter_status_desc STRING, reason_cd STRING, voter_status_reason_desc STRING, absent_ind STRING, name_prefx_cd STRING,last_name STRING, first_name STRING, midl_name STRING, name_sufx_cd STRING, full_name_rep STRING,full_name_mail STRING, house_num STRING, half_code STRING, street_dir STRING, street_name STRING, street_type_cd STRING, street_sufx_cd STRING, unit_designator STRING, unit_num STRING, res_city_desc STRING,state_cd STRING, zip_code STRING, res_street_address STRING, res_city_state_zip STRING, mail_addr1 STRING, mail_addr2 STRING, mail_addr3 STRING, mail_addr4 STRING, mail_city STRING, mail_state STRING, mail_zipcode STRING, mail_city_state_zip STRING, area_cd STRING, phone_num STRING, full_phone_number STRING, drivers_lic STRING, race_code STRING, race_desc STRING, ethnic_code STRING, ethnic_desc STRING, party_cd STRING, party_desc STRING, sex_code STRING, sex STRING, birth_age STRING, birth_place STRING, registr_dt STRING, precinct_abbrv STRING, precinct_desc STRING,municipality_abbrv STRING, municipality_desc STRING, ward_abbrv STRING, ward_desc STRING, cong_dist_abbrv STRING, cong_dist_desc STRING, super_court_abbrv STRING, super_court_desc STRING, judic_dist_abbrv STRING, judic_dist_desc STRING, nc_senate_abbrv STRING, nc_senate_desc STRING, nc_house_abbrv STRING, nc_house_desc STRING,county_commiss_abbrv STRING, county_commiss_desc STRING, township_abbrv STRING, township_desc STRING,school_dist_abbrv STRING, school_dist_desc STRING, fire_dist_abbrv STRING, fire_dist_desc STRING, water_dist_abbrv STRING, water_dist_desc STRING, sewer_dist_abbrv STRING, sewer_dist_desc STRING, sanit_dist_abbrv STRING, sanit_dist_desc STRING, rescue_dist_abbrv STRING, rescue_dist_desc STRING, munic_dist_abbrv STRING, munic_dist_desc STRING, dist_1_abbrv STRING, dist_1_desc STRING, dist_2_abbrv STRING, dist_2_desc STRING, confidential_ind STRING, age STRING, ncid STRING, vtd_abbrv STRING, vtd_desc STRING); query I -COPY ncvoters FROM 'data/csv/zstd/ncvoter.csv.zst' DELIMITER ' '; +COPY ncvoters FROM '{DATA_DIR}/csv/zstd/ncvoter.csv.zst' DELIMITER ' '; ---- 10 diff --git a/test/sql/copy/encryption/concurrent_encrypted_attach.test b/test/sql/copy/encryption/concurrent_encrypted_attach.test new file mode 100644 index 000000000000..b6d14b326594 --- /dev/null +++ b/test/sql/copy/encryption/concurrent_encrypted_attach.test @@ -0,0 +1,11 @@ +# name: test/sql/copy/encryption/concurrent_encrypted_attach.test +# group: [encryption] + +require httpfs + +concurrentloop i 0 10 + +statement ok +ATTACH '__TEST_DIR__/concurrent_encrypted${i}.duckdb' AS encrypted${i} (ENCRYPTION_KEY 'asdf${i}'); + +endloop diff --git a/test/sql/copy/encryption/different_aes_ciphers.test b/test/sql/copy/encryption/different_aes_ciphers.test index ca147d7644f8..339611744058 100644 --- a/test/sql/copy/encryption/different_aes_ciphers.test +++ 
b/test/sql/copy/encryption/different_aes_ciphers.test @@ -4,6 +4,9 @@ statement ok PRAGMA enable_verification +# We need httpfs to do encrypted writes +require httpfs + statement error ATTACH '__TEST_DIR__/encrypted.duckdb' AS encrypted (ENCRYPTION_KEY ''); ---- @@ -59,17 +62,11 @@ FROM encrypted.fuu statement ok DETACH encrypted -# or open it without specifying the cipher, it will be read from file -statement ok +# opening the database without a cipher for CTR is not possible for security reasons +statement error ATTACH '__TEST_DIR__/encrypted_default_cipher.duckdb' AS encrypted (ENCRYPTION_KEY 'asdf'); - -query I -FROM encrypted.fuu ---- -42 - -statement ok -DETACH encrypted +Catalog Error: Cannot open encrypted database # but it will fail if we specify the wrong one statement error diff --git a/test/sql/copy/encryption/encrypted_to_unencrypted.test_slow b/test/sql/copy/encryption/encrypted_to_unencrypted.test_slow index 751b16a0686f..1cfa19a5a006 100644 --- a/test/sql/copy/encryption/encrypted_to_unencrypted.test_slow +++ b/test/sql/copy/encryption/encrypted_to_unencrypted.test_slow @@ -5,6 +5,9 @@ require skip_reload require tpch +# We need httpfs to do encrypted writes +require httpfs + statement ok PRAGMA enable_verification diff --git a/test/sql/copy/encryption/encryption_storage_versions.test b/test/sql/copy/encryption/encryption_storage_versions.test index 62467fd5e48b..39f33ee27c09 100644 --- a/test/sql/copy/encryption/encryption_storage_versions.test +++ b/test/sql/copy/encryption/encryption_storage_versions.test @@ -1,6 +1,9 @@ # name: test/sql/copy/encryption/encryption_storage_versions.test # group: [encryption] + +# We need httpfs to do encrypted writes +require httpfs + statement ok PRAGMA enable_verification @@ -140,4 +143,4 @@ SELECT SUM(i) FROM unencrypted_v_1_2_0.tbl; query I SELECT SUM(i) FROM unencrypted_new.tbl; ---- -45 \ No newline at end of file +45 diff --git a/test/sql/copy/encryption/multiple_encrypted_databases.test_slow b/test/sql/copy/encryption/multiple_encrypted_databases.test_slow index 2ada71319ec4..6c03124f0992 100644 --- a/test/sql/copy/encryption/multiple_encrypted_databases.test_slow +++ b/test/sql/copy/encryption/multiple_encrypted_databases.test_slow @@ -5,6 +5,9 @@ require skip_reload require tpch +# We need httpfs to do encrypted writes +require httpfs + statement ok PRAGMA enable_verification diff --git a/test/sql/copy/encryption/reencrypt.test_slow b/test/sql/copy/encryption/reencrypt.test_slow index 1974e3e915e7..44cc1afb1a89 100644 --- a/test/sql/copy/encryption/reencrypt.test_slow +++ b/test/sql/copy/encryption/reencrypt.test_slow @@ -5,6 +5,9 @@ require skip_reload require tpch +# We need httpfs to do encrypted writes +require httpfs + statement ok PRAGMA enable_verification diff --git a/test/sql/copy/encryption/tpch_sf1_encrypted.test_slow b/test/sql/copy/encryption/tpch_sf1_encrypted.test_slow index 47c5458026d1..4946541fabf0 100644 --- a/test/sql/copy/encryption/tpch_sf1_encrypted.test_slow +++ b/test/sql/copy/encryption/tpch_sf1_encrypted.test_slow @@ -4,6 +4,9 @@ require tpch +# We need httpfs to do encrypted writes +require httpfs + statement ok pragma verify_external @@ -47,4 +50,4 @@ PRAGMA tpch(${i}) ---- :extension/tpch/dbgen/answers/sf1/q${i}.csv -endloop \ No newline at end of file +endloop diff --git a/test/sql/copy/encryption/unencrypted_to_encrypted.test b/test/sql/copy/encryption/unencrypted_to_encrypted.test index d79741decf88..19526e6fc549 100644 --- a/test/sql/copy/encryption/unencrypted_to_encrypted.test +++
b/test/sql/copy/encryption/unencrypted_to_encrypted.test @@ -5,6 +5,9 @@ require skip_reload require tpch +# We need httpfs to do encrypted writes +require httpfs + statement ok PRAGMA enable_verification diff --git a/test/sql/copy/encryption/unencrypted_to_encrypted_direct_query.test b/test/sql/copy/encryption/unencrypted_to_encrypted_direct_query.test index f77f8db63384..08ed9ea3e285 100644 --- a/test/sql/copy/encryption/unencrypted_to_encrypted_direct_query.test +++ b/test/sql/copy/encryption/unencrypted_to_encrypted_direct_query.test @@ -5,6 +5,9 @@ require skip_reload require tpch +# We need httpfs to do encrypted writes +require httpfs + statement ok PRAGMA enable_verification @@ -26,4 +29,4 @@ COPY FROM DATABASE unencrypted to encrypted; query I SELECT SUM(i) FROM encrypted.tbl; ---- -45 \ No newline at end of file +45 diff --git a/test/sql/copy/encryption/write_encrypted_database.test b/test/sql/copy/encryption/write_encrypted_database.test index f7f9508fba7f..a2124925f019 100644 --- a/test/sql/copy/encryption/write_encrypted_database.test +++ b/test/sql/copy/encryption/write_encrypted_database.test @@ -3,6 +3,9 @@ require skip_reload +# We need httpfs to do encrypted writes +require httpfs + statement ok PRAGMA enable_verification @@ -35,4 +38,4 @@ ATTACH '__TEST_DIR__/encrypted.duckdb' AS encrypted (ENCRYPTION_KEY 'asdf'); query I SELECT SUM(i) FROM encrypted.tbl ---- -45 \ No newline at end of file +45 diff --git a/test/sql/copy/hive_types.test_slow b/test/sql/copy/hive_types.test_slow index 71adc1ddfc76..0e8c16bfe900 100644 --- a/test/sql/copy/hive_types.test_slow +++ b/test/sql/copy/hive_types.test_slow @@ -2,7 +2,7 @@ # description: basic tests hive_types flag + hive_types_autocast flag # group: [copy] -# dir: data/csv/hive-partitioning/hive_types/ +# dir: {DATA_DIR}/csv/hive-partitioning/hive_types/ require parquet @@ -10,47 +10,47 @@ statement ok PRAGMA enable_verification statement ok -copy 'data/csv/hive-partitioning/hive_types/himym.csv' to '__TEST_DIR__/partition' (format parquet, partition_by(season,director,aired)); +copy '{DATA_DIR}/csv/hive-partitioning/hive_types/himym.csv' to '{TEMP_DIR}/partition' (format parquet, partition_by(season,director,aired)); statement error -select typeof(season),typeof(director),typeof(aired) from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_partitioning=0, hive_types={'season':smallint}) limit 1; +select typeof(season),typeof(director),typeof(aired) from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_partitioning=0, hive_types={'season':smallint}) limit 1; ---- Invalid Input Error: cannot disable hive_partitioning # basic tests with hive_types query III -select typeof(season),typeof(director),typeof(aired) from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_partitioning=1, hive_types={'season':'smallint','director':'varchar','aired':'date'}) limit 1; +select typeof(season),typeof(director),typeof(aired) from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_partitioning=1, hive_types={'season':'smallint','director':'varchar','aired':'date'}) limit 1; ---- SMALLINT VARCHAR DATE query III -select typeof(season),typeof(director),typeof(aired) from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_types={'season':smallint,'director':varchar,'aired':date}) limit 1; +select typeof(season),typeof(director),typeof(aired) from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_types={'season':smallint,'director':varchar,'aired':date}) limit 1; ---- SMALLINT VARCHAR DATE statement error -select 
season,director,aired from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_types={'season':date}) limit 1; +select season,director,aired from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_types={'season':date}) limit 1; ---- Invalid Input Error: Unable to cast statement error -from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_types={'seasons':smallint}); +from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_types={'seasons':smallint}); ---- Invalid Input Error: Unknown hive_type query I -select typeof(season) from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_types={season:smallint}) limit 1; +select typeof(season) from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_types={season:smallint}) limit 1; ---- SMALLINT statement error -from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_types=true); +from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_types=true); ---- Invalid Input Error: 'hive_types' only accepts a STRUCT statement error -from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_types={season:-42}); +from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_types={season:-42}); ---- Invalid Input Error: hive_types: 'season' must be a VARCHAR, instead: 'INTEGER' was provided @@ -59,41 +59,41 @@ Invalid Input Error: hive_types: 'season' must be a VARCHAR, instead: 'INTEGER' # when hive_partitioning=0, data won't be read from directory names unless partition columns are written to files. statement error -select typeof(season),typeof(director),typeof(aired) from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_partitioning=0) limit 1; +select typeof(season),typeof(director),typeof(aired) from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_partitioning=0) limit 1; ---- Binder Error: Referenced column "season" not found in FROM clause! 
query III -select typeof(season),typeof(director),typeof(aired) from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_partitioning=1, hive_types_autocast=0) limit 1; +select typeof(season),typeof(director),typeof(aired) from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_partitioning=1, hive_types_autocast=0) limit 1; ---- VARCHAR VARCHAR VARCHAR query III -select typeof(season),typeof(director),typeof(aired) from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_partitioning=1, hive_types_autocast=1) limit 1; +select typeof(season),typeof(director),typeof(aired) from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_partitioning=1, hive_types_autocast=1) limit 1; ---- BIGINT VARCHAR DATE query III -select typeof(season),typeof(director),typeof(aired) from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_partitioning=1) limit 1; +select typeof(season),typeof(director),typeof(aired) from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_partitioning=1) limit 1; ---- BIGINT VARCHAR DATE query III -select typeof(season),typeof(director),typeof(aired) from read_parquet('__TEST_DIR__/partition/**/*.parquet', hive_types_autocast=0) limit 1; +select typeof(season),typeof(director),typeof(aired) from read_parquet('{TEMP_DIR}/partition/**/*.parquet', hive_types_autocast=0) limit 1; ---- VARCHAR VARCHAR VARCHAR query III -select typeof(season),typeof(director),typeof(aired) from read_parquet('__TEST_DIR__/partition/**/*.parquet') limit 1; +select typeof(season),typeof(director),typeof(aired) from read_parquet('{TEMP_DIR}/partition/**/*.parquet') limit 1; ---- BIGINT VARCHAR DATE # hive types mix statement ok -copy (select 1 AS a, 1 AS b, '123' AS partition UNION ALL SELECT 2, 2, '1992-01-01' UNION ALL SELECT 3, 3, 'abc') TO '__TEST_DIR__/partition_types' (FORMAT PARQUET, PARTITION_BY(partition)); +copy (select 1 AS a, 1 AS b, '123' AS partition UNION ALL SELECT 2, 2, '1992-01-01' UNION ALL SELECT 3, 3, 'abc') TO '{TEMP_DIR}/partition_types' (FORMAT PARQUET, PARTITION_BY(partition)); query III -SELECT * FROM '__TEST_DIR__/partition_types/**/*.parquet' ORDER BY 1 +SELECT * FROM '{TEMP_DIR}/partition_types/**/*.parquet' ORDER BY 1 ---- 1 1 123 2 2 1992-01-01 @@ -101,31 +101,31 @@ SELECT * FROM '__TEST_DIR__/partition_types/**/*.parquet' ORDER BY 1 # explicit overwrite statement error -select * from read_parquet('__TEST_DIR__/partition_types/**/*.parquet', hive_types={'partition':smallint}) +select * from read_parquet('{TEMP_DIR}/partition_types/**/*.parquet', hive_types={'partition':smallint}) ---- Unable to cast # Complex filter filtering first file, filter should be pruned completely if hive_partitioning=1 statement error -explain from parquet_scan('__TEST_DIR__/partition/**/*.parquet', HIVE_PARTITIONING=0, HIVE_TYPES_AUTOCAST=0) where aired < '2006-1-1'; +explain from parquet_scan('{TEMP_DIR}/partition/**/*.parquet', HIVE_PARTITIONING=0, HIVE_TYPES_AUTOCAST=0) where aired < '2006-1-1'; ---- Binder Error: Referenced column "aired" not found in FROM clause! 
query II -explain (FORMAT JSON) from parquet_scan('__TEST_DIR__/partition/**/*.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where aired < '2006-1-1'; +explain (FORMAT JSON) from parquet_scan('{TEMP_DIR}/partition/**/*.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where aired < '2006-1-1'; ---- physical_plan :.*(PARQUET_SCAN.*File Filters": "\(aired \<).* # When partition columns are written, partition columns can be read even with HIVE_PARTITIONING=0 statement ok -copy 'data/csv/hive-partitioning/hive_types/himym.csv' to '__TEST_DIR__/partition-written' (format parquet, partition_by(season,director,aired), write_partition_columns); +copy '{DATA_DIR}/csv/hive-partitioning/hive_types/himym.csv' to '{TEMP_DIR}/partition-written' (format parquet, partition_by(season,director,aired), write_partition_columns); query III -select typeof(season),typeof(director),typeof(aired) from read_parquet('__TEST_DIR__/partition-written/**/*.parquet', hive_partitioning=0) limit 1; +select typeof(season),typeof(director),typeof(aired) from read_parquet('{TEMP_DIR}/partition-written/**/*.parquet', hive_partitioning=0) limit 1; ---- BIGINT VARCHAR DATE query II -explain from parquet_scan('__TEST_DIR__/partition-written/**/*.parquet', HIVE_PARTITIONING=0, HIVE_TYPES_AUTOCAST=0) where aired < '2006-1-1'; +explain from parquet_scan('{TEMP_DIR}/partition-written/**/*.parquet', HIVE_PARTITIONING=0, HIVE_TYPES_AUTOCAST=0) where aired < '2006-1-1'; ---- physical_plan :.*PARQUET_SCAN.*Filters:.*aired.* diff --git a/test/sql/copy/parquet/afl.test b/test/sql/copy/parquet/afl.test index 8b8e05b798c1..3c4e4dea87f2 100644 --- a/test/sql/copy/parquet/afl.test +++ b/test/sql/copy/parquet/afl.test @@ -12,7 +12,7 @@ PRAGMA enable_verification foreach i 1 2 6 statement error -select * from parquet_scan('data/parquet-testing/afl/${i}.parquet') +select * from parquet_scan('{DATA_DIR}/parquet-testing/afl/${i}.parquet') ---- Invalid dictionary page header @@ -23,7 +23,7 @@ foreach i 3 4 5 7 statement error -select * from parquet_scan('data/parquet-testing/afl/3.parquet') +select * from parquet_scan('{DATA_DIR}/parquet-testing/afl/${i}.parquet') ---- Invalid Error: Parquet file is likely corrupted, missing dictionary diff --git a/test/sql/copy/parquet/alltypes-dictionaries.test b/test/sql/copy/parquet/alltypes-dictionaries.test index a2bf081f9ad9..b68922068075 100644 --- a/test/sql/copy/parquet/alltypes-dictionaries.test +++ b/test/sql/copy/parquet/alltypes-dictionaries.test @@ -3,6 +3,9 @@ require parquet +# +# do tests with IMPLICIT parquet_version (default is v1) +# foreach type TINYINT SMALLINT INTEGER BIGINT HUGEINT UTINYINT USMALLINT UINTEGER UBIGINT UHUGEINT FLOAT DOUBLE VARCHAR @@ -12,7 +15,7 @@ copy (select (r1.range * 10)::${type} r from range(10) r1, range(1000) r2) to '_ query I select first(encodings) from parquet_metadata('__TEST_DIR__/dict-${type}.parquet') group by encodings; ---- -RLE_DICTIONARY +PLAIN_DICTIONARY query I SELECT COUNT(*) from '__TEST_DIR__/dict-${type}.parquet' WHERE r='20' ---- 1000 @@ -29,4 +32,66 @@ select column_id, BOOL_AND(bloom_filter_offset > 4), BOOL_AND(bloom_filter_lengt #---- #true +endloop + +# +# same tests with EXPLICIT parquet_version v1 +# + +foreach type TINYINT SMALLINT INTEGER BIGINT HUGEINT UTINYINT USMALLINT UINTEGER UBIGINT UHUGEINT FLOAT DOUBLE VARCHAR + +statement ok +copy (select (r1.range * 10)::${type} r from range(10) r1, range(1000) r2) to '__TEST_DIR__/dict-${type}-v1.parquet' (row_group_size 2048, parquet_version v1); + +query I +select first(encodings) from
parquet_metadata('__TEST_DIR__/dict-${type}-v1.parquet') group by encodings; +---- +PLAIN_DICTIONARY + +query I +SELECT COUNT(*) from '__TEST_DIR__/dict-${type}-v1.parquet' WHERE r='20' +---- +1000 + +query III +select column_id, BOOL_AND(bloom_filter_offset > 4), BOOL_AND(bloom_filter_length > 1) from parquet_metadata('__TEST_DIR__/dict-${type}-v1.parquet') group by column_id order by column_id; +---- +0 true true + +#query I +#SELECT bloom_filter_excludes FROM parquet_bloom_probe('__TEST_DIR__/dict-${type}-v1.parquet', 'r', '11'); +#---- +#true + +endloop + +# +# same tests with EXPLICIT parquet_version v2 +# + +foreach type TINYINT SMALLINT INTEGER BIGINT HUGEINT UTINYINT USMALLINT UINTEGER UBIGINT UHUGEINT FLOAT DOUBLE VARCHAR + +statement ok +copy (select (r1.range * 10)::${type} r from range(10) r1, range(1000) r2) to '__TEST_DIR__/dict-${type}-v2.parquet' (row_group_size 2048, parquet_version v2); + +query I +select first(encodings) from parquet_metadata('__TEST_DIR__/dict-${type}-v2.parquet') group by encodings; +---- +RLE_DICTIONARY + +query I +SELECT COUNT(*) from '__TEST_DIR__/dict-${type}-v2.parquet' WHERE r='20' +---- +1000 + +query III +select column_id, BOOL_AND(bloom_filter_offset > 4), BOOL_AND(bloom_filter_length > 1) from parquet_metadata('__TEST_DIR__/dict-${type}-v2.parquet') group by column_id order by column_id; +---- +0 true true + +#query I +#SELECT bloom_filter_excludes FROM parquet_bloom_probe('__TEST_DIR__/dict-${type}-v2.parquet', 'r', '11'); +#---- +#true + endloop \ No newline at end of file diff --git a/test/sql/copy/parquet/aws2.test b/test/sql/copy/parquet/aws2.test index dbe05394bab2..cebbf8f8a28b 100644 --- a/test/sql/copy/parquet/aws2.test +++ b/test/sql/copy/parquet/aws2.test @@ -8,6 +8,6 @@ statement ok PRAGMA enable_verification query I -SELECT * FROM 'data/parquet-testing/aws2.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/aws2.parquet' ---- READY diff --git a/test/sql/copy/parquet/aws_kinesis.test b/test/sql/copy/parquet/aws_kinesis.test index 7638a703e6c7..774bc49981e9 100644 --- a/test/sql/copy/parquet/aws_kinesis.test +++ b/test/sql/copy/parquet/aws_kinesis.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII -SELECT * FROM 'data/parquet-testing/aws_kinesis.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/aws_kinesis.parquet' ---- 2022 11 22 2022-11-22 00:01:00.871 2022-11-22 00:01:01 -129 Hamburg NULL Germany 53.6304 9.98823 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL http://localhost:3000/ / t=tr&ts=1669075260871&u=http%253A%252F%252Flocalhost%253A3000%252F&hn=localhost&pa=%252F&en=tabVisible&pr=%257B%257D 495 200 Hit 0 3320 NULL tabVisible NULL NULL NULL de3bc04229406da23ee45e234a42a66cc542b335517ab585ca43f55cd2dcf781 3bc2c0a60f9f2dd212db07ed80e817f9dd43aa999d16b0b2b8db91ab092a8102 ab40d20596d7595049399578929ffc598abdb8f539bdfa7637cb509f8613dcc7 track 2022 11 22 2022-11-22 00:01:07.67 2022-11-22 00:01:10 -2330 Hamburg NULL Germany 53.6304 9.98823 NULL NULL Chrome 107.0.0.0 Mac OS 10.15.7 Europe/Berlin de-DE NULL NULL 3440x1440 1356x902 24.0 MacIntel 8.0 8.0 NULL NULL NULL NULL NULL http://localhost:3000/azure /azure
t=pv&ts=1669075267670&u=http%253A%252F%252Flocalhost%253A3000%252Fazure&hn=localhost&pa=%252Fazure&ua=Mozilla%252F5.0%2520(Macintosh%253B%2520Intel%2520Mac%2520OS%2520X%252010_15_7)%2520AppleWebKit%252F537.36%2520(KHTML%252C%2520like%2520Gecko)%2520Chrome%252F107.0.0.0%2520Safari%252F537.36&iw=1356&ih=902&ti=Map%2520the%2520Cloud&w=3440&h=1440&d=24&l=de-DE&p=MacIntel&m=8&c=8&tz=Europe%252FBerlin 291 200 Hit 1 3320 NULL NULL NULL NULL NULL cb7736f1c3ce9b9a21bb9a7b17edfd9507e7c6261ded2a8ebc7f19189d6de8c6 4e5aea60a9b73aa0f2aa69967e9de9a58b3341d5bc342f94d2eef07859b658da ab40d20596d7595049399578929ffc598abdb8f539bdfa7637cb509f8613dcc7 pageview @@ -17,6 +17,6 @@ SELECT * FROM 'data/parquet-testing/aws_kinesis.parquet' query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII -SELECT * FROM 'data/parquet-testing/aws_kinesis.parquet' WHERE event_timestamp=TIMESTAMP '2022-11-22 00:01:13.175'; +SELECT * FROM '{DATA_DIR}/parquet-testing/aws_kinesis.parquet' WHERE event_timestamp=TIMESTAMP '2022-11-22 00:01:13.175'; ---- 2022 11 22 2022-11-22 00:01:13.175 2022-11-22 00:01:16 -2825 Hamburg NULL Germany 53.6304 9.98823 NULL NULL Chrome 107.0.0.0 Mac OS 10.15.7 Europe/Berlin de-DE NULL NULL 3440x1440 1356x902 24.0 MacIntel 8.0 8.0 NULL NULL NULL NULL NULL http://localhost:3000/aws /aws t=pv&ts=1669075273175&u=http%253A%252F%252Flocalhost%253A3000%252Faws&hn=localhost&pa=%252Faws&ua=Mozilla%252F5.0%2520(Macintosh%253B%2520Intel%2520Mac%2520OS%2520X%252010_15_7)%2520AppleWebKit%252F537.36%2520(KHTML%252C%2520like%2520Gecko)%2520Chrome%252F107.0.0.0%2520Safari%252F537.36&iw=1356&ih=902&ti=Map%2520the%2520Cloud%2520-%2520Azure%2520Services%2520%2526%2520Regions&w=3440&h=1440&d=24&l=de-DE&p=MacIntel&m=8&c=8&tz=Europe%252FBerlin 315 200 Hit 0 3320 NULL NULL NULL NULL NULL 4326dbc4bbfbef6aec0584b3d6437625551ab22323ed0f81ff79ab54bcfb97db cf7ee5dae81cbe75b0e78aaf200b8f1fb93349dc33fe65d3623df79ff31c53fd ab40d20596d7595049399578929ffc598abdb8f539bdfa7637cb509f8613dcc7 pageview diff --git a/test/sql/copy/parquet/bigdecimal.test b/test/sql/copy/parquet/bigdecimal.test index d2aff96208f5..d6f36c35cef2 100644 --- a/test/sql/copy/parquet/bigdecimal.test +++ b/test/sql/copy/parquet/bigdecimal.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification query I -FROM 'data/parquet-testing/bigdecimal.parquet' +FROM '{DATA_DIR}/parquet-testing/bigdecimal.parquet' ---- 0.5 -0.5 diff --git a/test/sql/copy/parquet/bloom_filters.test b/test/sql/copy/parquet/bloom_filters.test index 1c40e009963d..b37cf6c147d5 100644 --- a/test/sql/copy/parquet/bloom_filters.test +++ b/test/sql/copy/parquet/bloom_filters.test @@ -273,21 +273,31 @@ from range(100) r1, range(100) order by r) to '__TEST_DIR__/bloom9.parquet' (for ---- Binder Error -# test some repeated large strings -# this should give dictionary +# test some repeated large strings (with default parquet_version v1) +# this should give PLAIN_DICTIONARY dictionary statement ok copy (select repeat('abc', 500_000) || (range % 10) s from range(100)) to '__TEST_DIR__/my.parquet'; +query I +select encodings from parquet_metadata('__TEST_DIR__/my.parquet'); +---- +PLAIN_DICTIONARY + +# test some repeated large strings with parquet_version v2 +# this should give RLE_DICTIONARY dictionary +statement ok +copy (select repeat('abc', 500_000) || (range % 10) s from range(100)) to '__TEST_DIR__/my.parquet' (parquet_version v2); + query I select encodings from parquet_metadata('__TEST_DIR__/my.parquet'); ---- RLE_DICTIONARY -# this cannot do dictionary because the strings exceed the limit +# this 
cannot do any dictionaries because the strings exceed the limit statement ok copy (select repeat('abc', 500_000) || (range % 10) s from range(100)) to '__TEST_DIR__/my.parquet' (STRING_DICTIONARY_PAGE_SIZE_LIMIT 4_000_000); query I -select encodings = 'RLE_DICTIONARY' from parquet_metadata('__TEST_DIR__/my.parquet'); +select encodings in ('PLAIN_DICTIONARY', 'RLE_DICTIONARY') from parquet_metadata('__TEST_DIR__/my.parquet'); ---- false diff --git a/test/sql/copy/parquet/byte_stream_split.test b/test/sql/copy/parquet/byte_stream_split.test index c7bbb82c015b..acc12da23566 100644 --- a/test/sql/copy/parquet/byte_stream_split.test +++ b/test/sql/copy/parquet/byte_stream_split.test @@ -8,6 +8,6 @@ statement ok PRAGMA enable_verification query III -SELECT * FROM 'data/parquet-testing/byte_stream_split.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/byte_stream_split.parquet' ---- :data/parquet-testing/byte_stream_split.csv diff --git a/test/sql/copy/parquet/case_insensitive_replacement.test b/test/sql/copy/parquet/case_insensitive_replacement.test index 2ddf80318032..e8b0f50752fd 100644 --- a/test/sql/copy/parquet/case_insensitive_replacement.test +++ b/test/sql/copy/parquet/case_insensitive_replacement.test @@ -5,6 +5,6 @@ require parquet query I -SELECT data FROM 'data/parquet-testing/CASE_INSENSITIVE.PARQUET' +SELECT data FROM '{DATA_DIR}/parquet-testing/CASE_INSENSITIVE.PARQUET' ---- \x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F diff --git a/test/sql/copy/parquet/copy_ninf_stats.test b/test/sql/copy/parquet/copy_ninf_stats.test new file mode 100644 index 000000000000..773afc07ca75 --- /dev/null +++ b/test/sql/copy/parquet/copy_ninf_stats.test @@ -0,0 +1,13 @@ +# name: test/sql/copy/parquet/copy_ninf_stats.test +# description: Negative infinity stats +# group: [parquet] + +require parquet + +statement ok +copy (select '-infinity'::double ninf) to '__TEST_DIR__/ninf.parquet'; + +query II +SELECT stats_min, stats_max FROM parquet_metadata('__TEST_DIR__/ninf.parquet') +---- +NULL NULL diff --git a/test/sql/copy/parquet/copy_option_non_foldable.test b/test/sql/copy/parquet/copy_option_non_foldable.test new file mode 100644 index 000000000000..efb6126efe4f --- /dev/null +++ b/test/sql/copy/parquet/copy_option_non_foldable.test @@ -0,0 +1,24 @@ +# name: test/sql/copy/parquet/copy_option_non_foldable.test +# group: [parquet] + +require parquet + +statement ok +PREPARE statement2 as COPY ( + SELECT + 42 AS number, + true AS is_even +) TO '__TEST_DIR__/non_foldable_copy_option.parquet' ( + FORMAT parquet, + KV_METADATA { + number: random() + } +); + +statement ok +EXECUTE statement2; + +query II +select * from '__TEST_DIR__/non_foldable_copy_option.parquet' +---- +42 true diff --git a/test/sql/copy/parquet/copy_option_prepared.test b/test/sql/copy/parquet/copy_option_prepared.test new file mode 100644 index 000000000000..4a13e1d47a1c --- /dev/null +++ b/test/sql/copy/parquet/copy_option_prepared.test @@ -0,0 +1,24 @@ +# name: test/sql/copy/parquet/copy_option_prepared.test +# group: [parquet] + +require parquet + +statement ok +PREPARE statement as COPY ( + SELECT + 42 AS number, + true AS is_even +) TO '__TEST_DIR__/prepared_parquet_copy.parquet' ( + FORMAT parquet, + KV_METADATA { + number: $1, + } +); + +statement ok +execute statement(42) + +query II +select * from '__TEST_DIR__/prepared_parquet_copy.parquet'; +---- +42 true diff --git a/test/sql/copy/parquet/corrupt_stats.test b/test/sql/copy/parquet/corrupt_stats.test index 146c27f9d2e4..66914f50aa41 100644 --- 
a/test/sql/copy/parquet/corrupt_stats.test +++ b/test/sql/copy/parquet/corrupt_stats.test @@ -5,7 +5,7 @@ require parquet statement error -SELECT a FROM 'data/parquet-testing/corrupt_stats.parquet' GROUP BY a; +SELECT a FROM '{DATA_DIR}/parquet-testing/corrupt_stats.parquet' GROUP BY a; ---- This likely means that the statistics in your data source are corrupt @@ -13,6 +13,6 @@ statement ok PRAGMA disable_optimizer query I -SELECT a FROM 'data/parquet-testing/corrupt_stats.parquet' GROUP BY a; +SELECT a FROM '{DATA_DIR}/parquet-testing/corrupt_stats.parquet' GROUP BY a; ---- 2021-01-01 12:00:00 diff --git a/test/sql/copy/parquet/decimal_filter.test b/test/sql/copy/parquet/decimal_filter.test index 4dceb545f6e2..b4c3394c80d9 100644 --- a/test/sql/copy/parquet/decimal_filter.test +++ b/test/sql/copy/parquet/decimal_filter.test @@ -8,7 +8,7 @@ statement ok pragma enable_verification query IIII -select * from 'data/parquet-testing/decimals.parquet' +select * from '{DATA_DIR}/parquet-testing/decimals.parquet' ---- 0.1 0.1 0.1 0.1 -0.1 -0.1 -0.1 -0.1 @@ -16,12 +16,12 @@ select * from 'data/parquet-testing/decimals.parquet' loop i 1 5 query IIII -select * from 'data/parquet-testing/decimals.parquet' WHERE l${i}=0.1 +select * from '{DATA_DIR}/parquet-testing/decimals.parquet' WHERE l${i}=0.1 ---- 0.1 0.1 0.1 0.1 query IIII -select * from 'data/parquet-testing/decimals.parquet' WHERE l${i}=-0.1 +select * from '{DATA_DIR}/parquet-testing/decimals.parquet' WHERE l${i}=-0.1 ---- -0.1 -0.1 -0.1 -0.1 diff --git a/test/sql/copy/parquet/delta_byte_array.test b/test/sql/copy/parquet/delta_byte_array.test index b297749e5a59..b3f28243b2b2 100644 --- a/test/sql/copy/parquet/delta_byte_array.test +++ b/test/sql/copy/parquet/delta_byte_array.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification query IIIIIIIII -SELECT * FROM 'data/parquet-testing/delta_byte_array.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/delta_byte_array.parquet' ---- AAAAAAAAIODAAAAA Sir Mark Bailey N MOROCCO NULL Mark.Bailey@rg9qCNVJ0s7qeY.com 2452443 AAAAAAAAHODAAAAA Mrs. Lisa Clark Y ITALY NULL Lisa.Clark@goPYS4tMB0.org 2452646 @@ -1012,7 +1012,7 @@ AAAAAAAACAAAAAAA Dr. Amy Moses Y TOGO NULL Amy.Moses@Ovk9KjHH.com 2452318 AAAAAAAABAAAAAAA Mr. 
Javier Lewis Y CHILE NULL Javier.Lewis@VFAxlnZEvOx.org 2452508 query I -SELECT * FROM 'data/parquet-testing/delta_length_byte_array.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/delta_length_byte_array.parquet' ---- apple_banana_mango0 apple_banana_mango1 diff --git a/test/sql/copy/parquet/describe_parquet.test b/test/sql/copy/parquet/describe_parquet.test index f8b7eb7b3900..24f99a35562f 100644 --- a/test/sql/copy/parquet/describe_parquet.test +++ b/test/sql/copy/parquet/describe_parquet.test @@ -5,10 +5,10 @@ require parquet query IIIIII nosort describeresult -DESCRIBE 'data/parquet-testing/delta_byte_array.parquet' +DESCRIBE '{DATA_DIR}/parquet-testing/delta_byte_array.parquet' query IIIIII nosort describeresult -DESCRIBE "data/parquet-testing/delta_byte_array.parquet" +DESCRIBE "{DATA_DIR}/parquet-testing/delta_byte_array.parquet" query IIIIII nosort describeresult -DESCRIBE FROM read_parquet("data/parquet-testing/delta_byte_array.parquet") +DESCRIBE FROM read_parquet("{DATA_DIR}/parquet-testing/delta_byte_array.parquet") diff --git a/test/sql/copy/parquet/enum_converted_type.test b/test/sql/copy/parquet/enum_converted_type.test index 25c2b6499b21..c2604c4dd2d3 100644 --- a/test/sql/copy/parquet/enum_converted_type.test +++ b/test/sql/copy/parquet/enum_converted_type.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification query IIIIIII -select * from 'data/parquet-testing/enum.parquet'; +select * from '{DATA_DIR}/parquet-testing/enum.parquet'; ---- 1 0 t1 test_span 1612550512340953 500000 [{'key': service_name, 'v_type': STRING, 'v_str': test_service, 'v_bool': false, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': ''}, {'key': http_method, 'v_type': STRING, 'v_str': POST, 'v_bool': false, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': ''}, {'key': method, 'v_type': STRING, 'v_str': callbacks.flannel, 'v_bool': false, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': ''}, {'key': boolean, 'v_type': BOOL, 'v_str': '', 'v_bool': true, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': ''}, {'key': int, 'v_type': INT64, 'v_str': '', 'v_bool': false, 'v_int64': 1000, 'v_float64': 1001.2, 'v_binary': ''}, {'key': float, 'v_type': FLOAT64, 'v_str': '', 'v_bool': false, 'v_int64': 1000, 'v_float64': 1001.2, 'v_binary': ''}, {'key': binary, 'v_type': BINARY, 'v_str': ignored, 'v_bool': false, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': binaryTagValue}, {'key': type, 'v_type': STRING, 'v_str': msg_type, 'v_bool': false, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': ''}] 2 1 t1 test_span 1612550512340954 500001 [{'key': service_name, 'v_type': STRING, 'v_str': test_service, 'v_bool': false, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': ''}, {'key': http_method, 'v_type': STRING, 'v_str': POST, 'v_bool': false, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': ''}, {'key': method, 'v_type': STRING, 'v_str': callbacks.flannel, 'v_bool': false, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': ''}, {'key': boolean, 'v_type': BOOL, 'v_str': '', 'v_bool': true, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': ''}, {'key': int, 'v_type': INT64, 'v_str': '', 'v_bool': false, 'v_int64': 1000, 'v_float64': 1001.2, 'v_binary': ''}, {'key': float, 'v_type': FLOAT64, 'v_str': '', 'v_bool': false, 'v_int64': 1000, 'v_float64': 1001.2, 'v_binary': ''}, {'key': binary, 'v_type': BINARY, 'v_str': ignored, 'v_bool': false, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': binaryTagValue}, {'key': type, 'v_type': STRING, 'v_str': msg_type, 'v_bool': false, 'v_int64': 0, 'v_float64': 0.0, 'v_binary': ''}] diff --git 
a/test/sql/copy/parquet/file_metadata.test b/test/sql/copy/parquet/file_metadata.test index 0d1505510cfe..6dad4e993eb8 100644 --- a/test/sql/copy/parquet/file_metadata.test +++ b/test/sql/copy/parquet/file_metadata.test @@ -7,11 +7,11 @@ statement ok SET parquet_metadata_cache = true; query IIIIIIIII -SELECT * FROM parquet_file_metadata('data/parquet-testing/arrow/alltypes_dictionary.parquet') +SELECT * FROM parquet_file_metadata('{DATA_DIR}/parquet-testing/arrow/alltypes_dictionary.parquet') ---- -data/parquet-testing/arrow/alltypes_dictionary.parquet impala version 1.3.0-INTERNAL (build 8a48ddb1eff84592b3fc06bc6f51ec120e1fffc9) 2 1 1 NULL NULL 1698 723 +{DATA_DIR}/parquet-testing/arrow/alltypes_dictionary.parquet impala version 1.3.0-INTERNAL (build 8a48ddb1eff84592b3fc06bc6f51ec120e1fffc9) 2 1 1 NULL NULL 1698 723 query IIIIIIIII -SELECT * FROM parquet_file_metadata('data/parquet-testing/arrow/alltypes_dictionary.parquet') +SELECT * FROM parquet_file_metadata('{DATA_DIR}/parquet-testing/arrow/alltypes_dictionary.parquet') ---- -data/parquet-testing/arrow/alltypes_dictionary.parquet impala version 1.3.0-INTERNAL (build 8a48ddb1eff84592b3fc06bc6f51ec120e1fffc9) 2 1 1 NULL NULL 1698 723 +{DATA_DIR}/parquet-testing/arrow/alltypes_dictionary.parquet impala version 1.3.0-INTERNAL (build 8a48ddb1eff84592b3fc06bc6f51ec120e1fffc9) 2 1 1 NULL NULL 1698 723 diff --git a/test/sql/copy/parquet/fixed.test b/test/sql/copy/parquet/fixed.test index 7fe3f8b0cfcb..d2cfd060cd4c 100644 --- a/test/sql/copy/parquet/fixed.test +++ b/test/sql/copy/parquet/fixed.test @@ -5,6 +5,6 @@ require parquet query I -SELECT data FROM parquet_scan('data/parquet-testing/fixed.parquet') +SELECT data FROM parquet_scan('{DATA_DIR}/parquet-testing/fixed.parquet') ---- \x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F diff --git a/test/sql/copy/parquet/float16.test b/test/sql/copy/parquet/float16.test index 973bc4757a37..0b65bca01f77 100644 --- a/test/sql/copy/parquet/float16.test +++ b/test/sql/copy/parquet/float16.test @@ -8,17 +8,17 @@ statement ok PRAGMA enable_verification query III -select type, type_length, logical_type from parquet_schema('data/parquet-testing/float16.parquet') where name = 'x' +select type, type_length, logical_type from parquet_schema('{DATA_DIR}/parquet-testing/float16.parquet') where name = 'x' ---- FIXED_LEN_BYTE_ARRAY 2 Float16Type() query I -select typeof(x) from read_parquet('data/parquet-testing/float16.parquet') limit 1; +select typeof(x) from read_parquet('{DATA_DIR}/parquet-testing/float16.parquet') limit 1; ---- FLOAT query I -select x from read_parquet('data/parquet-testing/float16.parquet') order by x; +select x from read_parquet('{DATA_DIR}/parquet-testing/float16.parquet') order by x; ---- -inf 0.0 @@ -30,7 +30,7 @@ nan -nan query I -select x from read_parquet('data/parquet-testing/float16.parquet') where x > 1.1 order by x; +select x from read_parquet('{DATA_DIR}/parquet-testing/float16.parquet') where x > 1.1 order by x; ---- 1.5 inf diff --git a/test/sql/copy/parquet/incorrect_converted_type.test b/test/sql/copy/parquet/incorrect_converted_type.test index 8089977ff7f6..ff7600c36c20 100644 --- a/test/sql/copy/parquet/incorrect_converted_type.test +++ b/test/sql/copy/parquet/incorrect_converted_type.test @@ -5,46 +5,46 @@ require parquet statement error -SELECT * FROM 'data/parquet-testing/broken/broken_bigint.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/broken/broken_bigint.parquet'; ---- statement error -SELECT * FROM 
'data/parquet-testing/broken/broken_date.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/broken/broken_date.parquet'; ---- statement error -SELECT * FROM 'data/parquet-testing/broken/broken_int.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/broken/broken_int.parquet'; ---- statement error -SELECT * FROM 'data/parquet-testing/broken/broken_smallint.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/broken/broken_smallint.parquet'; ---- statement error -SELECT * FROM 'data/parquet-testing/broken/broken_timestamp.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/broken/broken_timestamp.parquet'; ---- statement error -SELECT * FROM 'data/parquet-testing/broken/broken_timestamp_ms.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/broken/broken_timestamp_ms.parquet'; ---- statement error -SELECT * FROM 'data/parquet-testing/broken/broken_tinyint.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/broken/broken_tinyint.parquet'; ---- statement error -SELECT * FROM 'data/parquet-testing/broken/broken_ubigint.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/broken/broken_ubigint.parquet'; ---- statement error -SELECT * FROM 'data/parquet-testing/broken/broken_uinteger.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/broken/broken_uinteger.parquet'; ---- statement error -SELECT * FROM 'data/parquet-testing/broken/broken_usmallint.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/broken/broken_usmallint.parquet'; ---- statement error -SELECT * FROM 'data/parquet-testing/broken/broken_utinyint.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/broken/broken_utinyint.parquet'; ---- diff --git a/test/sql/copy/parquet/json_parquet.test b/test/sql/copy/parquet/json_parquet.test index 20384997015b..94a01d5d1d83 100644 --- a/test/sql/copy/parquet/json_parquet.test +++ b/test/sql/copy/parquet/json_parquet.test @@ -7,7 +7,7 @@ require parquet require json statement ok -CREATE TABLE json_tbl AS FROM 'data/parquet-testing/json_convertedtype.parquet'; +CREATE TABLE json_tbl AS FROM '{DATA_DIR}/parquet-testing/json_convertedtype.parquet'; query I SELECT json_extract(TX_JSON[1], 'block_hash') FROM json_tbl diff --git a/test/sql/copy/parquet/lineitem_arrow.test b/test/sql/copy/parquet/lineitem_arrow.test index 4d3d318cb429..0e2224ff440c 100644 --- a/test/sql/copy/parquet/lineitem_arrow.test +++ b/test/sql/copy/parquet/lineitem_arrow.test @@ -7,7 +7,7 @@ require tpch require parquet statement ok -CREATE TABLE lineitem AS SELECT * FROM 'data/parquet-testing/arrow/lineitem-arrow.parquet' +CREATE TABLE lineitem AS SELECT * FROM '{DATA_DIR}/parquet-testing/arrow/lineitem-arrow.parquet' query I nosort q01 PRAGMA tpch(1) @@ -21,7 +21,7 @@ statement ok DROP TABLE lineitem statement ok -CREATE VIEW lineitem AS SELECT * FROM 'data/parquet-testing/arrow/lineitem-arrow.parquet' +CREATE VIEW lineitem AS SELECT * FROM '{DATA_DIR}/parquet-testing/arrow/lineitem-arrow.parquet' query I nosort q01 PRAGMA tpch(1) diff --git a/test/sql/copy/parquet/parallel_parquet_glob.test b/test/sql/copy/parquet/parallel_parquet_glob.test index 10ae596acadf..db96fe68e4ef 100644 --- a/test/sql/copy/parquet/parallel_parquet_glob.test +++ b/test/sql/copy/parquet/parallel_parquet_glob.test @@ -11,27 +11,27 @@ statement ok PRAGMA threads=4 query I -select count(*) from parquet_scan('data/parquet-testing/glob/t?.parquet') +select count(*) from parquet_scan('{DATA_DIR}/parquet-testing/glob/t?.parquet') ---- 2 query I -select count(*) from parquet_scan('data/parquet-testing/glob/*') +select count(*) from 
parquet_scan('{DATA_DIR}/parquet-testing/glob/*') ---- 2 query I -select count(*) from parquet_scan('data/parquet-testing/glob/*.parquet') +select count(*) from parquet_scan('{DATA_DIR}/parquet-testing/glob/*.parquet') ---- 2 query I -select count(*) from parquet_scan('data/parquet-testing/g*/*.parquet') +select count(*) from parquet_scan('{DATA_DIR}/parquet-testing/g*/*.parquet') ---- 3 query I -select count(*) from parquet_scan('data/parquet-testing/g*/t1.parquet') +select count(*) from parquet_scan('{DATA_DIR}/parquet-testing/g*/t1.parquet') ---- 2 @@ -39,11 +39,11 @@ statement ok SET parquet_metadata_cache=true query I -select count(*) from parquet_scan('data/parquet-testing/g*/t1.parquet') +select count(*) from parquet_scan('{DATA_DIR}/parquet-testing/g*/t1.parquet') ---- 2 query I -select count(*) from parquet_scan('data/parquet-testing/g*/t1.parquet') +select count(*) from parquet_scan('{DATA_DIR}/parquet-testing/g*/t1.parquet') ---- 2 diff --git a/test/sql/copy/parquet/parquet2.test b/test/sql/copy/parquet/parquet2.test index fe6aa3ab114d..03474ec7c028 100644 --- a/test/sql/copy/parquet/parquet2.test +++ b/test/sql/copy/parquet/parquet2.test @@ -18,7 +18,7 @@ require parquet query I -SELECT id FROM 'data/parquet-testing/p2.parquet' offset 4968; +SELECT id FROM '{DATA_DIR}/parquet-testing/p2.parquet' offset 4968; ---- 1436 2596 @@ -34,7 +34,7 @@ SELECT id FROM 'data/parquet-testing/p2.parquet' offset 4968; query I -SELECT id FROM 'data/parquet-testing/p2.parquet' limit 10; +SELECT id FROM '{DATA_DIR}/parquet-testing/p2.parquet' limit 10; ---- 2644 8534 @@ -50,7 +50,7 @@ SELECT id FROM 'data/parquet-testing/p2.parquet' limit 10; query I -SELECT id FROM 'data/parquet-testing/p2.parquet' limit 100; +SELECT id FROM '{DATA_DIR}/parquet-testing/p2.parquet' limit 100; ---- 2644 8534 @@ -156,7 +156,7 @@ SELECT id FROM 'data/parquet-testing/p2.parquet' limit 100; query I -SELECT id_with_null FROM 'data/parquet-testing/p2.parquet' limit 100; +SELECT id_with_null FROM '{DATA_DIR}/parquet-testing/p2.parquet' limit 100; ---- 2644 8534 @@ -261,20 +261,20 @@ NULL query IIIIIIII -select min(id), max(id), sum(id), count(id), min(id_with_null), max(id_with_null), sum(id_with_null), count(id_with_null) from 'data/parquet-testing/p2.parquet' +select min(id), max(id), sum(id), count(id), min(id_with_null), max(id_with_null), sum(id_with_null), count(id_with_null) from '{DATA_DIR}/parquet-testing/p2.parquet' ---- 42 9998 24994580 4979 42 9998 19999680 3984 query IIII -select min(id_int), max(id_int), sum(id_int), count(id_int) from 'data/parquet-testing/p2.parquet' +select min(id_int), max(id_int), sum(id_int), count(id_int) from '{DATA_DIR}/parquet-testing/p2.parquet' ---- 42 9998 19999680 3984 # from bug 2882 query I -select * from 'data/parquet-testing/7-set.snappy.arrow2.parquet'; +select * from '{DATA_DIR}/parquet-testing/7-set.snappy.arrow2.parquet'; ---- 0 1 diff --git a/test/sql/copy/parquet/parquet2strings.test b/test/sql/copy/parquet/parquet2strings.test index 89d4fe962a0a..d36f8136cede 100644 --- a/test/sql/copy/parquet/parquet2strings.test +++ b/test/sql/copy/parquet/parquet2strings.test @@ -23,5 +23,5 @@ require parquet query I -SELECT id_string FROM 'data/parquet-testing/p2strings.parquet' limit 10; +SELECT id_string FROM '{DATA_DIR}/parquet-testing/p2strings.parquet' limit 10; ---- diff --git a/test/sql/copy/parquet/parquet_10148.test b/test/sql/copy/parquet/parquet_10148.test index 0b75454f9be6..a8b95e6cb21d 100644 --- a/test/sql/copy/parquet/parquet_10148.test +++ 
b/test/sql/copy/parquet/parquet_10148.test @@ -5,7 +5,7 @@ require parquet query I -SELECT CDCONO FROM 'data/parquet-testing/bug10148-wide-decimal-stats.parquet' +SELECT CDCONO FROM '{DATA_DIR}/parquet-testing/bug10148-wide-decimal-stats.parquet' ---- 0 0 diff --git a/test/sql/copy/parquet/parquet_10279.test b/test/sql/copy/parquet/parquet_10279.test index 8a5f53f87856..731b1e65032b 100644 --- a/test/sql/copy/parquet/parquet_10279.test +++ b/test/sql/copy/parquet/parquet_10279.test @@ -5,7 +5,7 @@ require parquet query IIIIIIII -SELECT * FROM 'data/parquet-testing/issue10279_delta_encoding.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/issue10279_delta_encoding.parquet' ---- MIN_VALUE false -128 -32768 -2147483648 -9223372036854775808 1e-45 5e-324 MAX_VALUE true 127 32767 2147483647 9223372036854775807 3.4028235e+38 1.7976931348623157e+308 diff --git a/test/sql/copy/parquet/parquet_12621.test b/test/sql/copy/parquet/parquet_12621.test index 3b302fe7c52e..23370d3dc95f 100644 --- a/test/sql/copy/parquet/parquet_12621.test +++ b/test/sql/copy/parquet/parquet_12621.test @@ -6,7 +6,7 @@ require parquet query I select * -from read_parquet('data/parquet-testing/issue12621.parquet') +from read_parquet('{DATA_DIR}/parquet-testing/issue12621.parquet') limit 1; ---- 0.0000 \ No newline at end of file diff --git a/test/sql/copy/parquet/parquet_13053_duplicate_column_names.test b/test/sql/copy/parquet/parquet_13053_duplicate_column_names.test index 5818975a80c6..a9031249ba4b 100644 --- a/test/sql/copy/parquet/parquet_13053_duplicate_column_names.test +++ b/test/sql/copy/parquet/parquet_13053_duplicate_column_names.test @@ -6,7 +6,7 @@ require parquet # original names query I -select name from parquet_schema( 'data/parquet-testing/bug13053.parquet') offset 1; +select name from parquet_schema( '{DATA_DIR}/parquet-testing/bug13053.parquet') offset 1; ---- column COLUMN @@ -14,7 +14,7 @@ Column # renamed names query I -SELECT column_name FROM (DESCRIBE FROM 'data/parquet-testing/bug13053.parquet') +SELECT column_name FROM (DESCRIBE FROM '{DATA_DIR}/parquet-testing/bug13053.parquet') ---- column COLUMN_1 @@ -22,14 +22,14 @@ Column_2 # case where _1 is already a column, maybe bit ugly but fine and consistent with CSV reader query I -select name from parquet_schema( 'data/parquet-testing/bug13053-2.parquet') offset 1; +select name from parquet_schema( '{DATA_DIR}/parquet-testing/bug13053-2.parquet') offset 1; ---- column column_1 column query I -SELECT column_name FROM (DESCRIBE FROM 'data/parquet-testing/bug13053-2.parquet') +SELECT column_name FROM (DESCRIBE FROM '{DATA_DIR}/parquet-testing/bug13053-2.parquet') ---- column column_1 diff --git a/test/sql/copy/parquet/parquet_1554.test b/test/sql/copy/parquet/parquet_1554.test index 70f73f9d26ab..91fe5350e1d0 100644 --- a/test/sql/copy/parquet/parquet_1554.test +++ b/test/sql/copy/parquet/parquet_1554.test @@ -5,12 +5,12 @@ require parquet query I -SELECT COUNT(backlink_count) FROM parquet_scan('data/parquet-testing/bug1554.parquet') WHERE http_status_code=200 +SELECT COUNT(backlink_count) FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1554.parquet') WHERE http_status_code=200 ---- 0 query II -SELECT http_status_code, COUNT(backlink_count) FROM parquet_scan('data/parquet-testing/bug1554.parquet') GROUP BY http_status_code ORDER BY http_status_code +SELECT http_status_code, COUNT(backlink_count) FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1554.parquet') GROUP BY http_status_code ORDER BY http_status_code ---- 200 0 301 0 diff --git 
a/test/sql/copy/parquet/parquet_1588.test b/test/sql/copy/parquet/parquet_1588.test index 497843e112a0..dee6030c697c 100644 --- a/test/sql/copy/parquet/parquet_1588.test +++ b/test/sql/copy/parquet/parquet_1588.test @@ -8,7 +8,7 @@ statement ok pragma enable_verification # pandas equivalent: -# df = pandas.read_parquet('data/parquet-testing/bug1588.parquet') +# df = pandas.read_parquet('{DATA_DIR}/parquet-testing/bug1588.parquet') # df[(df.has_image_link == 1) & ((df.has_image_alt_text == 1) | df.is_image_alt_text_empty == 1)] statement ok @@ -29,26 +29,26 @@ select count(*) from some_bools where val = '1'::bool; query I -SELECT has_image_link FROM parquet_scan('data/parquet-testing/bug1588.parquet') where has_image_link = 1 +SELECT has_image_link FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1588.parquet') where has_image_link = 1 ---- 1 1 1 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/bug1588.parquet') WHERE has_image_link = 1 +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1588.parquet') WHERE has_image_link = 1 ---- 3 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/bug1588.parquet') WHERE has_image_link = '1'::bool +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1588.parquet') WHERE has_image_link = '1'::bool ---- 3 # original query for the lolz query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/bug1588.parquet') WHERE (has_image_link = 1 AND (has_image_alt_text = 0 OR is_image_alt_text_empty = 1)) +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1588.parquet') WHERE (has_image_link = 1 AND (has_image_alt_text = 0 OR is_image_alt_text_empty = 1)) ---- 2 diff --git a/test/sql/copy/parquet/parquet_1589.test b/test/sql/copy/parquet/parquet_1589.test index 28994292e3a5..4455fa83a60b 100644 --- a/test/sql/copy/parquet/parquet_1589.test +++ b/test/sql/copy/parquet/parquet_1589.test @@ -8,9 +8,9 @@ statement ok pragma enable_verification query I -SELECT backlink_count FROM parquet_scan('data/parquet-testing/bug1589.parquet') LIMIT 1 +SELECT backlink_count FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1589.parquet') LIMIT 1 ---- NULL statement ok -SELECT * FROM parquet_scan('data/parquet-testing/bug1589.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1589.parquet') diff --git a/test/sql/copy/parquet/parquet_1618_struct_strings.test b/test/sql/copy/parquet/parquet_1618_struct_strings.test index 06f3fbe49e67..475b4bb8ed2b 100644 --- a/test/sql/copy/parquet/parquet_1618_struct_strings.test +++ b/test/sql/copy/parquet/parquet_1618_struct_strings.test @@ -5,19 +5,19 @@ require parquet query I -SELECT "inner"['str_field'] FROM parquet_scan('data/parquet-testing/bug1618_struct_strings.parquet') +SELECT "inner"['str_field'] FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1618_struct_strings.parquet') ---- hello NULL query I -SELECT "inner"['f64_field'] FROM parquet_scan('data/parquet-testing/bug1618_struct_strings.parquet') +SELECT "inner"['f64_field'] FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1618_struct_strings.parquet') ---- NULL 1.23 query I -SELECT "inner" FROM parquet_scan('data/parquet-testing/bug1618_struct_strings.parquet') +SELECT "inner" FROM parquet_scan('{DATA_DIR}/parquet-testing/bug1618_struct_strings.parquet') ---- {'str_field': hello, 'f64_field': NULL} {'str_field': NULL, 'f64_field': 1.23} diff --git a/test/sql/copy/parquet/parquet_1619.test b/test/sql/copy/parquet/parquet_1619.test index f3c05fcb5595..f5da06d66fef 100644 --- 
a/test/sql/copy/parquet/parquet_1619.test +++ b/test/sql/copy/parquet/parquet_1619.test @@ -5,19 +5,19 @@ require parquet query I -select struct_extract("inner", 'f64_field') from parquet_scan('data/parquet-testing/struct.parquet'); +select struct_extract("inner", 'f64_field') from parquet_scan('{DATA_DIR}/parquet-testing/struct.parquet'); ---- NULL 1.23 query I -select ("inner")."f64_field" from parquet_scan('data/parquet-testing/struct.parquet'); +select ("inner")."f64_field" from parquet_scan('{DATA_DIR}/parquet-testing/struct.parquet'); ---- NULL 1.23 query I -select "inner"['f64_field'] from parquet_scan('data/parquet-testing/struct.parquet'); +select "inner"['f64_field'] from parquet_scan('{DATA_DIR}/parquet-testing/struct.parquet'); ---- NULL 1.23 diff --git a/test/sql/copy/parquet/parquet_1723.test_slow b/test/sql/copy/parquet/parquet_1723.test_slow index 4bc5b88df928..b5992d3c26c9 100644 --- a/test/sql/copy/parquet/parquet_1723.test_slow +++ b/test/sql/copy/parquet/parquet_1723.test_slow @@ -5,10 +5,10 @@ require parquet query I nosort query -select * from 'data/parquet-testing/leftdate3_192_loop_1.parquet' +select * from '{DATA_DIR}/parquet-testing/leftdate3_192_loop_1.parquet' statement ok -create table test as select * from 'data/parquet-testing/leftdate3_192_loop_1.parquet' +create table test as select * from '{DATA_DIR}/parquet-testing/leftdate3_192_loop_1.parquet' query I nosort query select * from test diff --git a/test/sql/copy/parquet/parquet_2267.test b/test/sql/copy/parquet/parquet_2267.test index cc477181c136..f6657c2ee9b1 100644 --- a/test/sql/copy/parquet/parquet_2267.test +++ b/test/sql/copy/parquet/parquet_2267.test @@ -5,11 +5,11 @@ require parquet query I -SELECT * FROM parquet_scan('data/parquet-testing/bug2267.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/bug2267.parquet') ---- [{'disabledPlans': [bea4c11e-220a-4e6d-8eb8-8ea15d019f90], 'skuId': c7df2760-2c81-4ef7-b578-5b5392b571df}, {'disabledPlans': [8a256a2b-b617-496d-b51b-e76466e88db0, 41781fb2-bc02-4b7c-bd55-b576c07bb09d, eec0eb4f-6444-4f95-aba0-50c24d67f998], 'skuId': 84a661c4-e949-4bd2-a560-ed7766fcaf2b}, {'disabledPlans': [], 'skuId': b05e124f-c7cc-45a0-a6aa-8cf78c946968}, {'disabledPlans': [], 'skuId': f30db892-07e9-47e9-837c-80727f46fd3d}] query I -SELECT assignedLicenses[1] FROM parquet_scan('data/parquet-testing/bug2267.parquet') +SELECT assignedLicenses[1] FROM parquet_scan('{DATA_DIR}/parquet-testing/bug2267.parquet') ---- {'disabledPlans': [bea4c11e-220a-4e6d-8eb8-8ea15d019f90], 'skuId': c7df2760-2c81-4ef7-b578-5b5392b571df} diff --git a/test/sql/copy/parquet/parquet_4442.test b/test/sql/copy/parquet/parquet_4442.test index 30893f52ef69..338f444ec174 100644 --- a/test/sql/copy/parquet/parquet_4442.test +++ b/test/sql/copy/parquet/parquet_4442.test @@ -5,6 +5,6 @@ require parquet query IIIIIIIIIIIIIIIII -SELECT * FROM 'data/parquet-testing/bug4442.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/bug4442.parquet' ---- 12 5184 1 22 2011-10-06 22:21:49.58+00 outbound 323020033 {} 2100 33 0 7 10 0 1317427200000 1317939709580 11 diff --git a/test/sql/copy/parquet/parquet_4859.test b/test/sql/copy/parquet/parquet_4859.test index 5d59d393ac64..d23bac37882b 100644 --- a/test/sql/copy/parquet/parquet_4859.test +++ b/test/sql/copy/parquet/parquet_4859.test @@ -5,4 +5,4 @@ require parquet statement ok -select "repositoryTopics.edges" from "data/parquet-testing/bug4859.parquet" +select "repositoryTopics.edges" from "{DATA_DIR}/parquet-testing/bug4859.parquet" diff --git 
a/test/sql/copy/parquet/parquet_4903.test b/test/sql/copy/parquet/parquet_4903.test index 3ed54c3fb854..0bf684bfeae1 100644 --- a/test/sql/copy/parquet/parquet_4903.test +++ b/test/sql/copy/parquet/parquet_4903.test @@ -6,5 +6,5 @@ require parquet # file is corrupt statement error -SELECT type_param_constraints FROM 'data/parquet-testing/bug4903.parquet' limit 10 +SELECT type_param_constraints FROM '{DATA_DIR}/parquet-testing/bug4903.parquet' limit 10 ---- diff --git a/test/sql/copy/parquet/parquet_6580.test b/test/sql/copy/parquet/parquet_6580.test index 6f2fb1ccf611..5f53f1680bab 100644 --- a/test/sql/copy/parquet/parquet_6580.test +++ b/test/sql/copy/parquet/parquet_6580.test @@ -6,7 +6,7 @@ require parquet query IIII select *, make_timestamp(dt2*1000*1000) -from read_parquet('data/parquet-testing/bug4903.parquet') +from read_parquet('{DATA_DIR}/parquet-testing/bug4903.parquet') where dt2 <= -9214570800 limit 10 ---- diff --git a/test/sql/copy/parquet/parquet_6630_union_by_name.test b/test/sql/copy/parquet/parquet_6630_union_by_name.test index 1be94c96dc8c..119c326a38a5 100644 --- a/test/sql/copy/parquet/parquet_6630_union_by_name.test +++ b/test/sql/copy/parquet/parquet_6630_union_by_name.test @@ -8,7 +8,7 @@ query II select distinct name, true as is_suspended_or_cancelled -from read_parquet('data/parquet-testing/issue6630_*.parquet',union_by_name=True) +from read_parquet('{DATA_DIR}/parquet-testing/issue6630_*.parquet',union_by_name=True) where "timestamp" between '2023-01-26 20:00:00' and '2023-01-28 04:00:00' and (suspended = true or cancelled <> '' or state='SUSPENDED') and actual_time is null; @@ -18,7 +18,7 @@ query II select distinct name, true as is_suspended_or_cancelled -from read_parquet('data/parquet-testing/issue6630_*.parquet', union_by_name=False) +from read_parquet('{DATA_DIR}/parquet-testing/issue6630_*.parquet', union_by_name=False) where "timestamp" between '2023-01-26 20:00:00' and '2023-01-28 04:00:00' and (suspended = true or cancelled <> '' or state='SUSPENDED') and actual_time is null; diff --git a/test/sql/copy/parquet/parquet_6990.test_slow b/test/sql/copy/parquet/parquet_6990.test_slow index 550eb4607f67..322935baa4ce 100644 --- a/test/sql/copy/parquet/parquet_6990.test_slow +++ b/test/sql/copy/parquet/parquet_6990.test_slow @@ -8,4 +8,4 @@ statement ok PRAGMA enable_verification statement ok -SELECT * FROM 'data/parquet-testing/issue6990.parquet'; +SELECT * FROM '{DATA_DIR}/parquet-testing/issue6990.parquet'; diff --git a/test/sql/copy/parquet/parquet_arrow_timestamp.test b/test/sql/copy/parquet/parquet_arrow_timestamp.test index 5bd52c04b520..21315568dd35 100644 --- a/test/sql/copy/parquet/parquet_arrow_timestamp.test +++ b/test/sql/copy/parquet/parquet_arrow_timestamp.test @@ -5,11 +5,11 @@ require parquet query T -select * from parquet_scan('data/parquet-testing/timestamp.parquet') order by 1 +select * from parquet_scan('{DATA_DIR}/parquet-testing/timestamp.parquet') order by 1 ---- 2020-10-05 17:21:49.48844 query T -select * from parquet_scan('data/parquet-testing/timestamp-ms.parquet') order by 1 +select * from parquet_scan('{DATA_DIR}/parquet-testing/timestamp-ms.parquet') order by 1 ---- 2020-10-05 17:21:49 diff --git a/test/sql/copy/parquet/parquet_blob.test b/test/sql/copy/parquet/parquet_blob.test index 9bb238da6df9..6b89d6ab4193 100644 --- a/test/sql/copy/parquet/parquet_blob.test +++ b/test/sql/copy/parquet/parquet_blob.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification query III -select * from 
parquet_scan('data/parquet-testing/blob.parquet') +select * from parquet_scan('{DATA_DIR}/parquet-testing/blob.parquet') ---- 1 \x04\x00 str1 2 \x04\x00\x80 str2 diff --git a/test/sql/copy/parquet/parquet_blob_string.test b/test/sql/copy/parquet/parquet_blob_string.test index d70c90ef5ae2..fb7d6693d7fe 100644 --- a/test/sql/copy/parquet/parquet_blob_string.test +++ b/test/sql/copy/parquet/parquet_blob_string.test @@ -8,32 +8,32 @@ statement ok PRAGMA enable_verification query I -SELECT typeof(#1) FROM parquet_scan('data/parquet-testing/binary_string.parquet',binary_as_string=False) limit 1 +SELECT typeof(#1) FROM parquet_scan('{DATA_DIR}/parquet-testing/binary_string.parquet',binary_as_string=False) limit 1 ---- BLOB query I -SELECT * FROM parquet_scan('data/parquet-testing/binary_string.parquet',binary_as_string=False) +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/binary_string.parquet',binary_as_string=False) ---- foo bar baz query I -SELECT typeof(#1) FROM parquet_scan('data/parquet-testing/binary_string.parquet',binary_as_string=True) limit 1 +SELECT typeof(#1) FROM parquet_scan('{DATA_DIR}/parquet-testing/binary_string.parquet',binary_as_string=True) limit 1 ---- VARCHAR query I -SELECT * FROM parquet_scan('data/parquet-testing/binary_string.parquet',binary_as_string=True) +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/binary_string.parquet',binary_as_string=True) ---- foo bar baz query I -SELECT converted_type FROM parquet_schema('data/parquet-testing/binary_string.parquet') +SELECT converted_type FROM parquet_schema('{DATA_DIR}/parquet-testing/binary_string.parquet') ---- NULL NULL @@ -46,12 +46,12 @@ statement ok SET binary_as_string=true query I -SELECT typeof(#1) FROM parquet_scan('data/parquet-testing/binary_string.parquet') limit 1 +SELECT typeof(#1) FROM parquet_scan('{DATA_DIR}/parquet-testing/binary_string.parquet') limit 1 ---- VARCHAR query I -SELECT * FROM parquet_scan('data/parquet-testing/binary_string.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/binary_string.parquet') ---- foo bar @@ -61,12 +61,12 @@ statement ok SET binary_as_string=false query I -SELECT typeof(#1) FROM parquet_scan('data/parquet-testing/binary_string.parquet') limit 1 +SELECT typeof(#1) FROM parquet_scan('{DATA_DIR}/parquet-testing/binary_string.parquet') limit 1 ---- BLOB query I -SELECT * FROM parquet_scan('data/parquet-testing/binary_string.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/binary_string.parquet') ---- foo bar @@ -77,13 +77,13 @@ statement ok PRAGMA binary_as_string=1 query I -SELECT typeof(#1) FROM parquet_scan('data/parquet-testing/binary_string.parquet' ,binary_as_string=False) limit 1 +SELECT typeof(#1) FROM parquet_scan('{DATA_DIR}/parquet-testing/binary_string.parquet' ,binary_as_string=False) limit 1 ---- BLOB query I -SELECT * FROM parquet_scan('data/parquet-testing/binary_string.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/binary_string.parquet') ---- foo bar diff --git a/test/sql/copy/parquet/parquet_corrupt_stats.test b/test/sql/copy/parquet/parquet_corrupt_stats.test index fb5a5362dfcd..1c494d65c3c1 100644 --- a/test/sql/copy/parquet/parquet_corrupt_stats.test +++ b/test/sql/copy/parquet/parquet_corrupt_stats.test @@ -5,6 +5,6 @@ require parquet query I -FROM 'data/parquet-testing/out_of_range_stats.parquet' +FROM '{DATA_DIR}/parquet-testing/out_of_range_stats.parquet' ---- 255 diff --git a/test/sql/copy/parquet/parquet_count_star.test b/test/sql/copy/parquet/parquet_count_star.test index 
5a5c53dce84e..9c4d42c5c28c 100644 --- a/test/sql/copy/parquet/parquet_count_star.test +++ b/test/sql/copy/parquet/parquet_count_star.test @@ -5,11 +5,11 @@ require parquet query I -SELECT COUNT(*) FROM 'data/parquet-testing/out_of_range_stats.parquet' +SELECT COUNT(*) FROM '{DATA_DIR}/parquet-testing/out_of_range_stats.parquet' ---- 1 query I -select COUNT(*) from parquet_scan('data/parquet-testing/glob*/t?.parquet') +select COUNT(*) from parquet_scan('{DATA_DIR}/parquet-testing/glob*/t?.parquet') ---- 3 diff --git a/test/sql/copy/parquet/parquet_encryption.test b/test/sql/copy/parquet/parquet_encryption.test index a8255204084c..a1d7ff68e288 100644 --- a/test/sql/copy/parquet/parquet_encryption.test +++ b/test/sql/copy/parquet/parquet_encryption.test @@ -7,6 +7,9 @@ require parquet # parquet keys are not persisted across restarts require noforcestorage +# writing encrypted parquet requires httpfs to be loaded +require httpfs + statement ok PRAGMA enable_verification diff --git a/test/sql/copy/parquet/parquet_encryption_tpch.test_slow b/test/sql/copy/parquet/parquet_encryption_tpch.test_slow index 7f2644162f25..9268ee88a574 100644 --- a/test/sql/copy/parquet/parquet_encryption_tpch.test_slow +++ b/test/sql/copy/parquet/parquet_encryption_tpch.test_slow @@ -6,6 +6,8 @@ require parquet require tpch +require httpfs + statement ok CALL dbgen(sf=1) diff --git a/test/sql/copy/parquet/parquet_enum_test.test b/test/sql/copy/parquet/parquet_enum_test.test index 5cb1cd62ba45..00cb4f4ef73c 100644 --- a/test/sql/copy/parquet/parquet_enum_test.test +++ b/test/sql/copy/parquet/parquet_enum_test.test @@ -8,6 +8,6 @@ statement ok PRAGMA enable_verification query IIIIIIIIIIIIIIIIIIIIII -SELECT * FROM parquet_scan('data/parquet-testing/adam_genotypes.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/adam_genotypes.parquet') ---- {'referenceName': NULL, 'start': NULL, 'end': NULL, 'names': [name], 'splitFromMultiAllelic': false, 'referenceAllele': NULL, 'alternateAllele': NULL, 'quality': NULL, 'filtersApplied': NULL, 'filtersPassed': NULL, 'filtersFailed': [], 'annotation': NULL} NULL NULL NULL NULL NULL NULL NULL [] NULL NULL NULL NULL NULL NULL [] [] [] false false NULL NULL diff --git a/test/sql/copy/parquet/parquet_external_access.test b/test/sql/copy/parquet/parquet_external_access.test index 48ba632b02ce..5df111ee5363 100644 --- a/test/sql/copy/parquet/parquet_external_access.test +++ b/test/sql/copy/parquet/parquet_external_access.test @@ -6,42 +6,42 @@ require parquet statement ok -CREATE TABLE lineitem AS SELECT * FROM 'data/parquet-testing/arrow/lineitem-arrow.parquet' +CREATE TABLE lineitem AS SELECT * FROM '{DATA_DIR}/parquet-testing/arrow/lineitem-arrow.parquet' statement ok SET enable_external_access=false; # we cannot read parquet files statement error -SELECT * FROM 'data/parquet-testing/arrow/lineitem-arrow.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/arrow/lineitem-arrow.parquet' ---- # or their metadata statement error -SELECT * FROM parquet_metadata('data/parquet-testing/arrow/lineitem-arrow.parquet') +SELECT * FROM parquet_metadata('{DATA_DIR}/parquet-testing/arrow/lineitem-arrow.parquet') ---- statement error -SELECT * FROM parquet_schema('data/parquet-testing/arrow/lineitem-arrow.parquet') +SELECT * FROM parquet_schema('{DATA_DIR}/parquet-testing/arrow/lineitem-arrow.parquet') ---- # also not in a list statement error -SELECT * FROM parquet_scan(['data/parquet-testing/arrow/lineitem-arrow.parquet',
'data/parquet-testing/arrow/lineitem-arrow.parquet']) +SELECT * FROM parquet_scan(['{DATA_DIR}/parquet-testing/arrow/lineitem-arrow.parquet', '{DATA_DIR}/parquet-testing/arrow/lineitem-arrow.parquet']) ---- # neither can we glob statement error -SELECT * FROM glob('data/parquet-testing/arrow/lineitem-arrow.parquet') +SELECT * FROM glob('{DATA_DIR}/parquet-testing/arrow/lineitem-arrow.parquet') ---- # or copy to/from... statement error -COPY lineitem FROM 'data/parquet-testing/arrow/lineitem-arrow.parquet' +COPY lineitem FROM '{DATA_DIR}/parquet-testing/arrow/lineitem-arrow.parquet' ---- statement error -COPY lineitem TO '__TEST_DIR__/lineitem.parquet' +COPY lineitem TO '{TEMP_DIR}/lineitem.parquet' ---- # we also can't just enable external access again diff --git a/test/sql/copy/parquet/parquet_filename.test b/test/sql/copy/parquet/parquet_filename.test index 43dfd080dbbd..04dc053d3e74 100644 --- a/test/sql/copy/parquet/parquet_filename.test +++ b/test/sql/copy/parquet/parquet_filename.test @@ -4,47 +4,49 @@ require parquet -# Simple glob with filenames, note that we replace \ for / to make tests pass on windows +# Simple glob with filenames query III -select i, j, replace(filename, '\', '/') from parquet_scan('data/parquet-testing/glob*/t?.parquet', FILENAME=1) order by i; +select i, j, parse_path(filename)[-2:] from parquet_scan('{DATA_DIR}/parquet-testing/glob*/t?.parquet', FILENAME=1) order by i; ---- -1 a data/parquet-testing/glob/t1.parquet -2 b data/parquet-testing/glob/t2.parquet -3 c data/parquet-testing/glob2/t1.parquet +1 a [glob, t1.parquet] +2 b [glob, t2.parquet] +3 c [glob2, t1.parquet] # Filter on filename col query III -select i, j, replace(filename, '\', '/') as file from parquet_scan('data/parquet-testing/glob*/t?.parquet', FILENAME=1) where file='data/parquet-testing/glob2/t1.parquet'; +SELECT i, j, filename_tail3: array_to_string(parse_path(filename)[-3:], '/') +FROM parquet_scan('{DATA_DIR}/parquet-testing/glob*/t?.parquet', FILENAME=1) +WHERE filename_tail3 = 'parquet-testing/glob2/t1.parquet' ---- -3 c data/parquet-testing/glob2/t1.parquet +3 c parquet-testing/glob2/t1.parquet # filter on multiple vector sizes of rows query I -SELECT count(filename) FROM parquet_scan('data/parquet-testing/p2.parquet', FILENAME=1) where id < 1000; +SELECT count(filename) FROM parquet_scan('{DATA_DIR}/parquet-testing/p2.parquet', FILENAME=1) where id < 1000; ---- 479 # filter pushdown on filename query I -SELECT count(id) FROM parquet_scan('data/parquet-testing/p2.parquet', FILENAME=1) where filename >= 'data'; +SELECT count(id) FROM parquet_scan('{DATA_DIR}/parquet-testing/p2.parquet', FILENAME=1) where filename >= '{DATA_DIR}'; ---- 4979 # Filter on non-filename col query I -select replace(filename, '\', '/') from parquet_scan('data/parquet-testing/glob*/t?.parquet', FILENAME=1) where i=2; +select parse_path(filename)[-2:] from parquet_scan('{DATA_DIR}/parquet-testing/glob*/t?.parquet', FILENAME=1) where i=2; ---- -data/parquet-testing/glob/t2.parquet +[glob, t2.parquet] statement ok CREATE TABLE test_csv AS SELECT 1 as id, 'test_csv_content' as filename; statement ok -COPY test_csv TO '__TEST_DIR__/filename_as_column.csv' WITH HEADER; +COPY test_csv TO '{TEMP_DIR}/filename_as_column.csv' WITH HEADER; # This currently fails with a binder error statement error -SELECT id, filename FROM read_csv_auto('__TEST_DIR__/filename_as_column.csv', FILENAME=1); +SELECT id, filename FROM read_csv_auto('{TEMP_DIR}/filename_as_column.csv', FILENAME=1); ---- # Parquet filename name conflict @@ 
-52,11 +54,11 @@ statement ok CREATE TABLE test AS SELECT 1 as id, 'test' as filename; statement ok -COPY test TO '__TEST_DIR__/filename_as_column.parquet'; +COPY test TO '{TEMP_DIR}/filename_as_column.parquet'; # we currently don't support filename as a column name when using the filename option statement error -SELECT * FROM parquet_scan('__TEST_DIR__/filename_as_column.parquet', FILENAME=1); +SELECT * FROM parquet_scan('{TEMP_DIR}/filename_as_column.parquet', FILENAME=1); ---- # Now also test copy @@ -64,24 +66,24 @@ statement ok CREATE TABLE test_copy (i INT, j VARCHAR, filename VARCHAR); statement ok -INSERT INTO test_copy FROM read_parquet('data/parquet-testing/glob/t1.parquet', filename=1, binary_as_string=1); +INSERT INTO test_copy FROM read_parquet('{DATA_DIR}/parquet-testing/glob/t1.parquet', filename=1, binary_as_string=1); query III -SELECT i, j, replace(filename, '\', '/') FROM test_copy +SELECT i, j, parse_path(filename)[-2:] FROM test_copy ---- -1 a data/parquet-testing/glob/t1.parquet +1 a [glob, t1.parquet] statement ok -INSERT INTO test_copy FROM read_parquet('data/parquet-testing/glob/t1.parquet', filename=1); +INSERT INTO test_copy FROM read_parquet('{DATA_DIR}/parquet-testing/glob/t1.parquet', filename=1); query III -SELECT i, j, replace(filename, '\', '/') FROM test_copy +SELECT i, j, parse_path(filename)[-2:] FROM test_copy ---- -1 a data/parquet-testing/glob/t1.parquet -1 a data/parquet-testing/glob/t1.parquet +1 a [glob, t1.parquet] +1 a [glob, t1.parquet] statement error -COPY test_copy FROM 'data/parquet-testing/glob/t1.parquet'; +COPY test_copy FROM '{DATA_DIR}/parquet-testing/glob/t1.parquet'; ---- column count mismatch @@ -90,18 +92,18 @@ statement ok CREATE TABLE test_table_large AS SELECT * FROM range(0,10000) tbl(i); statement ok -COPY test_table_large TO '__TEST_DIR__/test_table_large.parquet' (ROW_GROUP_SIZE 1000); +COPY test_table_large TO '{TEMP_DIR}/test_table_large.parquet' (ROW_GROUP_SIZE 1000); query II -SELECT sum(i), max(regexp_replace(filename, '^.*/', '')) FROM parquet_scan('__TEST_DIR__/test_table_large.parquet', FILENAME=1) where i>5000; +SELECT sum(i), max(parse_filename(filename)) FROM parquet_scan('{TEMP_DIR}/test_table_large.parquet', FILENAME=1) where i>5000; ---- 37492500 test_table_large.parquet # Same file twice query III -SELECT i, j, replace(filename, '\', '/') as file FROM parquet_scan(['data/parquet-testing/glob/t1.parquet', 'data/parquet-testing/glob/t1.parquet', 'data/parquet-testing/glob/t2.parquet'], FILENAME=1) where file like '%t1%' +SELECT i, j, parse_path(filename)[-2:] as file FROM parquet_scan(['{DATA_DIR}/parquet-testing/glob/t1.parquet', '{DATA_DIR}/parquet-testing/glob/t1.parquet', '{DATA_DIR}/parquet-testing/glob/t2.parquet'], FILENAME=1) where filename like '%t1%' ---- -1 a data/parquet-testing/glob/t1.parquet -1 a data/parquet-testing/glob/t1.parquet +1 a [glob, t1.parquet] +1 a [glob, t1.parquet] diff --git a/test/sql/copy/parquet/parquet_filename_filter.test b/test/sql/copy/parquet/parquet_filename_filter.test index a10611ef267b..671cbac24529 100644 --- a/test/sql/copy/parquet/parquet_filename_filter.test +++ b/test/sql/copy/parquet/parquet_filename_filter.test @@ -5,65 +5,65 @@ require parquet query III -select id, value as f, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where filename='value1'; +select id, value as f, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', 
HIVE_PARTITIONING=1) where filename='value1'; ---- # requires notwindows for windows-style path backslash reasons require notwindows query III -select i, j, filename from parquet_scan('data/parquet-testing/glob*/t?.parquet', FILENAME=1) order by i; +select i, j, filename from parquet_scan('{DATA_DIR}/parquet-testing/glob*/t?.parquet', FILENAME=1) order by i; ---- -1 a data/parquet-testing/glob/t1.parquet -2 b data/parquet-testing/glob/t2.parquet -3 c data/parquet-testing/glob2/t1.parquet +1 a {DATA_DIR}/parquet-testing/glob/t1.parquet +2 b {DATA_DIR}/parquet-testing/glob/t2.parquet +3 c {DATA_DIR}/parquet-testing/glob2/t1.parquet query III -select i, j, filename as file from parquet_scan('data/parquet-testing/glob*/t?.parquet', FILENAME=1) where file='data/parquet-testing/glob2/t1.parquet' or file='data/parquet-testing/glob/t2.parquet' order by i; +select i, j, filename as file from parquet_scan('{DATA_DIR}/parquet-testing/glob*/t?.parquet', FILENAME=1) where file='{DATA_DIR}/parquet-testing/glob2/t1.parquet' or file='{DATA_DIR}/parquet-testing/glob/t2.parquet' order by i; ---- -2 b data/parquet-testing/glob/t2.parquet -3 c data/parquet-testing/glob2/t1.parquet +2 b {DATA_DIR}/parquet-testing/glob/t2.parquet +3 c {DATA_DIR}/parquet-testing/glob2/t1.parquet query III -select i, j, filename as file from parquet_scan('data/parquet-testing/glob*/t?.parquet', FILENAME=1) where file='data/parquet-testing/glob2/t1.parquet' and i=3 order by i; +select i, j, filename as file from parquet_scan('{DATA_DIR}/parquet-testing/glob*/t?.parquet', FILENAME=1) where file='{DATA_DIR}/parquet-testing/glob2/t1.parquet' and i=3 order by i; ---- -3 c data/parquet-testing/glob2/t1.parquet +3 c {DATA_DIR}/parquet-testing/glob2/t1.parquet query III -select i, j, filename as file from parquet_scan('data/parquet-testing/glob*/t?.parquet', FILENAME=1) where file='data/parquet-testing/glob2/t1.parquet' and i=2 order by i; +select i, j, filename as file from parquet_scan('{DATA_DIR}/parquet-testing/glob*/t?.parquet', FILENAME=1) where file='{DATA_DIR}/parquet-testing/glob2/t1.parquet' and i=2 order by i; ---- # This query should trigger the file skipping mechanism, which prevents reading metadata for files that are not scanned query IIII -select id, value, date, filename from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, FILENAME=1) order by id; +select id, value, date, filename from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, FILENAME=1) order by id; ---- -1 value1 2012-01-01 data/parquet-testing/hive-partitioning/different_order/date=2012-01-01/part=a/test.parquet -2 value2 2013-01-01 data/parquet-testing/hive-partitioning/different_order/part=b/date=2013-01-01/test.parquet +1 value1 2012-01-01 {DATA_DIR}/parquet-testing/hive-partitioning/different_order/date=2012-01-01/part=a/test.parquet +2 value2 2013-01-01 {DATA_DIR}/parquet-testing/hive-partitioning/different_order/part=b/date=2013-01-01/test.parquet # These queries test that the file skipping mechanism works even for complex filters on multiple filename-based filters query IIII -select id, value, date, filename from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, FILENAME=1) where concat(date,filename)='2013-01-01data/parquet-testing/hive-partitioning/different_order/part=b/date=2013-01-01/test.parquet'; +select id, value, date, filename from 
parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, FILENAME=1) where concat(date,filename)='2013-01-01{DATA_DIR}/parquet-testing/hive-partitioning/different_order/part=b/date=2013-01-01/test.parquet'; ---- -2 value2 2013-01-01 data/parquet-testing/hive-partitioning/different_order/part=b/date=2013-01-01/test.parquet +2 value2 2013-01-01 {DATA_DIR}/parquet-testing/hive-partitioning/different_order/part=b/date=2013-01-01/test.parquet query IIII -select id, value, date, filename from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, FILENAME=1) where concat(date,filename)='2012-01-01data/parquet-testing/hive-partitioning/different_order/date=2012-01-01/part=a/test.parquet'; +select id, value, date, filename from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, FILENAME=1) where concat(date,filename)='2012-01-01{DATA_DIR}/parquet-testing/hive-partitioning/different_order/date=2012-01-01/part=a/test.parquet'; ---- -1 value1 2012-01-01 data/parquet-testing/hive-partitioning/different_order/date=2012-01-01/part=a/test.parquet +1 value1 2012-01-01 {DATA_DIR}/parquet-testing/hive-partitioning/different_order/date=2012-01-01/part=a/test.parquet # Ensure we don't somehow end up mixing things up query III -select id, value as f, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where f='value2'; +select id, value as f, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where f='value2'; ---- 2 value2 2013-01-01 query III -select id, value as f, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where f='value1'; +select id, value as f, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where f='value1'; ---- 1 value1 2012-01-01 query III -select id, value as f, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where filename='value1'; +select id, value as f, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where filename='value1'; ---- # These tests confirm that the ParquetScanStats will properly handle the pruned files list @@ -72,31 +72,31 @@ statement ok SET parquet_metadata_cache=true; query II -select id, value from parquet_scan('data/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%mismatching_count%' and id > 1; +select id, value from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%mismatching_count%' and id > 1; ---- 2 value2 query II -select id, value from parquet_scan('data/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%mismatching_count%' and id > 1; +select id, value from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%mismatching_count%' and id > 1; ---- 2 value2 query II -select id, value from parquet_scan('data/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%mismatching_count%' and value = 'value1'; +select id, value from
parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%mismatching_count%' and value = 'value1'; ---- 1 value1 query II -select id, value from parquet_scan('data/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%mismatching_count%' and value = 'value2'; +select id, value from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%mismatching_count%' and value = 'value2'; ---- 2 value2 query II -select id, value from parquet_scan('data/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%simple%' and value = 'value1'; +select id, value from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%simple%' and value = 'value1'; ---- 1 value1 query II -select id, value from parquet_scan('data/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%simple%' and value = 'value2'; +select id, value from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/*/*/*/test.parquet', FILENAME=1) where filename like '%simple%' and value = 'value2'; ---- 2 value2 diff --git a/test/sql/copy/parquet/parquet_filter_bug1391.test b/test/sql/copy/parquet/parquet_filter_bug1391.test index 841171b64494..0f95c1395b11 100644 --- a/test/sql/copy/parquet/parquet_filter_bug1391.test +++ b/test/sql/copy/parquet/parquet_filter_bug1391.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification statement ok -CREATE VIEW tbl AS SELECT * FROM PARQUET_SCAN('data/parquet-testing/filter_bug1391.parquet'); +CREATE VIEW tbl AS SELECT * FROM PARQUET_SCAN('{DATA_DIR}/parquet-testing/filter_bug1391.parquet'); query I SELECT ORGUNITID FROM tbl LIMIT 10 diff --git a/test/sql/copy/parquet/parquet_fixed_length_blob_dict.test b/test/sql/copy/parquet/parquet_fixed_length_blob_dict.test index 41e64f4ca865..ec0d8ff43068 100644 --- a/test/sql/copy/parquet/parquet_fixed_length_blob_dict.test +++ b/test/sql/copy/parquet/parquet_fixed_length_blob_dict.test @@ -12,7 +12,7 @@ SELECT MIN(classification), MAX(classification), MIN(return_number), MAX(return_number), MIN(number_of_returns), MAX(number_of_returns) -FROM parquet_scan('data/parquet-testing/sorted.zstd_18_131072_small.parquet') +FROM parquet_scan('{DATA_DIR}/parquet-testing/sorted.zstd_18_131072_small.parquet') ---- \x00\xA0e\xFB\xF8|\xF0\xA8_t\x16\x9A \x03,\xDF$)\xF5\x13\x11\x9B\x11k\x10 diff --git a/test/sql/copy/parquet/parquet_glob.test b/test/sql/copy/parquet/parquet_glob.test index 2137aba8358a..eed0d449b379 100644 --- a/test/sql/copy/parquet/parquet_glob.test +++ b/test/sql/copy/parquet/parquet_glob.test @@ -8,60 +8,60 @@ statement ok PRAGMA enable_verification query II -select * from parquet_scan('data/parquet-testing/glob*/t?.parquet') order by i +select * from parquet_scan('{DATA_DIR}/parquet-testing/glob*/t?.parquet') order by i ---- 1 a 2 b 3 c query II -select * from parquet_scan('data/parquet-testing/glob/t[0-9].parquet') order by i +select * from parquet_scan('{DATA_DIR}/parquet-testing/glob/t[0-9].parquet') order by i ---- 1 a 2 b query II -select * from parquet_scan('data/parquet-testing/glob/*') order by i +select * from parquet_scan('{DATA_DIR}/parquet-testing/glob/*') order by i ---- 1 a 2 b query II -select * from parquet_scan('data/parquet-testing/glob/*.parquet') order by i +select * from parquet_scan('{DATA_DIR}/parquet-testing/glob/*.parquet') order by i ---- 1 a 2 b query II 
-select * from parquet_scan('data/parquet-testing/g*/*.parquet') order by i +select * from parquet_scan('{DATA_DIR}/parquet-testing/g*/*.parquet') order by i ---- 1 a 2 b 3 c query II -select * from parquet_scan('data/parquet-testing/g*/t1.parquet') order by i +select * from parquet_scan('{DATA_DIR}/parquet-testing/g*/t1.parquet') order by i ---- 1 a 3 c # abs path query II -select * from parquet_scan('__WORKING_DIRECTORY__/data/parquet-testing/g*/t1.parquet') order by i +select * from parquet_scan('{WORKING_DIR}/data/parquet-testing/g*/t1.parquet') order by i ---- 1 a 3 c # backslashes query II -select * from parquet_scan('data\parquet-testing\g*\t1.parquet') order by i +select * from parquet_scan('{DATA_DIR}\parquet-testing\g*\t1.parquet') order by i ---- 1 a 3 c # Double partial matches query II rowsort -FROM parquet_scan('data/parquet-testing/glob3/*/dir/*.parquet'); +FROM parquet_scan('{DATA_DIR}/parquet-testing/glob3/*/dir/*.parquet'); ---- 1 a 3 c @@ -72,7 +72,7 @@ select count(*) from parquet_scan('') # schema mismatch in parquet glob statement error -select * from parquet_scan('data/parquet-testing/*.parquet') +select * from parquet_scan('{DATA_DIR}/parquet-testing/*.parquet') ---- # parquet glob with COPY FROM @@ -80,7 +80,7 @@ statement ok CREATE TABLE vals (i INTEGER, j BLOB) statement ok -COPY vals FROM 'data/parquet-testing/glob/t?.parquet' (FORMAT PARQUET); +COPY vals FROM '{DATA_DIR}/parquet-testing/glob/t?.parquet' (FORMAT PARQUET); query II SELECT * FROM vals ORDER BY 1 diff --git a/test/sql/copy/parquet/parquet_go.test b/test/sql/copy/parquet/parquet_go.test index 3b40d86e9327..a836cbccefb2 100644 --- a/test/sql/copy/parquet/parquet_go.test +++ b/test/sql/copy/parquet/parquet_go.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification query II -SELECT * FROM 'data/parquet-testing/parquet_go.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/parquet_go.parquet' ---- John Hello World John Hello World diff --git a/test/sql/copy/parquet/parquet_hive.test b/test/sql/copy/parquet/parquet_hive.test index ace06995e42f..09d982227061 100644 --- a/test/sql/copy/parquet/parquet_hive.test +++ b/test/sql/copy/parquet/parquet_hive.test @@ -6,137 +6,137 @@ require parquet # test parsing hive partitioning scheme query IIII -select id, value, part, date from parquet_scan('data/parquet-testing/hive-partitioning/simple/*/*/test.parquet', HIVE_PARTITIONING=1) order by id +select id, value, part, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/simple/*/*/test.parquet', HIVE_PARTITIONING=1) order by id ---- 1 value1 a 2012-01-01 2 value2 b 2013-01-01 # As long as the names match, we don't really mind since everything is a string anyway query IIII -select id, value, part, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) order by id +select id, value, part, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) order by id ---- 1 value1 a 2012-01-01 2 value2 b 2013-01-01 # Filter should work too query II -select id, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2013-01-01'; +select id, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2013-01-01'; ---- 2 2013-01-01 query II -select id, date from
parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2012-01-01'; +select id, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2012-01-01'; ---- 1 2012-01-01 query II -select id, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2018-01-01'; +select id, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2018-01-01'; ---- query IIII -select id, value, part, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where part='a' OR part='b' order by id; +select id, value, part, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where part='a' OR part='b' order by id; ---- 1 value1 a 2012-01-01 2 value2 b 2013-01-01 query II -select id, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2013-01-01' and id = 2; +select id, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2013-01-01' and id = 2; ---- 2 2013-01-01 query II -select id, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2013-01-01' and id = 1; +select id, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2013-01-01' and id = 1; ---- # This query should trigger the file skipping mechanism, which prevents reading metadata for files that are not scanned query III -select id, value, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2012-01-01' and id = 1; +select id, value, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2012-01-01' and id = 1; ---- 1 value1 2012-01-01 query III -select id, value, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2012-01-01' or id <= 2 order by id; +select id, value, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where date = '2012-01-01' or id <= 2 order by id; ---- 1 value1 2012-01-01 2 value2 2013-01-01 # If the key names don't add up, there's nothing we can do statement error -select * from parquet_scan('data/parquet-testing/hive-partitioning/mismatching_names/*/*/test.parquet', HIVE_PARTITIONING=1) +select * from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/mismatching_names/*/*/test.parquet', HIVE_PARTITIONING=1) ---- Hive partition mismatch statement error -select * from parquet_scan('data/parquet-testing/hive-partitioning/mismatching_count/*/*/test.parquet', HIVE_PARTITIONING=1) WHERE part=b +select * from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/mismatching_count/*/*/test.parquet', HIVE_PARTITIONING=1) WHERE part=b ---- Hive partition mismatch statement error -select * from 
parquet_scan('data/parquet-testing/hive-partitioning/mismatching_names/*/*/test.parquet', HIVE_PARTITIONING=1, UNION_BY_NAME=1) +select * from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/mismatching_names/*/*/test.parquet', HIVE_PARTITIONING=1, UNION_BY_NAME=1) ---- Hive partition mismatch # Verify that filters are pushed down into the parquet scan (only files matching the filter are read) query II -EXPLAIN select id, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where date = '2013-01-01'; +EXPLAIN select id, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where date = '2013-01-01'; ---- physical_plan :.*PARQUET_SCAN.*File Filters:.*\(date = '2013.*-01.*-01'\).* query II -EXPLAIN select id, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where date = '2018-01-01'; +EXPLAIN select id, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where date = '2018-01-01'; ---- physical_plan :.*PARQUET_SCAN.*File Filters:.*\(date = '2018.*-01.*-01'\).* # No Parquet Scan Filters should be applied here query II -EXPLAIN select id, value, part, date from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where part='a' OR part='b' order by id; +EXPLAIN select id, value, part, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where part='a' OR part='b' order by id; ---- physical_plan :.*PARQUET_SCAN.*File Filters:.* query II -EXPLAIN select id, date from parquet_scan('data/parquet-testing/hive-partitioning/simple/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where date = '2012-01-01' and id < 10; +EXPLAIN select id, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/simple/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where date = '2012-01-01' and id < 10; ---- physical_plan :.*PARQUET_SCAN.*File Filters:.*\(date = '2012.*-01.*-01'\).* query II -EXPLAIN select id, date from parquet_scan('data/parquet-testing/hive-partitioning/simple/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where date = '2013-01-01' and id < 10; +EXPLAIN select id, date from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/simple/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where date = '2013-01-01' and id < 10; ---- physical_plan :.*PARQUET_SCAN.*File Filters:.*\(date = '2013.*-01.*-01'\).* # Complex filter filtering first file query IIII -select id, value, part, CAST(date AS DATE) as date_cast from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where concat(date_cast::VARCHAR, part) == '2013-01-01b'; +select id, value, part, CAST(date AS DATE) as date_cast from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where concat(date_cast::VARCHAR, part) == '2013-01-01b'; ---- 2 value2 b 2013-01-01 # Complex filter filtering first file, filter should be pruned completely query II -explain select id, value, part, CAST(date AS DATE) as date_cast from
parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where concat(date_cast::VARCHAR, part) == '2013-01-01b'; +explain select id, value, part, CAST(date AS DATE) as date_cast from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where concat(date_cast::VARCHAR, part) == '2013-01-01b'; ---- physical_plan :.*PARQUET_SCAN.*File Filters:.*\(concat\(CAST.*\(CAST.*\(date AS.*DATE\) AS VARCHAR\), part\).*= '2013-01-01b'\).* # Complex filter filtering second file query IIII -select id, value, part, CAST(date AS DATE) as date_cast from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where concat(date_cast::VARCHAR, part) == '2012-01-01a'; +select id, value, part, CAST(date AS DATE) as date_cast from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1) where concat(date_cast::VARCHAR, part) == '2012-01-01a'; ---- 1 value1 a 2012-01-01 # Complex filter filtering second file, filter should be pruned completely query II -explain select id, value, part, CAST(date AS DATE) as date_cast from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where concat(date_cast::VARCHAR, part) == '2012-01-01a'; +explain select id, value, part, CAST(date AS DATE) as date_cast from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where concat(date_cast::VARCHAR, part) == '2012-01-01a'; ---- physical_plan :.*PARQUET_SCAN.*File Filters:.*\(concat\(CAST.*\(CAST.*\(date AS.*DATE\) AS VARCHAR\), part\).*= '2012-01-01a'\).* # Currently, complex filters combining hive columns and regular columns can prevent filter pushdown in some situations # TODO: we want to support filter pushdown here too query II -explain select id, value, part, CAST(date AS DATE) as date_cast from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where (date_cast=CAST('2013-01-01' as DATE) AND (value='value1' OR concat(date_cast::VARCHAR, part) == '2013-01-01b')); +explain select id, value, part, CAST(date AS DATE) as date_cast from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where (date_cast=CAST('2013-01-01' as DATE) AND (value='value1' OR concat(date_cast::VARCHAR, part) == '2013-01-01b')); ---- physical_plan :.*PARQUET_SCAN.*File Filters:.*\(CAST\(date AS DATE\) =.*'2013.*-01-01'::DATE\).* # Idem query II -explain select id, value, part, CAST(date AS DATE) as date_cast from parquet_scan('data/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where (date_cast=CAST('2012-01-01' as DATE) AND (value='value2' OR concat(date_cast::VARCHAR, part) == '2012-01-01a')); +explain select id, value, part, CAST(date AS DATE) as date_cast from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/different_order/*/*/test.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where (date_cast=CAST('2012-01-01' as DATE) AND (value='value2' OR concat(date_cast::VARCHAR, part) == '2012-01-01a')); ---- physical_plan :.*PARQUET_SCAN.*File Filters:.*\(CAST\(date AS DATE\)
=.*'2012.*-01-01'::DATE\).* @@ -144,22 +144,22 @@ physical_plan :.*PARQUET_SCAN.*File Filters:.*\(CAST\(date AS DATE\) =.*' # Without hive partitioning we just read the files; note the mismatch here between the hive partition in the filename and the column in the file query III -SELECT a, b, replace(filename, '\', '/') filename FROM parquet_scan('data/parquet-testing/hive-partitioning/hive_col_also_in_file/*/test.parquet', HIVE_PARTITIONING=0, FILENAME=1) order by filename; +SELECT a, b, parse_path(filename)[-3:] filename FROM parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/hive_col_also_in_file/*/test.parquet', HIVE_PARTITIONING=0, FILENAME=1) order by filename; ---- -1 2 data/parquet-testing/hive-partitioning/hive_col_also_in_file/a=5/test.parquet -3 4 data/parquet-testing/hive-partitioning/hive_col_also_in_file/a=6/test.parquet +1 2 [hive_col_also_in_file, 'a=5', test.parquet] +3 4 [hive_col_also_in_file, 'a=6', test.parquet] # Hive col from path overrides col in file query III -SELECT a, b, replace(filename, '\', '/') filename FROM parquet_scan('data/parquet-testing/hive-partitioning/hive_col_also_in_file/*/test.parquet', HIVE_PARTITIONING=1, FILENAME=1) order by filename; +SELECT a, b, parse_path(filename)[-3:] filename FROM parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/hive_col_also_in_file/*/test.parquet', HIVE_PARTITIONING=1, FILENAME=1) order by filename; ---- -5 2 data/parquet-testing/hive-partitioning/hive_col_also_in_file/a=5/test.parquet -6 4 data/parquet-testing/hive-partitioning/hive_col_also_in_file/a=6/test.parquet +5 2 [hive_col_also_in_file, 'a=5', test.parquet] +6 4 [hive_col_also_in_file, 'a=6', test.parquet] # Test handling missing files query IIII select id, value, part, date -from parquet_scan('data/parquet-testing/hive-partitioning/missing/*/*/test.parquet', HIVE_PARTITIONING=1) +from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/missing/*/*/test.parquet', HIVE_PARTITIONING=1) order by id ---- 3 value3 c 2014-01-01 @@ -178,19 +178,19 @@ insert into t1 (select range, ${i}*10, ${i}*100 from range(0,10)); endloop statement ok -COPY (SELECT * FROM t1) TO '__TEST_DIR__/hive_filters' (FORMAT PARQUET, PARTITION_BY c); +COPY (SELECT * FROM t1) TO '{TEMP_DIR}/hive_filters' (FORMAT PARQUET, PARTITION_BY c); statement ok -COPY (SELECT * FROM t1) TO '__TEST_DIR__/hive_filters_2' (FORMAT PARQUET, PARTITION_BY (c, b)); +COPY (SELECT * FROM t1) TO '{TEMP_DIR}/hive_filters_2' (FORMAT PARQUET, PARTITION_BY (c, b)); # There should be both table filters (a < 4) and file filters (c = 500) query II -EXPLAIN select a from parquet_scan('__TEST_DIR__/hive_filters/*/*.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where c::INT=500 and a::INT < 4; +EXPLAIN select a from parquet_scan('{TEMP_DIR}/hive_filters/*/*.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where c::INT=500 and a::INT < 4; ---- physical_plan :.*PARQUET_SCAN.*Filters:.*a<4.*File Filters:.*\(CAST\(c AS.*INTEGER\) = 500\).* # unsatisfiable file filters also show up query II -EXPLAIN select a from parquet_scan('__TEST_DIR__/hive_filters_2/*/*/*.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where c::INT > 500 and c::INT < 500; +EXPLAIN select a from parquet_scan('{TEMP_DIR}/hive_filters_2/*/*/*.parquet', HIVE_PARTITIONING=1, HIVE_TYPES_AUTOCAST=0) where c::INT > 500 and c::INT < 500; ---- physical_plan :.*PARQUET_SCAN.*File Filters:.*\(CAST\(c AS.*INTEGER\).*BETWEEN.*500 AND 500\).*
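# --- Editor's illustration (not part of the diff): a minimal sketch of the hive
# round trip the hunks above exercise, using a hypothetical {TEMP_DIR}/hive_sketch
# directory. COPY ... PARTITION_BY writes one directory level per partition column
# (e.g. part=a/), and scanning the glob with HIVE_PARTITIONING=1 turns those
# directory names back into columns, so equality filters on them can prune whole
# files and surface in EXPLAIN as "File Filters".
statement ok
COPY (SELECT 1 AS id, 'a' AS part) TO '{TEMP_DIR}/hive_sketch' (FORMAT PARQUET, PARTITION_BY part);

query II
SELECT id, part FROM parquet_scan('{TEMP_DIR}/hive_sketch/*/*.parquet', HIVE_PARTITIONING=1) WHERE part = 'a';
----
1 a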
diff --git a/test/sql/copy/parquet/parquet_hive_empty.test b/test/sql/copy/parquet/parquet_hive_empty.test index 4e7f0d2fc999..6199e547a8e9 100644 --- a/test/sql/copy/parquet/parquet_hive_empty.test +++ b/test/sql/copy/parquet/parquet_hive_empty.test @@ -6,7 +6,7 @@ require parquet query II select * -from parquet_scan('data/parquet-testing/hive-partitioning/empty_string/*/*.parquet') +from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/empty_string/*/*.parquet') ORDER BY ALL ---- a a @@ -16,7 +16,7 @@ c NULL # filter on hive partitioning with NULL values query II select * -from parquet_scan('data/parquet-testing/hive-partitioning/empty_string/*/*.parquet') +from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/empty_string/*/*.parquet') WHERE key IS NULL ---- c NULL @@ -24,14 +24,14 @@ c NULL query II select * -from parquet_scan('data/parquet-testing/hive-partitioning/empty_string/*/*.parquet') +from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/empty_string/*/*.parquet') WHERE key='a' ---- a a query II select * -from parquet_scan('data/parquet-testing/hive-partitioning/empty_string/*/*.parquet') +from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/empty_string/*/*.parquet') WHERE key='' ---- b (empty) diff --git a/test/sql/copy/parquet/parquet_list.test b/test/sql/copy/parquet/parquet_list.test index eea5fd9e5613..c9763221b4c4 100644 --- a/test/sql/copy/parquet/parquet_list.test +++ b/test/sql/copy/parquet/parquet_list.test @@ -9,25 +9,25 @@ PRAGMA enable_verification # standard list syntax query I -select count(*) from parquet_scan(['data/parquet-testing/glob/t1.parquet', 'data/parquet-testing/glob/t2.parquet']); +select count(*) from parquet_scan(['{DATA_DIR}/parquet-testing/glob/t1.parquet', '{DATA_DIR}/parquet-testing/glob/t2.parquet']); ---- 2 # glob inside a list query I -select count(*) from parquet_scan(['data/parquet-testing/glob/*.parquet', 'data/parquet-testing/glob/t2.parquet']); +select count(*) from parquet_scan(['{DATA_DIR}/parquet-testing/glob/*.parquet', '{DATA_DIR}/parquet-testing/glob/t2.parquet']); ---- 3 # read the same file multiple times query I -select count(*) from parquet_scan(['data/parquet-testing/glob/t1.parquet', 'data/parquet-testing/glob/t1.parquet', 'data/parquet-testing/glob/t1.parquet', 'data/parquet-testing/glob/t1.parquet', 'data/parquet-testing/glob/t1.parquet']); +select count(*) from parquet_scan(['{DATA_DIR}/parquet-testing/glob/t1.parquet', '{DATA_DIR}/parquet-testing/glob/t1.parquet', '{DATA_DIR}/parquet-testing/glob/t1.parquet', '{DATA_DIR}/parquet-testing/glob/t1.parquet', '{DATA_DIR}/parquet-testing/glob/t1.parquet']); ---- 5 # file does not exist statement error -select count(*) from parquet_scan(['data/parquet-testing/glob/t1.parquet', 'data/parquet-testing/glob/t2.parquet', 'this/file/doesnot/exist/hopefully.parquet']); +select count(*) from parquet_scan(['{DATA_DIR}/parquet-testing/glob/t1.parquet', '{DATA_DIR}/parquet-testing/glob/t2.parquet', 'this/file/doesnot/exist/hopefully.parquet']); ---- # empty list diff --git a/test/sql/copy/parquet/parquet_metadata.test b/test/sql/copy/parquet/parquet_metadata.test index 2c21b0851296..97f2f516d60c 100644 --- a/test/sql/copy/parquet/parquet_metadata.test +++ b/test/sql/copy/parquet/parquet_metadata.test @@ -5,40 +5,40 @@ require parquet statement ok -SELECT * FROM parquet_metadata('data/parquet-testing/lineitem-top10000.gzip.parquet'); +SELECT * FROM parquet_metadata('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet'); statement ok -SELECT * FROM
parquet_schema('data/parquet-testing/lineitem-top10000.gzip.parquet'); +SELECT * FROM parquet_schema('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet'); query I -SELECT COUNT(*) > 0 FROM parquet_metadata('data/parquet-testing/lineitem-top10000.gzip.parquet'); +SELECT COUNT(*) > 0 FROM parquet_metadata('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet'); ---- true query I -SELECT COUNT(*) > 0 FROM parquet_schema('data/parquet-testing/lineitem-top10000.gzip.parquet'); +SELECT COUNT(*) > 0 FROM parquet_schema('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet'); ---- true statement ok -select * from parquet_schema('data/parquet-testing/decimal/decimal_dc.parquet'); +select * from parquet_schema('{DATA_DIR}/parquet-testing/decimal/decimal_dc.parquet'); statement ok -select * from parquet_schema('data/parquet-testing/decimal/int64_decimal.parquet'); +select * from parquet_schema('{DATA_DIR}/parquet-testing/decimal/int64_decimal.parquet'); # with globs statement ok -select * from parquet_metadata('data/parquet-testing/glob/*.parquet'); +select * from parquet_metadata('{DATA_DIR}/parquet-testing/glob/*.parquet'); statement ok -select * from parquet_schema('data/parquet-testing/glob/*.parquet'); +select * from parquet_schema('{DATA_DIR}/parquet-testing/glob/*.parquet'); # list parameters statement ok -select * from parquet_schema(['data/parquet-testing/decimal/int64_decimal.parquet', 'data/parquet-testing/decimal/int64_decimal.parquet']); +select * from parquet_schema(['{DATA_DIR}/parquet-testing/decimal/int64_decimal.parquet', '{DATA_DIR}/parquet-testing/decimal/int64_decimal.parquet']); query III -SELECT name, type, duckdb_type FROM parquet_schema('data/parquet-testing/lineitem-top10000.gzip.parquet') WHERE type IS NOT NULL; +SELECT name, type, duckdb_type FROM parquet_schema('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet') WHERE type IS NOT NULL; ---- l_orderkey INT64 BIGINT l_partkey INT64 BIGINT diff --git a/test/sql/copy/parquet/parquet_metadata_cache.test b/test/sql/copy/parquet/parquet_metadata_cache.test index 6945ec3a252b..d12b050aea00 100644 --- a/test/sql/copy/parquet/parquet_metadata_cache.test +++ b/test/sql/copy/parquet/parquet_metadata_cache.test @@ -9,12 +9,12 @@ SET parquet_metadata_cache=true; # test the cached file query II -select * from parquet_scan('data/parquet-testing/cache/cache1.parquet') +select * from parquet_scan('{DATA_DIR}/parquet-testing/cache/cache1.parquet') ---- 1 hello query II -select * from parquet_scan('data/parquet-testing/cache/cache1.parquet') +select * from parquet_scan('{DATA_DIR}/parquet-testing/cache/cache1.parquet') ---- 1 hello @@ -24,24 +24,24 @@ require vector_size 64 # copy cache1.parquet to cached.parquet statement ok COPY ( - SELECT * FROM parquet_scan('data/parquet-testing/cache/cache1.parquet') + SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/cache/cache1.parquet') ) -TO '__TEST_DIR__/cached.parquet' (FORMAT 'parquet') +TO '{TEMP_DIR}/cached.parquet' (FORMAT 'parquet') query II -select * from parquet_scan('__TEST_DIR__/cached.parquet') +select * from parquet_scan('{TEMP_DIR}/cached.parquet') ---- 1 hello # copy cache2.parquet to cached.parquet statement ok COPY ( - SELECT * FROM parquet_scan('data/parquet-testing/cache/cache2.parquet') + SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/cache/cache2.parquet') ) -TO '__TEST_DIR__/cached.parquet' (FORMAT 'parquet') +TO '{TEMP_DIR}/cached.parquet' (FORMAT 'parquet') query II -select * from parquet_scan('__TEST_DIR__/cached.parquet') +select * 
from parquet_scan('{TEMP_DIR}/cached.parquet') ---- 0 10 1 20 @@ -50,17 +50,17 @@ select * from parquet_scan('__TEST_DIR__/cached.parquet') # test two files with the same name on different directories query II -select * from parquet_scan('data/parquet-testing/glob/t1.parquet') +select * from parquet_scan('{DATA_DIR}/parquet-testing/glob/t1.parquet') ---- 1 a query II -select * from parquet_scan('data/parquet-testing/glob2/t1.parquet') +select * from parquet_scan('{DATA_DIR}/parquet-testing/glob2/t1.parquet') ---- 3 c # test using * query I -select count(*) from parquet_scan('data/parquet-testing/glob/*.parquet') +select count(*) from parquet_scan('{DATA_DIR}/parquet-testing/glob/*.parquet') ---- 2 diff --git a/test/sql/copy/parquet/parquet_nan.test b/test/sql/copy/parquet/parquet_nan.test index 882114d3d0e4..8a9b5925e2ed 100644 --- a/test/sql/copy/parquet/parquet_nan.test +++ b/test/sql/copy/parquet/parquet_nan.test @@ -5,28 +5,28 @@ require parquet query TTT -select * from parquet_scan('data/parquet-testing/nan-float.parquet') order by 1 +select * from parquet_scan('{DATA_DIR}/parquet-testing/nan-float.parquet') order by 1 ---- -1 foo 1 2.5 baz 1 inf bar 0 query II -select * from parquet_scan('data/parquet-testing/arrow_nan.parquet', can_have_nan := true) where f='nan'; +select * from parquet_scan('{DATA_DIR}/parquet-testing/arrow_nan.parquet', can_have_nan := true) where f='nan'; ---- nan nan query II -select * from parquet_scan('data/parquet-testing/arrow_nan.parquet', can_have_nan := true) where f>10; +select * from parquet_scan('{DATA_DIR}/parquet-testing/arrow_nan.parquet', can_have_nan := true) where f>10; ---- nan nan query II -select * from parquet_scan('data/parquet-testing/arrow_nan.parquet', can_have_nan := true) where d='nan'; +select * from parquet_scan('{DATA_DIR}/parquet-testing/arrow_nan.parquet', can_have_nan := true) where d='nan'; ---- nan nan query II -select * from parquet_scan('data/parquet-testing/arrow_nan.parquet', can_have_nan := true) where d>10; +select * from parquet_scan('{DATA_DIR}/parquet-testing/arrow_nan.parquet', can_have_nan := true) where d>10; ---- nan nan diff --git a/test/sql/copy/parquet/parquet_nullbyte.test b/test/sql/copy/parquet/parquet_nullbyte.test index 2c874d018fee..30d97354f643 100644 --- a/test/sql/copy/parquet/parquet_nullbyte.test +++ b/test/sql/copy/parquet/parquet_nullbyte.test @@ -8,12 +8,12 @@ statement ok PRAGMA enable_verification query II -select * from parquet_scan('data/parquet-testing/nullbyte.parquet') +select * from parquet_scan('{DATA_DIR}/parquet-testing/nullbyte.parquet') ---- 42 hello\0world query II -select * from parquet_scan('data/parquet-testing/nullbyte_multiple.parquet') +select * from parquet_scan('{DATA_DIR}/parquet-testing/nullbyte_multiple.parquet') ---- 1 hello\0world 2 this is a long\0string diff --git a/test/sql/copy/parquet/parquet_pandas_date.test b/test/sql/copy/parquet/parquet_pandas_date.test index 25f33d98c430..44e372743a9d 100644 --- a/test/sql/copy/parquet/parquet_pandas_date.test +++ b/test/sql/copy/parquet/parquet_pandas_date.test @@ -5,7 +5,7 @@ require parquet query T -select * from parquet_scan('data/parquet-testing/pandas-date.parquet') order by 1 +select * from parquet_scan('{DATA_DIR}/parquet-testing/pandas-date.parquet') order by 1 ---- 1921-12-24 2021-01-12
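# --- Editor's illustration (not part of the diff): why parquet_nan.test above
# passes can_have_nan := true. DuckDB orders NaN above every other floating-point
# value, so a predicate such as f > 10 must still return the NaN rows; and since
# many Parquet writers leave NaN out of the row-group min/max statistics, the
# flag keeps the scan from pruning row groups whose stats would otherwise rule
# the filter out. A minimal sketch of the ordering assumption:
query I
SELECT 'nan'::FLOAT > 1e30;
----
true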
diff --git a/test/sql/copy/parquet/parquet_row_number.test b/test/sql/copy/parquet/parquet_row_number.test index 2d1c122886aa..dc345476a204 100644 --- a/test/sql/copy/parquet/parquet_row_number.test +++ b/test/sql/copy/parquet/parquet_row_number.test @@ -9,28 +9,28 @@ PRAGMA enable_verification require parquet query IIII -select min(file_row_number), max(file_row_number), avg(file_row_number), count(*) from parquet_scan('data/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1); +select min(file_row_number), max(file_row_number), avg(file_row_number), count(*) from parquet_scan('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1); ---- 0 9999 4999.5 10000 query I -select l_orderkey from parquet_scan('data/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1) where file_row_number=42; +select l_orderkey from parquet_scan('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1) where file_row_number=42; ---- 35 query III -select sum(row_number_lag), min(row_number_lag), max(row_number_lag) from (select file_row_number - LAG(file_row_number) OVER (ORDER BY file_row_number) as row_number_lag from parquet_scan('data/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1)); +select sum(row_number_lag), min(row_number_lag), max(row_number_lag) from (select file_row_number - LAG(file_row_number) OVER (ORDER BY file_row_number) as row_number_lag from parquet_scan('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1)); ---- 9999 1 1 query IIII -select min(file_row_number), max(file_row_number), avg(file_row_number), count(*) from parquet_scan('data/parquet-testing/manyrowgroups.parquet', file_row_number=1); +select min(file_row_number), max(file_row_number), avg(file_row_number), count(*) from parquet_scan('{DATA_DIR}/parquet-testing/manyrowgroups.parquet', file_row_number=1); ---- 0 1000 500.0 1001 query III -select sum(row_number_lag), min(row_number_lag), max(row_number_lag) from (select file_row_number - LAG(file_row_number) OVER (ORDER BY file_row_number) as row_number_lag from parquet_scan('data/parquet-testing/manyrowgroups.parquet', file_row_number=1)); +select sum(row_number_lag), min(row_number_lag), max(row_number_lag) from (select file_row_number - LAG(file_row_number) OVER (ORDER BY file_row_number) as row_number_lag from parquet_scan('{DATA_DIR}/parquet-testing/manyrowgroups.parquet', file_row_number=1)); ---- 1000 1 1 @@ -41,7 +41,7 @@ PRAGMA disable_verification # let's eliminate some row groups using parquet stats and see what happens query IIII -select row_group_id, row_group_num_rows, stats_min, stats_max from parquet_metadata('data/parquet-testing/file_row_number.parquet') order by row_group_id +select row_group_id, row_group_num_rows, stats_min, stats_max from parquet_metadata('{DATA_DIR}/parquet-testing/file_row_number.parquet') order by row_group_id ---- 0 1000 0 999 1 1000 1000 1999 @@ -55,19 +55,19 @@ select row_group_id, row_group_num_rows, stats_min, stats_max from parquet_metad 9 1000 9000 9999 query I -select stats(seq) from parquet_scan('data/parquet-testing/file_row_number.parquet') limit 1 +select stats(seq) from parquet_scan('{DATA_DIR}/parquet-testing/file_row_number.parquet') limit 1 ---- [Min: 0, Max: 9999][Has Null: false, Has No Null: true] query III -select min(file_row_number), max(file_row_number), count(*) from parquet_scan('data/parquet-testing/file_row_number.parquet', file_row_number=1) where seq > 6500; +select min(file_row_number), max(file_row_number), count(*) from parquet_scan('{DATA_DIR}/parquet-testing/file_row_number.parquet', file_row_number=1) where seq > 6500; ---- 6501 9999 3499
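# --- Editor's illustration (not part of the diff): what the seq > 6500 test above
# verifies. parquet_metadata exposes per-row-group zonemaps (stats_min/stats_max,
# listed a few hunks up as 0-999, 1000-1999, ...), so the scan can skip every row
# group whose stats_max falls below the predicate; the remaining rows of the first
# surviving group are then filtered row by row, which is why min(file_row_number)
# comes out as 6501 rather than 0. A hypothetical way to list the surviving groups
# (assuming the VARCHAR stats columns cast cleanly for this file):
statement ok
SELECT row_group_id FROM parquet_metadata('{DATA_DIR}/parquet-testing/file_row_number.parquet') WHERE stats_max::BIGINT > 6500;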
# stats tests query I -select first(stats(file_row_number)) from parquet_scan('data/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1); +select first(stats(file_row_number)) from parquet_scan('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1); ---- [Min: 0, Max: 10000][Has Null: false, Has No Null: true] @@ -75,16 +75,16 @@ statement ok PRAGMA explain_output = OPTIMIZED_ONLY; query II nosort empty_result -explain select 1 from parquet_scan('data/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1) where file_row_number > 10000; +explain select 1 from parquet_scan('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1) where file_row_number > 10000; ---- logical_opt :.*EMPTY_RESULT.* query II nosort empty_result -explain select 1 from parquet_scan('data/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1) where file_row_number < 0; +explain select 1 from parquet_scan('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1) where file_row_number < 0; ---- logical_opt :.*EMPTY_RESULT.* query II nosort empty_result -explain select 1 from parquet_scan('data/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1) where file_row_number is null; +explain select 1 from parquet_scan('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet', file_row_number=1) where file_row_number is null; ---- logical_opt :.*EMPTY_RESULT.* diff --git a/test/sql/copy/parquet/parquet_silly.test b/test/sql/copy/parquet/parquet_silly.test index f01a1e1d097c..7595c8327411 100644 --- a/test/sql/copy/parquet/parquet_silly.test +++ b/test/sql/copy/parquet/parquet_silly.test @@ -5,7 +5,7 @@ require parquet query TTT -select "önë", "C1", "🦆" from parquet_scan('data/parquet-testing/silly-names.parquet') order by 1 +select "önë", "C1", "🦆" from parquet_scan('{DATA_DIR}/parquet-testing/silly-names.parquet') order by 1 ---- 1 foo 1 2 bar 0 diff --git a/test/sql/copy/parquet/parquet_stats.test b/test/sql/copy/parquet/parquet_stats.test index a73a66482af3..3bd6d9b0c3df 100644 --- a/test/sql/copy/parquet/parquet_stats.test +++ b/test/sql/copy/parquet/parquet_stats.test @@ -6,13 +6,13 @@ require parquet # boolean values query IIII -select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('data/parquet-testing/boolean_stats.parquet'); +select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('{DATA_DIR}/parquet-testing/boolean_stats.parquet'); ---- false true false true # signed numbers query IIII -select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('data/parquet-testing/signed_stats.parquet'); +select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('{DATA_DIR}/parquet-testing/signed_stats.parquet'); ---- -128 127 -128 127 -32768 32767 -32768 32767 @@ -20,14 +20,14 @@ select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metad -9223372036854775808 9223372036854775807 -9223372036854775808 9223372036854775807 query IIII -select * from 'data/parquet-testing/signed_stats.parquet'; +select * from '{DATA_DIR}/parquet-testing/signed_stats.parquet'; ---- -128 -32768 -2147483648 -9223372036854775808 127 32767 2147483647 9223372036854775807 # unsigned numbers query IIII -select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('data/parquet-testing/unsigned_stats.parquet'); +select stats_min, stats_max, stats_min_value, stats_max_value from
parquet_metadata('{DATA_DIR}/parquet-testing/unsigned_stats.parquet'); ---- NULL NULL 0 255 NULL NULL 0 65535 @@ -35,14 +35,14 @@ NULL NULL 0 65535 NULL NULL 0 18446744073709551615 query IIII -select * from 'data/parquet-testing/unsigned_stats.parquet'; +select * from '{DATA_DIR}/parquet-testing/unsigned_stats.parquet'; ---- 0 0 0 0 255 65535 4294967295 18446744073709551615 # dates/times/timestamps query IIII -select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('data/parquet-testing/date_stats.parquet'); +select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('{DATA_DIR}/parquet-testing/date_stats.parquet'); ---- 1900-01-01 2030-12-31 1900-01-01 2030-12-31 00:00:00+00 23:59:59+00 00:00:00+00 23:59:59+00 @@ -52,33 +52,33 @@ select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metad 1900-01-01 00:00:00 2030-12-31 23:59:59 1900-01-01 00:00:00 2030-12-31 23:59:59 query IIIIII -select * from 'data/parquet-testing/date_stats.parquet'; +select * from '{DATA_DIR}/parquet-testing/date_stats.parquet'; ---- 1900-01-01 00:00:00+00 1990-01-01 00:00:00 1900-01-01 00:00:00 1900-01-01 00:00:00 1900-01-01 00:00:00 2030-12-31 23:59:59+00 2030-12-31 23:59:59 2030-12-31 23:59:59 2030-12-31 23:59:59 2030-12-31 23:59:59 # varchar/blob stats query IIII -select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('data/parquet-testing/varchar_stats.parquet'); +select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('{DATA_DIR}/parquet-testing/varchar_stats.parquet'); ---- NULL NULL hello world world hello NULL NULL hello\x00world world\x00hello # should be the same as computing min/max over these columns query IIII -select min(str_val), max(str_val), min("hello\x00world"), max("hello\x00world") from 'data/parquet-testing/varchar_stats.parquet'; +select min(str_val), max(str_val), min("hello\x00world"), max("hello\x00world") from '{DATA_DIR}/parquet-testing/varchar_stats.parquet'; ---- hello world world hello hello\x00world world\x00hello query II -select * from 'data/parquet-testing/varchar_stats.parquet'; +select * from '{DATA_DIR}/parquet-testing/varchar_stats.parquet'; ---- hello world hello\x00world world hello world\x00hello # decimal stats query IIII -select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('data/parquet-testing/decimal_stats.parquet'); +select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('{DATA_DIR}/parquet-testing/decimal_stats.parquet'); ---- -999.9 999.9 -999.9 999.9 -999999.999 999999.999 -999999.999 999999.999 @@ -86,19 +86,19 @@ select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metad -999999999999999999999999999999999.99999 999999999999999999999999999999999.99999 -999999999999999999999999999999999.99999 999999999999999999999999999999999.99999 query IIII -select * from 'data/parquet-testing/decimal_stats.parquet'; +select * from '{DATA_DIR}/parquet-testing/decimal_stats.parquet'; ---- -999.9 -999999.999 -9999999999999.99999 -999999999999999999999999999999999.99999 999.9 999999.999 9999999999999.99999 999999999999999999999999999999999.99999 # int32 decimal stats query IIII -select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('data/parquet-testing/arrow/int32_decimal.parquet'); +select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('{DATA_DIR}/parquet-testing/arrow/int32_decimal.parquet'); ---- 1.00 24.00 NULL 
NULL query I -SELECT * FROM 'data/parquet-testing/arrow/int32_decimal.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/arrow/int32_decimal.parquet' ---- 1.00 2.00 @@ -127,12 +127,12 @@ SELECT * FROM 'data/parquet-testing/arrow/int32_decimal.parquet' # int64 decimal stats query IIII -select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('data/parquet-testing/arrow/int64_decimal.parquet'); +select stats_min, stats_max, stats_min_value, stats_max_value from parquet_metadata('{DATA_DIR}/parquet-testing/arrow/int64_decimal.parquet'); ---- 1.00 24.00 NULL NULL query I -SELECT * FROM 'data/parquet-testing/arrow/int64_decimal.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/arrow/int64_decimal.parquet' ---- 1.00 2.00 @@ -161,7 +161,7 @@ SELECT * FROM 'data/parquet-testing/arrow/int64_decimal.parquet' # data-types stats query IIII -SELECT stats_min, stats_max, stats_min_value, stats_max_value FROM parquet_metadata('data/parquet-testing/data-types.parquet') +SELECT stats_min, stats_max, stats_min_value, stats_max_value FROM parquet_metadata('{DATA_DIR}/parquet-testing/data-types.parquet') ---- -127 127 -127 127 -32767 32767 -32767 32767 @@ -177,7 +177,7 @@ false true false true 2020-01-10 2020-01-10 2020-01-10 2020-01-10 query IIIIIIIIIIII -SELECT * FROM 'data/parquet-testing/data-types.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/data-types.parquet' ---- NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 42 43 44 45 4.600000 4.700000 4.80 49 50 True 2019-11-26 20:11:42.501 2020-01-10 @@ -186,7 +186,7 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL # parquet stats for all parquet files -foreach parquet_file data/parquet-testing/manyrowgroups.parquet data/parquet-testing/map.parquet data/parquet-testing/arrow/int32_decimal.parquet data/parquet-testing/arrow/nonnullable.impala.parquet data/parquet-testing/bug687_nulls.parquet data/parquet-testing/bug1554.parquet data/parquet-testing/apkwan.parquet data/parquet-testing/arrow/nested_lists.snappy.parquet data/parquet-testing/arrow/nulls.snappy.parquet data/parquet-testing/nan-float.parquet data/parquet-testing/manyrowgroups2.parquet data/parquet-testing/struct.parquet data/parquet-testing/arrow/list_columns.parquet data/parquet-testing/timestamp-ms.parquet data/parquet-testing/arrow/alltypes_dictionary.parquet data/parquet-testing/arrow/binary.parquet data/parquet-testing/arrow/nation.dict-malformed.parquet data/parquet-testing/lineitem-top10000.gzip.parquet data/parquet-testing/arrow/nested_maps.snappy.parquet data/parquet-testing/arrow/dict-page-offset-zero.parquet data/parquet-testing/silly-names.parquet data/parquet-testing/zstd.parquet data/parquet-testing/bug1618_struct_strings.parquet data/parquet-testing/arrow/single_nan.parquet data/parquet-testing/arrow/int64_decimal.parquet data/parquet-testing/filter_bug1391.parquet data/parquet-testing/arrow/fixed_length_decimal_legacy.parquet data/parquet-testing/timestamp.parquet data/parquet-testing/arrow/fixed_length_decimal.parquet data/parquet-testing/leftdate3_192_loop_1.parquet data/parquet-testing/blob.parquet data/parquet-testing/bug1588.parquet data/parquet-testing/bug1589.parquet data/parquet-testing/arrow/alltypes_plain.parquet data/parquet-testing/arrow/repeated_no_annotation.parquet data/parquet-testing/data-types.parquet data/parquet-testing/unsigned.parquet data/parquet-testing/pandas-date.parquet data/parquet-testing/date.parquet 
data/parquet-testing/arrow/nullable.impala.parquet data/parquet-testing/fixed.parquet data/parquet-testing/arrow/alltypes_plain.snappy.parquet data/parquet-testing/decimal/int32_decimal.parquet data/parquet-testing/decimal/pandas_decimal.parquet data/parquet-testing/decimal/decimal_dc.parquet data/parquet-testing/decimal/int64_decimal.parquet data/parquet-testing/decimal/fixed_length_decimal_legacy.parquet data/parquet-testing/decimal/fixed_length_decimal.parquet data/parquet-testing/glob2/t1.parquet data/parquet-testing/cache/cache1.parquet data/parquet-testing/cache/cache2.parquet data/parquet-testing/glob/t2.parquet data/parquet-testing/glob/t1.parquet data/parquet-testing/bug2557.parquet +foreach parquet_file {DATA_DIR}/parquet-testing/manyrowgroups.parquet {DATA_DIR}/parquet-testing/map.parquet {DATA_DIR}/parquet-testing/arrow/int32_decimal.parquet {DATA_DIR}/parquet-testing/arrow/nonnullable.impala.parquet {DATA_DIR}/parquet-testing/bug687_nulls.parquet {DATA_DIR}/parquet-testing/bug1554.parquet {DATA_DIR}/parquet-testing/apkwan.parquet {DATA_DIR}/parquet-testing/arrow/nested_lists.snappy.parquet {DATA_DIR}/parquet-testing/arrow/nulls.snappy.parquet {DATA_DIR}/parquet-testing/nan-float.parquet {DATA_DIR}/parquet-testing/manyrowgroups2.parquet {DATA_DIR}/parquet-testing/struct.parquet {DATA_DIR}/parquet-testing/arrow/list_columns.parquet {DATA_DIR}/parquet-testing/timestamp-ms.parquet {DATA_DIR}/parquet-testing/arrow/alltypes_dictionary.parquet {DATA_DIR}/parquet-testing/arrow/binary.parquet {DATA_DIR}/parquet-testing/arrow/nation.dict-malformed.parquet {DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet {DATA_DIR}/parquet-testing/arrow/nested_maps.snappy.parquet {DATA_DIR}/parquet-testing/arrow/dict-page-offset-zero.parquet {DATA_DIR}/parquet-testing/silly-names.parquet {DATA_DIR}/parquet-testing/zstd.parquet {DATA_DIR}/parquet-testing/bug1618_struct_strings.parquet {DATA_DIR}/parquet-testing/arrow/single_nan.parquet {DATA_DIR}/parquet-testing/arrow/int64_decimal.parquet {DATA_DIR}/parquet-testing/filter_bug1391.parquet {DATA_DIR}/parquet-testing/arrow/fixed_length_decimal_legacy.parquet {DATA_DIR}/parquet-testing/timestamp.parquet {DATA_DIR}/parquet-testing/arrow/fixed_length_decimal.parquet {DATA_DIR}/parquet-testing/leftdate3_192_loop_1.parquet {DATA_DIR}/parquet-testing/blob.parquet {DATA_DIR}/parquet-testing/bug1588.parquet {DATA_DIR}/parquet-testing/bug1589.parquet {DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet {DATA_DIR}/parquet-testing/arrow/repeated_no_annotation.parquet {DATA_DIR}/parquet-testing/data-types.parquet {DATA_DIR}/parquet-testing/unsigned.parquet {DATA_DIR}/parquet-testing/pandas-date.parquet {DATA_DIR}/parquet-testing/date.parquet {DATA_DIR}/parquet-testing/arrow/nullable.impala.parquet {DATA_DIR}/parquet-testing/fixed.parquet {DATA_DIR}/parquet-testing/arrow/alltypes_plain.snappy.parquet {DATA_DIR}/parquet-testing/decimal/int32_decimal.parquet {DATA_DIR}/parquet-testing/decimal/pandas_decimal.parquet {DATA_DIR}/parquet-testing/decimal/decimal_dc.parquet {DATA_DIR}/parquet-testing/decimal/int64_decimal.parquet {DATA_DIR}/parquet-testing/decimal/fixed_length_decimal_legacy.parquet {DATA_DIR}/parquet-testing/decimal/fixed_length_decimal.parquet {DATA_DIR}/parquet-testing/glob2/t1.parquet {DATA_DIR}/parquet-testing/cache/cache1.parquet {DATA_DIR}/parquet-testing/cache/cache2.parquet {DATA_DIR}/parquet-testing/glob/t2.parquet {DATA_DIR}/parquet-testing/glob/t1.parquet {DATA_DIR}/parquet-testing/bug2557.parquet statement ok select * from 
parquet_metadata('${parquet_file}'); @@ -195,25 +195,25 @@ endloop # internal issue 2037 statement ok -copy (select '' i) to '__TEST_DIR__/test.parquet'; +copy (select '' i) to '{TEMP_DIR}/test.parquet'; query I -select i is null c0 from '__TEST_DIR__/test.parquet'; +select i is null c0 from '{TEMP_DIR}/test.parquet'; ---- false query II -select stats_min_value is null c0, stats_max_value is null c1 from parquet_metadata('__TEST_DIR__/test.parquet'); +select stats_min_value is null c0, stats_max_value is null c1 from parquet_metadata('{TEMP_DIR}/test.parquet'); ---- false false query II -select row_group_bytes, row_group_compressed_bytes from parquet_metadata('__TEST_DIR__/test.parquet'); +select row_group_bytes, row_group_compressed_bytes from parquet_metadata('{TEMP_DIR}/test.parquet'); ---- 27 1 query II -select row_group_bytes, row_group_compressed_bytes from parquet_metadata('data/parquet-testing/varchar_stats.parquet'); +select row_group_bytes, row_group_compressed_bytes from parquet_metadata('{DATA_DIR}/parquet-testing/varchar_stats.parquet'); ---- 200 1 200 1 diff --git a/test/sql/copy/parquet/parquet_virtual_columns.test b/test/sql/copy/parquet/parquet_virtual_columns.test index 5a77fa5d5cc3..db1b02b8cd1d 100644 --- a/test/sql/copy/parquet/parquet_virtual_columns.test +++ b/test/sql/copy/parquet/parquet_virtual_columns.test @@ -6,12 +6,12 @@ require parquet # file_index query I -SELECT file_index FROM 'data/parquet-testing/glob/t1.parquet' +SELECT file_index FROM '{DATA_DIR}/parquet-testing/glob/t1.parquet' ---- 0 query III -SELECT file_index, i, j FROM read_parquet(['data/parquet-testing/glob/t1.parquet', 'data/parquet-testing/glob/t2.parquet', 'data/parquet-testing/glob2/t1.parquet']) +SELECT file_index, i, j FROM read_parquet(['{DATA_DIR}/parquet-testing/glob/t1.parquet', '{DATA_DIR}/parquet-testing/glob/t2.parquet', '{DATA_DIR}/parquet-testing/glob2/t1.parquet']) ---- 0 1 a 1 2 b @@ -19,25 +19,25 @@ SELECT file_index, i, j FROM read_parquet(['data/parquet-testing/glob/t1.parquet query III SELECT file_index, i, j -FROM read_parquet(['data/parquet-testing/glob/t1.parquet', 'data/parquet-testing/glob/t2.parquet', 'data/parquet-testing/glob2/t1.parquet']) +FROM read_parquet(['{DATA_DIR}/parquet-testing/glob/t1.parquet', '{DATA_DIR}/parquet-testing/glob/t2.parquet', '{DATA_DIR}/parquet-testing/glob2/t1.parquet']) WHERE file_index=1 ---- 1 2 b # Filename without the filename option statement ok -select filename from 'data/parquet-testing/glob/t1.parquet' +select filename from '{DATA_DIR}/parquet-testing/glob/t1.parquet' query III -select i, j, replace(filename, '\', '/') from 'data/parquet-testing/glob*/t?.parquet' order by i; +select i, j, parse_path(filename)[-2:] from '{DATA_DIR}/parquet-testing/glob*/t?.parquet' order by i; ---- -1 a data/parquet-testing/glob/t1.parquet -2 b data/parquet-testing/glob/t2.parquet -3 c data/parquet-testing/glob2/t1.parquet +1 a [glob, t1.parquet] +2 b [glob, t2.parquet] +3 c [glob2, t1.parquet] # not projected in * query II -select * from 'data/parquet-testing/glob*/t?.parquet' order by i; +select * from '{DATA_DIR}/parquet-testing/glob*/t?.parquet' order by i; ---- 1 a 2 b
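# --- Editor's illustration (not part of the diff): the reason these hunks swap
# replace(filename, '\', '/') for parse_path(filename)[-2:]. Once paths start
# with a machine-dependent {DATA_DIR} prefix, the expected output can no longer
# contain the full filename; parse_path splits a path into a list of components
# (and handles both separators, so the old backslash normalization becomes
# unnecessary), and the [-2:] slice keeps just the stable tail for comparison:
query I
SELECT parse_path('data/parquet-testing/glob/t1.parquet')[-2:];
----
[glob, t1.parquet]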
@@ -47,6 +47,6 @@ require notwindows # filename in filter query III -select i, j, replace(filename, '\', '/') from 'data/parquet-testing/glob*/t?.parquet' where filename='data/parquet-testing/glob/t1.parquet' +select i, j, parse_path(filename)[-2:] from '{DATA_DIR}/parquet-testing/glob*/t?.parquet' where filename='{DATA_DIR}/parquet-testing/glob/t1.parquet' ---- -1 a data/parquet-testing/glob/t1.parquet +1 a [glob, t1.parquet] diff --git a/test/sql/copy/parquet/parquet_virtual_file_row_number.test b/test/sql/copy/parquet/parquet_virtual_file_row_number.test index 830316c86407..5c572781159a 100644 --- a/test/sql/copy/parquet/parquet_virtual_file_row_number.test +++ b/test/sql/copy/parquet/parquet_virtual_file_row_number.test @@ -6,18 +6,18 @@ require parquet # File row number without the file_row_number option query I -select file_row_number from 'data/parquet-testing/glob/t1.parquet' +select file_row_number from '{DATA_DIR}/parquet-testing/glob/t1.parquet' ---- 0 query I -select file_row_number from 'data/parquet-testing/glob/t1.parquet' where file_row_number=0 +select file_row_number from '{DATA_DIR}/parquet-testing/glob/t1.parquet' where file_row_number=0 ---- 0 query IIII -select i, j, replace(filename, '\', '/'), file_row_number from 'data/parquet-testing/glob*/t?.parquet' order by i; +select i, j, parse_path(filename)[-2:], file_row_number from '{DATA_DIR}/parquet-testing/glob*/t?.parquet' order by i; ---- -1 a data/parquet-testing/glob/t1.parquet 0 -2 b data/parquet-testing/glob/t2.parquet 0 -3 c data/parquet-testing/glob2/t1.parquet 0 +1 a [glob, t1.parquet] 0 +2 b [glob, t2.parquet] 0 +3 c [glob2, t1.parquet] 0 diff --git a/test/sql/copy/parquet/parquet_write_codecs.test b/test/sql/copy/parquet/parquet_write_codecs.test index 6ee94c0e3901..6d493f187eba 100644 --- a/test/sql/copy/parquet/parquet_write_codecs.test +++ b/test/sql/copy/parquet/parquet_write_codecs.test @@ -9,33 +9,33 @@ foreach codec UNCOMPRESSED SNAPPY GZIP ZSTD LZ4 LZ4_RAW BROTLI # codec uncompressed statement ok -COPY (SELECT 42, 'hello') TO '__TEST_DIR__/${codec}.parquet' (FORMAT 'parquet', CODEC '${codec}'); +COPY (SELECT 42, 'hello') TO '{TEMP_DIR}/${codec}.parquet' (FORMAT 'parquet', CODEC '${codec}'); query II -SELECT * FROM parquet_scan('__TEST_DIR__/${codec}.parquet'); +SELECT * FROM parquet_scan('{TEMP_DIR}/${codec}.parquet'); ---- 42 hello statement ok -COPY (FROM "data/parquet-testing/userdata1.parquet") TO '__TEST_DIR__/userdata-${codec}.parquet' (FORMAT 'parquet', CODEC '${codec}', ROW_GROUP_SIZE 10); +COPY (FROM "{DATA_DIR}/parquet-testing/userdata1.parquet") TO '{TEMP_DIR}/userdata-${codec}.parquet' (FORMAT 'parquet', CODEC '${codec}', ROW_GROUP_SIZE 10); statement ok -FROM "__TEST_DIR__/userdata-${codec}.parquet"; +FROM "{TEMP_DIR}/userdata-${codec}.parquet"; endloop # unsupported codec statement error -COPY (SELECT 42, 'hello') TO '__TEST_DIR__/gzip.parquet' (FORMAT 'parquet', CODEC 'BLABLABLA'); +COPY (SELECT 42, 'hello') TO '{TEMP_DIR}/gzip.parquet' (FORMAT 'parquet', CODEC 'BLABLABLA'); ---- # empty codec statement error -COPY (SELECT 42, 'hello') TO '__TEST_DIR__/gzip.parquet' (FORMAT 'parquet', CODEC); +COPY (SELECT 42, 'hello') TO '{TEMP_DIR}/gzip.parquet' (FORMAT 'parquet', CODEC); ---- # integer codec statement error -COPY (SELECT 42, 'hello') TO '__TEST_DIR__/gzip.parquet' (FORMAT 'parquet', CODEC 3); +COPY (SELECT 42, 'hello') TO '{TEMP_DIR}/gzip.parquet' (FORMAT 'parquet', CODEC 3); ---- diff --git a/test/sql/copy/parquet/parquet_zstd.test b/test/sql/copy/parquet/parquet_zstd.test index 7d2a004dd59d..9e079c29fccc 100644 --- a/test/sql/copy/parquet/parquet_zstd.test +++ b/test/sql/copy/parquet/parquet_zstd.test @@ -8,7 +8,7 @@ SET default_null_order='nulls_first'; require parquet query TTT -select * from parquet_scan('data/parquet-testing/zstd.parquet') order by 1 +select * from parquet_scan('{DATA_DIR}/parquet-testing/zstd.parquet') order by 1 ----
NULL bar 0 -1 foo 1 diff --git a/test/sql/copy/parquet/read_parquet_parameter.test b/test/sql/copy/parquet/read_parquet_parameter.test index c2d09907920f..f1d42707517c 100644 --- a/test/sql/copy/parquet/read_parquet_parameter.test +++ b/test/sql/copy/parquet/read_parquet_parameter.test @@ -11,7 +11,7 @@ statement ok PREPARE v1 AS SELECT * FROM parquet_scan($1) ORDER BY 1 query ITIIIIRRTTT -EXECUTE v1('data/parquet-testing/arrow/alltypes_plain.parquet') +EXECUTE v1('{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet') ---- 0 1 0 0 0 0 0.000000 0.000000 01/01/09 0 2009-01-01 00:00:00 1 0 1 1 1 10 1.100000 10.100000 01/01/09 1 2009-01-01 00:01:00 @@ -23,7 +23,7 @@ EXECUTE v1('data/parquet-testing/arrow/alltypes_plain.parquet') 7 0 1 1 1 10 1.100000 10.100000 04/01/09 1 2009-04-01 00:01:00 query T -EXECUTE v1('data/parquet-testing/pandas-date.parquet') +EXECUTE v1('{DATA_DIR}/parquet-testing/pandas-date.parquet') ---- 1921-12-24 2021-01-12 diff --git a/test/sql/copy/parquet/recursive_parquet_union_by_name.test b/test/sql/copy/parquet/recursive_parquet_union_by_name.test index 4a3a6b4a1cba..3524eba2ec13 100644 --- a/test/sql/copy/parquet/recursive_parquet_union_by_name.test +++ b/test/sql/copy/parquet/recursive_parquet_union_by_name.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification statement ok -create view r AS from read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1, union_by_name=1) +create view r AS from read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1, union_by_name=1) query II WITH RECURSIVE t(it, accum) AS diff --git a/test/sql/copy/parquet/rle_bool.test b/test/sql/copy/parquet/rle_bool.test index c397f09e0a88..f1f3685ecd01 100644 --- a/test/sql/copy/parquet/rle_bool.test +++ b/test/sql/copy/parquet/rle_bool.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification query IIIII -SELECT * FROM 'data/parquet-testing/arrow/datapage_v2.snappy.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/arrow/datapage_v2.snappy.parquet' ---- abc 1 2.0 True [1, 2, 3] abc 2 3.0 True NULL @@ -17,7 +17,7 @@ NULL 4 5.0 False [1, 2, 3] abc 5 2.0 True [1, 2] query I -SELECT * FROM 'data/parquet-testing/rle_boolean_encoding.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/rle_boolean_encoding.parquet' ---- True False diff --git a/test/sql/copy/parquet/spark_v2.test b/test/sql/copy/parquet/spark_v2.test index 3be540f16f8b..8af051b10b60 100644 --- a/test/sql/copy/parquet/spark_v2.test +++ b/test/sql/copy/parquet/spark_v2.test @@ -8,12 +8,12 @@ statement ok PRAGMA enable_verification query I -SELECT count(*) FROM 'data/parquet-testing/spark-store.parquet' +SELECT count(*) FROM '{DATA_DIR}/parquet-testing/spark-store.parquet' ---- 12 query III -SELECT s_store_sk, s_floor_space, trim(s_street_name) FROM 'data/parquet-testing/spark-store.parquet' +SELECT s_store_sk, s_floor_space, trim(s_street_name) FROM '{DATA_DIR}/parquet-testing/spark-store.parquet' ---- 1 5250760 Spring 2 5285950 Sycamore @@ -30,7 +30,7 @@ SELECT s_store_sk, s_floor_space, trim(s_street_name) FROM 'data/parquet-testing query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII -SELECT * FROM 'data/parquet-testing/spark-ontime.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/spark-ontime.parquet' ---- 2017 1 2 26 7 2017-02-26 DL 19790.0 DL 30 11298 1129804 30194 DFW Dallas/Fort Worth, TX TX 48 Texas 74.0 10397 1039705 30397 ATL Atlanta, GA GA 13 Georgia 34.0 1715.0 1711.0 -4.0 0.0 0.0 -1.0 1700-1759 2024.0 2011.0 -13.0 
0.0 0.0 -1.0 2000-2059 0 0 129.0 120.0 1.0 731.0 3 2017 1 2 26 7 2017-02-26 DL 19790.0 DL 31 10397 1039705 30397 ATL Atlanta, GA GA 13 Georgia 34.0 11298 1129804 30194 DFW Dallas/Fort Worth, TX TX 48 Texas 74.0 1910.0 1924.0 14.0 14.0 0.0 0.0 1900-1959 2039.0 2059.0 20.0 20.0 1.0 1.0 2000-2059 0 0 149.0 155.0 1.0 731.0 3 diff --git a/test/sql/copy/parquet/struct_column_reader_skip.test b/test/sql/copy/parquet/struct_column_reader_skip.test index 02ae1dbb698a..3999c98f4161 100644 --- a/test/sql/copy/parquet/struct_column_reader_skip.test +++ b/test/sql/copy/parquet/struct_column_reader_skip.test @@ -8,6 +8,6 @@ statement ok PRAGMA enable_verification query III -SELECT my_map['A'], * FROM parquet_scan('data/parquet-testing/struct_skip_test.parquet') where filter == '0' +SELECT my_map['A'], * FROM parquet_scan('{DATA_DIR}/parquet-testing/struct_skip_test.parquet') where filter == '0' ---- NULL {A=NULL} 0 diff --git a/test/sql/copy/parquet/test_aws_files.test b/test/sql/copy/parquet/test_aws_files.test index 234fa73b4b46..3e6584e367b2 100644 --- a/test/sql/copy/parquet/test_aws_files.test +++ b/test/sql/copy/parquet/test_aws_files.test @@ -8,6 +8,6 @@ statement ok PRAGMA enable_verification query III -SELECT * FROM parquet_scan('data/parquet-testing/aws1.snappy.parquet') limit 100 +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/aws1.snappy.parquet') limit 100 ---- 11000 values hashing to 5a3700bd7f58ae786d89a06bfb6fb4f6 diff --git a/test/sql/copy/parquet/test_parallel_many_row_groups.test b/test/sql/copy/parquet/test_parallel_many_row_groups.test index 8628e96069b6..73050451ce4c 100644 --- a/test/sql/copy/parquet/test_parallel_many_row_groups.test +++ b/test/sql/copy/parquet/test_parallel_many_row_groups.test @@ -6,12 +6,12 @@ require parquet # these files are small (1000 entries), but they are intentionally written such that every row has its own row group query IIII -SELECT COUNT(*), MIN(i), MAX(i), SUM(i) FROM parquet_scan('data/parquet-testing/manyrowgroups.parquet') t(i) +SELECT COUNT(*), MIN(i), MAX(i), SUM(i) FROM parquet_scan('{DATA_DIR}/parquet-testing/manyrowgroups.parquet') t(i) ---- 1001 42 1041 541542 query IIII -SELECT COUNT(*), MIN(i), MAX(i), SUM(i) FROM parquet_scan('data/parquet-testing/manyrowgroups*') t(i) +SELECT COUNT(*), MIN(i), MAX(i), SUM(i) FROM parquet_scan('{DATA_DIR}/parquet-testing/manyrowgroups*') t(i) ---- 2002 42 1041 1083084 @@ -19,11 +19,11 @@ statement ok PRAGMA threads=4 query IIII -SELECT COUNT(*), MIN(i), MAX(i), SUM(i) FROM parquet_scan('data/parquet-testing/manyrowgroups.parquet') t(i) +SELECT COUNT(*), MIN(i), MAX(i), SUM(i) FROM parquet_scan('{DATA_DIR}/parquet-testing/manyrowgroups.parquet') t(i) ---- 1001 42 1041 541542 query IIII -SELECT COUNT(*), MIN(i), MAX(i), SUM(i) FROM parquet_scan('data/parquet-testing/manyrowgroups*') t(i) +SELECT COUNT(*), MIN(i), MAX(i), SUM(i) FROM parquet_scan('{DATA_DIR}/parquet-testing/manyrowgroups*') t(i) ---- 2002 42 1041 1083084 diff --git a/test/sql/copy/parquet/test_parquet_decimal.test b/test/sql/copy/parquet/test_parquet_decimal.test index 1294577376fb..0cf3e495a030 100644 --- a/test/sql/copy/parquet/test_parquet_decimal.test +++ b/test/sql/copy/parquet/test_parquet_decimal.test @@ -12,7 +12,7 @@ PRAGMA enable_verification query I -SELECT * FROM parquet_scan('data/parquet-testing/decimal/fixed_length_decimal.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/decimal/fixed_length_decimal.parquet') ---- 1.00 2.00 @@ -40,7 +40,7 @@ SELECT * FROM 
parquet_scan('data/parquet-testing/decimal/fixed_length_decimal.pa 24.00 query I -SELECT * FROM parquet_scan('data/parquet-testing/decimal/fixed_length_decimal_legacy.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/decimal/fixed_length_decimal_legacy.parquet') ---- 1.00 2.00 @@ -70,7 +70,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/decimal/fixed_length_decimal_le # yay NULLs query I -SELECT * FROM parquet_scan('data/parquet-testing/decimal/decimal_dc.parquet') limit 10 +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/decimal/decimal_dc.parquet') limit 10 ---- NULL NULL @@ -85,7 +85,7 @@ NULL query IIIII -SELECT * FROM parquet_scan('data/parquet-testing/decimal/pandas_decimal.parquet') limit 10 +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/decimal/pandas_decimal.parquet') limit 10 ---- 1234.0 12.34 12345.6789 123456789.98765433 922337203685477580700.92230685477500000 -1234.0 -12.34 -9765.4321 -987654321.1234568 -922337236854775807.92233720306854775 diff --git a/test/sql/copy/parquet/test_parquet_filter_pushdown.test b/test/sql/copy/parquet/test_parquet_filter_pushdown.test index d172756ea335..937dc24df045 100644 --- a/test/sql/copy/parquet/test_parquet_filter_pushdown.test +++ b/test/sql/copy/parquet/test_parquet_filter_pushdown.test @@ -9,48 +9,48 @@ pragma enable_verification # userdata1.parquet query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/userdata1.parquet') where id > 500 +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where id > 500 ---- 500 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/userdata1.parquet') where id < 500 +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where id < 500 ---- 499 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/userdata1.parquet') where id > 100 and id < 900 +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where id > 100 and id < 900 ---- 799 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/userdata1.parquet') where id between 100 and 900 +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where id between 100 and 900 ---- 801 query IIIII -SELECT registration_dttm, id, first_name, birthdate, salary FROM parquet_scan('data/parquet-testing/userdata1.parquet') where id = 42 +SELECT registration_dttm, id, first_name, birthdate, salary FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where id = 42 ---- 2016-02-03 04:33:04 42 Todd 12/19/1999 284728.990000 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/userdata1.parquet') where id = 42 +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where id = 42 ---- 1 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/userdata1.parquet') where salary < 1000 +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where salary < 1000 ---- 0 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/userdata1.parquet') where salary < 1000 +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where salary < 1000 ---- 0 query II -SELECT first_name, gender FROM parquet_scan('data/parquet-testing/userdata1.parquet') where first_name = 'Mark' and gender <> '' +SELECT first_name, gender FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where first_name = 'Mark' and gender <> '' ---- Mark Male Mark Male @@ -64,7 +64,7 @@ Mark Male 
Mark Male query II -SELECT gender, first_name FROM parquet_scan('data/parquet-testing/userdata1.parquet') where first_name = 'Mark' and gender <> '' +SELECT gender, first_name FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where first_name = 'Mark' and gender <> '' ---- Male Mark Male Mark @@ -78,13 +78,13 @@ Male Mark Male Mark query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/userdata1.parquet') where gender = 'Male' and first_name = 'Mark' +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where gender = 'Male' and first_name = 'Mark' ---- 10 query I -SELECT last_name FROM parquet_scan('data/parquet-testing/userdata1.parquet') where first_name > 'Mark' and country > 'Germany' and salary > 0 order by last_name limit 10 +SELECT last_name FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where first_name > 'Mark' and country > 'Germany' and salary > 0 order by last_name limit 10 ---- Adams Adams @@ -99,7 +99,7 @@ Arnold query I -SELECT length(l_comment) FROM parquet_scan('data/parquet-testing/lineitem-top10000.gzip.parquet') where l_orderkey = 1 order by l_comment +SELECT length(l_comment) FROM parquet_scan('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet') where l_orderkey = 1 order by l_comment ---- 24 17 @@ -109,7 +109,7 @@ SELECT length(l_comment) FROM parquet_scan('data/parquet-testing/lineitem-top100 29 query I -SELECT * FROM parquet_scan('data/parquet-testing/date.parquet') where d < cast('1978-01-01' as date) +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d < cast('1978-01-01' as date) ---- 1970-01-01 1971-01-01 @@ -125,7 +125,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/date.parquet') where d < cast(' 1977-01-01 query I -SELECT * FROM parquet_scan('data/parquet-testing/date.parquet') where d > cast('1982-01-01' as date) and d < cast('1986-01-01' as date) +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d > cast('1982-01-01' as date) and d < cast('1986-01-01' as date) ---- 1983-01-01 1984-01-01 @@ -136,7 +136,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/date.parquet') where d > cast(' 1985-05-01 query I -SELECT * FROM parquet_scan('data/parquet-testing/date.parquet') where d >= cast('1990-01-01' as date) +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d >= cast('1990-01-01' as date) ---- 1990-01-01 1991-01-01 @@ -147,56 +147,56 @@ SELECT * FROM parquet_scan('data/parquet-testing/date.parquet') where d >= cast( 1997-01-01 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/date.parquet') where d == cast('1970-01-01' as date) +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d == cast('1970-01-01' as date) ---- 1 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/date.parquet') where d > cast('1970-01-01' as date) +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d > cast('1970-01-01' as date) ---- 26 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/date.parquet') where d >= cast('1982-01-01' as date) and d < cast('1985-01-01' as date) +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d >= cast('1982-01-01' as date) and d < cast('1985-01-01' as date) ---- 3 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/date.parquet') where d < cast('1970-01-01' as date) +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') 
where d < cast('1970-01-01' as date) ---- 0 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/date.parquet') where d between cast('1975-01-01' as date) and cast('1976-01-01' as date) +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d between cast('1975-01-01' as date) and cast('1976-01-01' as date) ---- 6 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/date.parquet') where d >= cast('1975-01-01' as date) and d < cast('1976-01-01' as date) +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d >= cast('1975-01-01' as date) and d < cast('1976-01-01' as date) ---- 5 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/date.parquet') where d < cast('1975-01-01' as date) and d > cast('1976-01-01' as date) +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d < cast('1975-01-01' as date) and d > cast('1976-01-01' as date) ---- 0 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/date.parquet') where d < cast('1975-01-01' as date) or d > cast('1976-01-01' as date) +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d < cast('1975-01-01' as date) or d > cast('1976-01-01' as date) ---- 21 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/date.parquet') where d < cast('1975-01-01' as date) or d > cast('1976-01-01' as date) +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d < cast('1975-01-01' as date) or d > cast('1976-01-01' as date) ---- 21 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/date.parquet') where d < cast('1975-01-01' as date) or d >= cast('1976-01-01' as date) +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d < cast('1975-01-01' as date) or d >= cast('1976-01-01' as date) ---- 22 query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/date.parquet') where d is null +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/date.parquet') where d is null ---- 9 diff --git a/test/sql/copy/parquet/test_parquet_gzip.test b/test/sql/copy/parquet/test_parquet_gzip.test index c2f0995800f7..1e8ec01aae0b 100644 --- a/test/sql/copy/parquet/test_parquet_gzip.test +++ b/test/sql/copy/parquet/test_parquet_gzip.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification query IIIIIIIIIIIIIIII -select * from parquet_scan('data/parquet-testing/lineitem-top10000.gzip.parquet') +select * from parquet_scan('{DATA_DIR}/parquet-testing/lineitem-top10000.gzip.parquet') ---- 160000 values hashing to 96cd23a0712a6a753aedcb8a2bcfcfa0 diff --git a/test/sql/copy/parquet/test_parquet_nested.test b/test/sql/copy/parquet/test_parquet_nested.test index 70cea082685c..51d7e609a7de 100644 --- a/test/sql/copy/parquet/test_parquet_nested.test +++ b/test/sql/copy/parquet/test_parquet_nested.test @@ -6,7 +6,7 @@ require parquet query I -select * FROM parquet_scan('data/parquet-testing/map.parquet') sq limit 3; +select * FROM parquet_scan('{DATA_DIR}/parquet-testing/map.parquet') sq limit 3; ---- {Content-Encoding=gzip, X-Frame-Options=SAMEORIGIN, Connection=keep-alive, Via='1.1 ip-10-1-1-216.ec2.internal (squid/4.10-20200322-r358ad2fdf)', X-Xss-Protection='1; mode=block', Content-Type='text/html;charset=utf-8', Date='Sat, 30 Jan 2021 16:19:57 GMT', X-Cache=MISS from ip-10-1-1-216.ec2.internal, Vary=Accept-Encoding, Server=nginx/1.10.3, X-Cache-Lookup='HIT from ip-10-1-1-216.ec2.internal:3128', 
X-Content-Type-Options=nosniff, Content-Length=921} {Content-Encoding=gzip, X-Frame-Options=SAMEORIGIN, Connection=keep-alive, Via='1.1 ip-10-1-1-216.ec2.internal (squid/4.10-20200322-r358ad2fdf)', X-Xss-Protection='1; mode=block', Content-Type='text/html;charset=utf-8', Date='Sat, 30 Jan 2021 16:19:59 GMT', X-Cache=MISS from ip-10-1-1-216.ec2.internal, Vary=Accept-Encoding, Server=nginx/1.10.3, X-Cache-Lookup='HIT from ip-10-1-1-216.ec2.internal:3128', X-Content-Type-Options=nosniff, Content-Length=922} @@ -14,7 +14,7 @@ select * FROM parquet_scan('data/parquet-testing/map.parquet') sq limit 3; # this was a hard one query II -SELECT * FROM parquet_scan('data/parquet-testing/arrow/nested_lists.snappy.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/nested_lists.snappy.parquet') ---- [[[a, b], [c]], [NULL, [d]]] 1 [[[a, b], [c, d]], [NULL, [e]]] 1 @@ -27,7 +27,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/nested_lists.snappy.parqu #2 [[[a, b], [c, d], [e]], [None, [f]]] 1 query I -SELECT unnest(a) FROM parquet_scan('data/parquet-testing/arrow/nested_lists.snappy.parquet') +SELECT unnest(a) FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/nested_lists.snappy.parquet') ---- [[a, b], [c]] [NULL, [d]] @@ -37,7 +37,7 @@ SELECT unnest(a) FROM parquet_scan('data/parquet-testing/arrow/nested_lists.snap [NULL, [f]] query II -SELECT * FROM parquet_scan('data/parquet-testing/arrow/list_columns.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/list_columns.parquet') ---- [1, 2, 3] [abc, efg, hij] [NULL, 1] NULL @@ -52,13 +52,13 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/list_columns.parquet') ## need to fix data page v2 for this #query II -#SELECT * FROM parquet_scan('data/parquet-testing/datapage_v2.snappy.parquet') +#SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/datapage_v2.snappy.parquet') #---- # query II -SELECT id, url FROM parquet_scan('data/parquet-testing/apkwan.parquet') limit 10 +SELECT id, url FROM parquet_scan('{DATA_DIR}/parquet-testing/apkwan.parquet') limit 10 ---- 53e997b9b7602d9701f9f044 ['https://link.springer.com/10.1007/s00108-004-1229-0'] 53e997b2b7602d9701f8fea5 [] @@ -72,7 +72,7 @@ SELECT id, url FROM parquet_scan('data/parquet-testing/apkwan.parquet') limit 10 53e997a6b7602d9701f7ffb0 ['http://www.ncbi.nlm.nih.gov/pubmed/4051185?report=xml&format=text'] query II -select * from (SELECT id, unnest(url) u FROM parquet_scan('data/parquet-testing/apkwan.parquet')) sq where u is not null limit 10 +select * from (SELECT id, unnest(url) u FROM parquet_scan('{DATA_DIR}/parquet-testing/apkwan.parquet')) sq where u is not null limit 10 ---- 53e997b9b7602d9701f9f044 https://link.springer.com/10.1007/s00108-004-1229-0 53e997aeb7602d9701f8856e http://www.ncbi.nlm.nih.gov/pubmed/4669724?report=xml&format=text @@ -87,7 +87,7 @@ select * from (SELECT id, unnest(url) u FROM parquet_scan('data/parquet-testing/ query II -SELECT id, authors FROM parquet_scan('data/parquet-testing/apkwan.parquet') limit 10 +SELECT id, authors FROM parquet_scan('{DATA_DIR}/parquet-testing/apkwan.parquet') limit 10 ---- 53e997b9b7602d9701f9f044 [{'name': M. Stoll, 'id': 56018d9645cedb3395e77641, 'org': Abteilung Klinische Immunologie Medizinische Hochschule Hannover}, {'name': H. Heiken, 'id': 53f4d53adabfaef34ff814c8, 'org': Abteilung Klinische Immunologie Medizinische Hochschule Hannover}, {'name': G. M. N. Behrens, 'id': 53f42afbdabfaec09f0ed4e0, 'org': Abteilung Klinische Immunologie Medizinische Hochschule Hannover}, {'name': R. 
E. Schmidt, 'id': 56018d9645cedb3395e77644, 'org': Abteilung Klinische Immunologie Medizinische Hochschule Hannover}] 53e997b2b7602d9701f8fea5 [{'name': D. Barr, 'id': 5440d4cfdabfae805a6fd46c, 'org': 'Camborne School of Mines Redruth, Cornwall England'}] @@ -101,7 +101,7 @@ SELECT id, authors FROM parquet_scan('data/parquet-testing/apkwan.parquet') limi 53e997a6b7602d9701f7ffb0 [{'name': R R Walters, 'id': 53f43b0edabfaee0d9b91d40, 'org': NULL}] query II -SELECT id, unnest(authors) FROM parquet_scan('data/parquet-testing/apkwan.parquet') limit 20 +SELECT id, unnest(authors) FROM parquet_scan('{DATA_DIR}/parquet-testing/apkwan.parquet') limit 20 ---- 53e997b9b7602d9701f9f044 {'name': M. Stoll, 'id': 56018d9645cedb3395e77641, 'org': Abteilung Klinische Immunologie Medizinische Hochschule Hannover} 53e997b9b7602d9701f9f044 {'name': H. Heiken, 'id': 53f4d53adabfaef34ff814c8, 'org': Abteilung Klinische Immunologie Medizinische Hochschule Hannover} @@ -125,7 +125,7 @@ SELECT id, unnest(authors) FROM parquet_scan('data/parquet-testing/apkwan.parque 53e99813b7602d970202f0a1 {'name': Sean Milmo, 'id': 53f45f64dabfaee4dc832b5f, 'org': NULL} query III -SELECT id, struct_extract(unnest(authors), 'name'), struct_extract(unnest(authors), 'id') FROM parquet_scan('data/parquet-testing/apkwan.parquet') limit 20 +SELECT id, struct_extract(unnest(authors), 'name'), struct_extract(unnest(authors), 'id') FROM parquet_scan('{DATA_DIR}/parquet-testing/apkwan.parquet') limit 20 ---- 53e997b9b7602d9701f9f044 M. Stoll 56018d9645cedb3395e77641 53e997b9b7602d9701f9f044 H. Heiken 53f4d53adabfaef34ff814c8 diff --git a/test/sql/copy/parquet/test_parquet_null.test b/test/sql/copy/parquet/test_parquet_null.test index d58535995209..cbeb5327809a 100644 --- a/test/sql/copy/parquet/test_parquet_null.test +++ b/test/sql/copy/parquet/test_parquet_null.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification query I -select count(col1) from parquet_scan('data/parquet-testing/bug687_nulls.parquet') +select count(col1) from parquet_scan('{DATA_DIR}/parquet-testing/bug687_nulls.parquet') ---- 99000 diff --git a/test/sql/copy/parquet/test_parquet_scan.test b/test/sql/copy/parquet/test_parquet_scan.test index af06f274dde9..eba2cd00c543 100644 --- a/test/sql/copy/parquet/test_parquet_scan.test +++ b/test/sql/copy/parquet/test_parquet_scan.test @@ -14,7 +14,7 @@ SELECT * FROM parquet_scan('does_not_exist') # alltypes_plain.parquet query ITIIIIRRTTT -SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_plain.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet') ---- 4 1 0 0 0 0 0.000000 0.000000 03/01/09 0 2009-03-01 00:00:00 5 0 1 1 1 10 1.100000 10.100000 03/01/09 1 2009-03-01 00:01:00 @@ -27,7 +27,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_plain.parquet') # we don't need to explicitly use parquet_scan query ITIIIIRRTTT -SELECT * FROM "data/parquet-testing/arrow/alltypes_plain.parquet" +SELECT * FROM "{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet" ---- 4 1 0 0 0 0 0.000000 0.000000 03/01/09 0 2009-03-01 00:00:00 5 0 1 1 1 10 1.100000 10.100000 03/01/09 1 2009-03-01 00:01:00 @@ -40,7 +40,7 @@ SELECT * FROM "data/parquet-testing/arrow/alltypes_plain.parquet" # we can use table aliases here as well query ITIIIIRRTTT -SELECT tbl.* FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl +SELECT tbl.* FROM "{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet" tbl ---- 4 1 0 0 0 0 0.000000 0.000000 03/01/09 0 2009-03-01 00:00:00 5 0 1 1 1 
10 1.100000 10.100000 03/01/09 1 2009-03-01 00:01:00 @@ -53,7 +53,7 @@ SELECT tbl.* FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl # and column aliases query I -SELECT tbl.a FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) +SELECT tbl.a FROM "{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) ---- 4 5 @@ -66,7 +66,7 @@ SELECT tbl.a FROM "data/parquet-testing/arrow/alltypes_plain.parquet" tbl(a) #unsigned types parquet query IIII -SELECT * FROM parquet_scan('data/parquet-testing/unsigned.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/unsigned.parquet') ---- 1 1 1 1 2 2 2 2 @@ -77,14 +77,14 @@ SELECT * FROM parquet_scan('data/parquet-testing/unsigned.parquet') # alltypes_plain.snappy.parquet query ITIIIIRRTTT -SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_plain.snappy.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_plain.snappy.parquet') ---- 6 1 0 0 0 0 0.000000 0.000000 04/01/09 0 2009-04-01 00:00:00 7 0 1 1 1 10 1.100000 10.100000 04/01/09 1 2009-04-01 00:01:00 # alltypes_dictionary.parquet query ITIIIIRRTTT -SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_dictionary.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_dictionary.parquet') ---- 0 1 0 0 0 0 0.000000 0.000000 01/01/09 0 2009-01-01 00:00:00 1 0 1 1 1 10 1.100000 10.100000 01/01/09 1 2009-01-01 00:01:00 @@ -92,7 +92,7 @@ SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_dictionary.parqu # this file was created with spark using the data-types.py script # data-types.parquet query IIIIRRITTTTI -SELECT * FROM parquet_scan('data/parquet-testing/data-types.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/data-types.parquet') ---- NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 42 43 44 45 4.600000 4.700000 4.80 49 50 1 2019-11-26 20:11:42.501 2020-01-10 @@ -102,12 +102,12 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL # userdata1.parquet query I -SELECT COUNT(*) FROM parquet_scan('data/parquet-testing/userdata1.parquet') +SELECT COUNT(*) FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') ---- 1000 statement ok -CREATE VIEW userdata1 AS SELECT * FROM parquet_scan('data/parquet-testing/userdata1.parquet') +CREATE VIEW userdata1 AS SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') query I SELECT COUNT(*) FROM userdata1 @@ -245,5 +245,5 @@ SELECT FIRST(comments) OVER w, LAST(comments) OVER w FROM userdata1 WINDOW w AS 1E+02 (empty) statement error -SELECT * FROM parquet_scan('data/parquet-testing/broken-arrow.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/broken-arrow.parquet') ---- diff --git a/test/sql/copy/parquet/test_parquet_stats.test b/test/sql/copy/parquet/test_parquet_stats.test index e829f85a71fb..32c4023b5332 100644 --- a/test/sql/copy/parquet/test_parquet_stats.test +++ b/test/sql/copy/parquet/test_parquet_stats.test @@ -9,74 +9,74 @@ PRAGMA explain_output = PHYSICAL_ONLY # empty reference result query I nosort empty -explain select * from parquet_scan('data/parquet-testing/userdata1.parquet') where false; +explain select * from parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where false; ---- # verify null stats work query I nosort empty -explain select * from parquet_scan('data/parquet-testing/userdata1.parquet') where id is null; +explain select * from parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where id is null; ---- # 
verify min/max stats on int cols work query I nosort empty -explain select * from parquet_scan('data/parquet-testing/userdata1.parquet') where id < 1; +explain select * from parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where id < 1; ---- query I nosort empty -explain select * from parquet_scan('data/parquet-testing/userdata1.parquet') where id > 1000; +explain select * from parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where id > 1000; ---- # verify min/max stats on double cols work query I nosort empty -explain select * from parquet_scan('data/parquet-testing/userdata1.parquet') where salary < 12380; +explain select * from parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where salary < 12380; ---- # max is nan because of parquet's unique setup - so we can't prune here # query I nosort empty -# explain select * from parquet_scan('data/parquet-testing/userdata1.parquet') where salary > 286593 and salary < 286594; +# explain select * from parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where salary > 286593 and salary < 286594; # ---- # verify min/max stats on timestamp cols work query I nosort empty -explain select * from parquet_scan('data/parquet-testing/timestamp.parquet') where time < '2020-10-04'; +explain select * from parquet_scan('{DATA_DIR}/parquet-testing/timestamp.parquet') where time < '2020-10-04'; ---- query I nosort empty -explain select * from parquet_scan('data/parquet-testing/timestamp.parquet') where time > '2020-10-06'; +explain select * from parquet_scan('{DATA_DIR}/parquet-testing/timestamp.parquet') where time > '2020-10-06'; ---- query I nosort empty -explain select * from parquet_scan('data/parquet-testing/timestamp-ms.parquet') where time < '2020-10-04'; +explain select * from parquet_scan('{DATA_DIR}/parquet-testing/timestamp-ms.parquet') where time < '2020-10-04'; ---- query I nosort empty -explain select * from parquet_scan('data/parquet-testing/timestamp-ms.parquet') where time > '2020-10-06'; +explain select * from parquet_scan('{DATA_DIR}/parquet-testing/timestamp-ms.parquet') where time > '2020-10-06'; ---- query I nosort empty -explain select * from parquet_scan('data/parquet-testing/data-types.parquet') where timestampval < '2019-11-25'; +explain select * from parquet_scan('{DATA_DIR}/parquet-testing/data-types.parquet') where timestampval < '2019-11-25'; ---- query I nosort empty -explain select * from parquet_scan('data/parquet-testing/data-types.parquet') where timestampval > '2019-11-27'; +explain select * from parquet_scan('{DATA_DIR}/parquet-testing/data-types.parquet') where timestampval > '2019-11-27'; ---- # TODO string comparisons are not pruned yet # verify min/max stats on string cols work #query I nosort empty -#explain select * from parquet_scan('data/parquet-testing/userdata1.parquet') where country < 'Aruba'; +#explain select * from parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where country < 'Aruba'; #---- #query I nosort empty -#explain select * from parquet_scan('data/parquet-testing/userdata1.parquet') where country > 'Zimbabwe'; +#explain select * from parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') where country > 'Zimbabwe'; #---- # see if stats work correctly for globs query I nosort nostats -explain select time from parquet_scan('data/parquet-testing/timestamp*.parquet') where time > '2020-10-06' +explain select time from parquet_scan('{DATA_DIR}/parquet-testing/timestamp*.parquet') where time > '2020-10-06' ---- statement ok @@ -87,16 +87,16 @@ 
mode skip # no stats since there are two files and the cache is on but we have not read all files yet query I nosort nostats -explain select time from parquet_scan('data/parquet-testing/timestamp*.parquet') where time > '2020-10-06' +explain select time from parquet_scan('{DATA_DIR}/parquet-testing/timestamp*.parquet') where time > '2020-10-06' ---- statement ok -select time from parquet_scan('data/parquet-testing/timestamp*.parquet') where time > '2020-10-06' +select time from parquet_scan('{DATA_DIR}/parquet-testing/timestamp*.parquet') where time > '2020-10-06' # but now we should have them query I nosort empty -explain select time from parquet_scan('data/parquet-testing/timestamp*.parquet') where time > '2020-10-06' +explain select time from parquet_scan('{DATA_DIR}/parquet-testing/timestamp*.parquet') where time > '2020-10-06' ---- statement ok @@ -104,5 +104,5 @@ pragma disable_object_cache # no stats again query I nosort nostats -explain select time from parquet_scan('data/parquet-testing/timestamp*.parquet') where time > '2020-10-06' +explain select time from parquet_scan('{DATA_DIR}/parquet-testing/timestamp*.parquet') where time > '2020-10-06' ---- diff --git a/test/sql/copy/parquet/timestamp_ms_stats.test b/test/sql/copy/parquet/timestamp_ms_stats.test index 072d650f98f7..b361eda41f38 100644 --- a/test/sql/copy/parquet/timestamp_ms_stats.test +++ b/test/sql/copy/parquet/timestamp_ms_stats.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification query I -select timestamp from 'data/parquet-testing/issue_5533_timestamp_ms_stats.parquet' order by 1; +select timestamp from '{DATA_DIR}/parquet-testing/issue_5533_timestamp_ms_stats.parquet' order by 1; ---- 2022-11-27 17:42:43.514 2022-11-27 17:42:43.514 @@ -16,7 +16,7 @@ select timestamp from 'data/parquet-testing/issue_5533_timestamp_ms_stats.parque 2022-11-27 17:42:44.28 query I -select timestamp from 'data/parquet-testing/issue_5533_timestamp_ms_stats.parquet' where timestamp >= '2022-11-27 00:00:00' +select timestamp from '{DATA_DIR}/parquet-testing/issue_5533_timestamp_ms_stats.parquet' where timestamp >= '2022-11-27 00:00:00' ---- 2022-11-27 17:42:43.514 2022-11-27 17:42:43.514 diff --git a/test/sql/copy/parquet/timetz_parquet.test b/test/sql/copy/parquet/timetz_parquet.test index 4371535a1065..6358d55a30ff 100644 --- a/test/sql/copy/parquet/timetz_parquet.test +++ b/test/sql/copy/parquet/timetz_parquet.test @@ -8,6 +8,6 @@ statement ok PRAGMA enable_verification query II -select * from 'data/parquet-testing/timetz_4byte_stats.parquet' order by 1; +select * from '{DATA_DIR}/parquet-testing/timetz_4byte_stats.parquet' order by 1; ---- 00:00:00+00 00:00:00+00 diff --git a/test/sql/copy/parquet/timezone.test b/test/sql/copy/parquet/timezone.test index 2320ee6b7363..4fab64936087 100644 --- a/test/sql/copy/parquet/timezone.test +++ b/test/sql/copy/parquet/timezone.test @@ -10,7 +10,7 @@ statement ok PRAGMA enable_verification query I -select typeof(TimeRecStart) from 'data/parquet-testing/tz.parquet' limit 1; +select typeof(TimeRecStart) from '{DATA_DIR}/parquet-testing/tz.parquet' limit 1; ---- TIMESTAMP WITH TIME ZONE @@ -18,7 +18,7 @@ statement ok SET timezone='UTC' query I -select TimeRecStart from 'data/parquet-testing/tz.parquet'; +select TimeRecStart from '{DATA_DIR}/parquet-testing/tz.parquet'; ---- 2022-10-17 21:52:27+00 2022-10-17 21:52:27+00 diff --git a/test/sql/copy/parquet/union_by_name_hive_partitioning.test b/test/sql/copy/parquet/union_by_name_hive_partitioning.test index 973143b530a2..138ba65564e5 100644 --- 
a/test/sql/copy/parquet/union_by_name_hive_partitioning.test +++ b/test/sql/copy/parquet/union_by_name_hive_partitioning.test @@ -5,13 +5,13 @@ require parquet statement error -SELECT * FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1) +SELECT * FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1) ---- schema mismatch query IIII SELECT i, j, k, x -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=0, union_by_name=1) +FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=0, union_by_name=1) ORDER BY j, x NULLS LAST ---- 42 84 NULL 1 @@ -20,7 +20,7 @@ NULL 128 33 NULL query IIII SELECT i, j, k, x -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1, union_by_name=1) +FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1, union_by_name=1) ORDER BY j ---- 42 84 NULL 1 @@ -29,7 +29,7 @@ NULL 128 33 2 query IIII SELECT i, j, k, x -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1, union_by_name=1) +FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1, union_by_name=1) WHERE x=2 ORDER BY j ---- @@ -41,7 +41,7 @@ CREATE TABLE selected_values AS SELECT 2 x query IIII SELECT i, j, k, x -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1, union_by_name=1) +FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1, union_by_name=1) WHERE x=(SELECT MAX(x) FROM selected_values) ORDER BY j ---- @@ -49,7 +49,7 @@ NULL 128 33 2 query IIII SELECT i, j, k, x -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1) +FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1) ORDER BY j ---- 42 84 NULL 1 @@ -57,7 +57,7 @@ NULL 128 33 2 query IIII SELECT i, j, k, x -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1) +FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1) WHERE k IS NULL ORDER BY j ---- @@ -65,7 +65,7 @@ ORDER BY j query IIII SELECT i, j, k, x -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1) +FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1) WHERE k IS NOT DISTINCT FROM NULL ORDER BY j ---- @@ -73,39 +73,39 @@ ORDER BY j query IIII SELECT i, j, k, x -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1) +FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1) WHERE k>0 ORDER BY j ---- NULL 128 33 2 query IIIII -SELECT i, j, k, x, filename.replace('\', '/').split('/')[-2] -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1, filename=1) +SELECT i, j, k, x, parse_path(filename)[-2] +FROM 
read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1, filename=1) WHERE k>0 ORDER BY j ---- NULL 128 33 2 x=2 query IIIII -SELECT i, j, k, x, filename.replace('\', '/').split('/')[-2] -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1, filename=1) -WHERE filename.replace('\', '/') >= 'data/parquet-testing/hive-partitioning/union_by_name/x=2' +SELECT i, j, k, x, parse_path(filename)[-2] +FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1, filename=1) +WHERE parse_path(filename) >= parse_path('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/x=2') ORDER BY j ---- NULL 128 33 2 x=2 query IIIII -SELECT i, j, k, x, filename.replace('\', '/').split('/')[-2] -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1, filename=1) -WHERE filename.replace('\', '/') < 'data/parquet-testing/hive-partitioning/union_by_name/x=2' +SELECT i, j, k, x, parse_path(filename)[-2] +FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1, filename=1) +WHERE parse_path(filename) < parse_path('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/x=2') ORDER BY j ---- 42 84 NULL 1 x=1 query IIIII SELECT i, j, k, x, filename.replace('\', '/').split('/')[-2] -FROM read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1, filename=1) -WHERE filename.replace('\', '/') < 'data/parquet-testing/hive-partitioning/union_by_name/x=1' +FROM read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/f2.parquet', hive_partitioning=1, union_by_name=1, filename=1) +WHERE parse_path(filename) < parse_path('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/x=1') ORDER BY j ---- diff --git a/test/sql/copy/parquet/writer/parquet_write_strings.test b/test/sql/copy/parquet/writer/parquet_write_strings.test index 7bfe8448a055..7efefa3955cc 100644 --- a/test/sql/copy/parquet/writer/parquet_write_strings.test +++ b/test/sql/copy/parquet/writer/parquet_write_strings.test @@ -1,5 +1,5 @@ # name: test/sql/copy/parquet/writer/parquet_write_strings.test -# description: Strings tests +# description: Strings tests with Parquet v2 format. # group: [writer] require parquet @@ -21,7 +21,7 @@ INSERT INTO strings VALUES ('happy'), ('happy'), ('joy'), ('joy'), ('surprise'); statement ok -COPY strings TO '__TEST_DIR__/strings.parquet' (FORMAT PARQUET); +COPY strings TO '__TEST_DIR__/strings.parquet' (FORMAT PARQUET, PARQUET_VERSION v2); query I SELECT encodings FROM parquet_metadata('__TEST_DIR__/strings.parquet') diff --git a/test/sql/copy/parquet/writer/parquet_write_strings_v1.test b/test/sql/copy/parquet/writer/parquet_write_strings_v1.test new file mode 100644 index 000000000000..4440c74b3a1a --- /dev/null +++ b/test/sql/copy/parquet/writer/parquet_write_strings_v1.test @@ -0,0 +1,263 @@ +# name: test/sql/copy/parquet/writer/parquet_write_strings_v1.test +# description: Strings tests with Parquet v1 format. Other than that, same as test/sql/copy/parquet/writer/parquet_write_strings.test. 
+# group: [writer] + +require parquet + +statement ok +PRAGMA enable_verification + +statement ok +CREATE TABLE strings(s VARCHAR); + +statement ok +INSERT INTO strings VALUES + ('happy'), ('happy'), ('joy'), ('joy'), + ('happy'), ('happy'), ('joy'), ('joy'), + ('happy'), ('happy'), ('joy'), ('joy'), + ('happy'), ('happy'), ('joy'), ('joy'), + ('happy'), ('happy'), ('joy'), ('joy'), + ('happy'), ('happy'), ('joy'), ('joy'), + ('happy'), ('happy'), ('joy'), ('joy'), ('surprise'); + +statement ok +COPY strings TO '__TEST_DIR__/strings.parquet' (FORMAT PARQUET, PARQUET_VERSION v1); + +query I +SELECT encodings FROM parquet_metadata('__TEST_DIR__/strings.parquet') +---- +PLAIN_DICTIONARY + +query I +SELECT * FROM '__TEST_DIR__/strings.parquet' +---- +happy +happy +joy +joy +happy +happy +joy +joy +happy +happy +joy +joy +happy +happy +joy +joy +happy +happy +joy +joy +happy +happy +joy +joy +happy +happy +joy +joy +surprise + +query I +SELECT stats_distinct_count FROM parquet_metadata('__TEST_DIR__/strings.parquet') +---- +3 + +# strings with null values +statement ok +UPDATE strings SET s=NULL WHERE s='joy' + +statement ok +COPY strings TO '__TEST_DIR__/strings.parquet' (FORMAT PARQUET); + +query I +SELECT * FROM '__TEST_DIR__/strings.parquet' +---- +happy +happy +NULL +NULL +happy +happy +NULL +NULL +happy +happy +NULL +NULL +happy +happy +NULL +NULL +happy +happy +NULL +NULL +happy +happy +NULL +NULL +happy +happy +NULL +NULL +surprise + +# all values are null +statement ok +UPDATE strings SET s=NULL + +statement ok +COPY strings TO '__TEST_DIR__/strings.parquet' (FORMAT PARQUET); + +query I +SELECT * FROM '__TEST_DIR__/strings.parquet' +---- +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL + +# empty table +statement ok +DELETE FROM strings + +statement ok +COPY strings TO '__TEST_DIR__/strings.parquet' (FORMAT PARQUET); + +query I +SELECT * FROM '__TEST_DIR__/strings.parquet' +---- + + +# non-dictionary table, also no distinct count +statement ok +DELETE FROM strings + +statement ok +INSERT INTO strings VALUES + ('0'), ('1'), ('2'), ('3'), ('4'), ('5'), ('6'), ('7'), ('8'), ('9'), + ('10'), ('11'), ('12'), ('13'), ('14'), ('15'), ('16'), ('17'), ('18'), ('19'), + ('20'), ('21'), ('22'), ('23'), ('24'), ('25'), ('26'), ('27'), ('28'), ('29') + +statement ok +COPY strings TO '__TEST_DIR__/strings.parquet' (FORMAT PARQUET); + +query I +SELECT encodings FROM parquet_metadata('__TEST_DIR__/strings.parquet') +---- +PLAIN + +query I +SELECT * FROM '__TEST_DIR__/strings.parquet' +---- +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 + +query I +SELECT stats_distinct_count FROM parquet_metadata('__TEST_DIR__/strings.parquet') +---- +NULL + +# non-dictionary table with null +statement ok +DELETE FROM strings + +statement ok +INSERT INTO strings VALUES + ('0'), ('1'), ('2'), (NULL), ('4'), ('5'), ('6'), (NULL), ('8'), ('9'), + ('10'), ('11'), ('12'), ('13'), ('14'), ('15'), ('16'), ('17'), ('18'), ('19'), + ('20'), (NULL), ('22'), ('23'), ('24'), ('25'), (NULL), ('27'), ('28'), ('29') + +statement ok +COPY strings TO '__TEST_DIR__/strings.parquet' (FORMAT PARQUET); + +query I +SELECT * FROM '__TEST_DIR__/strings.parquet' +---- +0 +1 +2 +NULL +4 +5 +6 +NULL +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +NULL +22 +23 +24 +25 +NULL +27 +28 +29 \ No newline at end of file diff --git 
a/test/sql/copy/parquet/writer/parquet_zstd_sequence.test_slow b/test/sql/copy/parquet/writer/parquet_zstd_sequence.test_slow index 29120344c07e..433a9b025eda 100644 --- a/test/sql/copy/parquet/writer/parquet_zstd_sequence.test_slow +++ b/test/sql/copy/parquet/writer/parquet_zstd_sequence.test_slow @@ -7,11 +7,11 @@ require parquet require 64bit statement ok -COPY (SELECT * FROM read_csv_auto('data/csv/sequences.csv.gz', delim=',', header=True) LIMIT 25000) TO '__TEST_DIR__/duckseq.parquet' (FORMAT 'PARQUET', CODEC 'ZSTD', ROW_GROUP_SIZE 25000); +COPY (SELECT * FROM read_csv_auto('{DATA_DIR}/csv/sequences.csv.gz', delim=',', header=True) LIMIT 25000) TO '{TEMP_DIR}/duckseq.parquet' (FORMAT 'PARQUET', CODEC 'ZSTD', ROW_GROUP_SIZE 25000); query IIIIII select count(*), min(strain), max(strain), min(strlen(sequence)), max(strlen(sequence)), avg(strlen(sequence)) -from '__TEST_DIR__/duckseq.parquet'; +from '{TEMP_DIR}/duckseq.parquet'; ---- 25000 AUS/NT01/2020 canine/HKG/20-03695/2020 17340 30018 29855.647080 @@ -21,20 +21,20 @@ COPY SELECT lstrain::VARCHAR[] lstrain, lsequence::VARCHAR[] lsequence FROM (VALUES ([], []), (NULL, NULL), ([], [])) tbl(lstrain, lsequence) UNION ALL SELECT * FROM ( - SELECT LIST(strain) AS lstrain, LIST(sequence) AS lsequence FROM '__TEST_DIR__/duckseq.parquet' LIMIT 10000 + SELECT LIST(strain) AS lstrain, LIST(sequence) AS lsequence FROM '{TEMP_DIR}/duckseq.parquet' LIMIT 10000 ) UNION ALL SELECT * FROM (VALUES ([], []), (NULL, NULL), ([], [])) ) -TO '__TEST_DIR__/duckseq2.parquet' (FORMAT 'PARQUET', CODEC 'ZSTD'); +TO '{TEMP_DIR}/duckseq2.parquet' (FORMAT 'PARQUET', CODEC 'ZSTD'); query I -SELECT COUNT(*) FROM '__TEST_DIR__/duckseq2.parquet' +SELECT COUNT(*) FROM '{TEMP_DIR}/duckseq2.parquet' ---- 7 query IIIIII nosort querylabel select count(*), min(strain), max(strain), min(strlen(sequence)), max(strlen(sequence)), avg(strlen(sequence)) -from (SELECT UNNEST(lstrain) AS strain, UNNEST(lsequence) AS sequence FROM '__TEST_DIR__/duckseq2.parquet'); +from (SELECT UNNEST(lstrain) AS strain, UNNEST(lsequence) AS sequence FROM '{TEMP_DIR}/duckseq2.parquet'); ---- 100000 ARG/Cordoba-1006-155/2020 tiger/NY/040420/2020 17340 30643 29821.264410 diff --git a/test/sql/copy/parquet/writer/test_parquet_write_complex.test b/test/sql/copy/parquet/writer/test_parquet_write_complex.test index a2b6a93f19eb..e4651959fc35 100644 --- a/test/sql/copy/parquet/writer/test_parquet_write_complex.test +++ b/test/sql/copy/parquet/writer/test_parquet_write_complex.test @@ -6,72 +6,72 @@ require parquet # alltypes_dictionary: scan as parquet query I nosort alltypes_dictionary -SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_dictionary.parquet'); +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_dictionary.parquet'); ---- # rewrite the file statement ok -COPY (SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_dictionary.parquet')) TO '__TEST_DIR__/alltypes_dictionary.parquet' (FORMAT 'PARQUET') +COPY (SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_dictionary.parquet')) TO '{TEMP_DIR}/alltypes_dictionary.parquet' (FORMAT 'PARQUET') # verify that the rewritten file has the same values again query I nosort alltypes_dictionary -SELECT * FROM parquet_scan('__TEST_DIR__/alltypes_dictionary.parquet'); +SELECT * FROM parquet_scan('{TEMP_DIR}/alltypes_dictionary.parquet'); ---- # bug687_nulls.parquet query I nosort bug687_nulls -SELECT * FROM parquet_scan('data/parquet-testing/bug687_nulls.parquet') LIMIT 10; +SELECT * FROM 
parquet_scan('{DATA_DIR}/parquet-testing/bug687_nulls.parquet') LIMIT 10; ---- statement ok -COPY (SELECT * FROM parquet_scan('data/parquet-testing/bug687_nulls.parquet')) TO '__TEST_DIR__/bug687_nulls.parquet' (FORMAT 'PARQUET') +COPY (SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/bug687_nulls.parquet')) TO '{TEMP_DIR}/bug687_nulls.parquet' (FORMAT 'PARQUET') query I nosort bug687_nulls -SELECT * FROM parquet_scan('__TEST_DIR__/bug687_nulls.parquet') LIMIT 10; +SELECT * FROM parquet_scan('{TEMP_DIR}/bug687_nulls.parquet') LIMIT 10; ---- # Issue #1637: booleans encoded incorrectly statement ok -COPY (SELECT true as x UNION ALL SELECT true) TO '__TEST_DIR__/bug1637_booleans.parquet' (FORMAT 'PARQUET'); +COPY (SELECT true as x UNION ALL SELECT true) TO '{TEMP_DIR}/bug1637_booleans.parquet' (FORMAT 'PARQUET'); # Prior to the #1637 fix, duckdb wrote a parquet file containing true, false query I -SELECT COUNT(*) FROM parquet_scan('__TEST_DIR__/bug1637_booleans.parquet') WHERE x; +SELECT COUNT(*) FROM parquet_scan('{TEMP_DIR}/bug1637_booleans.parquet') WHERE x; ---- 2 # userdata1.parquet query I nosort userdata1.parquet -SELECT * FROM parquet_scan('data/parquet-testing/userdata1.parquet') ORDER BY 1 LIMIT 10; +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet') ORDER BY 1 LIMIT 10; ---- statement ok -COPY (SELECT * FROM parquet_scan('data/parquet-testing/userdata1.parquet')) TO '__TEST_DIR__/userdata1.parquet' (FORMAT 'PARQUET') +COPY (SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet')) TO '{TEMP_DIR}/userdata1.parquet' (FORMAT 'PARQUET') query I nosort userdata1.parquet -SELECT * FROM parquet_scan('__TEST_DIR__/userdata1.parquet') ORDER BY 1 LIMIT 10; +SELECT * FROM parquet_scan('{TEMP_DIR}/userdata1.parquet') ORDER BY 1 LIMIT 10; ---- # gzip codec statement ok -COPY (SELECT * FROM parquet_scan('data/parquet-testing/userdata1.parquet')) TO '__TEST_DIR__/userdata1-gzip.parquet' (FORMAT 'PARQUET', CODEC 'GZIP') +COPY (SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet')) TO '{TEMP_DIR}/userdata1-gzip.parquet' (FORMAT 'PARQUET', CODEC 'GZIP') query I nosort userdata1.parquet -SELECT * FROM parquet_scan('__TEST_DIR__/userdata1-gzip.parquet') ORDER BY 1 LIMIT 10; +SELECT * FROM parquet_scan('{TEMP_DIR}/userdata1-gzip.parquet') ORDER BY 1 LIMIT 10; ---- # uncompressed codec statement ok -COPY (SELECT * FROM parquet_scan('data/parquet-testing/userdata1.parquet')) TO '__TEST_DIR__/userdata1-uncompressed.parquet' (FORMAT 'PARQUET', CODEC 'UNCOMPRESSED') +COPY (SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet')) TO '{TEMP_DIR}/userdata1-uncompressed.parquet' (FORMAT 'PARQUET', CODEC 'UNCOMPRESSED') query I nosort userdata1.parquet -SELECT * FROM parquet_scan('__TEST_DIR__/userdata1-uncompressed.parquet') ORDER BY 1 LIMIT 10; +SELECT * FROM parquet_scan('{TEMP_DIR}/userdata1-uncompressed.parquet') ORDER BY 1 LIMIT 10; ---- # zstd codec statement ok -COPY (SELECT * FROM parquet_scan('data/parquet-testing/userdata1.parquet')) TO '__TEST_DIR__/userdata1-zstd.parquet' (FORMAT 'PARQUET', CODEC 'ZSTD') +COPY (SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/userdata1.parquet')) TO '{TEMP_DIR}/userdata1-zstd.parquet' (FORMAT 'PARQUET', CODEC 'ZSTD') query I nosort userdata1.parquet -SELECT * FROM parquet_scan('__TEST_DIR__/userdata1-zstd.parquet') ORDER BY 1 LIMIT 10; +SELECT * FROM parquet_scan('{TEMP_DIR}/userdata1-zstd.parquet') ORDER BY 1 LIMIT 10; ---- diff --git 
a/test/sql/copy/parquet/writer/writer_round_trip.test_slow b/test/sql/copy/parquet/writer/writer_round_trip.test_slow index 1d526aefb2ae..6f57f52f3ca8 100644 --- a/test/sql/copy/parquet/writer/writer_round_trip.test_slow +++ b/test/sql/copy/parquet/writer/writer_round_trip.test_slow @@ -4,16 +4,16 @@ require parquet -foreach parquet_file data/parquet-testing/manyrowgroups.parquet data/parquet-testing/map.parquet data/parquet-testing/arrow/int32_decimal.parquet data/parquet-testing/arrow/nonnullable.impala.parquet data/parquet-testing/bug687_nulls.parquet data/parquet-testing/bug1554.parquet data/parquet-testing/apkwan.parquet data/parquet-testing/arrow/nested_lists.snappy.parquet data/parquet-testing/arrow/nulls.snappy.parquet data/parquet-testing/nan-float.parquet data/parquet-testing/manyrowgroups2.parquet data/parquet-testing/struct.parquet data/parquet-testing/arrow/list_columns.parquet data/parquet-testing/timestamp-ms.parquet data/parquet-testing/arrow/alltypes_dictionary.parquet data/parquet-testing/arrow/binary.parquet data/parquet-testing/arrow/nation.dict-malformed.parquet data/parquet-testing/lineitem-top10000.gzip.parquet data/parquet-testing/arrow/nested_maps.snappy.parquet data/parquet-testing/arrow/dict-page-offset-zero.parquet data/parquet-testing/silly-names.parquet data/parquet-testing/zstd.parquet data/parquet-testing/bug1618_struct_strings.parquet data/parquet-testing/arrow/single_nan.parquet data/parquet-testing/arrow/int64_decimal.parquet data/parquet-testing/filter_bug1391.parquet data/parquet-testing/arrow/fixed_length_decimal_legacy.parquet data/parquet-testing/timestamp.parquet data/parquet-testing/arrow/fixed_length_decimal.parquet data/parquet-testing/leftdate3_192_loop_1.parquet data/parquet-testing/blob.parquet data/parquet-testing/bug1588.parquet data/parquet-testing/bug1589.parquet data/parquet-testing/arrow/alltypes_plain.parquet data/parquet-testing/arrow/repeated_no_annotation.parquet data/parquet-testing/data-types.parquet data/parquet-testing/unsigned.parquet data/parquet-testing/pandas-date.parquet data/parquet-testing/date.parquet data/parquet-testing/arrow/nullable.impala.parquet data/parquet-testing/fixed.parquet data/parquet-testing/arrow/alltypes_plain.snappy.parquet data/parquet-testing/decimal/int32_decimal.parquet data/parquet-testing/decimal/pandas_decimal.parquet data/parquet-testing/decimal/decimal_dc.parquet data/parquet-testing/decimal/int64_decimal.parquet data/parquet-testing/decimal/fixed_length_decimal_legacy.parquet data/parquet-testing/decimal/fixed_length_decimal.parquet data/parquet-testing/glob2/t1.parquet data/parquet-testing/cache/cache1.parquet data/parquet-testing/cache/cache2.parquet data/parquet-testing/glob/t2.parquet data/parquet-testing/glob/t1.parquet data/parquet-testing/bug2557.parquet +foreach parquet_file parquet-testing/manyrowgroups.parquet parquet-testing/map.parquet parquet-testing/arrow/int32_decimal.parquet parquet-testing/arrow/nonnullable.impala.parquet parquet-testing/bug687_nulls.parquet parquet-testing/bug1554.parquet parquet-testing/apkwan.parquet parquet-testing/arrow/nested_lists.snappy.parquet parquet-testing/arrow/nulls.snappy.parquet parquet-testing/nan-float.parquet parquet-testing/manyrowgroups2.parquet parquet-testing/struct.parquet parquet-testing/arrow/list_columns.parquet parquet-testing/timestamp-ms.parquet parquet-testing/arrow/alltypes_dictionary.parquet parquet-testing/arrow/binary.parquet parquet-testing/arrow/nation.dict-malformed.parquet parquet-testing/lineitem-top10000.gzip.parquet 
parquet-testing/arrow/nested_maps.snappy.parquet parquet-testing/arrow/dict-page-offset-zero.parquet parquet-testing/silly-names.parquet parquet-testing/zstd.parquet parquet-testing/bug1618_struct_strings.parquet parquet-testing/arrow/single_nan.parquet parquet-testing/arrow/int64_decimal.parquet parquet-testing/filter_bug1391.parquet parquet-testing/arrow/fixed_length_decimal_legacy.parquet parquet-testing/timestamp.parquet parquet-testing/arrow/fixed_length_decimal.parquet parquet-testing/leftdate3_192_loop_1.parquet parquet-testing/blob.parquet parquet-testing/bug1588.parquet parquet-testing/bug1589.parquet parquet-testing/arrow/alltypes_plain.parquet parquet-testing/arrow/repeated_no_annotation.parquet parquet-testing/data-types.parquet parquet-testing/unsigned.parquet parquet-testing/pandas-date.parquet parquet-testing/date.parquet parquet-testing/arrow/nullable.impala.parquet parquet-testing/fixed.parquet parquet-testing/arrow/alltypes_plain.snappy.parquet parquet-testing/decimal/int32_decimal.parquet parquet-testing/decimal/pandas_decimal.parquet parquet-testing/decimal/decimal_dc.parquet parquet-testing/decimal/int64_decimal.parquet parquet-testing/decimal/fixed_length_decimal_legacy.parquet parquet-testing/decimal/fixed_length_decimal.parquet parquet-testing/glob2/t1.parquet parquet-testing/cache/cache1.parquet parquet-testing/cache/cache2.parquet parquet-testing/glob/t2.parquet parquet-testing/glob/t1.parquet parquet-testing/bug2557.parquet statement ok -CREATE TABLE parquet_read AS SELECT * FROM parquet_scan('${parquet_file}'); +CREATE TABLE parquet_read AS SELECT * FROM parquet_scan('{DATA_DIR}/${parquet_file}'); statement ok -COPY parquet_read TO '__TEST_DIR__/test_round_trip.parquet' +COPY parquet_read TO '{TEMP_DIR}/test_round_trip.parquet' statement ok -CREATE TABLE parquet_write AS SELECT * FROM parquet_scan('__TEST_DIR__/test_round_trip.parquet'); +CREATE TABLE parquet_write AS SELECT * FROM parquet_scan('{TEMP_DIR}/test_round_trip.parquet'); # verify that the count is the same query I diff --git a/test/sql/copy/partitioned/hive_partition_case_insensitive_column.test b/test/sql/copy/partitioned/hive_partition_case_insensitive_column.test index 678e4ec78f6a..741248a77090 100644 --- a/test/sql/copy/partitioned/hive_partition_case_insensitive_column.test +++ b/test/sql/copy/partitioned/hive_partition_case_insensitive_column.test @@ -5,7 +5,7 @@ require parquet query II -SELECT * FROM 'data/parquet-testing/hive-partitioning/ci-column-names/**/*.parquet' ORDER BY ALL +SELECT * FROM '{DATA_DIR}/parquet-testing/hive-partitioning/ci-column-names/**/*.parquet' ORDER BY ALL ---- Hannes 2 Mark 1 diff --git a/test/sql/copy/partitioned/hive_partition_duplicate_name.test b/test/sql/copy/partitioned/hive_partition_duplicate_name.test index 2f3f52e8f043..d3459abd1c3f 100644 --- a/test/sql/copy/partitioned/hive_partition_duplicate_name.test +++ b/test/sql/copy/partitioned/hive_partition_duplicate_name.test @@ -7,7 +7,7 @@ require parquet # we just use the first partitioning key by default query III select * -from parquet_scan('data/parquet-testing/hive-partitioning/duplicate_names/**/*.parquet') +from parquet_scan('{DATA_DIR}/parquet-testing/hive-partitioning/duplicate_names/**/*.parquet') ORDER BY ALL ---- 1 value1 1 diff --git a/test/sql/copy/partitioned/hive_partitioned_auto_detect.test b/test/sql/copy/partitioned/hive_partitioned_auto_detect.test index 6e77e9fb5cfb..b4bb0a415705 100644 --- a/test/sql/copy/partitioned/hive_partitioned_auto_detect.test +++ 
b/test/sql/copy/partitioned/hive_partitioned_auto_detect.test @@ -12,96 +12,96 @@ CREATE TABLE t AS SELECT i%2 AS year, i%3 AS month, i%4 AS c, i%5 AS d FROM RANG # without partition columns written # test a csv partition by year statement ok -COPY t TO '__TEST_DIR__/csv_partition_1' (partition_by(year)); +COPY t TO '{TEMP_DIR}/csv_partition_1' (partition_by(year)); query I -select count(*) from glob('__TEST_DIR__/csv_partition_1/**'); +select count(*) from glob('{TEMP_DIR}/csv_partition_1/**'); ---- 2 # with HIVE_PARTITIONING=0, directory names won't be read unless they are written in data query III -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_1/**', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_1/**', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; ---- a b c # with HIVE_PARTITIONING, column name from directory name supersedes "names" parameter query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_1/**', names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_1/**', names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; ---- a b c year query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_1/**', names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_1/**', names=['a','b','c','d']) LIMIT 1; ---- a b c year # test a csv partition by year,month statement ok -COPY t TO '__TEST_DIR__/csv_partition_2' (partition_by(year,month)); +COPY t TO '{TEMP_DIR}/csv_partition_2' (partition_by(year,month)); query I -select count(*) from glob('__TEST_DIR__/csv_partition_2/**'); +select count(*) from glob('{TEMP_DIR}/csv_partition_2/**'); ---- 6 query II -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_2/**', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_2/**', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; ---- a b query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_2/**', names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_2/**', names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; ---- a b month year query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_2/**', names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_2/**', names=['a','b','c','d']) LIMIT 1; ---- a b month year # test a single file query I -select count(*) from glob('__TEST_DIR__/t.csv'); +select count(*) from glob('{TEMP_DIR}/t.csv'); ---- 0 statement ok -COPY t TO '__TEST_DIR__/bad_file.csv'; +COPY t TO '{TEMP_DIR}/bad_file.csv'; query I -select count(*) from glob('__TEST_DIR__/bad_file.csv'); +select count(*) from glob('{TEMP_DIR}/bad_file.csv'); ---- 1 query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/bad_file.csv', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/bad_file.csv', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; ---- a b c d query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/bad_file.csv', names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/bad_file.csv', 
names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; ---- a b c d query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/bad_file.csv', names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/bad_file.csv', names=['a','b','c','d']) LIMIT 1; ---- a b c d # add bad file to list: hive partitioning will be false, because scheme doesn't match query II -select alias(columns(*)) from read_csv_auto(['__TEST_DIR__/csv_partition_2/**', '__TEST_DIR__/bad_file.csv'], HIVE_PARTITIONING=0, names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto(['{TEMP_DIR}/csv_partition_2/**', '{TEMP_DIR}/bad_file.csv'], HIVE_PARTITIONING=0, names=['a','b','c','d']) LIMIT 1; ---- a b statement error -select alias(columns(*)) from read_csv_auto(['__TEST_DIR__/csv_partition_2/**', '__TEST_DIR__/bad_file.csv'], HIVE_PARTITIONING=1, names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto(['{TEMP_DIR}/csv_partition_2/**', '{TEMP_DIR}/bad_file.csv'], HIVE_PARTITIONING=1, names=['a','b','c','d']) LIMIT 1; ---- Binder Error: Hive partition mismatch query II -select alias(columns(*)) from read_csv_auto(['__TEST_DIR__/csv_partition_2/**', '__TEST_DIR__/bad_file.csv'], names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto(['{TEMP_DIR}/csv_partition_2/**', '{TEMP_DIR}/bad_file.csv'], names=['a','b','c','d']) LIMIT 1; ---- a b @@ -110,94 +110,94 @@ require parquet # test a parquet partition by year statement ok -COPY t TO '__TEST_DIR__/parquet_partition_1' (format parquet, partition_by(year)); +COPY t TO '{TEMP_DIR}/parquet_partition_1' (format parquet, partition_by(year)); query I -select count(*) from glob('__TEST_DIR__/parquet_partition_1/**'); +select count(*) from glob('{TEMP_DIR}/parquet_partition_1/**'); ---- 2 query III -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_1/**', HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/parquet_partition_1/**', HIVE_PARTITIONING=0) LIMIT 1; ---- month c d query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_1/**', HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/parquet_partition_1/**', HIVE_PARTITIONING=1) LIMIT 1; ---- month c d year query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_1/**') LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/parquet_partition_1/**') LIMIT 1; ---- month c d year # test a parquet partition by year,month statement ok -COPY t TO '__TEST_DIR__/parquet_partition_2' (format parquet, partition_by(year,month)); +COPY t TO '{TEMP_DIR}/parquet_partition_2' (format parquet, partition_by(year,month)); query I -select count(*) from glob('__TEST_DIR__/parquet_partition_2/**'); +select count(*) from glob('{TEMP_DIR}/parquet_partition_2/**'); ---- 6 query II -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_2/**', HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/parquet_partition_2/**', HIVE_PARTITIONING=0) LIMIT 1; ---- c d query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_2/**', HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/parquet_partition_2/**', HIVE_PARTITIONING=1) LIMIT 1; ---- c d month year query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_2/**') LIMIT 1; +select alias(columns(*)) from 
read_parquet('{TEMP_DIR}/parquet_partition_2/**') LIMIT 1; ---- c d month year # test a single file query I -select count(*) from glob('__TEST_DIR__/t.parquet'); +select count(*) from glob('{TEMP_DIR}/t.parquet'); ---- 0 statement ok -COPY t TO '__TEST_DIR__/t.parquet' (format parquet); +COPY t TO '{TEMP_DIR}/t.parquet' (format parquet); query I -select count(*) from glob('__TEST_DIR__/t.parquet'); +select count(*) from glob('{TEMP_DIR}/t.parquet'); ---- 1 query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/t.parquet', HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/t.parquet', HIVE_PARTITIONING=0) LIMIT 1; ---- year month c d query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/t.parquet', HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/t.parquet', HIVE_PARTITIONING=1) LIMIT 1; ---- year month c d query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/t.parquet') LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/t.parquet') LIMIT 1; ---- year month c d # add bad file to list: hive partitioning will be false, because scheme doesn't match query II -select alias(columns(*)) from read_parquet(['__TEST_DIR__/parquet_partition_2/**', '__TEST_DIR__/t.parquet'], HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_parquet(['{TEMP_DIR}/parquet_partition_2/**', '{TEMP_DIR}/t.parquet'], HIVE_PARTITIONING=0) LIMIT 1; ---- c d statement error -select alias(columns(*)) from read_parquet(['__TEST_DIR__/parquet_partition_2/**', '__TEST_DIR__/t.parquet'], HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_parquet(['{TEMP_DIR}/parquet_partition_2/**', '{TEMP_DIR}/t.parquet'], HIVE_PARTITIONING=1) LIMIT 1; ---- Binder Error: Hive partition mismatch query II -select alias(columns(*)) from read_parquet(['__TEST_DIR__/parquet_partition_2/**', '__TEST_DIR__/t.parquet']) LIMIT 1; +select alias(columns(*)) from read_parquet(['{TEMP_DIR}/parquet_partition_2/**', '{TEMP_DIR}/t.parquet']) LIMIT 1; ---- c d @@ -205,94 +205,94 @@ c d # with partition columns written # test a csv partition by year statement ok -COPY t TO '__TEST_DIR__/csv_partition_1' (partition_by(year), overwrite_or_ignore, write_partition_columns); +COPY t TO '{TEMP_DIR}/csv_partition_1' (partition_by(year), overwrite_or_ignore, write_partition_columns); query I -select count(*) from glob('__TEST_DIR__/csv_partition_1/**'); +select count(*) from glob('{TEMP_DIR}/csv_partition_1/**'); ---- 2 query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_1/**', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_1/**', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; ---- a b c d query IIIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_1/**', names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_1/**', names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; ---- a b c d year query IIIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_1/**', names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_1/**', names=['a','b','c','d']) LIMIT 1; ---- a b c d year # test a csv partition by year,month statement ok -COPY t TO '__TEST_DIR__/csv_partition_2' (partition_by(year,month), overwrite_or_ignore, write_partition_columns); 
+COPY t TO '{TEMP_DIR}/csv_partition_2' (partition_by(year,month), overwrite_or_ignore, write_partition_columns); query I -select count(*) from glob('__TEST_DIR__/csv_partition_2/**'); +select count(*) from glob('{TEMP_DIR}/csv_partition_2/**'); ---- 6 query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_2/**', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_2/**', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; ---- a b c d query IIIIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_2/**', names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_2/**', names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; ---- a b c d month year query IIIIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/csv_partition_2/**', names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/csv_partition_2/**', names=['a','b','c','d']) LIMIT 1; ---- a b c d month year # test a single file query I -select count(*) from glob('__TEST_DIR__/t.csv'); +select count(*) from glob('{TEMP_DIR}/t.csv'); ---- 0 statement ok -COPY t TO '__TEST_DIR__/bad_file.csv'; +COPY t TO '{TEMP_DIR}/bad_file.csv'; query I -select count(*) from glob('__TEST_DIR__/bad_file.csv'); +select count(*) from glob('{TEMP_DIR}/bad_file.csv'); ---- 1 query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/bad_file.csv', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/bad_file.csv', names=['a','b','c','d'], HIVE_PARTITIONING=0) LIMIT 1; ---- a b c d query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/bad_file.csv', names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/bad_file.csv', names=['a','b','c','d'], HIVE_PARTITIONING=1) LIMIT 1; ---- a b c d query IIII -select alias(columns(*)) from read_csv_auto('__TEST_DIR__/bad_file.csv', names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto('{TEMP_DIR}/bad_file.csv', names=['a','b','c','d']) LIMIT 1; ---- a b c d # add bad file to list: hive partitioning will be false, because scheme doesn't match query IIII -select alias(columns(*)) from read_csv_auto(['__TEST_DIR__/csv_partition_2/**', '__TEST_DIR__/bad_file.csv'], HIVE_PARTITIONING=0, names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto(['{TEMP_DIR}/csv_partition_2/**', '{TEMP_DIR}/bad_file.csv'], HIVE_PARTITIONING=0, names=['a','b','c','d']) LIMIT 1; ---- a b c d statement error -select alias(columns(*)) from read_csv_auto(['__TEST_DIR__/csv_partition_2/**', '__TEST_DIR__/bad_file.csv'], HIVE_PARTITIONING=1, names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto(['{TEMP_DIR}/csv_partition_2/**', '{TEMP_DIR}/bad_file.csv'], HIVE_PARTITIONING=1, names=['a','b','c','d']) LIMIT 1; ---- Binder Error: Hive partition mismatch query IIII -select alias(columns(*)) from read_csv_auto(['__TEST_DIR__/csv_partition_2/**', '__TEST_DIR__/bad_file.csv'], names=['a','b','c','d']) LIMIT 1; +select alias(columns(*)) from read_csv_auto(['{TEMP_DIR}/csv_partition_2/**', '{TEMP_DIR}/bad_file.csv'], names=['a','b','c','d']) LIMIT 1; ---- a b c d @@ -303,95 +303,95 @@ require parquet # test a parquet partition by year statement ok -COPY t TO '__TEST_DIR__/parquet_partition_1' (format parquet, 
partition_by(year), overwrite_or_ignore, write_partition_columns); +COPY t TO '{TEMP_DIR}/parquet_partition_1' (format parquet, partition_by(year), overwrite_or_ignore, write_partition_columns); query I -select count(*) from glob('__TEST_DIR__/parquet_partition_1/**'); +select count(*) from glob('{TEMP_DIR}/parquet_partition_1/**'); ---- 2 query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_1/**', HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/parquet_partition_1/**', HIVE_PARTITIONING=0) LIMIT 1; ---- year month c d query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_1/**', HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/parquet_partition_1/**', HIVE_PARTITIONING=1) LIMIT 1; ---- year month c d query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_1/**') LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/parquet_partition_1/**') LIMIT 1; ---- year month c d # test a parquet partition by year,month statement ok -COPY t TO '__TEST_DIR__/parquet_partition_2' (format parquet, partition_by(year,month), overwrite_or_ignore, write_partition_columns); +COPY t TO '{TEMP_DIR}/parquet_partition_2' (format parquet, partition_by(year,month), overwrite_or_ignore, write_partition_columns); query I -select count(*) from glob('__TEST_DIR__/parquet_partition_2/**'); +select count(*) from glob('{TEMP_DIR}/parquet_partition_2/**'); ---- 6 query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_2/**', HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/parquet_partition_2/**', HIVE_PARTITIONING=0) LIMIT 1; ---- year month c d query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_2/**', HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/parquet_partition_2/**', HIVE_PARTITIONING=1) LIMIT 1; ---- year month c d query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/parquet_partition_2/**') LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/parquet_partition_2/**') LIMIT 1; ---- year month c d # test a single file statement ok -COPY t TO '__TEST_DIR__/t.parquet' (format parquet); +COPY t TO '{TEMP_DIR}/t.parquet' (format parquet); query I -select count(*) from glob('__TEST_DIR__/t.parquet'); +select count(*) from glob('{TEMP_DIR}/t.parquet'); ---- 1 query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/t.parquet', HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/t.parquet', HIVE_PARTITIONING=0) LIMIT 1; ---- year month c d query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/t.parquet', HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/t.parquet', HIVE_PARTITIONING=1) LIMIT 1; ---- year month c d query IIII -select alias(columns(*)) from read_parquet('__TEST_DIR__/t.parquet') LIMIT 1; +select alias(columns(*)) from read_parquet('{TEMP_DIR}/t.parquet') LIMIT 1; ---- year month c d # add bad file to list: hive partitioning will be false, because scheme doesn't match query IIII -select alias(columns(*)) from read_parquet(['__TEST_DIR__/parquet_partition_2/**', '__TEST_DIR__/t.parquet'], HIVE_PARTITIONING=0) LIMIT 1; +select alias(columns(*)) from read_parquet(['{TEMP_DIR}/parquet_partition_2/**', '{TEMP_DIR}/t.parquet'], HIVE_PARTITIONING=0) LIMIT 1; ---- year 
month c d statement error -select alias(columns(*)) from read_parquet(['__TEST_DIR__/parquet_partition_2/**', '__TEST_DIR__/t.parquet'], HIVE_PARTITIONING=1) LIMIT 1; +select alias(columns(*)) from read_parquet(['{TEMP_DIR}/parquet_partition_2/**', '{TEMP_DIR}/t.parquet'], HIVE_PARTITIONING=1) LIMIT 1; ---- Binder Error: Hive partition mismatch query IIII -select alias(columns(*)) from read_parquet(['__TEST_DIR__/parquet_partition_2/**', '__TEST_DIR__/t.parquet']) LIMIT 1; +select alias(columns(*)) from read_parquet(['{TEMP_DIR}/parquet_partition_2/**', '{TEMP_DIR}/t.parquet']) LIMIT 1; ---- year month c d query IIII select i,j,k,x -from read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=0, union_by_name=1) +from read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=0, union_by_name=1) order by j,x nulls last; ---- 42 84 NULL 1 @@ -400,7 +400,7 @@ NULL 128 33 NULL query IIII select i,j,k,x -from read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1, union_by_name=1) +from read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', hive_partitioning=1, union_by_name=1) order by j,x nulls last; ---- 42 84 NULL 1 @@ -409,7 +409,7 @@ NULL 128 33 2 query IIII select i,j,k,x -from read_parquet('data/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', union_by_name=1) +from read_parquet('{DATA_DIR}/parquet-testing/hive-partitioning/union_by_name/*/*.parquet', union_by_name=1) order by j,x nulls last; ---- 42 84 NULL 1 diff --git a/test/sql/cte/cte_on_conflict_issue.test b/test/sql/cte/cte_on_conflict_issue.test new file mode 100644 index 000000000000..2c97bd4d6d76 --- /dev/null +++ b/test/sql/cte/cte_on_conflict_issue.test @@ -0,0 +1,28 @@ +# name: test/sql/cte/cte_on_conflict_issue.test +# description: Test CTE with ON CONFLICT rewrite +# group: [cte] + +statement ok +CREATE TABLE t1 ( + t1c1 BIGINT, + t1c2 BIGINT, + PRIMARY KEY (t1c1, t1c2) +); + +statement ok +CREATE TABLE t2 ( + t2c1 BIGINT +); + +statement error +WITH +cte1 AS ( + SELECT 42 AS cte1c1, [84] AS cte1c2 +), +cte2 AS ( + SELECT * + FROM t2 s +) +INSERT OR REPLACE INTO t1 +SELECT * FROM cte2; +---- diff --git a/test/sql/error/correlated_at_clause.test b/test/sql/error/correlated_at_clause.test new file mode 100644 index 000000000000..2f6de52d56b8 --- /dev/null +++ b/test/sql/error/correlated_at_clause.test @@ -0,0 +1,13 @@ +# name: test/sql/error/correlated_at_clause.test +# description: Reference a column from an outer subquery within an at clause +# group: [error] + +statement ok +CREATE TABLE t (i VARCHAR) + +# See https://github.com/duckdb/duckdb/issues/16826 +statement error +FROM t, t AT (VERSION => i) +---- +:Binder Error.*AT clause cannot contain column names.* + diff --git a/test/sql/extensions/checked_load.test b/test/sql/extensions/checked_load.test index 186cf7aa3560..73690df02953 100644 --- a/test/sql/extensions/checked_load.test +++ b/test/sql/extensions/checked_load.test @@ -17,7 +17,7 @@ LOAD 'README.md'; Error: Extension "README.md" could not be loaded statement error -LOAD 'data/csv/no_opt.csv'; +LOAD '{DATA_DIR}/csv/no_opt.csv'; ---- is not a DuckDB extension. 
Valid DuckDB extensions must be at least 512 bytes diff --git a/test/sql/function/autocomplete/suggest_file.test b/test/sql/function/autocomplete/suggest_file.test index 38edfa207c38..98781b2996fc 100644 --- a/test/sql/function/autocomplete/suggest_file.test +++ b/test/sql/function/autocomplete/suggest_file.test @@ -7,12 +7,12 @@ require notwindows require autocomplete # test file auto complete -query II -FROM sql_auto_complete('COPY tbl FROM ''data/parq') LIMIT 1; +query I +SELECT suggestion FROM sql_auto_complete('COPY tbl FROM ''{DATA_DIR}/parq') LIMIT 1; ---- -parquet-testing/ 20 +parquet-testing/ -query II -FROM sql_auto_complete('COPY tbl FROM ''data/csv/all_quote') LIMIT 1; +query I +SELECT suggestion FROM sql_auto_complete('COPY tbl FROM ''{DATA_DIR}/csv/all_quote') LIMIT 1; ---- -all_quotes.csv' 24 +all_quotes.csv' diff --git a/test/sql/function/list/lambdas/incorrect.test b/test/sql/function/list/lambdas/incorrect.test index 2ec762772753..a7549c905d3e 100644 --- a/test/sql/function/list/lambdas/incorrect.test +++ b/test/sql/function/list/lambdas/incorrect.test @@ -32,7 +32,7 @@ SELECT ${func_name}(NULL, NULL); statement error SELECT ${func_name}(NULL, x); ---- -:Binder Error.*Referenced column.*not found in FROM clause!.* +:Binder Error.*Referenced column.*was not found because the FROM clause is missing.* statement error SELECT ${func_name}([1, 2], (SELECT 1) -> x + 1); @@ -47,7 +47,7 @@ SELECT ${func_name}(NULL, i) FROM incorrect_test; statement error SELECT ${func_name}(NULL, x -> y); ---- -:Binder Error.*Referenced column.*not found in FROM clause!.* +:Binder Error.*Referenced column.*was not found because the FROM clause is missing.* statement error SELECT ${func_name}([1]); @@ -107,7 +107,7 @@ SELECT list_reduce([True], x -> x, x -> x); statement error SELECT [split('01:08:22', ':'), x -> CAST (x AS INTEGER)]; ---- -:Binder Error.*failed to bind function, either.*This scalar function does not support lambdas!.*Referenced column.*not found in FROM clause!.* +:Binder Error.*failed to bind function, either.*This scalar function does not support lambdas!.*Referenced column.*was not found because the FROM clause is missing.* statement error select list_apply(i, x -> x * 3 + 2 / zz) from (values (list_value(1, 2, 3))) tbl(i); @@ -154,12 +154,12 @@ SELECT list_filter([1, 2], (x, y, z) -> x >= y AND y >= z); statement error SELECT cos(x -> x + 1); ---- -:Binder Error.*failed to bind function, either.*This scalar function does not support lambdas!.*Referenced column.*not found in FROM clause!.* +:Binder Error.*failed to bind function, either.*This scalar function does not support lambdas!.*Referenced column.*was not found because the FROM clause is missing.* statement error SELECT cos([1], x -> x + 1); ---- -:Binder Error.*failed to bind function, either.*This scalar function does not support lambdas!.*Referenced column.*not found in FROM clause!.* +:Binder Error.*failed to bind function, either.*This scalar function does not support lambdas!.*Referenced column.*was not found because the FROM clause is missing.* # FIXME: support lambdas in CHECK constraints diff --git a/test/sql/function/list/list_grade_up.test_slow b/test/sql/function/list/list_grade_up.test_slow index 993a254f5a3b..586e565e9030 100644 --- a/test/sql/function/list/list_grade_up.test_slow +++ b/test/sql/function/list/list_grade_up.test_slow @@ -300,7 +300,7 @@ select k, v, map(k,v), map(k,v)[(list_grade_up(k,'DESC'))[1]] from (values ([1,2 require parquet statement ok -CREATE TABLE stage AS SELECT * FROM 
'data/parquet-testing/list_sort_segfault.parquet'; +CREATE TABLE stage AS SELECT * FROM '{DATA_DIR}/parquet-testing/list_sort_segfault.parquet'; statement ok CREATE TABLE health (a VARCHAR[]); diff --git a/test/sql/function/list/list_sort.test_slow b/test/sql/function/list/list_sort.test_slow index 10b2cf7b59be..c0208b58276b 100644 --- a/test/sql/function/list/list_sort.test_slow +++ b/test/sql/function/list/list_sort.test_slow @@ -471,7 +471,7 @@ Binder Error: In a DISTINCT aggregate, ORDER BY expressions must appear in the a require parquet statement ok -CREATE TABLE stage AS SELECT * FROM 'data/parquet-testing/list_sort_segfault.parquet'; +CREATE TABLE stage AS SELECT * FROM '{DATA_DIR}/parquet-testing/list_sort_segfault.parquet'; statement ok CREATE TABLE health (a VARCHAR[]); diff --git a/test/sql/index/art/insert_update_delete/test_art_update_with_dict_fsst.test b/test/sql/index/art/insert_update_delete/test_art_update_with_dict_fsst.test new file mode 100644 index 000000000000..9bbddf533321 --- /dev/null +++ b/test/sql/index/art/insert_update_delete/test_art_update_with_dict_fsst.test @@ -0,0 +1,25 @@ +# name: test/sql/index/art/insert_update_delete/test_art_update_with_dict_fsst.test +# description: Test updating a column with an index that is compressed with DICT_FSST. +# group: [insert_update_delete] + +load __TEST_DIR__/foo.db readwrite v1.4.2 + +statement ok +CREATE OR REPLACE TABLE bar (col1 VARCHAR, col2 VARCHAR UNIQUE) + +statement ok +INSERT INTO bar (col1, col2) VALUES (NULL, 'one'); + +statement ok +CHECKPOINT + +statement ok +SET wal_autocheckpoint='1TB' + +statement ok +UPDATE bar AS original SET col1 = 'a' + +query I +SELECT col1 FROM bar WHERE col2 = 'one' +---- +a diff --git a/test/sql/index/art/insert_update_delete/test_art_update_with_dict_fsst_and_replay.test b/test/sql/index/art/insert_update_delete/test_art_update_with_dict_fsst_and_replay.test new file mode 100644 index 000000000000..21a7cba64cc7 --- /dev/null +++ b/test/sql/index/art/insert_update_delete/test_art_update_with_dict_fsst_and_replay.test @@ -0,0 +1,30 @@ +# name: test/sql/index/art/insert_update_delete/test_art_update_with_dict_fsst_and_replay.test +# description: Test updating a column with an index that is compressed with DICT_FSST, then replay the update. 
+# group: [insert_update_delete] + +load __TEST_DIR__/foo.db readwrite v1.4.2 + +statement ok +PRAGMA disable_checkpoint_on_shutdown; + +statement ok +SET wal_autocheckpoint='1TB' + +statement ok +CREATE OR REPLACE TABLE bar (col1 VARCHAR, col2 VARCHAR UNIQUE) + +statement ok +INSERT INTO bar (col1, col2) VALUES (NULL, 'one'); + +statement ok +CHECKPOINT + +statement ok +UPDATE bar AS original SET col1 = 'a' + +restart + +query I +SELECT col1 FROM bar WHERE col2 = 'one' +---- +a diff --git a/test/sql/index/art/issues/test_art_fuzzer_persisted.test b/test/sql/index/art/issues/test_art_fuzzer_persisted.test index 3d4085eba592..1ec3380c0e08 100644 --- a/test/sql/index/art/issues/test_art_fuzzer_persisted.test +++ b/test/sql/index/art/issues/test_art_fuzzer_persisted.test @@ -14,7 +14,7 @@ statement ok CREATE INDEX i1 ON t1 (c1); statement ok -PRAGMA MEMORY_LIMIT='2MB'; +PRAGMA MEMORY_LIMIT='4MB'; statement ok CHECKPOINT; diff --git a/test/sql/index/art/nodes/test_art_nested_leaf_coverage.test b/test/sql/index/art/nodes/test_art_nested_leaf_coverage.test new file mode 100644 index 000000000000..b5473657852e --- /dev/null +++ b/test/sql/index/art/nodes/test_art_nested_leaf_coverage.test @@ -0,0 +1,64 @@ +# name: test/sql/index/art/nodes/test_art_nested_leaf_coverage.test +# description: Test ART nested leaf coverage (hit debug asserts) +# group: [nodes] + +statement ok +CREATE TABLE integers(i integer); + +statement ok +CREATE INDEX i_index ON integers(i); + + +# Node7 Leaf Insertion and Deletion Coverage. +loop i 0 7 + +statement ok +INSERT INTO integers VALUES (2); + +endloop + +statement ok +DELETE FROM integers where rowid = 1; + +query I +SELECT COUNT(*) FROM integers; +---- +6 + +# Node15 Leaf Insertion and Deletion Coverage. +loop i 0 7 + +statement ok +INSERT INTO integers VALUES (2) + +endloop + +statement ok +DELETE FROM integers where rowid = 2 + +query I +SELECT COUNT(*) FROM integers +---- +12 + +# Node256 Leaf Insertion and Deletion Coverage. 
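+# (the two loops above leave twelve duplicates of the key; ten more row IDs should exceed the Node15 leaf capacity and grow this key's nested leaf into a Node256 leaf)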
+loop i 0 10 + +statement ok +INSERT INTO integers VALUES (2) + +endloop + +statement ok +DELETE FROM integers where rowid = 3 + +query I +SELECT COUNT(*) FROM integers +---- +21 + +statement ok +DROP INDEX i_index + +statement ok +DROP TABLE integers \ No newline at end of file diff --git a/test/sql/index/art/scan/test_random_uuid.test b/test/sql/index/art/scan/test_random_uuid.test new file mode 100644 index 000000000000..f7a0410d2b28 --- /dev/null +++ b/test/sql/index/art/scan/test_random_uuid.test @@ -0,0 +1,113 @@ +# name: test/sql/index/art/scan/test_random_uuid.test +# description: Test ART index with many random UUIDs in a transaction +# group: [scan] + +statement ok +create or replace table t as select id: uuid(), v: i from generate_series(1, 700000) s(i); + +# create a unique index on the uuid column +statement ok +create unique index uid on t(id); + +statement ok +set variable u1 = uuid(); + +statement ok +set variable u2 = uuid(); + +statement ok +set variable u3 = uuid(); + +statement ok +set variable u4 = uuid(); + +statement ok +start transaction; + +statement ok +insert into t select * replace (getvariable('u1') as id) from t using sample 1 rows; + +statement ok +select * from t where id = getvariable('u1'); + +statement ok +insert into t select * replace (getvariable('u2') as id) from t using sample 1 rows; + +statement ok +select * from t where id = getvariable('u2'); + +statement ok +insert into t select * replace (getvariable('u3') as id) from t using sample 1 rows; + +statement ok r4 +select * from t where id = getvariable('u4'); + +statement ok +commit; + +statement ok r1 +select * from t where id = getvariable('u1'); + +statement ok r2 +select * from t where id = getvariable('u2'); + +statement ok r3 +select * from t where id = getvariable('u3'); + +statement ok r4 +select * from t where id = getvariable('u4'); + +statement ok +start transaction; + +statement ok +drop index uid; + +statement ok r1 +select * from t where id = getvariable('u1'); + +statement ok r2 +select * from t where id = getvariable('u2'); + +statement ok r3 +select * from t where id = getvariable('u3'); + +statement ok r4 +select * from t where id = getvariable('u4'); + +statement ok +rollback; + +statement ok +start transaction; + +statement ok +drop index uid; + +statement ok r1 +select * from t where id = getvariable('u1'); + +statement ok r2 +select * from t where id = getvariable('u2'); + +statement ok r3 +select * from t where id = getvariable('u3'); + +statement ok r4 +select * from t where id = getvariable('u4'); + +statement ok +commit; + +statement ok r1 +select * from t where id = getvariable('u1'); + +statement ok r2 +select * from t where id = getvariable('u2'); + +statement ok r3 +select * from t where id = getvariable('u3'); + +statement ok r4 +select * from t where id = getvariable('u4'); + diff --git a/test/sql/index/art/storage/test_art_buffered_replays_chunk_edges.test b/test/sql/index/art/storage/test_art_buffered_replays_chunk_edges.test new file mode 100644 index 000000000000..b10ab056be99 --- /dev/null +++ b/test/sql/index/art/storage/test_art_buffered_replays_chunk_edges.test @@ -0,0 +1,80 @@ +# name: test/sql/index/art/storage/test_art_buffered_replays_chunk_edges.test +# description: Interleaved buffered replays that cross chunk boundaries with delete ranges that must remain separate +# group: [storage] + +load __TEST_DIR__/test_interleaved_replays_chunk_edges.db + +statement ok +SET wal_autocheckpoint = '1TB'; + +statement ok +PRAGMA
disable_checkpoint_on_shutdown; + +statement ok +CREATE TABLE tbl(i INTEGER); + +statement ok +CREATE UNIQUE INDEX idx_tbl_i ON tbl(i); + +# Range 1: partial first chunk [0, 250] +statement ok +INSERT INTO tbl SELECT r FROM range(0, 251) t(r); + +statement ok +DELETE FROM tbl WHERE i BETWEEN 0 AND 250; + +# Range 2: crosses the 2048 chunk boundary [251, 2048] +statement ok +INSERT INTO tbl SELECT r FROM range(251, 2049) t(r); + +statement ok +DELETE FROM tbl WHERE i BETWEEN 251 AND 2048; + +# Range 3: remainder of the second chunk [2049, 4095] +statement ok +INSERT INTO tbl SELECT r FROM range(2049, 4096) t(r); + +statement ok +DELETE FROM tbl WHERE i BETWEEN 2049 AND 4095; + +statement ok +INSERT INTO tbl VALUES (5000); + +statement ok +INSERT INTO tbl VALUES (6000); + +statement ok +DELETE FROM tbl WHERE i = 5000; + +restart + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 5000; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = 5000; +---- + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 5000; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = 5000; +---- + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 6000; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = 6000; +---- +6000 + + + diff --git a/test/sql/index/art/storage/test_art_buffered_replays_interleaved.test b/test/sql/index/art/storage/test_art_buffered_replays_interleaved.test new file mode 100644 index 000000000000..5f7745edf73d --- /dev/null +++ b/test/sql/index/art/storage/test_art_buffered_replays_interleaved.test @@ -0,0 +1,49 @@ +# name: test/sql/index/art/storage/test_art_buffered_replays_interleaved.test +# description: Test interleaved buffered index replays with multi-chunk batches +# group: [storage] + +load __TEST_DIR__/test_interleaved_replays.db + +statement ok +SET wal_autocheckpoint = '1TB'; + +statement ok +PRAGMA disable_checkpoint_on_shutdown; + +statement ok +CREATE TABLE tbl (i INTEGER); + +statement ok +CREATE UNIQUE INDEX idx_i ON tbl (i); + +loop i 0 9 + +statement ok +INSERT INTO tbl SELECT r FROM range(${i} * 10000, ${i} * 10000 + 3000) t(r); + +statement ok +DELETE FROM tbl WHERE i >= ${i} * 10000 AND i < ${i} * 10000 + 2500; + +endloop + +restart + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 5000; +---- +analyzed_plan :.*Type: Index Scan.* + +statement error +INSERT INTO tbl VALUES (12501); +---- +Constraint Error: Duplicate key "i: 12501" violates unique constraint. 
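+ +# (12501 comes from the i=1 batch, inserted as part of range(10000, 13000), and sits above that batch's deleted range, so the failed duplicate insert must leave the original row visible)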
+ +query I +SELECT i FROM tbl WHERE i = 12501; +---- +12501 + +query I +SELECT i FROM tbl WHERE i = 1; +---- + diff --git a/test/sql/index/art/storage/test_art_buffered_replays_interval_merging.test b/test/sql/index/art/storage/test_art_buffered_replays_interval_merging.test new file mode 100644 index 000000000000..f4d7347afe63 --- /dev/null +++ b/test/sql/index/art/storage/test_art_buffered_replays_interval_merging.test @@ -0,0 +1,140 @@ +# name: test/sql/index/art/storage/test_art_buffered_replays_interval_merging.test +# description: Test interval merging for buffered index replays +# group: [storage] + +load __TEST_DIR__/test_interval_merging_replays.db + +statement ok +SET wal_autocheckpoint = '1TB'; + +statement ok +PRAGMA disable_checkpoint_on_shutdown; + +statement ok +CREATE TABLE tbl(i INTEGER); + +statement ok +CREATE UNIQUE INDEX idx_tbl_i ON tbl(i); + +loop i 0 4 + +statement ok +INSERT INTO tbl SELECT r FROM range(${i} * 10000, ${i} * 10000 + 2001) t(r); + +statement ok +INSERT INTO tbl SELECT r FROM range(${i} * 10000 + 2001, ${i} * 10000 + 4001) t(r); + +statement ok +INSERT INTO tbl SELECT r FROM range(${i} * 10000 + 4001, ${i} * 10000 + 5001) t(r); + +statement ok +DELETE FROM tbl WHERE i >= ${i} * 10000 AND i < ${i} * 10000 + 2500; + +endloop + +statement ok +INSERT INTO tbl VALUES (60000); + +statement ok +INSERT INTO tbl VALUES (60001); + +restart + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 60000; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = 60000; +---- +60000 + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 60001; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = 60001; +---- +60001 + +loop val 0 4 + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = ${val} * 10000 + 1000; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = ${val} * 10000 + 1000; +---- + +endloop + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 3000; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = 3000; +---- +3000 + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 13000; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = 13000; +---- +13000 + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 23000; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = 23000; +---- +23000 + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 4500; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = 4500; +---- +4500 + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 14500; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = 14500; +---- +14500 + +query II +EXPLAIN ANALYZE SELECT i FROM tbl WHERE i = 24500; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT i FROM tbl WHERE i = 24500; +---- +24500 + +statement error +INSERT INTO tbl VALUES (60000); +---- +Constraint Error: Duplicate key "i: 60000" violates unique constraint. + diff --git a/test/sql/index/art/storage/test_art_buffered_replays_mod2.test b/test/sql/index/art/storage/test_art_buffered_replays_mod2.test new file mode 100644 index 000000000000..0dcc5f00fcf8 --- /dev/null +++ b/test/sql/index/art/storage/test_art_buffered_replays_mod2.test @@ -0,0 +1,93 @@ +# name: test/sql/index/art/storage/test_art_buffered_replays_mod2.test +# description: Test that inserts and deletes work for WAL index operation replays.
+# group: [storage] + +load __TEST_DIR__/test_art_buffered_replays_mod2.db + +statement ok +SET wal_autocheckpoint = '1TB'; + +statement ok +PRAGMA disable_checkpoint_on_shutdown; + +statement ok +CREATE TABLE tbl(i INTEGER); + +statement ok +CREATE UNIQUE INDEX idx_tbl_i ON tbl(i); + +# Batch 1: [0, 500] +statement ok +INSERT INTO tbl SELECT r FROM range(0, 501) t(r); + +statement ok +DELETE FROM tbl WHERE i BETWEEN 0 AND 500 AND i % 2 = 0; + +# Batch 2: [500, 4500] +statement ok +INSERT INTO tbl SELECT r FROM range(500, 4501) t(r); + +statement ok +DELETE FROM tbl WHERE i BETWEEN 500 AND 4500 AND i % 2 = 0; + +restart + +# sample some ranges to test for validity + +loop i 0 200 + +query II +EXPLAIN ANALYZE SELECT COUNT(*) FROM tbl WHERE i = (${i} * 2); +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT EXISTS (SELECT 1 FROM tbl WHERE i = (${i} * 2)); +---- +0 + +query I +SELECT EXISTS (SELECT 1 FROM tbl WHERE i = ((${i} * 2)+1)); +---- +1 + +endloop + +loop i 1789 2203 + +query II +EXPLAIN ANALYZE SELECT COUNT(*) FROM tbl WHERE i = (${i} * 2); +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT EXISTS (SELECT 1 FROM tbl WHERE i = (${i} * 2)); +---- +0 + +query I +SELECT EXISTS (SELECT 1 FROM tbl WHERE i = ((${i} * 2)+1)); +---- +1 + +endloop + +loop i 2200 2250 + +query II +EXPLAIN ANALYZE SELECT COUNT(*) FROM tbl WHERE i = (${i} * 2); +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT EXISTS (SELECT 1 FROM tbl WHERE i = (${i} * 2)); +---- +0 + +query I +SELECT EXISTS (SELECT 1 FROM tbl WHERE i = ((${i} * 2)+1)); +---- +1 + +endloop + diff --git a/test/sql/index/art/storage/test_art_buffered_replays_multi_col.test b/test/sql/index/art/storage/test_art_buffered_replays_multi_col.test new file mode 100644 index 000000000000..8e32990f3094 --- /dev/null +++ b/test/sql/index/art/storage/test_art_buffered_replays_multi_col.test @@ -0,0 +1,82 @@ +# name: test/sql/index/art/storage/test_art_buffered_replays_multi_col.test +# description: Test buffered replays with multiple columns, generated column, and unique index on middle column +# group: [storage] + +load __TEST_DIR__/test_art_buffered_replays_multi_col.db + +statement ok +SET wal_autocheckpoint = '1TB'; + +statement ok +PRAGMA disable_checkpoint_on_shutdown; + +statement ok +CREATE TABLE tbl( + col1 INTEGER, + col2 INTEGER, + idx_col INTEGER, + gen_col INTEGER GENERATED ALWAYS AS (col1 + col2) VIRTUAL, + col5 VARCHAR +); + +statement ok +CREATE UNIQUE INDEX idx_tbl_idx_col ON tbl(idx_col); + +statement ok +INSERT INTO tbl (col1, col2, idx_col, col5) SELECT r, r * 2, r, 'val' || r::VARCHAR FROM range(0, 1001) t(r); + +statement ok +DELETE FROM tbl WHERE idx_col % 2 = 0; + +restart + +loop i 0 50 + +query II +EXPLAIN ANALYZE SELECT idx_col FROM tbl WHERE idx_col = ${i} * 2; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT EXISTS (SELECT 1 FROM tbl WHERE idx_col = ${i} * 2); +---- +0 + +endloop + +loop i 0 50 + +query II +EXPLAIN ANALYZE SELECT idx_col FROM tbl WHERE idx_col = ${i} * 2 + 1; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT EXISTS (SELECT 1 FROM tbl WHERE idx_col = ${i} * 2 + 1); +---- +1 + +statement error +INSERT INTO tbl (col1, col2, idx_col, col5) VALUES (${i} * 2 + 1, ${i} * 4 + 2, ${i} * 2 + 1, 'duplicate' || (${i} * 2 + 1)::VARCHAR); +---- +:.*Constraint Error: Duplicate key.*violates unique constraint.* + +endloop + +query II +EXPLAIN ANALYZE SELECT idx_col FROM tbl WHERE idx_col = 0; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT idx_col FROM tbl 
WHERE idx_col = 0; +---- + +query II +EXPLAIN ANALYZE SELECT idx_col FROM tbl WHERE idx_col = 10; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT idx_col FROM tbl WHERE idx_col = 10; +---- \ No newline at end of file diff --git a/test/sql/index/art/storage/test_art_wal_checkpoint_minimal.test b/test/sql/index/art/storage/test_art_wal_checkpoint_minimal.test new file mode 100644 index 000000000000..8fa79432a4d7 --- /dev/null +++ b/test/sql/index/art/storage/test_art_wal_checkpoint_minimal.test @@ -0,0 +1,52 @@ +# name: test/sql/index/art/storage/test_art_wal_checkpoint_minimal.test +# description: buffered index binding on checkpoint (when context is available). +# group: [storage] + +load __TEST_DIR__/test_art_wal_checkpoint_buffer_minimal.db + +statement ok +SET index_scan_max_count = 1; + +statement ok +PRAGMA wal_autocheckpoint='1TB'; + +statement ok +PRAGMA disable_checkpoint_on_shutdown; + +statement ok +CREATE TABLE minimal_tbl(i INTEGER); + +statement ok +CREATE UNIQUE INDEX idx_minimal ON minimal_tbl(i); + +statement ok +INSERT INTO minimal_tbl VALUES (42); + +statement ok +INSERT INTO minimal_tbl VALUES (43); + +statement ok +INSERT INTO minimal_tbl VALUES (44); + +statement ok +DELETE FROM minimal_tbl where i = 42 + +restart + +statement ok +CHECKPOINT; + +restart + +statement ok +INSERT INTO minimal_tbl VALUES (42); + +statement error +INSERT INTO minimal_tbl VALUES (43); +---- +violates unique constraint + +statement error +INSERT INTO minimal_tbl VALUES (44); +---- +violates unique constraint \ No newline at end of file diff --git a/test/sql/insert/insert_by_name.test b/test/sql/insert/insert_by_name.test index a879a649afd6..f1f341f14e0c 100644 --- a/test/sql/insert/insert_by_name.test +++ b/test/sql/insert/insert_by_name.test @@ -95,3 +95,32 @@ explicit column list statement ok INSERT INTO integers BY POSITION VALUES (42, 84); + +statement ok +CREATE TABLE tbl2 (a INTEGER, b INTEGER PRIMARY KEY); + +statement ok +INSERT INTO tbl2 BY NAME (SELECT 22 AS b); + +query II +FROM tbl2 +---- +NULL 22 + +# INSERT OR REPLACE BY NAME with partial columns +statement ok +INSERT OR REPLACE INTO tbl2 BY NAME (SELECT 22 AS b); + +query II +FROM tbl2 +---- +NULL 22 + +# INSERT OR REPLACE BY NAME with all columns +statement ok +INSERT OR REPLACE INTO tbl2 BY NAME (SELECT 22 AS b, 1 as a); + +query II +FROM tbl2 +---- +1 22 diff --git a/test/sql/join/test_huge_nested_payloads.test_slow b/test/sql/join/test_huge_nested_payloads.test_slow index accb23a4a241..8877bec35644 100644 --- a/test/sql/join/test_huge_nested_payloads.test_slow +++ b/test/sql/join/test_huge_nested_payloads.test_slow @@ -5,7 +5,7 @@ require parquet statement ok -IMPORT DATABASE 'data/parquet-testing/malloy-smaller'; +IMPORT DATABASE '{DATA_DIR}/parquet-testing/malloy-smaller'; # minimal reproducible examples for internal issue 1546 query I diff --git a/test/sql/json/issues/internal_issue2732.test b/test/sql/json/issues/internal_issue2732.test index 3c7b389fc06d..84fba25dff95 100644 --- a/test/sql/json/issues/internal_issue2732.test +++ b/test/sql/json/issues/internal_issue2732.test @@ -8,4 +8,4 @@ statement ok PRAGMA enable_verification statement ok -select * from read_json('data/json/internal_2732.json', map_inference_threshold=0); +select * from read_json('{DATA_DIR}/json/internal_2732.json', map_inference_threshold=0); diff --git a/test/sql/json/issues/internal_issue3197.test b/test/sql/json/issues/internal_issue3197.test index 28cea440165e..cf6004d53697 100644 --- 
a/test/sql/json/issues/internal_issue3197.test +++ b/test/sql/json/issues/internal_issue3197.test @@ -5,6 +5,6 @@ require json statement error -from 'data/json/internal_3197.json' +from '{DATA_DIR}/json/internal_3197.json' ---- Invalid Input Error diff --git a/test/sql/json/issues/internal_issue3813.test b/test/sql/json/issues/internal_issue3813.test index 894ef5a23f0c..6a2e8a0024b7 100644 --- a/test/sql/json/issues/internal_issue3813.test +++ b/test/sql/json/issues/internal_issue3813.test @@ -5,4 +5,4 @@ require json statement ok -SELECT * FROM read_json('data/json/internal_3813.json', map_inference_threshold=10); +SELECT * FROM read_json('{DATA_DIR}/json/internal_3813.json', map_inference_threshold=10); diff --git a/test/sql/json/issues/internal_issue4014.test b/test/sql/json/issues/internal_issue4014.test index 5e14739dc015..9ce2b04775b0 100644 --- a/test/sql/json/issues/internal_issue4014.test +++ b/test/sql/json/issues/internal_issue4014.test @@ -5,4 +5,4 @@ require json statement ok -FROM read_json('data/json/internal_4014.json', map_inference_threshold=0); +FROM read_json('{DATA_DIR}/json/internal_4014.json', map_inference_threshold=0); diff --git a/test/sql/json/issues/internal_issue4403.test b/test/sql/json/issues/internal_issue4403.test index 8507d2ff27d7..a26f319f4c87 100644 --- a/test/sql/json/issues/internal_issue4403.test +++ b/test/sql/json/issues/internal_issue4403.test @@ -8,6 +8,6 @@ statement ok pragma enable_verification statement error -SELECT * FROM read_json('data/json/example_n.ndjson', columns={id: NULL::VARCHAR, name: NULL::VARCHAR}) +SELECT * FROM read_json('{DATA_DIR}/json/example_n.ndjson', columns={id: NULL::VARCHAR, name: NULL::VARCHAR}) ---- Binder Error diff --git a/test/sql/json/issues/internal_issue4794.test b/test/sql/json/issues/internal_issue4794.test index 0e83d4bbd4e8..ea650c11bdeb 100644 --- a/test/sql/json/issues/internal_issue4794.test +++ b/test/sql/json/issues/internal_issue4794.test @@ -8,6 +8,6 @@ statement ok pragma enable_verification statement error -FROM read_json('data/json/format_string_key.json'); +FROM read_json('{DATA_DIR}/json/format_string_key.json'); ---- Invalid Input Error diff --git a/test/sql/json/issues/issue10751and11152.test b/test/sql/json/issues/issue10751and11152.test index 1273e39962b7..5b090dd004aa 100644 --- a/test/sql/json/issues/issue10751and11152.test +++ b/test/sql/json/issues/issue10751and11152.test @@ -6,21 +6,21 @@ require json # issue 10751 statement error -create or replace table json_test as select * from read_json_auto('data/json/10751.json', format = 'newline_delimited'); +create or replace table json_test as select * from read_json_auto('{DATA_DIR}/json/10751.json', format = 'newline_delimited'); ---- Not implemented Error: Duplicate name statement ok -create table json_test as select * from read_json_auto('data/json/10751.json', format = 'newline_delimited', ignore_errors=true); +create table json_test as select * from read_json_auto('{DATA_DIR}/json/10751.json', format = 'newline_delimited', ignore_errors=true); statement ok select * from json_test; # issue 11152 statement error -FROM read_json_auto('data/json/11152.json'); +FROM read_json_auto('{DATA_DIR}/json/11152.json'); ---- Invalid Input Error: Malformed JSON statement ok -FROM read_json_auto('data/json/11152.json', ignore_errors=true); +FROM read_json_auto('{DATA_DIR}/json/11152.json', ignore_errors=true); diff --git a/test/sql/json/issues/issue10784.test b/test/sql/json/issues/issue10784.test index 1cb0373b886f..71375d8da279 100644 --- 
a/test/sql/json/issues/issue10784.test +++ b/test/sql/json/issues/issue10784.test @@ -6,20 +6,20 @@ require json # original query from the issue should just return an error because it's not an array of objects statement error -SELECT * FROM read_json_auto('data/json/arr.json', columns={'v':'VARCHAR','k':'VARCHAR'}); +SELECT * FROM read_json_auto('{DATA_DIR}/json/arr.json', columns={'v':'VARCHAR','k':'VARCHAR'}); ---- Invalid Input Error # if we ignore errors we get NULLs because the array entries aren't objects query II -SELECT * FROM read_json_auto('data/json/arr.json', columns={'v':'VARCHAR','k':'VARCHAR'}, ignore_errors=true); +SELECT * FROM read_json_auto('{DATA_DIR}/json/arr.json', columns={'v':'VARCHAR','k':'VARCHAR'}, ignore_errors=true); ---- NULL NULL NULL NULL # if we read it as if it's one column we just get the array values as varchar query I -SELECT * FROM read_json_auto('data/json/arr.json', columns={'v':'VARCHAR'}); +SELECT * FROM read_json_auto('{DATA_DIR}/json/arr.json', columns={'v':'VARCHAR'}); ---- 4 hello diff --git a/test/sql/json/issues/issue12188.test b/test/sql/json/issues/issue12188.test index e1c06a2a036e..90b51a202cc5 100644 --- a/test/sql/json/issues/issue12188.test +++ b/test/sql/json/issues/issue12188.test @@ -5,16 +5,16 @@ require parquet query II -SELECT typeof(field1), typeof(field2) FROM 'data/parquet-testing/parquet_with_json.parquet' LIMIT 1 +SELECT typeof(field1), typeof(field2) FROM '{DATA_DIR}/parquet-testing/parquet_with_json.parquet' LIMIT 1 ---- JSON JSON require json statement ok -COPY (SELECT * FROM read_ndjson('data/json/12188.ndjson', maximum_depth=1)) TO '__TEST_DIR__/my.parquet'; +COPY (SELECT * FROM read_ndjson('{DATA_DIR}/json/12188.ndjson', maximum_depth=1)) TO '{TEMP_DIR}/my.parquet'; query II -SELECT typeof(field1), typeof(field2) FROM '__TEST_DIR__/my.parquet' LIMIT 1 +SELECT typeof(field1), typeof(field2) FROM '{TEMP_DIR}/my.parquet' LIMIT 1 ---- JSON JSON diff --git a/test/sql/json/issues/issue13725.test b/test/sql/json/issues/issue13725.test index 62664f600995..db136ba31657 100644 --- a/test/sql/json/issues/issue13725.test +++ b/test/sql/json/issues/issue13725.test @@ -9,28 +9,28 @@ require notwindows query III select * -from read_json_objects('data/json/13725/month=*/*.json', hive_partitioning = true, format = auto, hive_types = {'month': int}, filename = true) +from read_json_objects('{DATA_DIR}/json/13725/month=*/*.json', hive_partitioning = true, format = auto, hive_types = {'month': int}, filename = true) where month = 7; ---- -{"hello": "there"} data/json/13725/month=07/mytest.json 7 +{"hello": "there"} {DATA_DIR}/json/13725/month=07/mytest.json 7 query I select count(*) -from read_json_objects('data/json/13725/month=*/*.json', hive_partitioning = true, format = auto, hive_types = {'month': int}, filename = true) +from read_json_objects('{DATA_DIR}/json/13725/month=*/*.json', hive_partitioning = true, format = auto, hive_types = {'month': int}, filename = true) where month = 7; ---- 1 query III select * -from read_json('data/json/13725/month=*/*.json', hive_partitioning = true, format = auto, hive_types = {'month': int}, filename = true) +from read_json('{DATA_DIR}/json/13725/month=*/*.json', hive_partitioning = true, format = auto, hive_types = {'month': int}, filename = true) where month = 7; ---- -there data/json/13725/month=07/mytest.json 7 +there {DATA_DIR}/json/13725/month=07/mytest.json 7 query I select count(*) -from read_json('data/json/13725/month=*/*.json', hive_partitioning = true, format = auto, hive_types = 
{'month': int}, filename = true) +from read_json('{DATA_DIR}/json/13725/month=*/*.json', hive_partitioning = true, format = auto, hive_types = {'month': int}, filename = true) where month = 7; ---- 1 diff --git a/test/sql/json/issues/issue14167.test b/test/sql/json/issues/issue14167.test index 52083a2965bb..06973bf3322d 100644 --- a/test/sql/json/issues/issue14167.test +++ b/test/sql/json/issues/issue14167.test @@ -6,6 +6,6 @@ require json # the auto-detected type is a MAP, but we can still extract using the dot syntax because we rewrite to map_extract query I -select columns.v4_c6 from read_ndjson_auto('data/json/14167.json'); +select columns.v4_c6 from read_ndjson_auto('{DATA_DIR}/json/14167.json'); ---- {'statistics': {'nonNullCount': 0}} diff --git a/test/sql/json/issues/issue14259.test b/test/sql/json/issues/issue14259.test index 2a0ae193d8c7..2723b049ab07 100644 --- a/test/sql/json/issues/issue14259.test +++ b/test/sql/json/issues/issue14259.test @@ -5,4 +5,4 @@ require json statement ok -from 'data/json/issue14259.json' +from '{DATA_DIR}/json/issue14259.json' diff --git a/test/sql/json/issues/issue15601.test b/test/sql/json/issues/issue15601.test index 106db3bdd82b..bddeb4a58e92 100644 --- a/test/sql/json/issues/issue15601.test +++ b/test/sql/json/issues/issue15601.test @@ -9,8 +9,8 @@ PRAGMA enable_verification # original from the issue statement ok -FROM 'data/json/15601/fragment*.json' +FROM '{DATA_DIR}/json/15601/fragment*.json' # created an even worse example statement ok -FROM 'data/json/15601/bunch_of_key_collisions.json' +FROM '{DATA_DIR}/json/15601/bunch_of_key_collisions.json' diff --git a/test/sql/json/issues/issue16568.test b/test/sql/json/issues/issue16568.test index a884d637c6a9..48649d2e89d7 100644 --- a/test/sql/json/issues/issue16568.test +++ b/test/sql/json/issues/issue16568.test @@ -8,6 +8,6 @@ statement ok pragma enable_verification query I -select count(*) from 'data/json/sample_utf8_bom.json' +select count(*) from '{DATA_DIR}/json/sample_utf8_bom.json' ---- 1 diff --git a/test/sql/json/issues/issue18301.test b/test/sql/json/issues/issue18301.test index 634742ddeeb5..88d8143e6d2e 100644 --- a/test/sql/json/issues/issue18301.test +++ b/test/sql/json/issues/issue18301.test @@ -12,7 +12,7 @@ SET threads = 2; statement ok CREATE OR REPLACE TABLE cricket_staging AS -SELECT * FROM read_json('data/json/18301/*.json', filename=true) +SELECT * FROM read_json('{DATA_DIR}/json/18301/*.json', filename=true) WHERE 1=0; statement ok @@ -20,7 +20,7 @@ TRUNCATE cricket_staging; statement ok INSERT INTO cricket_staging -SELECT * FROM read_json('data/json/18301/*.json', +SELECT * FROM read_json('{DATA_DIR}/json/18301/*.json', union_by_name=true, filename=true ); diff --git a/test/sql/json/issues/issue19366.test b/test/sql/json/issues/issue19366.test new file mode 100644 index 000000000000..b5c16041ae6b --- /dev/null +++ b/test/sql/json/issues/issue19366.test @@ -0,0 +1,19 @@ +# name: test/sql/json/issues/issue19366.test +# description: Test issue 19366 - 1.4.1: malformed JSON when running through view +# group: [issues] + +require json + +require parquet + +statement ok +copy ( + select + 'TEST' AS "key", + {Attributes: '{}'::MAP(VARCHAR, JSON), Nodes: {hardware: NULL::JSON, networking: NULL::JSON, software: NULL::JSON}, "Table": '{}'::MAP(VARCHAR, JSON)} AS "value" +) to '__TEST_DIR__/19366.parquet' (PARQUET_VERSION V2) + +query II +from '__TEST_DIR__/19366.parquet'; +---- +TEST {'Attributes': {}, 'Nodes': {'hardware': NULL, 'networking': NULL, 'software': NULL}, 'Table': {}} 
diff --git a/test/sql/json/issues/issue6722.test b/test/sql/json/issues/issue6722.test index b00843052533..e39efdd242ac 100644 --- a/test/sql/json/issues/issue6722.test +++ b/test/sql/json/issues/issue6722.test @@ -9,13 +9,13 @@ PRAGMA enable_verification # this file has 4 columns, named "id", "Id", "iD", and "ID" query IIII -FROM 'data/json/duplicate_column_names.json' +FROM '{DATA_DIR}/json/duplicate_column_names.json' ---- 42 43 44 45 # due to case-insensitivity these column names would cause an error, but we add a number to de-duplicate them query IIIIII -DESCRIBE FROM 'data/json/duplicate_column_names.json' +DESCRIBE FROM '{DATA_DIR}/json/duplicate_column_names.json' ---- id BIGINT YES NULL NULL NULL Id_1 BIGINT YES NULL NULL NULL diff --git a/test/sql/json/issues/issue8695.test b/test/sql/json/issues/issue8695.test index 6a6c67f3015b..6acccd393669 100644 --- a/test/sql/json/issues/issue8695.test +++ b/test/sql/json/issues/issue8695.test @@ -6,7 +6,7 @@ require json # these two succeeded statement ok -SELECT MAX(JSON_ARRAY_LENGTH(filter_keystage))::int - 1 FROM read_json_auto('data/json/filter_keystage.ndjson'); +SELECT MAX(JSON_ARRAY_LENGTH(filter_keystage))::int - 1 FROM read_json_auto('{DATA_DIR}/json/filter_keystage.ndjson'); statement ok WITH RECURSIVE nums AS ( @@ -23,6 +23,6 @@ WITH RECURSIVE nums AS ( SELECT 0 AS n UNION ALL SELECT n + 1 FROM nums - WHERE n < (SELECT MAX(JSON_ARRAY_LENGTH(filter_keystage))::int - 1 FROM read_json_auto('data/json/filter_keystage.ndjson')) + WHERE n < (SELECT MAX(JSON_ARRAY_LENGTH(filter_keystage))::int - 1 FROM read_json_auto('{DATA_DIR}/json/filter_keystage.ndjson')) ) SELECT * FROM nums; diff --git a/test/sql/json/issues/read_json_memory_usage.test b/test/sql/json/issues/read_json_memory_usage.test index 96c7c6a42257..96b621867482 100644 --- a/test/sql/json/issues/read_json_memory_usage.test +++ b/test/sql/json/issues/read_json_memory_usage.test @@ -11,7 +11,7 @@ statement ok SET memory_limit='200MiB'; query I -SELECT * FROM read_json_objects('data/json/example_rn.ndjson', format='nd'); +SELECT * FROM read_json_objects('{DATA_DIR}/json/example_rn.ndjson', format='nd'); ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -23,6 +23,6 @@ statement ok SET memory_limit='50MiB'; statement error -SELECT * FROM read_json_objects('data/json/example_rn.ndjson', format='nd'); +SELECT * FROM read_json_objects('{DATA_DIR}/json/example_rn.ndjson', format='nd'); ---- Out of Memory Error diff --git a/test/sql/json/table/json_empty_array.test b/test/sql/json/table/json_empty_array.test index d0231dc57671..5afa1a7a4c42 100644 --- a/test/sql/json/table/json_empty_array.test +++ b/test/sql/json/table/json_empty_array.test @@ -9,40 +9,40 @@ pragma enable_verification # empty file query I -select * from 'data/json/empty.ndjson' +select * from '{DATA_DIR}/json/empty.ndjson' ---- query I -select * from 'data/json/whitespace_only.json' +select * from '{DATA_DIR}/json/whitespace_only.json' ---- # empty array query I -SELECT * FROM read_json_auto('data/json/empty_array.json') +SELECT * FROM read_json_auto('{DATA_DIR}/json/empty_array.json') ---- query I -SELECT * FROM read_json_auto('data/json/empty_no_newline.json') +SELECT * FROM read_json_auto('{DATA_DIR}/json/empty_no_newline.json') ---- # malformed files statement error -SELECT * FROM read_json_auto('data/json/malformed/empty_array_malformed.json') +SELECT * FROM read_json_auto('{DATA_DIR}/json/malformed/empty_array_malformed.json') ---- Missing closing brace statement error
-SELECT * FROM read_json_auto('data/json/malformed/empty_array_trailing.json', format='array') +SELECT * FROM read_json_auto('{DATA_DIR}/json/malformed/empty_array_trailing.json', format='array') ---- Empty array with trailing data when parsing JSON array statement error -SELECT * FROM read_json_auto('data/json/malformed/array_comma_malformed.json', format='array') +SELECT * FROM read_json_auto('{DATA_DIR}/json/malformed/array_comma_malformed.json', format='array') ---- Malformed JSON query I -SELECT * FROM read_json_auto('data/json/array_of_empty_arrays.json', format='array') +SELECT * FROM read_json_auto('{DATA_DIR}/json/array_of_empty_arrays.json', format='array') ---- [] [] diff --git a/test/sql/json/table/json_multi_file_reader.test b/test/sql/json/table/json_multi_file_reader.test index 7dd26544b2d6..66d7cbee7f4b 100644 --- a/test/sql/json/table/json_multi_file_reader.test +++ b/test/sql/json/table/json_multi_file_reader.test @@ -10,50 +10,50 @@ create table test as SELECT i as i, to_json([i%4]) as j FROM range(0,20) as tbl( # FIXME: we can't do partitioned JSON writes yet because the column we partition by is packed into a to_json # because we just push an expression and then use the csv writer, this uses the csv writer for now statement ok -COPY test TO '__TEST_DIR__/json_part' (FORMAT csv, quote '', PARTITION_BY (j), HEADER 0); +COPY test TO '{TEMP_DIR}/json_part' (FORMAT csv, quote '', PARTITION_BY (j), HEADER 0); # some tests for read_json first query III -select * exclude (filename), replace(filename, '\', '/') as filename from read_json_auto('data/json/example_*.ndjson', filename=true) order by all +select * exclude (filename), parse_filename(filename) as filename from read_json_auto('{DATA_DIR}/json/example_*.ndjson', filename=true) order by all ---- -1 O Brother, Where Art Thou? data/json/example_n.ndjson -1 O Brother, Where Art Thou? data/json/example_r.ndjson -1 O Brother, Where Art Thou? data/json/example_rn.ndjson -2 Home for the Holidays data/json/example_n.ndjson -2 Home for the Holidays data/json/example_r.ndjson -2 Home for the Holidays data/json/example_rn.ndjson -3 The Firm data/json/example_n.ndjson -3 The Firm data/json/example_r.ndjson -3 The Firm data/json/example_rn.ndjson -4 Broadcast News data/json/example_n.ndjson -4 Broadcast News data/json/example_r.ndjson -4 Broadcast News data/json/example_rn.ndjson -5 Raising Arizona data/json/example_n.ndjson -5 Raising Arizona data/json/example_r.ndjson -5 Raising Arizona data/json/example_rn.ndjson +1 O Brother, Where Art Thou? example_n.ndjson +1 O Brother, Where Art Thou? example_r.ndjson +1 O Brother, Where Art Thou? example_rn.ndjson +2 Home for the Holidays example_n.ndjson +2 Home for the Holidays example_r.ndjson +2 Home for the Holidays example_rn.ndjson +3 The Firm example_n.ndjson +3 The Firm example_r.ndjson +3 The Firm example_rn.ndjson +4 Broadcast News example_n.ndjson +4 Broadcast News example_r.ndjson +4 Broadcast News example_rn.ndjson +5 Raising Arizona example_n.ndjson +5 Raising Arizona example_r.ndjson +5 Raising Arizona example_rn.ndjson # virtual column query III -select *, replace(filename, '\', '/') from read_json_auto('data/json/example_*.ndjson') order by all +select *, parse_filename(filename) from read_json_auto('{DATA_DIR}/json/example_*.ndjson') order by all ---- -1 O Brother, Where Art Thou? data/json/example_n.ndjson -1 O Brother, Where Art Thou? data/json/example_r.ndjson -1 O Brother, Where Art Thou? 
data/json/example_rn.ndjson -2 Home for the Holidays data/json/example_n.ndjson -2 Home for the Holidays data/json/example_r.ndjson -2 Home for the Holidays data/json/example_rn.ndjson -3 The Firm data/json/example_n.ndjson -3 The Firm data/json/example_r.ndjson -3 The Firm data/json/example_rn.ndjson -4 Broadcast News data/json/example_n.ndjson -4 Broadcast News data/json/example_r.ndjson -4 Broadcast News data/json/example_rn.ndjson -5 Raising Arizona data/json/example_n.ndjson -5 Raising Arizona data/json/example_r.ndjson -5 Raising Arizona data/json/example_rn.ndjson +1 O Brother, Where Art Thou? example_n.ndjson +1 O Brother, Where Art Thou? example_r.ndjson +1 O Brother, Where Art Thou? example_rn.ndjson +2 Home for the Holidays example_n.ndjson +2 Home for the Holidays example_r.ndjson +2 Home for the Holidays example_rn.ndjson +3 The Firm example_n.ndjson +3 The Firm example_r.ndjson +3 The Firm example_rn.ndjson +4 Broadcast News example_n.ndjson +4 Broadcast News example_r.ndjson +4 Broadcast News example_rn.ndjson +5 Raising Arizona example_n.ndjson +5 Raising Arizona example_r.ndjson +5 Raising Arizona example_rn.ndjson query III -select * from read_json_auto(['data/json/example_n.ndjson', 'data/json/top_level_array.json'], union_by_name=true) order by all +select * from read_json_auto(['{DATA_DIR}/json/example_n.ndjson', '{DATA_DIR}/json/top_level_array.json'], union_by_name=true) order by all ---- 1 O Brother, Where Art Thou? NULL 2 Home for the Holidays NULL @@ -65,7 +65,7 @@ NULL NULL cancelled # despite not being able to do partitioned writes, we can do partitioned json reads already! query II -SELECT j, count(*) FROM read_json_auto('__TEST_DIR__/json_part/j=*/*.csv', HIVE_PARTITIONING=1) group by j order by j; +SELECT j, count(*) FROM read_json_auto('{TEMP_DIR}/json_part/j=*/*.csv', HIVE_PARTITIONING=1) group by j order by j; ---- [0] 5 [1] 5 @@ -74,26 +74,26 @@ SELECT j, count(*) FROM read_json_auto('__TEST_DIR__/json_part/j=*/*.csv', HIVE_ # also test read_json_objects query II -select * exclude (filename), replace(filename, '\', '/') as filename from read_json_objects_auto('data/json/example_*.ndjson', filename=true) order by all +select * exclude (filename), parse_filename(filename) as filename from read_json_objects_auto('{DATA_DIR}/json/example_*.ndjson', filename=true) order by all ---- -{"id":1,"name":"O Brother, Where Art Thou?"} data/json/example_n.ndjson -{"id":1,"name":"O Brother, Where Art Thou?"} data/json/example_r.ndjson -{"id":1,"name":"O Brother, Where Art Thou?"} data/json/example_rn.ndjson -{"id":2,"name":"Home for the Holidays"} data/json/example_n.ndjson -{"id":2,"name":"Home for the Holidays"} data/json/example_r.ndjson -{"id":2,"name":"Home for the Holidays"} data/json/example_rn.ndjson -{"id":3,"name":"The Firm"} data/json/example_n.ndjson -{"id":3,"name":"The Firm"} data/json/example_r.ndjson -{"id":3,"name":"The Firm"} data/json/example_rn.ndjson -{"id":4,"name":"Broadcast News"} data/json/example_n.ndjson -{"id":4,"name":"Broadcast News"} data/json/example_r.ndjson -{"id":4,"name":"Broadcast News"} data/json/example_rn.ndjson -{"id":5,"name":"Raising Arizona"} data/json/example_n.ndjson -{"id":5,"name":"Raising Arizona"} data/json/example_r.ndjson -{"id":5,"name":"Raising Arizona"} data/json/example_rn.ndjson +{"id":1,"name":"O Brother, Where Art Thou?"} example_n.ndjson +{"id":1,"name":"O Brother, Where Art Thou?"} example_r.ndjson +{"id":1,"name":"O Brother, Where Art Thou?"} example_rn.ndjson +{"id":2,"name":"Home for the Holidays"} 
example_n.ndjson +{"id":2,"name":"Home for the Holidays"} example_r.ndjson +{"id":2,"name":"Home for the Holidays"} example_rn.ndjson +{"id":3,"name":"The Firm"} example_n.ndjson +{"id":3,"name":"The Firm"} example_r.ndjson +{"id":3,"name":"The Firm"} example_rn.ndjson +{"id":4,"name":"Broadcast News"} example_n.ndjson +{"id":4,"name":"Broadcast News"} example_r.ndjson +{"id":4,"name":"Broadcast News"} example_rn.ndjson +{"id":5,"name":"Raising Arizona"} example_n.ndjson +{"id":5,"name":"Raising Arizona"} example_r.ndjson +{"id":5,"name":"Raising Arizona"} example_rn.ndjson query I -select * from read_json_objects_auto(['data/json/example_n.ndjson', 'data/json/top_level_array.json'], union_by_name=true) order by all +select * from read_json_objects_auto(['{DATA_DIR}/json/example_n.ndjson', '{DATA_DIR}/json/top_level_array.json'], union_by_name=true) order by all ---- {"conclusion":"cancelled"} {"conclusion":"cancelled"} @@ -104,7 +104,7 @@ select * from read_json_objects_auto(['data/json/example_n.ndjson', 'data/json/t {"id":5,"name":"Raising Arizona"} query II -select j, count(*) from read_json_objects_auto('__TEST_DIR__/json_part/j=*/*.csv', HIVE_PARTITIONING=1) group by j order by j +select j, count(*) from read_json_objects_auto('{TEMP_DIR}/json_part/j=*/*.csv', HIVE_PARTITIONING=1) group by j order by j ---- [0] 5 [1] 5 @@ -114,7 +114,7 @@ select j, count(*) from read_json_objects_auto('__TEST_DIR__/json_part/j=*/*.csv # also test the filter pushdown query II SELECT j, count(*) -FROM read_json_auto('__TEST_DIR__/json_part/j=*/*.csv', HIVE_PARTITIONING=1) +FROM read_json_auto('{TEMP_DIR}/json_part/j=*/*.csv', HIVE_PARTITIONING=1) where j='[2]' group by j order by j; @@ -123,7 +123,7 @@ order by j; query II SELECT j, count(*) -FROM read_json_auto('__TEST_DIR__/json_part/j=*/*.csv', HIVE_PARTITIONING=1) +FROM read_json_auto('{TEMP_DIR}/json_part/j=*/*.csv', HIVE_PARTITIONING=1) where j>'[2]' group by j order by j; @@ -132,7 +132,7 @@ order by j; query II SELECT j, count(*) -FROM read_json_auto('__TEST_DIR__/json_part/j=*/*.csv', HIVE_PARTITIONING=1) +FROM read_json_auto('{TEMP_DIR}/json_part/j=*/*.csv', HIVE_PARTITIONING=1) where sqrt(j[2]::int) > 1.5 group by j order by j; @@ -143,22 +143,22 @@ order by j; # even across multiple files when union_by_name=false # these two files have a different schema, but we can read them together nonetheless statement ok -SELECT * FROM read_json_auto(['data/json/with_uuid.json', 'data/json/example_n.ndjson']) +SELECT * FROM read_json_auto(['{DATA_DIR}/json/with_uuid.json', '{DATA_DIR}/json/example_n.ndjson']) # both have 5 rows, so if we set sample_size=1, and maximum_sample_files=1, we cannot read them together anymore statement error -SELECT * FROM read_json_auto(['data/json/with_uuid.json', 'data/json/example_n.ndjson'], sample_size=1, maximum_sample_files=1) +SELECT * FROM read_json_auto(['{DATA_DIR}/json/with_uuid.json', '{DATA_DIR}/json/example_n.ndjson'], sample_size=1, maximum_sample_files=1) ---- Invalid Input Error # if we increase maximum_sample_files, or set union_by_name=true, then we can read them again statement ok -SELECT * FROM read_json_auto(['data/json/with_uuid.json', 'data/json/example_n.ndjson'], sample_size=1, maximum_sample_files=99) +SELECT * FROM read_json_auto(['{DATA_DIR}/json/with_uuid.json', '{DATA_DIR}/json/example_n.ndjson'], sample_size=1, maximum_sample_files=99) # if we set union_by_name=true, then we sample sample_size rows per file, so then we can read them again statement ok -SELECT * FROM
read_json_auto(['data/json/with_uuid.json', 'data/json/example_n.ndjson'], sample_size=1, union_by_name=true) +SELECT * FROM read_json_auto(['{DATA_DIR}/json/with_uuid.json', '{DATA_DIR}/json/example_n.ndjson'], sample_size=1, union_by_name=true) # with sample size 6 we sample 1 line from the second file, and of course we can read it again statement ok -SELECT * FROM read_json_auto(['data/json/with_uuid.json', 'data/json/example_n.ndjson'], sample_size=6) +SELECT * FROM read_json_auto(['{DATA_DIR}/json/with_uuid.json', '{DATA_DIR}/json/example_n.ndjson'], sample_size=6) diff --git a/test/sql/json/table/multi_file_hang.test b/test/sql/json/table/multi_file_hang.test index 1e93a3953b68..577551309c99 100644 --- a/test/sql/json/table/multi_file_hang.test +++ b/test/sql/json/table/multi_file_hang.test @@ -10,7 +10,7 @@ set threads=8 # only happened with these parameters statement error -from read_json('data/json/multi_file_hang/*.json', sample_size=1, maximum_sample_files=1) +from read_json('{DATA_DIR}/json/multi_file_hang/*.json', sample_size=1, maximum_sample_files=1) ---- Invalid Input Error: JSON transform error @@ -18,6 +18,6 @@ Invalid Input Error: JSON transform error # we cannot ignore errors of this kind when the data is not newline-delimited # because we wouldn't know how to continue statement error -SELECT * FROM read_json('data/json/fuzzer_hang.json', ignore_errors=true); +SELECT * FROM read_json('{DATA_DIR}/json/fuzzer_hang.json', ignore_errors=true); ---- Invalid Input Error diff --git a/test/sql/json/table/read_json.test b/test/sql/json/table/read_json.test index 8ece1299f6b6..a09141afbc6d 100644 --- a/test/sql/json/table/read_json.test +++ b/test/sql/json/table/read_json.test @@ -8,19 +8,19 @@ statement ok pragma enable_verification statement error -SELECT * FROM read_json('data/json/example_n.ndjson', auto_detect=false) +SELECT * FROM read_json('{DATA_DIR}/json/example_n.ndjson', auto_detect=false) ---- Binder Error # can't read ndjson with array statement error -SELECT * FROM read_json('data/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, format='array') +SELECT * FROM read_json('{DATA_DIR}/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, format='array') ---- Invalid Input Error: Expected top-level JSON array # read_ndjson works query II -SELECT * FROM read_ndjson('data/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}) +SELECT * FROM read_ndjson('{DATA_DIR}/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}) ---- 1 O Brother, Where Art Thou? 2 Home for the Holidays @@ -30,7 +30,7 @@ SELECT * FROM read_ndjson('data/json/example_n.ndjson', columns={id: 'INTEGER', # We can also read only one of the columns query I -SELECT * FROM read_ndjson('data/json/example_n.ndjson', columns={id: 'INTEGER'}) +SELECT * FROM read_ndjson('{DATA_DIR}/json/example_n.ndjson', columns={id: 'INTEGER'}) ---- 1 2 @@ -39,7 +39,7 @@ SELECT * FROM read_ndjson('data/json/example_n.ndjson', columns={id: 'INTEGER'}) 5 query I -SELECT * FROM read_ndjson('data/json/example_n.ndjson', columns={name: 'VARCHAR'}) +SELECT * FROM read_ndjson('{DATA_DIR}/json/example_n.ndjson', columns={name: 'VARCHAR'}) ---- O Brother, Where Art Thou? 
Home for the Holidays @@ -49,7 +49,7 @@ Raising Arizona # what about a broken JSON file query II -SELECT * FROM read_ndjson('data/json/unterminated_quotes.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, ignore_errors=true) +SELECT * FROM read_ndjson('{DATA_DIR}/json/unterminated_quotes.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, ignore_errors=true) ---- 1 O Brother, Where Art Thou? 2 Home for the Holidays @@ -59,7 +59,7 @@ NULL NULL # some of these values don't have "name" query II -SELECT * FROM read_ndjson('data/json/different_schemas.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}) +SELECT * FROM read_ndjson('{DATA_DIR}/json/different_schemas.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}) ---- 1 O Brother, Where Art Thou? 2 NULL @@ -69,7 +69,7 @@ SELECT * FROM read_ndjson('data/json/different_schemas.ndjson', columns={id: 'IN # test projection pushdown (unstructured json) query I -SELECT id FROM read_json('data/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, format='unstructured') +SELECT id FROM read_json('{DATA_DIR}/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, format='unstructured') ---- 1 2 @@ -78,7 +78,7 @@ SELECT id FROM read_json('data/json/example_n.ndjson', columns={id: 'INTEGER', n 5 query I -SELECT name FROM read_json('data/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, format='unstructured') +SELECT name FROM read_json('{DATA_DIR}/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, format='unstructured') ---- O Brother, Where Art Thou? Home for the Holidays @@ -88,7 +88,7 @@ Raising Arizona # test projection pushdown (newline-delimited json) query I -SELECT id FROM read_json('data/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, format='newline_delimited') +SELECT id FROM read_json('{DATA_DIR}/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, format='newline_delimited') ---- 1 2 @@ -97,7 +97,7 @@ SELECT id FROM read_json('data/json/example_n.ndjson', columns={id: 'INTEGER', n 5 query I -SELECT name FROM read_ndjson('data/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, format='nd') +SELECT name FROM read_ndjson('{DATA_DIR}/json/example_n.ndjson', columns={id: 'INTEGER', name: 'VARCHAR'}, format='nd') ---- O Brother, Where Art Thou? Home for the Holidays @@ -107,7 +107,7 @@ Raising Arizona # auto-detect query II -SELECT * FROM read_json_auto('data/json/example_n.ndjson') +SELECT * FROM read_json_auto('{DATA_DIR}/json/example_n.ndjson') ---- 1 O Brother, Where Art Thou? 2 Home for the Holidays @@ -116,7 +116,7 @@ SELECT * FROM read_json_auto('data/json/example_n.ndjson') 5 Raising Arizona query II -SELECT * FROM 'data/json/example_n.ndjson' +SELECT * FROM '{DATA_DIR}/json/example_n.ndjson' ---- 1 O Brother, Where Art Thou? 
2 Home for the Holidays @@ -126,7 +126,7 @@ SELECT * FROM 'data/json/example_n.ndjson' # we can detect at varying levels, level 0 is just JSON query I -SELECT * FROM read_json_auto('data/json/with_list.json', maximum_depth=0) +SELECT * FROM read_json_auto('{DATA_DIR}/json/with_list.json', maximum_depth=0) ---- {"id":1,"name":["O","Brother,","Where","Art","Thou?"]} {"id":2,"name":["Home","for","the","Holidays"]} @@ -136,7 +136,7 @@ SELECT * FROM read_json_auto('data/json/with_list.json', maximum_depth=0) # at level one we get JSON and JSON query II -SELECT * FROM read_json_auto('data/json/with_list.json', maximum_depth=1) +SELECT * FROM read_json_auto('{DATA_DIR}/json/with_list.json', maximum_depth=1) ---- 1 ["O","Brother,","Where","Art","Thou?"] 2 ["Home","for","the","Holidays"] @@ -146,7 +146,7 @@ SELECT * FROM read_json_auto('data/json/with_list.json', maximum_depth=1) # at level 2 we get BIGINT and JSON[] query II -SELECT * FROM read_json_auto('data/json/with_list.json', maximum_depth=2) +SELECT * FROM read_json_auto('{DATA_DIR}/json/with_list.json', maximum_depth=2) ---- 1 ["O", "Brother,", "Where", "Art", "Thou?"] 2 ["Home", "for", "the", "Holidays"] @@ -156,7 +156,7 @@ SELECT * FROM read_json_auto('data/json/with_list.json', maximum_depth=2) # at level 3 it's fully detected, and we get BIGINT and VARCHAR[] query II -SELECT * FROM read_json_auto('data/json/with_list.json', maximum_depth=3) +SELECT * FROM read_json_auto('{DATA_DIR}/json/with_list.json', maximum_depth=3) ---- 1 [O, 'Brother,', Where, Art, Thou?] 2 [Home, for, the, Holidays] @@ -166,7 +166,7 @@ SELECT * FROM read_json_auto('data/json/with_list.json', maximum_depth=3) # we can detect lists too query III -SELECT id, typeof(name), unnest(name) FROM 'data/json/with_list.json' +SELECT id, typeof(name), unnest(name) FROM '{DATA_DIR}/json/with_list.json' ---- 1 VARCHAR[] O 1 VARCHAR[] Brother, @@ -186,7 +186,7 @@ SELECT id, typeof(name), unnest(name) FROM 'data/json/with_list.json' # with depth 2 we don't bother detecting inside of the list - defaults to JSON query III -SELECT id, typeof(name), unnest(name) FROM read_json_auto('data/json/with_list.json', maximum_depth=2) +SELECT id, typeof(name), unnest(name) FROM read_json_auto('{DATA_DIR}/json/with_list.json', maximum_depth=2) ---- 1 JSON[] "O" 1 JSON[] "Brother," @@ -206,7 +206,7 @@ SELECT id, typeof(name), unnest(name) FROM read_json_auto('data/json/with_list.j # with depth 0 we don't bother detecting anything, everything defaults to JSON (even the "id" column in this case) query II -SELECT typeof(id), typeof(name) FROM read_json_auto('data/json/with_list.json', maximum_depth=1) +SELECT typeof(id), typeof(name) FROM read_json_auto('{DATA_DIR}/json/with_list.json', maximum_depth=1) ---- JSON JSON JSON JSON @@ -216,7 +216,7 @@ JSON JSON # we can detect UUID's query II -SELECT id, typeof(id) FROM 'data/json/with_uuid.json' +SELECT id, typeof(id) FROM '{DATA_DIR}/json/with_uuid.json' ---- bbd05ae7-76e5-4f1a-a31f-247408251fc9 UUID d5c52052-5f8e-473f-bc8d-176342643ef5 UUID @@ -226,60 +226,60 @@ ae24e69e-e0bf-4e85-9848-27d35df85b8b UUID # top-level array of values query I -select * from read_json('data/json/top_level_array.json', columns={conclusion: 'VARCHAR'}) +select * from read_json('{DATA_DIR}/json/top_level_array.json', columns={conclusion: 'VARCHAR'}) ---- cancelled cancelled query I -select * from read_json('data/json/top_level_array.json', auto_detect=true) +select * from read_json('{DATA_DIR}/json/top_level_array.json', auto_detect=true) ---- cancelled cancelled # if we 
try to read it as 'unstructured' records statement error -select * from read_json('data/json/top_level_array.json', columns={conclusion: 'VARCHAR'}, format='unstructured', records=true) +select * from read_json('{DATA_DIR}/json/top_level_array.json', columns={conclusion: 'VARCHAR'}, format='unstructured', records=true) ---- -Invalid Input Error: JSON transform error in file "data/json/top_level_array.json", in record/value 1: Expected OBJECT, but got ARRAY +Invalid Input Error: JSON transform error in file "{DATA_DIR}/json/top_level_array.json", in record/value 1: Expected OBJECT, but got ARRAY # if we try to read an ndjson file as if it is an array of values, we get an error statement error -select * from read_json_auto('data/json/example_n.ndjson', format='array') +select * from read_json_auto('{DATA_DIR}/json/example_n.ndjson', format='array') ---- Invalid Input Error: Expected top-level JSON array # test that we can read a list of longer than STANDARD_VECTOR_SIZE properly statement ok -copy (select 42 duck from range(10000)) to '__TEST_DIR__/my_file.json' (array true) +copy (select 42 duck from range(10000)) to '{TEMP_DIR}/my_file.json' (array true) query T -select count(*) from read_json('__TEST_DIR__/my_file.json', columns={duck: 'INTEGER'}, format='array') +select count(*) from read_json('{TEMP_DIR}/my_file.json', columns={duck: 'INTEGER'}, format='array') ---- 10000 query T -select sum(duck) = 42*10000 from read_json('__TEST_DIR__/my_file.json', columns={duck: 'INTEGER'}, format='array') +select sum(duck) = 42*10000 from read_json('{TEMP_DIR}/my_file.json', columns={duck: 'INTEGER'}, format='array') ---- true # read_json_auto also understands ARRAY format query T -select count(*) from '__TEST_DIR__/my_file.json' +select count(*) from '{TEMP_DIR}/my_file.json' ---- 10000 query T -select sum(duck) = 42*10000 from '__TEST_DIR__/my_file.json' +select sum(duck) = 42*10000 from '{TEMP_DIR}/my_file.json' ---- true # what if we do an array of non-records? 
statement ok -copy (select list(range) from range(10)) to '__TEST_DIR__/my_file.json' (format csv, quote '', HEADER 0) +copy (select list(range) from range(10)) to '{TEMP_DIR}/my_file.json' (format csv, quote '', HEADER 0) query T -select * from '__TEST_DIR__/my_file.json' +select * from '{TEMP_DIR}/my_file.json' ---- 0 1 @@ -294,18 +294,18 @@ select * from '__TEST_DIR__/my_file.json' # fails because it's not records statement error -select * from read_json('__TEST_DIR__/my_file.json', format='array', columns={range: 'INTEGER'}, records=true) +select * from read_json('{TEMP_DIR}/my_file.json', format='array', columns={range: 'INTEGER'}, records=true) ---- Invalid Input Error: JSON transform error # fails because it's not records statement error -select * from read_json_auto('__TEST_DIR__/my_file.json', format='array', records=true) +select * from read_json_auto('{TEMP_DIR}/my_file.json', format='array', records=true) ---- Binder Error: json_read expected records query T -select * from read_json('__TEST_DIR__/my_file.json', format='auto', records=false, auto_detect=true) +select * from read_json('{TEMP_DIR}/my_file.json', format='auto', records=false, auto_detect=true) ---- 0 1 @@ -320,19 +320,19 @@ select * from read_json('__TEST_DIR__/my_file.json', format='auto', records=fals # need to supply columns statement error -select * from read_json('__TEST_DIR__/my_file.json', format='auto', records='false', auto_detect=false) +select * from read_json('{TEMP_DIR}/my_file.json', format='auto', records='false', auto_detect=false) ---- Binder Error # read as unstructured values, so we just get the array query T -select * from read_json('__TEST_DIR__/my_file.json', format='unstructured', records='false', auto_detect=true) +select * from read_json('{TEMP_DIR}/my_file.json', format='unstructured', records='false', auto_detect=true) ---- [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] # array of non-records query T -select * from read_json('__TEST_DIR__/my_file.json', format='array', records='false', auto_detect=true) +select * from read_json('{TEMP_DIR}/my_file.json', format='array', records='false', auto_detect=true) ---- 0 1 @@ -347,7 +347,7 @@ select * from read_json('__TEST_DIR__/my_file.json', format='array', records='fa # also works with auto query T -select * from read_json('__TEST_DIR__/my_file.json', format='array', records='auto', auto_detect=true) +select * from read_json('{TEMP_DIR}/my_file.json', format='array', records='auto', auto_detect=true) ---- 0 1 @@ -367,7 +367,7 @@ SET threads=2 # issue 6646, this is not an array, but we try to read it as one statement error select json_structure(json ->> '$.metadata') as structure, -from read_json('data/json/issue.json', format='array', columns={'json': 'JSON'}, maximum_object_size=104857600) +from read_json('{DATA_DIR}/json/issue.json', format='array', columns={'json': 'JSON'}, maximum_object_size=104857600) limit 1; ---- Invalid Input Error: Expected top-level JSON array @@ -375,7 +375,7 @@ Invalid Input Error: Expected top-level JSON array # let's try a variation statement error select json_structure(json ->> '$.metadata') as structure, -from read_json('data/json/issue.json', format='array', records='false', columns={'json': 'JSON'}, maximum_object_size=104857600) +from read_json('{DATA_DIR}/json/issue.json', format='array', records='false', columns={'json': 'JSON'}, maximum_object_size=104857600) limit 1; ---- Invalid Input Error: Expected top-level JSON array @@ -383,7 +383,7 @@ Invalid Input Error: Expected top-level JSON array # we can parse it as 
unstructured values, and give it a different col name query I select json_structure(my_json ->> '$.metadata') as structure, -from read_json('data/json/issue.json', format='unstructured', records='false', columns={'my_json': 'JSON'}, maximum_object_size=104857600) +from read_json('{DATA_DIR}/json/issue.json', format='unstructured', records='false', columns={'my_json': 'JSON'}, maximum_object_size=104857600) limit 1; ---- {"argv":["VARCHAR"],"dag":{"dag_size":"VARCHAR","tasks":{"load_oscar":{"status":"VARCHAR","type":"VARCHAR","upstream":"VARCHAR","products":{"nb":"VARCHAR"}},"load_weather":{"status":"VARCHAR","type":"VARCHAR","upstream":"VARCHAR","products":{"nb":"VARCHAR"}},"compress":{"status":"VARCHAR","type":"VARCHAR","upstream":{"load_oscar":"VARCHAR"},"products":{"nb":"VARCHAR"}}}}} @@ -395,10 +395,10 @@ pragma disable_verification # the JSON is 55 bytes, and the minimum buffer size is 32MB # let's do 50k to be safe statement ok -copy (select 42 this_is_a_very_long_field_name_yes_very_much_so from range(50000)) to '__TEST_DIR__/my_file.json' (array true) +copy (select 42 this_is_a_very_long_field_name_yes_very_much_so from range(50000)) to '{TEMP_DIR}/my_file.json' (array true) query T -select sum(this_is_a_very_long_field_name_yes_very_much_so) = 42 * 50000 from '__TEST_DIR__/my_file.json' +select sum(this_is_a_very_long_field_name_yes_very_much_so) = 42 * 50000 from '{TEMP_DIR}/my_file.json' ---- true diff --git a/test/sql/json/table/read_json_auto.test_slow b/test/sql/json/table/read_json_auto.test_slow index eac953ddc6ed..cbef2ab7ff17 100644 --- a/test/sql/json/table/read_json_auto.test_slow +++ b/test/sql/json/table/read_json_auto.test_slow @@ -12,52 +12,52 @@ pragma enable_verification # instead of adding all of these files to data/test we just create them on the fly here # whenever we add a '' at the end it's just to check we skip the newline at the end that's sometimes there statement ok -copy (select * from (values ('{"a": 1, "b": 2}'), (''))) to '__TEST_DIR__/my_file.json' (format csv, quote '', header 0); +copy (select * from (values ('{"a": 1, "b": 2}'), (''))) to '{TEMP_DIR}/my_file.json' (format csv, quote '', header 0); query II -select * from '__TEST_DIR__/my_file.json' +select * from '{TEMP_DIR}/my_file.json' ---- 1 2 statement ok -copy (select * from (values ('{"a": 1}'), ('{"a": 2}'), ('{"a": 3}'))) to '__TEST_DIR__/my_file.json' (format csv, quote '', header 0) +copy (select * from (values ('{"a": 1}'), ('{"a": 2}'), ('{"a": 3}'))) to '{TEMP_DIR}/my_file.json' (format csv, quote '', header 0) query I -select * from '__TEST_DIR__/my_file.json' +select * from '{TEMP_DIR}/my_file.json' ---- 1 2 3 query I -select count(*) from '__TEST_DIR__/my_file.json' +select count(*) from '{TEMP_DIR}/my_file.json' ---- 3 statement ok -copy (select * from (values ('{"a": 1,"b": 2, "c": 3}'), ('{"a": 4,"b": 5, "c": 6}'))) to '__TEST_DIR__/my_file.json' (format csv, quote '', header 0) +copy (select * from (values ('{"a": 1,"b": 2, "c": 3}'), ('{"a": 4,"b": 5, "c": 6}'))) to '{TEMP_DIR}/my_file.json' (format csv, quote '', header 0) query III -select * from '__TEST_DIR__/my_file.json' +select * from '{TEMP_DIR}/my_file.json' ---- 1 2 3 4 5 6 statement ok -copy (select * from (values ('{"a": 1,"b": 2, "c": "3", "d": false}'), ('{"a": 4.0, "b": -5, "c": "foo", "d": true}'), (''))) to '__TEST_DIR__/my_file.json' (format csv, quote '', header 0) +copy (select * from (values ('{"a": 1,"b": 2, "c": "3", "d": false}'), ('{"a": 4.0, "b": -5, "c": "foo", "d": true}'), (''))) to 
'{TEMP_DIR}/my_file.json' (format csv, quote '', header 0) query IIII -select * from '__TEST_DIR__/my_file.json' +select * from '{TEMP_DIR}/my_file.json' ---- 1.0 2 3 false 4.0 -5 foo true # mixed types that cannot be resolved, defaults to JSON (column 3) statement ok -copy (select * from (values ('{"a": 1, "b": 2, "c": null, "d": null, "e": null}'), ('{"a": null, "b": -5, "c": "foo", "d": null, "e": true}'), ('{"a": 4.5, "b": null, "c": "nan", "d": null,"e": false}'), (''))) to '__TEST_DIR__/my_file.json' (format csv, quote '', header 0) +copy (select * from (values ('{"a": 1, "b": 2, "c": null, "d": null, "e": null}'), ('{"a": null, "b": -5, "c": "foo", "d": null, "e": true}'), ('{"a": 4.5, "b": null, "c": "nan", "d": null,"e": false}'), (''))) to '{TEMP_DIR}/my_file.json' (format csv, quote '', header 0) query IIIII -select * from '__TEST_DIR__/my_file.json' +select * from '{TEMP_DIR}/my_file.json' ---- 1.0 2 NULL NULL NULL NULL -5 foo NULL true @@ -65,10 +65,10 @@ NULL -5 foo NULL true # mixed types are resolved to DOUBLE here statement ok -copy (select * from (values ('{"a": 1}'), ('{"a": 1.45}'), ('{"a": -23.456}'), ('{}'), (''))) to '__TEST_DIR__/my_file.json' (format csv, quote '', header 0) +copy (select * from (values ('{"a": 1}'), ('{"a": 1.45}'), ('{"a": -23.456}'), ('{}'), (''))) to '{TEMP_DIR}/my_file.json' (format csv, quote '', header 0) query II -select typeof(a), a from '__TEST_DIR__/my_file.json' +select typeof(a), a from '{TEMP_DIR}/my_file.json' ---- DOUBLE 1.0 DOUBLE 1.45 @@ -76,67 +76,67 @@ DOUBLE -23.456 DOUBLE NULL statement ok -copy (select * from (values ('{"foo": "bar", "num": 0}'), ('{"foo": "baz", "num": 1}'), (''))) to '__TEST_DIR__/my_file.json' (format csv, quote '', header 0) +copy (select * from (values ('{"foo": "bar", "num": 0}'), ('{"foo": "baz", "num": 1}'), (''))) to '{TEMP_DIR}/my_file.json' (format csv, quote '', header 0) query II -select * from '__TEST_DIR__/my_file.json' +select * from '{TEMP_DIR}/my_file.json' ---- bar 0 baz 1 # we can read values from a top-level list query I -select * from 'data/json/top_level_array.json' +select * from '{DATA_DIR}/json/top_level_array.json' ---- cancelled cancelled query I -select count(*) from 'data/json/top_level_array.json' +select count(*) from '{DATA_DIR}/json/top_level_array.json' ---- 2 # for maximum_depth=0 this is two records of JSON query I -select * from read_json_auto('data/json/top_level_array.json', maximum_depth=0) +select * from read_json_auto('{DATA_DIR}/json/top_level_array.json', maximum_depth=0) ---- {"conclusion":"cancelled"} {"conclusion":"cancelled"} # for 1 it's 1 column of JSON query I -select * from read_json_auto('data/json/top_level_array.json', maximum_depth=1) +select * from read_json_auto('{DATA_DIR}/json/top_level_array.json', maximum_depth=1) ---- "cancelled" "cancelled" # if we read this with records='false', we get the struct instead of the unpacked columns query I -select typeof(json) from read_json_auto('data/json/top_level_array.json', records='false') +select typeof(json) from read_json_auto('{DATA_DIR}/json/top_level_array.json', records='false') ---- STRUCT(conclusion VARCHAR) STRUCT(conclusion VARCHAR) # however, if there are multiple top-level arrays, we default to reading them as lists query I -select * from 'data/json/top_level_two_arrays.json' +select * from '{DATA_DIR}/json/top_level_two_arrays.json' ---- [{'conclusion': cancelled}, {'conclusion': cancelled}] [{'conclusion': cancelled}, {'conclusion': cancelled}] # if we read a top-level array as if it is a 
record, then we get an error statement error -select * from read_json_auto('data/json/top_level_array.json', format='unstructured', records='true') +select * from read_json_auto('{DATA_DIR}/json/top_level_array.json', format='unstructured', records='true') ---- Binder Error: json_read expected records # issue Mark found when analyzing a JSON dump of our CI - projection pushdown wasn't working properly statement ok -select * from 'data/json/projection_pushdown_example.json' WHERE status <> 'completed' +select * from '{DATA_DIR}/json/projection_pushdown_example.json' WHERE status <> 'completed' # different schema's - this one should work regardless of sampling 1 or all lines query II -select * from read_json_auto('data/json/different_schemas.ndjson', sample_size=1) +select * from read_json_auto('{DATA_DIR}/json/different_schemas.ndjson', sample_size=1) ---- 1 O Brother, Where Art Thou? 2 NULL @@ -145,7 +145,7 @@ select * from read_json_auto('data/json/different_schemas.ndjson', sample_size=1 5 Raising Arizona query II -select * from read_json_auto('data/json/different_schemas.ndjson', sample_size=-1) +select * from read_json_auto('{DATA_DIR}/json/different_schemas.ndjson', sample_size=-1) ---- 1 O Brother, Where Art Thou? 2 NULL @@ -155,12 +155,12 @@ select * from read_json_auto('data/json/different_schemas.ndjson', sample_size=- # if we require fields to appear in all objects by setting field_appearance_threshold=1, we default to MAP query I -select typeof(COLUMNS(*)) from read_json_auto('data/json/different_schemas.ndjson', field_appearance_threshold=1) limit 1 +select typeof(COLUMNS(*)) from read_json_auto('{DATA_DIR}/json/different_schemas.ndjson', field_appearance_threshold=1) limit 1 ---- MAP(VARCHAR, JSON) query I -select * from read_json_auto('data/json/different_schemas.ndjson', field_appearance_threshold=1) +select * from read_json_auto('{DATA_DIR}/json/different_schemas.ndjson', field_appearance_threshold=1) ---- {id=1, name='"O Brother, Where Art Thou?"'} {id=2} @@ -170,7 +170,7 @@ select * from read_json_auto('data/json/different_schemas.ndjson', field_appeara # if we set it to 0.5 it should work already since "name" appears in 3/5 objects, which is greater than 0.5 query II -select * from read_json_auto('data/json/different_schemas.ndjson', field_appearance_threshold=0.5) +select * from read_json_auto('{DATA_DIR}/json/different_schemas.ndjson', field_appearance_threshold=0.5) ---- 1 O Brother, Where Art Thou? 
2 NULL @@ -180,24 +180,24 @@ select * from read_json_auto('data/json/different_schemas.ndjson', field_appeara # can't set it to less than 0 or more than 1 statement error -select * from read_json_auto('data/json/different_schemas.ndjson', field_appearance_threshold=-1) +select * from read_json_auto('{DATA_DIR}/json/different_schemas.ndjson', field_appearance_threshold=-1) ---- Binder Error: read_json_auto "field_appearance_threshold" parameter must be between 0 and 1 statement error -select * from read_json_auto('data/json/different_schemas.ndjson', field_appearance_threshold=2) +select * from read_json_auto('{DATA_DIR}/json/different_schemas.ndjson', field_appearance_threshold=2) ---- Binder Error: read_json_auto "field_appearance_threshold" parameter must be between 0 and 1 # inconsistent schema's - if we only sample 1 row, we get an error, because we only see a NULL value for the 2nd column statement error -select * from read_json_auto('data/json/inconsistent_schemas.ndjson', sample_size=1, convert_strings_to_integers=true) +select * from read_json_auto('{DATA_DIR}/json/inconsistent_schemas.ndjson', sample_size=1, convert_strings_to_integers=true) ---- -Invalid Input Error: JSON transform error in file "data/json/inconsistent_schemas.ndjson", in line 3 +Invalid Input Error: JSON transform error in file "{DATA_DIR}/json/inconsistent_schemas.ndjson", in line 3 # if we increase the sample size to 2, we can read it just fine query II -select * from read_json_auto('data/json/inconsistent_schemas.ndjson', sample_size=2) +select * from read_json_auto('{DATA_DIR}/json/inconsistent_schemas.ndjson', sample_size=2) ---- "1" NULL 2 Home for the Holidays @@ -207,18 +207,18 @@ select * from read_json_auto('data/json/inconsistent_schemas.ndjson', sample_siz # we can also find bigint in strings (happens a lot in JSON for some reason ...) statement ok -copy (select * from (values ('{"id": "26941143801"}'), ('{"id": "26941143807"}'))) to '__TEST_DIR__/my_file.json' (format csv, quote '', header 0) +copy (select * from (values ('{"id": "26941143801"}'), ('{"id": "26941143807"}'))) to '{TEMP_DIR}/my_file.json' (format csv, quote '', header 0) # but only if we set the parameter to true query T -select typeof(id) from read_json('__TEST_DIR__/my_file.json', convert_strings_to_integers=true) +select typeof(id) from read_json('{TEMP_DIR}/my_file.json', convert_strings_to_integers=true) ---- BIGINT BIGINT # empty array and the example file works query II -select * from read_json_auto(['data/json/empty_array.json', 'data/json/example_n.ndjson']); +select * from read_json_auto(['{DATA_DIR}/json/empty_array.json', '{DATA_DIR}/json/example_n.ndjson']); ---- 1 O Brother, Where Art Thou? 
2 Home for the Holidays @@ -228,96 +228,96 @@ select * from read_json_auto(['data/json/empty_array.json', 'data/json/example_n # Simple map inference with default threshold query T -select distinct typeof(a) from read_json_auto('data/json/simple_map.jsonl') +select distinct typeof(a) from read_json_auto('{DATA_DIR}/json/simple_map.jsonl') ---- MAP(VARCHAR, BIGINT) # Test setting map_inference_threshold high query T -select distinct typeof(a) from read_json_auto('data/json/simple_map.jsonl', map_inference_threshold=1000) +select distinct typeof(a) from read_json_auto('{DATA_DIR}/json/simple_map.jsonl', map_inference_threshold=1000) ---- MAP(VARCHAR, BIGINT) # Map inference can be disabled query T -select distinct typeof(a) from read_json_auto('data/json/simple_map.jsonl', map_inference_threshold=-1, field_appearance_threshold=0) +select distinct typeof(a) from read_json_auto('{DATA_DIR}/json/simple_map.jsonl', map_inference_threshold=-1, field_appearance_threshold=0) ---- STRUCT("1" JSON, "2" BIGINT, "3" BIGINT, "4" BIGINT, "5" BIGINT, "6" BIGINT, "7" BIGINT, "8" BIGINT, "9" BIGINT, "10" BIGINT, "11" BIGINT, "12" BIGINT, "13" BIGINT, "14" BIGINT, "15" BIGINT, "16" JSON, "17" BIGINT, "18" BIGINT, "19" BIGINT, "20" BIGINT, "21" BIGINT, "22" BIGINT, "23" BIGINT, "24" BIGINT, "25" BIGINT, "26" BIGINT, "27" BIGINT, "28" BIGINT, "29" BIGINT, "30" BIGINT, "31" BIGINT, "32" BIGINT, "33" BIGINT, "34" BIGINT, "35" BIGINT, "36" BIGINT, "37" BIGINT, "38" BIGINT, "39" BIGINT, "40" BIGINT, "41" BIGINT, "42" BIGINT, "43" BIGINT, "44" BIGINT, "45" BIGINT, "46" BIGINT, "47" BIGINT, "48" BIGINT, "49" BIGINT, "50" BIGINT, "51" BIGINT, "52" BIGINT, "53" BIGINT, "54" BIGINT, "55" BIGINT, "56" BIGINT, "57" BIGINT, "58" BIGINT, "59" BIGINT, "60" BIGINT, "61" BIGINT, "62" BIGINT, "63" BIGINT, "64" BIGINT, "65" BIGINT, "66" BIGINT, "67" BIGINT, "68" BIGINT, "69" BIGINT, "70" BIGINT, "71" BIGINT, "72" BIGINT, "73" BIGINT, "74" BIGINT, "75" BIGINT, "76" BIGINT, "77" BIGINT, "78" BIGINT, "79" BIGINT, "80" BIGINT, "81" BIGINT, "82" BIGINT, "83" BIGINT, "84" BIGINT, "85" BIGINT, "86" BIGINT, "87" BIGINT, "88" BIGINT, "89" BIGINT, "90" BIGINT, "91" BIGINT, "92" BIGINT, "93" BIGINT, "94" BIGINT, "95" BIGINT, "96" BIGINT, "97" BIGINT, "98" BIGINT, "99" BIGINT, "100" BIGINT) # Map inference with max_depth works as expected query T -select distinct typeof(a) from read_json_auto('data/json/simple_map.jsonl', maximum_depth=2) +select distinct typeof(a) from read_json_auto('{DATA_DIR}/json/simple_map.jsonl', maximum_depth=2) ---- MAP(VARCHAR, JSON) query T -select distinct typeof(a) from read_json_auto('data/json/simple_map.jsonl', maximum_depth=1) +select distinct typeof(a) from read_json_auto('{DATA_DIR}/json/simple_map.jsonl', maximum_depth=1) ---- JSON # Map where all values are null query T -select distinct typeof(a) from read_json_auto('data/json/map_of_nulls.jsonl') +select distinct typeof(a) from read_json_auto('{DATA_DIR}/json/map_of_nulls.jsonl') ---- MAP(VARCHAR, JSON) # Map type can be inferred at the top level query T -select distinct typeof(json) from read_json_auto('data/json/top_level_map.jsonl') +select distinct typeof(json) from read_json_auto('{DATA_DIR}/json/top_level_map.jsonl') ---- MAP(VARCHAR, BIGINT) # Map type can be inferred for struct value type query T -select distinct typeof(a) from read_json_auto('data/json/map_of_structs.jsonl') +select distinct typeof(a) from read_json_auto('{DATA_DIR}/json/map_of_structs.jsonl') ---- MAP(VARCHAR, STRUCT(b BIGINT)) # Map 80% similarity check works query T 
-select distinct typeof(a) from read_json_auto('data/json/map_50_50.jsonl', map_inference_threshold=10) +select distinct typeof(a) from read_json_auto('{DATA_DIR}/json/map_50_50.jsonl', map_inference_threshold=10) ---- STRUCT(s1 STRUCT(f1 BIGINT[]), s2 STRUCT(f2 BIGINT[]), s3 STRUCT(f1 BIGINT[]), s4 STRUCT(f2 BIGINT[]), s5 STRUCT(f1 BIGINT[]), s6 STRUCT(f2 BIGINT[]), s7 STRUCT(f1 BIGINT[]), s8 STRUCT(f2 BIGINT[]), s9 STRUCT(f1 BIGINT[]), s10 STRUCT(f2 BIGINT[])) # Map of maps query T -select distinct typeof(a) from read_json_auto('data/json/map_of_map.jsonl', map_inference_threshold=10) +select distinct typeof(a) from read_json_auto('{DATA_DIR}/json/map_of_map.jsonl', map_inference_threshold=10) ---- MAP(VARCHAR, MAP(VARCHAR, BIGINT)) # All NULL types get converted to JSON if we do map inference query T -select distinct typeof(a) from read_json_auto('data/json/map_of_struct_with_nulls.jsonl', map_inference_threshold=10) +select distinct typeof(a) from read_json_auto('{DATA_DIR}/json/map_of_struct_with_nulls.jsonl', map_inference_threshold=10) ---- MAP(VARCHAR, STRUCT(a JSON[])) # Candidate types are properly handled for map inference query I -SELECT distinct typeof(a) FROM read_json_auto('data/json/map_of_dates.jsonl', map_inference_threshold=25) +SELECT distinct typeof(a) FROM read_json_auto('{DATA_DIR}/json/map_of_dates.jsonl', map_inference_threshold=25) ---- MAP(VARCHAR, DATE) # Mixed candidate types are also handled query I -SELECT distinct typeof(a) FROM read_json_auto('data/json/map_of_mixed_date_timestamps.jsonl', map_inference_threshold=25) +SELECT distinct typeof(a) FROM read_json_auto('{DATA_DIR}/json/map_of_mixed_date_timestamps.jsonl', map_inference_threshold=25) ---- MAP(VARCHAR, VARCHAR) # Incompatible types are handled correctly query T -select distinct typeof(a) from read_json_auto('data/json/map_incompatible.jsonl', map_inference_threshold=10) +select distinct typeof(a) from read_json_auto('{DATA_DIR}/json/map_incompatible.jsonl', map_inference_threshold=10) ---- STRUCT(s1 STRUCT("1" JSON), s2 STRUCT("1" MAP(VARCHAR, JSON)), s3 STRUCT("1" VARCHAR), s4 STRUCT("1" BIGINT[]), s5 STRUCT("1" BIGINT), s6 STRUCT("1" VARCHAR), s7 STRUCT("1" BIGINT[]), s8 STRUCT("1" BIGINT), s9 STRUCT("1" VARCHAR), s10 STRUCT("1" BIGINT[])) # Can't set map_inference_threshold to a negative value (except -1) statement error -select * from read_json_auto('data/json/simple_map.jsonl', map_inference_threshold=-10) +select * from read_json_auto('{DATA_DIR}/json/simple_map.jsonl', map_inference_threshold=-10) ---- Binder Error: read_json_auto "map_inference_threshold" parameter must be 0 or positive, or -1 to disable map inference for consistent objects. # if we only sample the first file, we default to a single JSON column query I -select * from read_json_auto(['data/json/empty_array.json', 'data/json/example_n.ndjson'], maximum_sample_files=1); +select * from read_json_auto(['{DATA_DIR}/json/empty_array.json', '{DATA_DIR}/json/example_n.ndjson'], maximum_sample_files=1); ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -327,7 +327,7 @@ select * from read_json_auto(['data/json/empty_array.json', 'data/json/example_n # -1 is unlimited query II -select * from read_json_auto(['data/json/empty_array.json', 'data/json/example_n.ndjson'], maximum_sample_files=-1); +select * from read_json_auto(['{DATA_DIR}/json/empty_array.json', '{DATA_DIR}/json/example_n.ndjson'], maximum_sample_files=-1); ---- 1 O Brother, Where Art Thou? 
2 Home for the Holidays @@ -337,18 +337,18 @@ select * from read_json_auto(['data/json/empty_array.json', 'data/json/example_n # can't be -2 or lower statement error -select * from read_json_auto(['data/json/empty_array.json', 'data/json/example_n.ndjson'], maximum_sample_files=-2); +select * from read_json_auto(['{DATA_DIR}/json/empty_array.json', '{DATA_DIR}/json/example_n.ndjson'], maximum_sample_files=-2); ---- Binder Error # can't be 0 statement error -select * from read_json_auto(['data/json/empty_array.json', 'data/json/example_n.ndjson'], maximum_sample_files=0); +select * from read_json_auto(['{DATA_DIR}/json/empty_array.json', '{DATA_DIR}/json/example_n.ndjson'], maximum_sample_files=0); ---- Binder Error # cannot be NULL either statement error -select * from read_json_auto(['data/json/empty_array.json', 'data/json/example_n.ndjson'], maximum_sample_files=NULL); +select * from read_json_auto(['{DATA_DIR}/json/empty_array.json', '{DATA_DIR}/json/example_n.ndjson'], maximum_sample_files=NULL); ---- Binder Error diff --git a/test/sql/json/table/read_json_dates.test b/test/sql/json/table/read_json_dates.test index 43fdd5544de6..70c2242eda58 100644 --- a/test/sql/json/table/read_json_dates.test +++ b/test/sql/json/table/read_json_dates.test @@ -9,7 +9,7 @@ pragma enable_verification # issue #6774 query I -select * from read_json_auto('data/json/simple_timestamp.json', columns={"ts": "TIMESTAMP[]"}); +select * from read_json_auto('{DATA_DIR}/json/simple_timestamp.json', columns={"ts": "TIMESTAMP[]"}); ---- ['2022-06-01 06:41:58', '2021-08-21 08:26:55.5', '2009-11-15 21:58:54.636'] @@ -22,22 +22,22 @@ create table timestamp_test as select '1996-03-27 07:42:33'::TIMESTAMP t # cannot be empty statement error -copy (select d from date_test) to '__TEST_DIR__/my_file.json' (dateformat) +copy (select d from date_test) to '{TEMP_DIR}/my_file.json' (dateformat) ---- Binder Error statement error -copy (select d from date_test) to '__TEST_DIR__/my_file.json' (timestampformat) +copy (select d from date_test) to '{TEMP_DIR}/my_file.json' (timestampformat) ---- Binder Error statement error -copy date_test from 'data/json/simple_timestamp.json' (dateformat) +copy date_test from '{DATA_DIR}/json/simple_timestamp.json' (dateformat) ---- Binder Error statement error -copy date_test from 'data/json/simple_timestamp.json' (timestampformat) +copy date_test from '{DATA_DIR}/json/simple_timestamp.json' (timestampformat) ---- Binder Error @@ -45,23 +45,23 @@ Binder Error foreach date_format '%m-%d-%Y' '%m-%d-%y' '%d-%m-%Y' '%d-%m-%y' '%Y-%m-%d' '%y-%m-%d' statement ok -copy (select d from date_test) to '__TEST_DIR__/my_file.json' (dateformat ${date_format}) +copy (select d from date_test) to '{TEMP_DIR}/my_file.json' (dateformat ${date_format}) # auto-detect query II -select typeof(d), d from '__TEST_DIR__/my_file.json' +select typeof(d), d from '{TEMP_DIR}/my_file.json' ---- DATE 1996-03-27 # forced format read_ndjson query II -select typeof(d), d from read_ndjson('__TEST_DIR__/my_file.json', columns={d: 'DATE'}, dateformat=${date_format}) +select typeof(d), d from read_ndjson('{TEMP_DIR}/my_file.json', columns={d: 'DATE'}, dateformat=${date_format}) ---- DATE 1996-03-27 # wrong format read_ndjson statement error -select typeof(d), d from read_ndjson('__TEST_DIR__/my_file.json', columns={d: 'DATE'}, dateformat='%d-%Y-%m') +select typeof(d), d from read_ndjson('{TEMP_DIR}/my_file.json', columns={d: 'DATE'}, dateformat='%d-%Y-%m') ---- Invalid Input Error @@ -73,7 +73,7 @@ statement ok create table 
date_copy_test (d date) statement ok -copy date_copy_test from '__TEST_DIR__/my_file.json' (dateformat ${date_format}) +copy date_copy_test from '{TEMP_DIR}/my_file.json' (dateformat ${date_format}) query II select typeof(d), d from date_copy_test @@ -86,23 +86,23 @@ endloop foreach a,b,c '%Y-%m-%d,%H:%M:%S.%f,' '%m-%d-%Y,%I:%M:%S,%p' '%m-%d-%y,%I:%M:%S,%p' '%d-%m-%Y,%H:%M:%S,' '%d-%m-%y,%H:%M:%S,' '%Y-%m-%d,%H:%M:%S,' '%y-%m-%d,%H:%M:%S,' statement ok -copy (select t from timestamp_test) to '__TEST_DIR__/my_file.json' (format json, timestampformat ${a} ${b} ${c}) +copy (select t from timestamp_test) to '{TEMP_DIR}/my_file.json' (format json, timestampformat ${a} ${b} ${c}) # auto-detect query II -select typeof(t), t from '__TEST_DIR__/my_file.json' +select typeof(t), t from '{TEMP_DIR}/my_file.json' ---- TIMESTAMP 1996-03-27 07:42:33 # forced format read_ndjson query II -select typeof(t), t from read_ndjson('__TEST_DIR__/my_file.json', columns={t: 'TIMESTAMP'}, timestamp_format=${a} ${b} ${c}) +select typeof(t), t from read_ndjson('{TEMP_DIR}/my_file.json', columns={t: 'TIMESTAMP'}, timestamp_format=${a} ${b} ${c}) ---- TIMESTAMP 1996-03-27 07:42:33 # wrong format read_ndjson statement error -select typeof(t), t from read_ndjson('__TEST_DIR__/my_file.json', columns={t: 'TIMESTAMP'}, timestamp_format='%H:%M:%S%y-%m-%d') +select typeof(t), t from read_ndjson('{TEMP_DIR}/my_file.json', columns={t: 'TIMESTAMP'}, timestamp_format='%H:%M:%S%y-%m-%d') ---- Invalid Input Error @@ -114,7 +114,7 @@ statement ok create table timestamp_copy_test (t timestamp) statement ok -copy timestamp_copy_test from '__TEST_DIR__/my_file.json' (format json, timestampformat ${a} ${b} ${c}) +copy timestamp_copy_test from '{TEMP_DIR}/my_file.json' (format json, timestampformat ${a} ${b} ${c}) query II select typeof(t), t from timestamp_copy_test @@ -125,6 +125,6 @@ endloop # test this format too query II -select typeof(createdAt), createdAt from 'data/json/timestamp_example.json' +select typeof(createdAt), createdAt from '{DATA_DIR}/json/timestamp_example.json' ---- TIMESTAMP 2023-02-07 19:12:28 diff --git a/test/sql/json/table/read_json_objects.test b/test/sql/json/table/read_json_objects.test index 8064d4a4cef7..75fa66522e8a 100644 --- a/test/sql/json/table/read_json_objects.test +++ b/test/sql/json/table/read_json_objects.test @@ -7,13 +7,13 @@ require json # we cannot check the error output for the specific byte, because on Windows the \n are replaced with \r\n # therefore, the byte count is different. So, we cut off the error message here statement error -select * from read_json_objects('data/json/unterminated_quotes.ndjson') +select * from read_json_objects('{DATA_DIR}/json/unterminated_quotes.ndjson') ---- Invalid Input Error: Malformed JSON # now it should work! 
query I -SELECT * FROM read_csv('data/json/example_n.ndjson', columns={'json': 'JSON'}, delim=NULL, header=0, quote=NULL, escape=NULL, auto_detect = false) +SELECT * FROM read_csv('{DATA_DIR}/json/example_n.ndjson', columns={'json': 'JSON'}, delim=NULL, header=0, quote=NULL, escape=NULL, auto_detect = false) ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -23,7 +23,7 @@ SELECT * FROM read_csv('data/json/example_n.ndjson', columns={'json': 'JSON'}, d # example_n is with regular \n newlines query I -SELECT * FROM read_ndjson_objects('data/json/example_n.ndjson') +SELECT * FROM read_ndjson_objects('{DATA_DIR}/json/example_n.ndjson') ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -33,12 +33,12 @@ SELECT * FROM read_ndjson_objects('data/json/example_n.ndjson') # this one does not have the 'records' param statement error -SELECT * FROM read_ndjson_objects('data/json/example_n.ndjson', records='false') +SELECT * FROM read_ndjson_objects('{DATA_DIR}/json/example_n.ndjson', records='false') ---- Binder Error: Invalid named parameter query I -SELECT * FROM read_ndjson_objects('data/json/example_n.ndjson') +SELECT * FROM read_ndjson_objects('{DATA_DIR}/json/example_n.ndjson') ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -48,7 +48,7 @@ SELECT * FROM read_ndjson_objects('data/json/example_n.ndjson') # we can auto-detect that it's newline-delimited query I -SELECT * FROM read_json_objects('data/json/example_n.ndjson', format='auto') +SELECT * FROM read_json_objects('{DATA_DIR}/json/example_n.ndjson', format='auto') ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -58,7 +58,7 @@ SELECT * FROM read_json_objects('data/json/example_n.ndjson', format='auto') # example_r is with \r newlines - works with unstructured query I -SELECT * FROM read_json_objects('data/json/example_r.ndjson', format='unstructured') +SELECT * FROM read_json_objects('{DATA_DIR}/json/example_r.ndjson', format='unstructured') ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -68,7 +68,7 @@ SELECT * FROM read_json_objects('data/json/example_r.ndjson', format='unstructur # we can detect that it's not newline-delimited query I -SELECT * FROM read_json_objects('data/json/example_r.ndjson', format='auto') +SELECT * FROM read_json_objects('{DATA_DIR}/json/example_r.ndjson', format='auto') ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -78,13 +78,13 @@ SELECT * FROM read_json_objects('data/json/example_r.ndjson', format='auto') # \r newlines are NOT valid according to ndjson spec - this does not work, all a single line statement error -SELECT * FROM read_ndjson_objects('data/json/example_r.ndjson') +SELECT * FROM read_ndjson_objects('{DATA_DIR}/json/example_r.ndjson') ---- -Invalid Input Error: Malformed JSON in file "data/json/example_r.ndjson" +Invalid Input Error: Malformed JSON in file "{DATA_DIR}/json/example_r.ndjson" # example_rn is with \r\n newlines query I -SELECT * FROM read_ndjson_objects('data/json/example_rn.ndjson') +SELECT * FROM read_ndjson_objects('{DATA_DIR}/json/example_rn.ndjson') ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -93,7 +93,7 @@ SELECT * FROM read_ndjson_objects('data/json/example_rn.ndjson') {"id":5,"name":"Raising Arizona"} query I -SELECT * FROM read_ndjson_objects('data/json/example_rn.ndjson') 
+SELECT * FROM read_ndjson_objects('{DATA_DIR}/json/example_rn.ndjson') ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -103,7 +103,7 @@ SELECT * FROM read_ndjson_objects('data/json/example_rn.ndjson') # same but gzipped query I -SELECT * FROM read_ndjson_objects('data/json/example_rn.ndjson.gz') +SELECT * FROM read_ndjson_objects('{DATA_DIR}/json/example_rn.ndjson.gz') ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -112,7 +112,7 @@ SELECT * FROM read_ndjson_objects('data/json/example_rn.ndjson.gz') {"id":5,"name":"Raising Arizona"} query I -SELECT * FROM read_json_objects('data/json/example_rn.ndjson.gz', format='nd') +SELECT * FROM read_json_objects('{DATA_DIR}/json/example_rn.ndjson.gz', format='nd') ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -122,49 +122,49 @@ SELECT * FROM read_json_objects('data/json/example_rn.ndjson.gz', format='nd') # multi-file scan query I -SELECT count(*) from read_json_objects(['data/json/example_n.ndjson', 'data/json/example_r.ndjson', 'data/json/example_rn.ndjson'], format='auto') +SELECT count(*) from read_json_objects(['{DATA_DIR}/json/example_n.ndjson', '{DATA_DIR}/json/example_r.ndjson', '{DATA_DIR}/json/example_rn.ndjson'], format='auto') ---- 15 query I -SELECT count(*) from read_ndjson_objects(['data/json/example_n.ndjson', 'data/json/example_rn.ndjson']) +SELECT count(*) from read_ndjson_objects(['{DATA_DIR}/json/example_n.ndjson', '{DATA_DIR}/json/example_rn.ndjson']) ---- 10 # globbing query I -SELECT count(*) from read_json_objects('data/json/example_*.ndjson', format='auto') +SELECT count(*) from read_json_objects('{DATA_DIR}/json/example_*.ndjson', format='auto') ---- 15 query I -SELECT count(*) from read_ndjson_objects('data/json/example_*n.ndjson') +SELECT count(*) from read_ndjson_objects('{DATA_DIR}/json/example_*n.ndjson') ---- 10 # empty file query I -select * from read_json_objects('data/json/empty.ndjson') +select * from read_json_objects('{DATA_DIR}/json/empty.ndjson') ---- query I -select * from read_ndjson_objects('data/json/empty.ndjson') +select * from read_ndjson_objects('{DATA_DIR}/json/empty.ndjson') ---- # invalid json stuff statement error -select * from read_json_objects('data/json/unterminated_quotes.ndjson', format='nd') +select * from read_json_objects('{DATA_DIR}/json/unterminated_quotes.ndjson', format='nd') ---- -Invalid Input Error: Malformed JSON in file "data/json/unterminated_quotes.ndjson" +Invalid Input Error: Malformed JSON in file "{DATA_DIR}/json/unterminated_quotes.ndjson" statement error -select * from read_ndjson_objects('data/json/unterminated_quotes.ndjson') +select * from read_ndjson_objects('{DATA_DIR}/json/unterminated_quotes.ndjson') ---- -Invalid Input Error: Malformed JSON in file "data/json/unterminated_quotes.ndjson" +Invalid Input Error: Malformed JSON in file "{DATA_DIR}/json/unterminated_quotes.ndjson" # we can auto-detect and ignore the error (becomes NULL) query I -select * from read_json_objects('data/json/unterminated_quotes.ndjson', format='auto', ignore_errors=true) +select * from read_json_objects('{DATA_DIR}/json/unterminated_quotes.ndjson', format='auto', ignore_errors=true) ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -174,7 +174,7 @@ NULL # multiple values per line (works for read_json_objects) query I -select * from read_json_objects('data/json/multiple_objects_per_line.ndjson', 
format='unstructured') +select * from read_json_objects('{DATA_DIR}/json/multiple_objects_per_line.ndjson', format='unstructured') ---- {"id":1,"name":"O Brother, Where Art Thou?"} {"id":2,"name":"Home for the Holidays"} @@ -184,48 +184,48 @@ select * from read_json_objects('data/json/multiple_objects_per_line.ndjson', fo # does not work for read_ndjson_objects statement error -select * from read_ndjson_objects('data/json/multiple_objects_per_line.ndjson') +select * from read_ndjson_objects('{DATA_DIR}/json/multiple_objects_per_line.ndjson') ---- -Invalid Input Error: Malformed JSON in file "data/json/multiple_objects_per_line.ndjson" +Invalid Input Error: Malformed JSON in file "{DATA_DIR}/json/multiple_objects_per_line.ndjson" # what if we try to read a CSV? statement error -select * from read_json_objects('data/csv/tpcds_14.csv') +select * from read_json_objects('{DATA_DIR}/csv/tpcds_14.csv') ---- Invalid Input Error: Malformed JSON statement error -select * from read_ndjson_objects('data/csv/tpcds_14.csv') +select * from read_ndjson_objects('{DATA_DIR}/csv/tpcds_14.csv') ---- -Invalid Input Error: Malformed JSON in file "data/csv/tpcds_14.csv" +Invalid Input Error: Malformed JSON in file "{DATA_DIR}/csv/tpcds_14.csv" # how about parquet? statement error -select * from read_json_objects('data/parquet-testing/blob.parquet') +select * from read_json_objects('{DATA_DIR}/parquet-testing/blob.parquet') ---- Invalid Input Error: Malformed JSON statement error -select * from read_ndjson_objects('data/parquet-testing/blob.parquet') +select * from read_ndjson_objects('{DATA_DIR}/parquet-testing/blob.parquet') ---- -Invalid Input Error: Malformed JSON in file "data/parquet-testing/blob.parquet" +Invalid Input Error: Malformed JSON in file "{DATA_DIR}/parquet-testing/blob.parquet" # we can also read the objects from a JSON array (not newline-delimited) query I -select * from read_json_objects('data/json/top_level_array.json') +select * from read_json_objects('{DATA_DIR}/json/top_level_array.json') ---- {"conclusion":"cancelled"} {"conclusion":"cancelled"} # and auto-detect it query I -select * from read_json_objects('data/json/top_level_array.json', format='auto') +select * from read_json_objects('{DATA_DIR}/json/top_level_array.json', format='auto') ---- {"conclusion":"cancelled"} {"conclusion":"cancelled"} # the file only has one line, so if we read this as ndjson, we just get the array query I -select * from read_json_objects('data/json/top_level_array.json', format='nd') +select * from read_json_objects('{DATA_DIR}/json/top_level_array.json', format='nd') ---- [{"conclusion":"cancelled"}, {"conclusion":"cancelled"}] diff --git a/test/sql/json/table/read_json_union.test b/test/sql/json/table/read_json_union.test index 52933332d87a..44159881d125 100644 --- a/test/sql/json/table/read_json_union.test +++ b/test/sql/json/table/read_json_union.test @@ -8,28 +8,28 @@ statement ok pragma enable_verification query I -SELECT data FROM read_ndjson('data/json/union.ndjson', columns={data: 'UNION(name VARCHAR, age INT, veteran BOOL)'}) +SELECT data FROM read_ndjson('{DATA_DIR}/json/union.ndjson', columns={data: 'UNION(name VARCHAR, age INT, veteran BOOL)'}) ---- Frank 5 false statement error -SELECT * FROM read_ndjson('data/json/malformed/union/bad_key.ndjson', columns={data: 'UNION(name VARCHAR, age INT, veteran BOOL)'}) +SELECT * FROM read_ndjson('{DATA_DIR}/json/malformed/union/bad_key.ndjson', columns={data: 'UNION(name VARCHAR, age INT, veteran BOOL)'}) ---- Found object containing unknown key, 
instead of union statement error -SELECT * FROM read_ndjson('data/json/malformed/union/empty_object.ndjson', columns={data: 'UNION(name VARCHAR, age INT, veteran BOOL)'}) +SELECT * FROM read_ndjson('{DATA_DIR}/json/malformed/union/empty_object.ndjson', columns={data: 'UNION(name VARCHAR, age INT, veteran BOOL)'}) ---- Found empty object, instead of union statement error -SELECT * FROM read_ndjson('data/json/malformed/union/non_object.ndjson', columns={data: 'UNION(name VARCHAR, age INT, veteran BOOL)'}) +SELECT * FROM read_ndjson('{DATA_DIR}/json/malformed/union/non_object.ndjson', columns={data: 'UNION(name VARCHAR, age INT, veteran BOOL)'}) ---- Expected an object representing a union, got uint statement error -SELECT * FROM read_ndjson('data/json/malformed/union/too_many_keys.ndjson', columns={data: 'UNION(name VARCHAR, age INT, veteran BOOL)'}) +SELECT * FROM read_ndjson('{DATA_DIR}/json/malformed/union/too_many_keys.ndjson', columns={data: 'UNION(name VARCHAR, age INT, veteran BOOL)'}) ---- Found object containing more than one key, instead of union diff --git a/test/sql/json/test_json_copy.test_slow b/test/sql/json/test_json_copy.test_slow index dfc0951bbe93..390ded5128a0 100644 --- a/test/sql/json/test_json_copy.test_slow +++ b/test/sql/json/test_json_copy.test_slow @@ -13,13 +13,13 @@ statement ok create table integers as select 42 i statement ok -copy integers to '__TEST_DIR__/integers.json.gz' (FORMAT JSON, COMPRESSION GZIP) +copy integers to '{TEMP_DIR}/integers.json.gz' (FORMAT JSON, COMPRESSION GZIP) statement ok delete from integers query I -copy integers from '__TEST_DIR__/integers.json.gz' +copy integers from '{TEMP_DIR}/integers.json.gz' ---- 1 @@ -43,7 +43,7 @@ copy ( select * exclude (varchar, blob, bit, medium_enum, large_enum, hugeint, uhugeint, bignum) replace (dec_18_6::DOUBLE as dec_18_6, dec38_10::DOUBLE as dec38_10) from test_all_types() -) to '__TEST_DIR__/all_types.ndjson' +) to '{TEMP_DIR}/all_types.ndjson' statement ok create table roundtrip as @@ -53,7 +53,7 @@ from test_all_types() limit 0 statement ok -copy roundtrip from '__TEST_DIR__/all_types.ndjson' +copy roundtrip from '{TEMP_DIR}/all_types.ndjson' query I nosort q0 select * from roundtrip @@ -67,10 +67,10 @@ copy ( select * exclude (varchar, blob, bit, medium_enum, large_enum, hugeint, uhugeint, bignum) replace (dec_18_6::DOUBLE as dec_18_6, dec38_10::DOUBLE as dec38_10) from test_all_types() -) to '__TEST_DIR__/all_types.json' (array true) +) to '{TEMP_DIR}/all_types.json' (array true) statement ok -copy roundtrip from '__TEST_DIR__/all_types.json' (array true) +copy roundtrip from '{TEMP_DIR}/all_types.json' (array true) query I nosort q0 select * from roundtrip @@ -80,12 +80,12 @@ select * from roundtrip # test issue 18816 statement ok copy (select 42 i) -to '__TEST_DIR__/json_batch' +to '{TEMP_DIR}/json_batch' (format json, per_thread_output true, overwrite true); statement ok copy (select 42 i) -to '__TEST_DIR__/json_batch' +to '{TEMP_DIR}/json_batch' (format json, per_thread_output true, append true); # test issue #6305 @@ -97,10 +97,10 @@ copy ( (uuid(), 15), (uuid(), 5) v (order_id, revenue) -) to '__TEST_DIR__/query.json' (format json) +) to '{TEMP_DIR}/query.json' (format json) query II -select typeof(order_id), revenue from '__TEST_DIR__/query.json' +select typeof(order_id), revenue from '{TEMP_DIR}/query.json' ---- UUID 10 UUID 10 @@ -116,10 +116,10 @@ copy ( ({order_id: uuid(), revenue: 15}), ({order_id: uuid(), revenue: 5}), t (v) -) to '__TEST_DIR__/query.json' (format json) +) to 
'{TEMP_DIR}/query.json' (format json) query II -select typeof(order_id), revenue from '__TEST_DIR__/query.json' +select typeof(order_id), revenue from '{TEMP_DIR}/query.json' ---- UUID 10 UUID 10 @@ -135,10 +135,10 @@ copy ( (uuid(), 15), (uuid(), 5) v (order_id, revenue) -) to '__TEST_DIR__/query.json' (format json) +) to '{TEMP_DIR}/query.json' (format json) query II -select typeof(order_id), revenue from '__TEST_DIR__/query.json' +select typeof(order_id), revenue from '{TEMP_DIR}/query.json' ---- UUID 10 UUID 10 @@ -154,10 +154,10 @@ copy ( (uuid(), 15), (uuid(), 5) v (order_id, revenue) -) to '__TEST_DIR__/query.json' (format json) +) to '{TEMP_DIR}/query.json' (format json) query II -select typeof(order_id), revenue from '__TEST_DIR__/query.json' +select typeof(order_id), revenue from '{TEMP_DIR}/query.json' ---- UUID 11 UUID 11 @@ -165,10 +165,10 @@ UUID 16 UUID 6 statement ok -copy (select 42 as a, a + 1) to '__TEST_DIR__/out.json' (format json); +copy (select 42 as a, a + 1) to '{TEMP_DIR}/out.json' (format json); query II -select * from '__TEST_DIR__/out.json' +select * from '{TEMP_DIR}/out.json' ---- 42 43 @@ -177,11 +177,11 @@ create table conclusions (conclusion varchar) # works because we auto-detect by default statement ok -copy conclusions from 'data/json/top_level_array.json' +copy conclusions from '{DATA_DIR}/json/top_level_array.json' # doesn't work if we disable auto-detection statement error -copy conclusions from 'data/json/top_level_array.json' (AUTO_DETECT FALSE) +copy conclusions from '{DATA_DIR}/json/top_level_array.json' (AUTO_DETECT FALSE) ---- Invalid Input Error @@ -190,7 +190,7 @@ delete from conclusions; # and also if we say it's an array statement ok -copy conclusions from 'data/json/top_level_array.json' (ARRAY TRUE) +copy conclusions from '{DATA_DIR}/json/top_level_array.json' (ARRAY TRUE) query I select * from conclusions @@ -200,16 +200,16 @@ cancelled # same with ARRAY FALSE statement error -copy conclusions from 'data/json/top_level_array.json' (ARRAY FALSE) +copy conclusions from '{DATA_DIR}/json/top_level_array.json' (ARRAY FALSE) ---- Invalid Input Error # we can also write JSON arrays instead of newline-delimited statement ok -copy (select range as i from range(10)) to '__TEST_DIR__/my.json' (ARRAY TRUE) +copy (select range as i from range(10)) to '{TEMP_DIR}/my.json' (ARRAY TRUE) query T -select * from read_json_auto('__TEST_DIR__/my.json', format='array') +select * from read_json_auto('{TEMP_DIR}/my.json', format='array') ---- 0 1 @@ -224,32 +224,32 @@ select * from read_json_auto('__TEST_DIR__/my.json', format='array') # compression stuff (cannot be empty) statement error -copy (select range as i from range(10)) to '__TEST_DIR__/my.json' (COMPRESSION) +copy (select range as i from range(10)) to '{TEMP_DIR}/my.json' (COMPRESSION) ---- Invalid Input Error statement ok -copy (select range as i from range(10)) to '__TEST_DIR__/my.json.gz' (COMPRESSION GZIP) +copy (select range as i from range(10)) to '{TEMP_DIR}/my.json.gz' (COMPRESSION GZIP) statement ok create table my_range (i bigint) statement ok -copy my_range from '__TEST_DIR__/my.json.gz' (COMPRESSION GZIP) +copy my_range from '{TEMP_DIR}/my.json.gz' (COMPRESSION GZIP) # we can auto-detect even though we have compressed statement ok -select * from '__TEST_DIR__/my.json.gz' +select * from '{TEMP_DIR}/my.json.gz' # works with zstd too, but we skip this test for now # it works in CLI, but not in unittest for some reason (ZSTD is not in VirtualFileSystem::compressed_fs) require parquet statement 
ok -copy (select range as i from range(10)) to '__TEST_DIR__/my.json.zst' (COMPRESSION ZSTD) +copy (select range as i from range(10)) to '{TEMP_DIR}/my.json.zst' (COMPRESSION ZSTD) statement ok -select * from '__TEST_DIR__/my.json.zst' +select * from '{TEMP_DIR}/my.json.zst' query I select * from my_range diff --git a/test/sql/json/test_json_serialize_sql.test b/test/sql/json/test_json_serialize_sql.test index 01ff0641a084..b9459e17e7a7 100644 --- a/test/sql/json/test_json_serialize_sql.test +++ b/test/sql/json/test_json_serialize_sql.test @@ -86,6 +86,12 @@ PRAGMA json_execute_serialized_sql( 15 24 +# Test execute json serialized sql with multiple nested type tags +query I +select json_serialize_sql($$select '10'::blob$$); +---- +:.*query_location:.*11 + # TODO: We should add an option for the deserializer to allow missing properties in the JSON if they can be default constructed # Alternatively, make them optional for all the Deserializer's. statement error diff --git a/test/sql/logging/http_log_timing.test b/test/sql/logging/http_log_timing.test new file mode 100644 index 000000000000..d0c2f6d965d7 --- /dev/null +++ b/test/sql/logging/http_log_timing.test @@ -0,0 +1,32 @@ +# name: test/sql/logging/http_log_timing.test +# description: Test basic logging functionality +# group: [logging] + +require noforcestorage + +require httpfs + +statement ok +CALL enable_logging('HTTP') + +statement ok +SET http_retries=2 + +statement error +FROM "http://localhost:2338/test.csv" +---- + +# Confirm that our request timing returns something reasonable +query III +SELECT + request.type, + request.duration_ms >= 0, + request.duration_ms <= 1000 * 1000 +FROM + duckdb_logs_parsed('HTTP') +WHERE + request.type='HEAD' +---- +HEAD true true +HEAD true true +HEAD true true diff --git a/test/sql/logging/logging_call_functions.test b/test/sql/logging/logging_call_functions.test index dc0fc751cf9f..9db811ac8025 100644 --- a/test/sql/logging/logging_call_functions.test +++ b/test/sql/logging/logging_call_functions.test @@ -93,7 +93,7 @@ logging_storage memory ### statement ok -CALL enable_logging(['QueryLog', 'FileSystem']); +CALL enable_logging(['QueryLog', 'filesystem']); # Use sorted list to avoid indeterministic result query II diff --git a/test/sql/logging/logging_csv.test b/test/sql/logging/logging_csv.test index 0e51af45ae40..66b6a8efae16 100644 --- a/test/sql/logging/logging_csv.test +++ b/test/sql/logging/logging_csv.test @@ -9,14 +9,14 @@ require notwindows # Enable FileSystem logging to single csv file statement ok -CALL enable_logging(['FileSystem'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_log.csv', 'normalize': false}); +CALL enable_logging(['FileSystem'], storage='file', storage_config={'path': '{TEMP_DIR}/logging_csv_log.csv', 'normalize': false}); # Read some data to trigger FileSystem log statement ok -FROM "data/csv/big_number.csv" +FROM "{DATA_DIR}/csv/big_number.csv" query IIIIII -DESCRIBE FROM '__TEST_DIR__/logging_csv_log.csv'; +DESCRIBE FROM '{TEMP_DIR}/logging_csv_log.csv'; ---- context_id BIGINT YES NULL NULL NULL scope VARCHAR YES NULL NULL NULL @@ -24,7 +24,7 @@ connection_id BIGINT YES NULL NULL NULL transaction_id BIGINT YES NULL NULL NULL query_id BIGINT YES NULL NULL NULL thread_id VARCHAR YES NULL NULL NULL -timestamp TIMESTAMP YES NULL NULL NULL +timestamp TIMESTAMP WITH TIME ZONE YES NULL NULL NULL type VARCHAR YES NULL NULL NULL log_level VARCHAR YES NULL NULL NULL message VARCHAR YES NULL NULL NULL @@ -35,13 +35,13 @@ SELECT scope, path: 
parse_duckdb_log_message('FileSystem', message)['path'], op: parse_duckdb_log_message('FileSystem', message)['op'], -FROM "__TEST_DIR__/logging_csv_log.csv" -WHERE path = 'data/csv/big_number.csv'; +FROM "{TEMP_DIR}/logging_csv_log.csv" +WHERE path = '{DATA_DIR}/csv/big_number.csv'; ---- -CONNECTION data/csv/big_number.csv OPEN -CONNECTION data/csv/big_number.csv READ -CONNECTION data/csv/big_number.csv READ -CONNECTION data/csv/big_number.csv CLOSE +CONNECTION {DATA_DIR}/csv/big_number.csv OPEN +CONNECTION {DATA_DIR}/csv/big_number.csv READ +CONNECTION {DATA_DIR}/csv/big_number.csv READ +CONNECTION {DATA_DIR}/csv/big_number.csv CLOSE statement ok CALL disable_logging() @@ -51,17 +51,17 @@ statement ok CALL truncate_duckdb_logs(); query I -select count(*) FROM "__TEST_DIR__/logging_csv_log.csv"; +select count(*) FROM "{TEMP_DIR}/logging_csv_log.csv"; ---- 0 # Enable FileSystem logging to normalized files statement ok -CALL enable_logging(['FileSystem'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_logs_normalized', 'normalize': true}); +CALL enable_logging(['FileSystem'], storage='file', storage_config={'path': '{TEMP_DIR}/logging_csv_logs_normalized', 'normalize': true}); # Read some data to trigger FileSystem log statement ok -FROM "data/csv/big_number.csv" +FROM "{DATA_DIR}/csv/big_number.csv" # Ensure we can reparse the structured log message from the csv query III @@ -69,36 +69,36 @@ SELECT context_id is not null, path: parse_duckdb_log_message('FileSystem', message)['path'], op: parse_duckdb_log_message('FileSystem', message)['op'], -FROM "__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_entries.csv" -WHERE path = 'data/csv/big_number.csv'; +FROM "{TEMP_DIR}/logging_csv_logs_normalized/duckdb_log_entries.csv" +WHERE path = '{DATA_DIR}/csv/big_number.csv'; ---- -1 data/csv/big_number.csv OPEN -1 data/csv/big_number.csv READ -1 data/csv/big_number.csv READ -1 data/csv/big_number.csv CLOSE +1 {DATA_DIR}/csv/big_number.csv OPEN +1 {DATA_DIR}/csv/big_number.csv READ +1 {DATA_DIR}/csv/big_number.csv READ +1 {DATA_DIR}/csv/big_number.csv CLOSE # Contexts are now in a separate csv file # TODO: is this correct? 
query I SELECT scope -FROM "__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_contexts.csv"; +FROM "{TEMP_DIR}/logging_csv_logs_normalized/duckdb_log_contexts.csv"; ---- CONNECTION CONNECTION # Check schema query IIIIII -DESCRIBE FROM '__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_entries.csv'; +DESCRIBE FROM '{TEMP_DIR}/logging_csv_logs_normalized/duckdb_log_entries.csv'; ---- context_id BIGINT YES NULL NULL NULL -timestamp TIMESTAMP YES NULL NULL NULL +timestamp TIMESTAMP WITH TIME ZONE YES NULL NULL NULL type VARCHAR YES NULL NULL NULL log_level VARCHAR YES NULL NULL NULL message VARCHAR YES NULL NULL NULL # Check schema query IIIIII -DESCRIBE FROM '__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_contexts.csv'; +DESCRIBE FROM '{TEMP_DIR}/logging_csv_logs_normalized/duckdb_log_contexts.csv'; ---- context_id BIGINT YES NULL NULL NULL scope VARCHAR YES NULL NULL NULL @@ -115,12 +115,12 @@ statement ok CALL truncate_duckdb_logs(); query I -select count(*) FROM "__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_contexts.csv"; +select count(*) FROM "{TEMP_DIR}/logging_csv_logs_normalized/duckdb_log_contexts.csv"; ---- 0 query I -select count(*) FROM "__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_entries.csv"; +select count(*) FROM "{TEMP_DIR}/logging_csv_logs_normalized/duckdb_log_entries.csv"; ---- 0 @@ -162,7 +162,7 @@ statement ok CALL truncate_duckdb_logs() statement ok -CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_log.csv'}); +CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '{TEMP_DIR}/logging_csv_log.csv'}); statement ok SELECT 1; @@ -171,7 +171,7 @@ statement ok CALL truncate_duckdb_logs() # statement error -# CALL enable_logging(['QueryLog'], storage='file', storage_config={'entries_path': '__TEST_DIR__/logging_csv_log_entries.csv', 'contexts_path': '__TEST_DIR__/logging_csv_log_contexts.csv'}); +# CALL enable_logging(['QueryLog'], storage='file', storage_config={'entries_path': '{TEMP_DIR}/logging_csv_log_entries.csv', 'contexts_path': '{TEMP_DIR}/logging_csv_log_contexts.csv'}); # ---- # Invalid Configuration Error: Cannot change between normalized and denormalized with a non-empty log. Please truncate the log first @@ -180,12 +180,12 @@ explain FROM duckdb_logs # This is not allowed statement error -CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_log.csv', 'normalize': true}); +CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '{TEMP_DIR}/logging_csv_log.csv', 'normalize': true}); ---- Invalid Configuration Error: Can not set path to ' statement error -CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_log.csv', 'normalize': true}); +CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '{TEMP_DIR}/logging_csv_log.csv', 'normalize': true}); ---- ' while normalize is true. Normalize will make DuckDB write multiple log files to more efficiently store log entries. Please specify a directory path instead of a csv file path, or set normalize to false. 
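+# Note: the expected text after ---- only needs to be a substring of the raised error, which is why the same failing call is checked twice above: once against the fragment before the (run-dependent) path and once against the fragment after it.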
@@ -197,12 +197,12 @@ CALL disable_logging(); # Test switching CSV delimiters statement ok -CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_log_delim.csv', 'delim': ';'}); +CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '{TEMP_DIR}/logging_csv_log_delim.csv', 'delim': ';'}); statement ok SELECT 1; query I -SELECT message FROM read_csv('__TEST_DIR__/logging_csv_log_delim.csv', delim=';'); +SELECT message FROM read_csv('{TEMP_DIR}/logging_csv_log_delim.csv', delim=';'); ---- SELECT 1 diff --git a/test/sql/logging/logging_file_bind_replace.test b/test/sql/logging/logging_file_bind_replace.test index e33d043f5724..aa27d0e170de 100644 --- a/test/sql/logging/logging_file_bind_replace.test +++ b/test/sql/logging/logging_file_bind_replace.test @@ -30,6 +30,11 @@ SELECT message FROM duckdb_logs_parsed('QueryLog') WHERE starts_with(message, 'S ---- SELECT 1 as a +query I +SELECT message FROM duckdb_logs_parsed('querylog') WHERE starts_with(message, 'SELECT 1'); +---- +SELECT 1 as a + statement ok CALL truncate_duckdb_logs(); diff --git a/test/sql/logging/logging_file_persistence.test b/test/sql/logging/logging_file_persistence.test index e836810b9c38..633f5ab4dcf3 100644 --- a/test/sql/logging/logging_file_persistence.test +++ b/test/sql/logging/logging_file_persistence.test @@ -6,15 +6,15 @@ require notwindows # load the DB from disk -load __TEST_DIR__/logging_file_persistence.test +load {TEMP_DIR}/logging_file_persistence.test # Enable FileSystem logging to single csv file statement ok -CALL enable_logging('FileSystem', storage='file', storage_config={'path': '__TEST_DIR__/logging_file_persistence.csv'}); +CALL enable_logging('FileSystem', storage='file', storage_config={'path': '{TEMP_DIR}/logging_file_persistence.csv'}); # Read some data to trigger FileSystem log statement ok -FROM "data/csv/big_number.csv" +FROM "{DATA_DIR}/csv/big_number.csv" statement ok CALL disable_logging(); @@ -27,7 +27,7 @@ restart # TODO: configuring log storage config is clunky now statement ok -CALL enable_logging('FileSystem', storage='file', storage_config={'path': '__TEST_DIR__/logging_file_persistence.csv'}); +CALL enable_logging('FileSystem', storage='file', storage_config={'path': '{TEMP_DIR}/logging_file_persistence.csv'}); statement ok CALL disable_logging(); diff --git a/test/sql/logging/physical_operator_logging.test_slow b/test/sql/logging/physical_operator_logging.test_slow index d399f905858a..29373c24440e 100644 --- a/test/sql/logging/physical_operator_logging.test_slow +++ b/test/sql/logging/physical_operator_logging.test_slow @@ -43,6 +43,11 @@ select count(*) from duckdb_logs_parsed('PhysicalOperator') where class = 'JoinH ---- 16 +query I +select info.total_probe_matches from duckdb_logs_parsed('PhysicalOperator') where class = 'PhysicalHashJoin' and event = 'GetData' +---- +3000000 + # all flushed row groups should be logged, these should be equal query I select count(*) = ( diff --git a/test/sql/merge/merge_into_invalid_action.test b/test/sql/merge/merge_into_invalid_action.test new file mode 100644 index 000000000000..424ee456dec8 --- /dev/null +++ b/test/sql/merge/merge_into_invalid_action.test @@ -0,0 +1,22 @@ +# name: test/sql/merge/merge_into_invalid_action.test +# description: Test MERGE INTO with invalid actions +# group: [merge] + +statement ok +CREATE TABLE t AS SELECT range a FROM generate_series(0,9) t(range); + +statement error +MERGE INTO t + USING (SELECT range a from generate_series (10,19) t(range)) AS s + 
USING(a) + WHEN NOT MATCHED BY TARGET THEN DELETE RETURNING merge_action, *; +---- +cannot be combined with UPDATE or DELETE actions + +statement error +MERGE INTO t + USING (SELECT range a from generate_series (10,19) t(range)) AS s + USING(a) + WHEN NOT MATCHED BY TARGET THEN UPDATE RETURNING merge_action, *; +---- +cannot be combined with UPDATE or DELETE actions diff --git a/test/sql/merge/merge_into_join_as_filter.test b/test/sql/merge/merge_into_join_as_filter.test new file mode 100644 index 000000000000..36b442017ff6 --- /dev/null +++ b/test/sql/merge/merge_into_join_as_filter.test @@ -0,0 +1,31 @@ +# name: test/sql/merge/merge_into_join_as_filter.test +# description: Test MERGE INTO with joins that are effectively just filters +# group: [merge] + +statement ok +create table foo (bar integer); + +statement ok +insert into foo values (1); + +statement ok +merge into foo as f using (select 2 as bar) b on f.bar is not null when matched then update when not matched then insert; + +query I +FROM foo +---- +2 + +statement ok +create or replace table aaa (id int, status varchar, flag int, starttime datetime, endtime datetime); + +statement ok +merge into aaa + using ( + select 1 as id, 'xx' as status, 1 as flag, now() as starttime, null as endtime + ) as upserts + on (upserts.id = aaa.id and aaa.flag =1::int and aaa.status = upserts.status) + when matched then + update set endtime = upserts.starttime + when not matched then + insert by name; diff --git a/test/sql/merge/merge_into_subquery_action.test b/test/sql/merge/merge_into_subquery_action.test new file mode 100644 index 000000000000..b887512d95e3 --- /dev/null +++ b/test/sql/merge/merge_into_subquery_action.test @@ -0,0 +1,44 @@ +# name: test/sql/merge/merge_into_subquery_action.test +# description: Test MERGE INTO with subqueries in the merge action condition +# group: [merge] + +statement ok +CREATE TABLE Totals(item_id int, balance int, biggest_item BOOL); + +statement ok +CREATE TABLE Buy(item_id int, volume int); + +statement ok +INSERT INTO Buy values(10, 1000), (30, 300), (20, 2000); + +statement ok +MERGE INTO Totals USING Buy USING (item_id) +WHEN NOT MATCHED AND Buy.volume = (SELECT MAX(Volume) FROM Buy) + THEN INSERT VALUES (Buy.item_id, Buy.volume, true) +WHEN NOT MATCHED + THEN INSERT VALUES (Buy.item_id, Buy.volume, false) + +query III +SELECT * FROM Totals ORDER BY item_id +---- +10 1000 false +20 2000 true +30 300 false + +# original issue +statement ok +CREATE TABLE dummy_edge(id INTEGER, ref_id INTEGER, "value" VARCHAR, note VARCHAR); + +statement ok +CREATE TABLE dummy_user(user_id INTEGER, "name" VARCHAR, email VARCHAR, created_at DATE); + +statement ok +CREATE TABLE dummy_null(id INTEGER, "value" INTEGER, optional_text VARCHAR); + +statement ok +MERGE INTO main.dummy_edge as target_0 +USING dummy_user as ref_0 +ON target_0.note = ref_0.name +WHEN NOT MATCHED AND EXISTS ( + SELECT id FROM main.dummy_null WHERE true +) THEN DO NOTHING diff --git a/test/sql/order/persistent_list_of_varchar_order.test_slow b/test/sql/order/persistent_list_of_varchar_order.test_slow index 83479b144197..5000da2d51d1 100644 --- a/test/sql/order/persistent_list_of_varchar_order.test_slow +++ b/test/sql/order/persistent_list_of_varchar_order.test_slow @@ -4,13 +4,13 @@ require parquet -load __TEST_DIR__/candidate.db +load {TEMP_DIR}/candidate.db statement ok PRAGMA enable_verification statement ok -CREATE TABLE candidate AS SELECT * FROM 'data/parquet-testing/candidate.parquet' +CREATE TABLE candidate AS SELECT * FROM 
'{DATA_DIR}/parquet-testing/candidate.parquet' statement ok select * from candidate order by name; diff --git a/test/sql/order/top_n_nulls.test b/test/sql/order/top_n_nulls.test index eac9e79dc35d..65e60461a152 100644 --- a/test/sql/order/top_n_nulls.test +++ b/test/sql/order/top_n_nulls.test @@ -8,7 +8,7 @@ statement ok PRAGMA enable_verification statement ok -CREATE TABLE orders_small AS SELECT * FROM parquet_scan('data/parquet-testing/orders_small_parquet.test'); +CREATE TABLE orders_small AS SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/orders_small_parquet.test'); query IIII nosort select o_orderkey, o_clerk, o_orderstatus, o_totalprice from orders_small diff --git a/test/sql/ordinality/ordinality.test_slow b/test/sql/ordinality/ordinality.test_slow index 8cde3b0c2e5a..4d8b684a6e6a 100644 --- a/test/sql/ordinality/ordinality.test_slow +++ b/test/sql/ordinality/ordinality.test_slow @@ -17,18 +17,18 @@ true query II nosort read_csv_result -SELECT drug_exposure_id, ordinality FROM read_csv('data/csv/drug_exposure.csv') WITH ORDINALITY ORDER BY ordinality,drug_exposure_id; +SELECT drug_exposure_id, ordinality FROM read_csv('{DATA_DIR}/csv/drug_exposure.csv') WITH ORDINALITY ORDER BY ordinality,drug_exposure_id; query II nosort read_csv_result -SELECT drug_exposure_id, row_number() OVER () AS ordinality FROM read_csv('data/csv/drug_exposure.csv') ORDER BY ordinality,drug_exposure_id; +SELECT drug_exposure_id, row_number() OVER () AS ordinality FROM read_csv('{DATA_DIR}/csv/drug_exposure.csv') ORDER BY ordinality,drug_exposure_id; require parquet query III nosort parquet_result -SELECT name,id,ordinality FROM read_parquet('data/parquet-testing/candidate.parquet') WITH ORDINALITY ORDER BY ordinality,name,id; +SELECT name,id,ordinality FROM read_parquet('{DATA_DIR}/parquet-testing/candidate.parquet') WITH ORDINALITY ORDER BY ordinality,name,id; query III nosort parquet_result -SELECT name,id, row_number() OVER () AS ordinality FROM read_parquet('data/parquet-testing/candidate.parquet') ORDER BY ordinality,name,id; \ No newline at end of file +SELECT name,id, row_number() OVER () AS ordinality FROM read_parquet('{DATA_DIR}/parquet-testing/candidate.parquet') ORDER BY ordinality,name,id; \ No newline at end of file diff --git a/test/sql/ordinality/ordinality_constant.test b/test/sql/ordinality/ordinality_constant.test index 3f1d46dc8a53..d9f1d338f7b4 100644 --- a/test/sql/ordinality/ordinality_constant.test +++ b/test/sql/ordinality/ordinality_constant.test @@ -19,7 +19,7 @@ SELECT o,range FROM range(1) WITH ORDINALITY AS _(range,o); query II -SELECT col = 'data/csv/customer.csv' OR col = 'data\csv\customer.csv', o FROM glob('data/csv/customer.csv') with ordinality AS _(col,o); +SELECT col = '{DATA_DIR}/csv/customer.csv' OR col = '{DATA_DIR}\csv\customer.csv', o FROM glob('{DATA_DIR}/csv/customer.csv') with ordinality AS _(col,o); ---- true 1 @@ -125,19 +125,19 @@ SELECT * FROM unnest([41,42,43]) WITH ORDINALITY; query II nosort read_csv_result -SELECT column00, ordinality FROM read_csv('data/csv/customer.csv') WITH ORDINALITY ORDER BY ordinality,column00; +SELECT column00, ordinality FROM read_csv('{DATA_DIR}/csv/customer.csv') WITH ORDINALITY ORDER BY ordinality,column00; query II nosort read_csv_result -SELECT column00, row_number() OVER () AS ordinality FROM read_csv('data/csv/customer.csv') ORDER BY ordinality,column00; +SELECT column00, row_number() OVER () AS ordinality FROM read_csv('{DATA_DIR}/csv/customer.csv') ORDER BY ordinality,column00; require parquet query II nosort 
parquet_result -SELECT d,ordinality FROM read_parquet('data/parquet-testing/date.parquet') WITH ORDINALITY ORDER BY ordinality,d; +SELECT d,ordinality FROM read_parquet('{DATA_DIR}/parquet-testing/date.parquet') WITH ORDINALITY ORDER BY ordinality,d; query III nosort parquet_result -SELECT d, row_number() OVER () AS ordinality FROM read_parquet('data/parquet-testing/date.parquet') ORDER BY ordinality,d; \ No newline at end of file +SELECT d, row_number() OVER () AS ordinality FROM read_parquet('{DATA_DIR}/parquet-testing/date.parquet') ORDER BY ordinality,d; diff --git a/test/sql/parallelism/interquery/concurrent_attach_detach.cpp b/test/sql/parallelism/interquery/concurrent_attach_detach.cpp index 6410e46e3548..e66e46c7afb1 100644 --- a/test/sql/parallelism/interquery/concurrent_attach_detach.cpp +++ b/test/sql/parallelism/interquery/concurrent_attach_detach.cpp @@ -1,9 +1,12 @@ #include "catch.hpp" + #include "duckdb/common/atomic.hpp" #include "duckdb/common/map.hpp" #include "duckdb/common/mutex.hpp" -#include "duckdb/common/vector.hpp" #include "duckdb/common/optional_idx.hpp" +#include "duckdb/common/profiler.hpp" +#include "duckdb/common/vector.hpp" + #include "test_helpers.hpp" #include <thread> @@ -15,6 +18,25 @@ enum class AttachTaskType { CREATE_TABLE, LOOKUP, APPEND, APPLY_CHANGES, DESCRIB namespace { +string AttachTaskTypeToString(AttachTaskType task_type) { + switch (task_type) { + case AttachTaskType::CREATE_TABLE: + return "CREATE"; + case AttachTaskType::LOOKUP: + return "LOOKUP"; + case AttachTaskType::APPEND: + return "APPEND"; + case AttachTaskType::APPLY_CHANGES: + return "UPSERT"; + case AttachTaskType::DESCRIBE_TABLE: + return "DESCRIBE"; + case AttachTaskType::CHECKPOINT: + return "CHECKPOINT"; + default: + return "UNKNOWN"; + } +} + string test_dir_path; const string prefix = "db_"; const string suffix = ".db"; @@ -35,7 +57,7 @@ const idx_t nr_initial_rows = 2050; vector<vector<string>> logging; atomic<bool> success {true}; -duckdb::unique_ptr<MaterializedQueryResult> execQuery(Connection &conn, const string &query) { +unique_ptr<MaterializedQueryResult> execQuery(Connection &conn, const string &query) { auto result = conn.Query(query); if (result->HasError()) { Printer::PrintF("Failed to execute query %s:\n------\n%s\n-------", query, result->GetError()); @@ -56,10 +78,10 @@ struct DBInfo { struct AttachTask { AttachTaskType type; - duckdb::optional_idx db_id; - duckdb::optional_idx tbl_id; - duckdb::optional_idx tbl_size; - std::vector<idx_t> ids; + optional_idx db_id; + optional_idx tbl_id; + optional_idx tbl_size; + vector<idx_t> ids; bool actual_describe = false; }; @@ -83,7 +105,7 @@ struct AttachWorker { } public: - duckdb::unique_ptr<MaterializedQueryResult> execQuery(const string &query) { + unique_ptr<MaterializedQueryResult> execQuery(const string &query) { return ::execQuery(conn, query); } void Work(); @@ -92,7 +114,7 @@ struct AttachWorker { AttachTask RandomTask(); void createTbl(AttachTask &task); void lookup(AttachTask &task); - void append_internal(AttachTask &task); + void append_internal(AttachTask &task, const bool is_upsert); void append(AttachTask &task); void delete_internal(AttachTask &task); void apply_changes(AttachTask &task); @@ -146,8 +168,7 @@ void AttachWorker::createTbl(AttachTask &task) { string tbl_path = StringUtil::Format("%s.tbl_%d", getDBName(db_id), tbl_id); string create_sql = StringUtil::Format( - "CREATE TABLE %s(i BIGINT PRIMARY KEY, s VARCHAR, ts TIMESTAMP, obj STRUCT(key1 UBIGINT, key2 VARCHAR))", - tbl_path); + "CREATE TABLE %s(i BIGINT, s VARCHAR, ts TIMESTAMP, obj STRUCT(key1 UBIGINT, key2 VARCHAR))", tbl_path); addLog("; q: " + create_sql);
execQuery(create_sql); string insert_sql = "INSERT INTO " + tbl_path + @@ -179,6 +200,7 @@ void AttachWorker::lookup(AttachTask &task) { if (result->RowCount() == 0) { addLog("FAILURE - No rows returned from query"); success = false; + return; } if (!CHECK_COLUMN(result, 0, {Value::UBIGINT(expected_max_val)})) { success = false; @@ -196,11 +218,10 @@ result, 3, {Value::STRUCT({{"key1", Value::UBIGINT(expected_max_val)}, {"key2", to_string(expected_max_val)}})})) { success = false; - return; } } -void AttachWorker::append_internal(AttachTask &task) { +void AttachWorker::append_internal(AttachTask &task, bool is_upsert) { auto db_id = task.db_id.GetIndex(); auto tbl_id = task.tbl_id.GetIndex(); auto tbl_str = "tbl_" + to_string(tbl_id); @@ -208,18 +229,33 @@ addLog("db: " + getDBName(db_id) + "; table: " + tbl_str + "; append rows"); try { - Appender appender(conn, getDBName(db_id), DEFAULT_SCHEMA, tbl_str); - DataChunk chunk; - + // Set up the column types, then pick the appender: upserts go through a QueryAppender driving a MERGE INTO, plain appends through a regular table Appender. child_list_t<LogicalType> struct_children; struct_children.emplace_back(make_pair("key1", LogicalTypeId::UBIGINT)); struct_children.emplace_back(make_pair("key2", LogicalTypeId::VARCHAR)); const vector<LogicalType> types = {LogicalType::UBIGINT, LogicalType::VARCHAR, LogicalType::TIMESTAMP, LogicalType::STRUCT(struct_children)}; + unique_ptr<BaseAppender> base_appender; + if (is_upsert) { + auto query = StringUtil::Format("MERGE INTO %s.main.%s USING appended_data USING (i) WHEN MATCHED THEN " + "UPDATE WHEN NOT MATCHED THEN INSERT", + SQLIdentifier(getDBName(db_id)), SQLIdentifier(tbl_str)); + vector<string> names; + names.push_back("i"); + names.push_back("s"); + names.push_back("ts"); + names.push_back("obj"); + base_appender = make_uniq<QueryAppender>(conn, query, types, names); + } else { + base_appender = make_uniq<Appender>(conn, getDBName(db_id), DEFAULT_SCHEMA, tbl_str); + } + auto &appender = *base_appender; - // fill up datachunk + // Fill the data chunk. + DataChunk chunk; chunk.Initialize(*conn.context, types); + // Column 0: the numeric key "i". auto &col_ubigint = chunk.data[0]; auto data_ubigint = FlatVector::GetData<uint64_t>(col_ubigint); @@ -253,11 +289,9 @@ } catch (const std::exception &e) { addLog("Caught exception when using Appender: " + string(e.what())); success = false; - return; } catch (...)
{ addLog("Caught error when using Appender!"); success = false; - return; } } @@ -276,7 +310,7 @@ void AttachWorker::append(AttachTask &task) { task.ids.push_back(current_num_rows + i); } - append_internal(task); + append_internal(task, false); db_infos[db_id].tables[tbl_id].size += append_count; } @@ -308,8 +342,7 @@ void AttachWorker::apply_changes(AttachTask &task) { auto &db_infos = db_pool.db_infos; lock_guard lock(db_infos[db_id].mu); execQuery("BEGIN"); - delete_internal(task); - append_internal(task); + append_internal(task, true); execQuery("COMMIT"); } @@ -347,7 +380,6 @@ void AttachWorker::GetRandomTable(AttachTask &task) { auto db_id = task.db_id.GetIndex(); lock_guard lock(db_infos[db_id].mu); auto max_tbl_id = db_infos[db_id].table_count; - if (max_tbl_id == 0) { return; } @@ -360,7 +392,6 @@ AttachTask AttachWorker::RandomTask() { AttachTask result; idx_t scenario_id = std::rand() % 10; result.db_id = std::rand() % db_count; - auto db_id = result.db_id.GetIndex(); switch (scenario_id) { case 0: result.type = AttachTaskType::CREATE_TABLE; @@ -379,7 +410,8 @@ AttachTask AttachWorker::RandomTask() { GetRandomTable(result); if (result.tbl_id.IsValid()) { auto current_num_rows = result.tbl_size.GetIndex(); - idx_t delete_count = std::rand() % (STANDARD_VECTOR_SIZE / 3); + idx_t modulo = STANDARD_VECTOR_SIZE < 3 ? STANDARD_VECTOR_SIZE : STANDARD_VECTOR_SIZE / 3; + idx_t delete_count = std::rand() % modulo; if (delete_count == 0) { delete_count = 1; } @@ -410,6 +442,9 @@ AttachTask AttachWorker::RandomTask() { } void AttachWorker::Work() { + Profiler profiler; + AttachTask slowest_task; + for (idx_t i = 0; i < iteration_count; i++) { if (!success) { return; @@ -417,9 +452,9 @@ void AttachWorker::Work() { try { auto task = RandomTask(); - db_pool.addWorker(*this, task.db_id.GetIndex()); + profiler.Start(); switch (task.type) { case AttachTaskType::CREATE_TABLE: createTbl(task); @@ -444,7 +479,15 @@ void AttachWorker::Work() { success = false; return; } + profiler.End(); db_pool.removeWorker(*this, task.db_id.GetIndex()); + auto elapsed = profiler.Elapsed(); + + // NOTE: Magic threshold used for debugging slowness in this test. + // NOTE Set to a fairly high value for CI purposes. 
+ if (elapsed >= 0.5) { + Printer::PrintF("Slow task %s - took %lf seconds\n", AttachTaskTypeToString(task.type), elapsed); + } } catch (const std::exception &e) { addLog("Caught exception when running iterations: " + string(e.what())); diff --git a/test/sql/peg_parser/support_unreserved_keywords.test b/test/sql/peg_parser/support_unreserved_keywords.test index 7bae23fcaea7..eedc2348d90d 100644 --- a/test/sql/peg_parser/support_unreserved_keywords.test +++ b/test/sql/peg_parser/support_unreserved_keywords.test @@ -42,7 +42,7 @@ statement ok CALL check_peg_parser($TEST_PEG_PARSER$FROM database.schema.table;$TEST_PEG_PARSER$); statement ok -CALL check_peg_parser($TEST_PEG_PARSER$select * from 'data/csv/nullbyte.csv';$TEST_PEG_PARSER$); +CALL check_peg_parser($TEST_PEG_PARSER$select * from '{DATA_DIR}/csv/nullbyte.csv';$TEST_PEG_PARSER$); statement ok CALL check_peg_parser($TEST_PEG_PARSER$CREATE TABLE foo(try_cast INTEGER);$TEST_PEG_PARSER$); @@ -84,7 +84,7 @@ statement ok CALL check_peg_parser($TEST_PEG_PARSER$SELECT 1::SMALLINT$TEST_PEG_PARSER$); statement ok -CALL check_peg_parser($TEST_PEG_PARSER$copy integers to '__TEST_DIR__/integers.json.gz' (FORMAT JSON, COMPRESSION GZIP)$TEST_PEG_PARSER$); +CALL check_peg_parser($TEST_PEG_PARSER$copy integers to '{TEMP_DIR}/integers.json.gz' (FORMAT JSON, COMPRESSION GZIP)$TEST_PEG_PARSER$); statement ok CALL check_peg_parser($TEST_PEG_PARSER$CREATE OR REPLACE TABLE t( x VARCHAR USING COMPRESSION Dictionary );$TEST_PEG_PARSER$); @@ -99,10 +99,10 @@ statement ok CALL check_peg_parser($TEST_PEG_PARSER$create table t2 (id int, v_map struct(foo integer[]));$TEST_PEG_PARSER$); statement ok -CALL check_peg_parser($TEST_PEG_PARSER$ATTACH '__TEST_DIR__/encrypted.duckdb' AS encrypted (ENCRYPTION_KEY 'asdf');$TEST_PEG_PARSER$); +CALL check_peg_parser($TEST_PEG_PARSER$ATTACH '{TEMP_DIR}/encrypted.duckdb' AS encrypted (ENCRYPTION_KEY 'asdf');$TEST_PEG_PARSER$); statement ok -CALL check_peg_parser($TEST_PEG_PARSER$SELECT COUNT(*) FROM glob('__TEST_DIR__/attach_no_wal.db.wal');$TEST_PEG_PARSER$); +CALL check_peg_parser($TEST_PEG_PARSER$SELECT COUNT(*) FROM glob('{TEMP_DIR}/attach_no_wal.db.wal');$TEST_PEG_PARSER$); statement ok CALL check_peg_parser($TEST_PEG_PARSER$prepare p4 as select $name, $other_name$TEST_PEG_PARSER$); diff --git a/test/sql/pragma/profiling/test_logging_interaction.test b/test/sql/pragma/profiling/test_logging_interaction.test new file mode 100644 index 000000000000..35b676b07234 --- /dev/null +++ b/test/sql/pragma/profiling/test_logging_interaction.test @@ -0,0 +1,37 @@ +# name: test/sql/pragma/profiling/test_logging_interaction.test +# description: Test the interaction between profiling and logging.
+# group: [profiling] + +require skip_reload + +statement ok +PRAGMA profiling_output = '__TEST_DIR__/profile_attach.json'; + +statement ok +PRAGMA enable_profiling = 'json'; + +statement ok +ATTACH '__TEST_DIR__/profiler_logger.db' AS my_db; + +statement ok +USE my_db; + +statement ok +call enable_logging(); + +statement ok +CREATE TABLE small AS FROM range(100); + +statement ok +CREATE TABLE medium AS FROM range(10000); + +statement ok +CREATE TABLE big AS FROM range(1000000); + +statement ok +PRAGMA disable_profiling; + +query I +SELECT count(*) FROM duckdb_logs_parsed('Metrics') WHERE metric == 'CPU_TIME'; +---- +3 diff --git a/test/sql/pragma/test_pragma_version.test b/test/sql/pragma/test_pragma_version.test index 3fb8c4eb18be..e2aa037d4fe7 100644 --- a/test/sql/pragma/test_pragma_version.test +++ b/test/sql/pragma/test_pragma_version.test @@ -2,6 +2,8 @@ # description: Test version pragma # group: [pragma] +tags release + statement ok PRAGMA version; @@ -27,3 +29,18 @@ SELECT count(*) FROM pragma_version() WHERE library_version LIKE 'v%'; statement ok pragma extension_versions; + +statement ok +SET VARIABLE v = (select library_version from pragma_version()); + +statement ok +SET VARIABLE v_or_latest = (select CASE WHEN getvariable('v') ILIKE 'v%-dev%' THEN 'latest' WHEN getvariable('v') ILIKE 'v0.0.1' THEN 'latest' ELSE getvariable('v') END); + +statement ok +SET VARIABLE codename = (select CASE WHEN getvariable('v') ILIKE 'v%-dev%' THEN 'Development Version' WHEN getvariable('v') ILIKE 'v0.0.1' THEN 'Unknown Version' ELSE '%' END); + +query I +select count(*) FROM (select codename from pragma_version() WHERE codename LIKE getvariable('codename')); +---- +1 + diff --git a/test/sql/projection/select_star_like.test b/test/sql/projection/select_star_like.test index 1925dfe3cc80..44702b2830de 100644 --- a/test/sql/projection/select_star_like.test +++ b/test/sql/projection/select_star_like.test @@ -105,3 +105,34 @@ statement error SELECT * RENAME (col1 AS other_) SIMILAR TO '.*col.*' FROM integers ---- Rename list cannot be combined with a filtering operation + +# Create two tables with an overlapping column name +statement ok +CREATE TABLE t1(id INTEGER, col1 INTEGER, col2 INTEGER) + +statement ok +INSERT INTO t1 VALUES (1, 10, 20) + +statement ok +CREATE TABLE t2(name VARCHAR, category VARCHAR, col2 INTEGER) + +statement ok +INSERT INTO t2 VALUES ('foo', 'bar', 30) + +# t1.* LIKE should only select columns from t1 +query II +SELECT t1.* LIKE 'col%' FROM t1, t2 +---- +10 20 + +# t2.* LIKE should only select columns from t2 +query I +SELECT t2.* LIKE 'col%' FROM t1, t2 +---- +30 + +# Combining both tables - each should only select from its own table +query III +SELECT t1.* LIKE 'col%', t2.* LIKE 'col%' FROM t1, t2 +---- +10 20 30 diff --git a/test/sql/sample/test_sample_too_big.test b/test/sql/sample/test_sample_too_big.test new file mode 100644 index 000000000000..37a619d461db --- /dev/null +++ b/test/sql/sample/test_sample_too_big.test @@ -0,0 +1,35 @@ +# name: test/sql/sample/test_sample_too_big.test +# description: Test SAMPLE with sample sizes that are out of range +# group: [sample] + +require ram 16gb + +statement ok +CREATE TABLE t1(a INT); + +statement ok +INSERT INTO t1 VALUES(1), (2), (3), (3), (5); + +statement error +SELECT * FROM t1 TABLESAMPLE RESERVOIR(1222222220022220); +---- +:.*Sample rows.*out of range.* + +statement error +SELECT * FROM t1 WHERE a IN (SELECT * FROM t1 TABLESAMPLE RESERVOIR(1222222220022220)); +---- +:.*Sample rows.*out of range.* + +statement error +SELECT * FROM t1 WHERE a IN (SELECT *
FROM t1 TABLESAMPLE RESERVOIR(1000000001)); +---- +:.*Sample rows.*out of range.* + +query I +SELECT * FROM t1 WHERE a IN (SELECT * FROM t1 TABLESAMPLE RESERVOIR(1000000000)) order by all +---- +1 +2 +3 +3 +5 diff --git a/test/sql/settings/allowed_directories.test b/test/sql/settings/allowed_directories.test index 6e259e12d560..556d74a547c9 100644 --- a/test/sql/settings/allowed_directories.test +++ b/test/sql/settings/allowed_directories.test @@ -13,13 +13,13 @@ require json # we can set allowed_directories as much as we want statement ok -SET allowed_directories=['data/csv/glob'] +SET allowed_directories=['{DATA_DIR}/csv/glob'] statement ok RESET allowed_directories statement ok -SET allowed_directories=['data/csv/glob', 'data/csv/glob/1', 'data/parquet-testing/glob', 'data/json', '__TEST_DIR__'] +SET allowed_directories=['{DATA_DIR}/csv/glob', '{DATA_DIR}/csv/glob/1', '{DATA_DIR}/parquet-testing/glob', '{DATA_DIR}/json', '{TEMP_DIR}'] statement ok SET enable_external_access=false @@ -37,7 +37,7 @@ Cannot change allowed_directories when enable_external_access is disabled # we can read CSV files from the allowed_directories query III -SELECT * FROM 'data/csv/glob/f_1.csv' +SELECT * FROM '{DATA_DIR}/csv/glob/f_1.csv' ---- 1 alice alice@email.com 2 eve eve@email.com @@ -45,7 +45,7 @@ SELECT * FROM 'data/csv/glob/f_1.csv' # also within contained directories query I -SELECT * FROM 'data/csv/glob/a1/a1.csv' +SELECT * FROM '{DATA_DIR}/csv/glob/a1/a1.csv' ---- 2019-06-05 2019-06-15 @@ -53,7 +53,7 @@ SELECT * FROM 'data/csv/glob/a1/a1.csv' # we can also use "..", as long as we remain inside our directory query III -SELECT * FROM 'data/csv/glob/a1/../f_1.csv' +SELECT * FROM '{DATA_DIR}/csv/glob/a1/../f_1.csv' ---- 1 alice alice@email.com 2 eve eve@email.com @@ -61,7 +61,7 @@ SELECT * FROM 'data/csv/glob/a1/../f_1.csv' # and we can use ./ query III -SELECT * FROM 'data/csv/glob/./f_1.csv' +SELECT * FROM '{DATA_DIR}/csv/glob/./f_1.csv' ---- 1 alice alice@email.com 2 eve eve@email.com @@ -69,59 +69,59 @@ SELECT * FROM 'data/csv/glob/./f_1.csv' # we cannot read files that are not in the allowed directories statement error -SELECT * FROM 'data/csv/all_quotes.csv' +SELECT * FROM '{DATA_DIR}/csv/all_quotes.csv' ---- Permission Error # also not through usage of ".." statement error -SELECT * FROM 'data/csv/glob/../all_quotes.csv' +SELECT * FROM '{DATA_DIR}/csv/glob/../all_quotes.csv' ---- Permission Error # //.. 
edge case statement error -SELECT * FROM 'data/csv/glob//../all_quotes.csv' +SELECT * FROM '{DATA_DIR}/csv/glob//../all_quotes.csv' ---- Permission Error statement error -SELECT * FROM 'data/csv/glob/a1/../../all_quotes.csv' +SELECT * FROM '{DATA_DIR}/csv/glob/a1/../../all_quotes.csv' ---- Permission Error # we can also sniff csv files statement ok -SELECT * FROM sniff_csv('data/csv/glob/f_1.csv') +SELECT * FROM sniff_csv('{DATA_DIR}/csv/glob/f_1.csv') # but not outside of allowed directories statement error -SELECT * FROM sniff_csv('data/csv/all_quotes.csv') +SELECT * FROM sniff_csv('{DATA_DIR}/csv/all_quotes.csv') ---- Permission Error # we can also glob allowed directories query I -SELECT replace(fname, '\', '/') as fname FROM glob('data/csv/glob/*.csv') t(fname) +SELECT parse_filename(fname) as fname FROM glob('{DATA_DIR}/csv/glob/*.csv') t(fname) ---- -data/csv/glob/f_1.csv -data/csv/glob/f_2.csv -data/csv/glob/f_3.csv +f_1.csv +f_2.csv +f_3.csv statement error -SELECT * FROM glob('data/csv/**.csv') +SELECT * FROM glob('{DATA_DIR}/csv/**.csv') ---- Permission Error # we can write to our test dir statement ok -COPY (SELECT 42 i) TO '__TEST_DIR__/permission_test.csv' (FORMAT csv) +COPY (SELECT 42 i) TO '{TEMP_DIR}/permission_test.csv' (FORMAT csv) statement ok CREATE TABLE integers(i INT); statement ok -COPY integers FROM '__TEST_DIR__/permission_test.csv' +COPY integers FROM '{TEMP_DIR}/permission_test.csv' query I FROM integers @@ -141,7 +141,7 @@ Permission Error # we can attach databases in allowed directories statement ok -ATTACH '__TEST_DIR__/attached_dir.db' AS a1 +ATTACH '{TEMP_DIR}/attached_dir.db' AS a1 statement ok CREATE TABLE a1.integers(i INTEGER); @@ -165,7 +165,7 @@ Permission Error # export/import also work with allowed_directories statement ok -EXPORT DATABASE a1 TO '__TEST_DIR__/export_test' +EXPORT DATABASE a1 TO '{TEMP_DIR}/export_test' statement error EXPORT DATABASE a1 TO 'export_test' @@ -173,7 +173,7 @@ EXPORT DATABASE a1 TO 'export_test' Permission Error statement error -IMPORT DATABASE '__TEST_DIR__/export_test' +IMPORT DATABASE '{TEMP_DIR}/export_test' ---- Table with name "integers" already exists! 
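+# Note that the path check itself passed here: '{TEMP_DIR}/export_test' is inside an allowed directory, so the IMPORT gets far enough to fail on the duplicate table instead of a Permission Error.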
@@ -184,16 +184,16 @@ Permission Error # we can read parquet/json files query II -SELECT * FROM 'data/parquet-testing/glob/t1.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/glob/t1.parquet' ---- 1 a statement error -SELECT * FROM 'data/parquet-testing/aws2.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/aws2.parquet' ---- Permission Error query II -SELECT * FROM 'data/parquet-testing/glob/t1.parquet' +SELECT * FROM '{DATA_DIR}/parquet-testing/glob/t1.parquet' ---- 1 a diff --git a/test/sql/settings/allowed_paths.test b/test/sql/settings/allowed_paths.test index a50f918555d8..3940c8b25c0a 100644 --- a/test/sql/settings/allowed_paths.test +++ b/test/sql/settings/allowed_paths.test @@ -9,13 +9,13 @@ require no_extension_autoloading "EXPECTED: Test disable loading of extensions" # we can set allowed_directories as much as we want statement ok -SET allowed_paths=['data/csv/glob/f_1.csv'] +SET allowed_paths=['{DATA_DIR}/csv/glob/f_1.csv'] statement ok RESET allowed_paths statement ok -SET allowed_paths=['data/csv/glob/f_1.csv', '__TEST_DIR__/allowed_file.csv'] +SET allowed_paths=['{DATA_DIR}/csv/glob/f_1.csv', '{TEMP_DIR}/allowed_file.csv'] statement ok SET enable_external_access=false @@ -33,7 +33,7 @@ Cannot change allowed_paths when enable_external_access is disabled # we can read our allowed files query III -SELECT * FROM 'data/csv/glob/f_1.csv' +SELECT * FROM '{DATA_DIR}/csv/glob/f_1.csv' ---- 1 alice alice@email.com 2 eve eve@email.com @@ -41,17 +41,17 @@ SELECT * FROM 'data/csv/glob/f_1.csv' # but not files that are not allowed statement error -SELECT * FROM 'data/csv/glob/a1/a1.csv' +SELECT * FROM '{DATA_DIR}/csv/glob/a1/a1.csv' ---- Permission Error # we can also write to our allowed file statement ok -COPY (SELECT 42 i) TO '__TEST_DIR__/allowed_file.csv' +COPY (SELECT 42 i) TO '{TEMP_DIR}/allowed_file.csv' # but not to not-allowed files statement error -COPY (SELECT 42 i) TO '__TEST_DIR__/not_allowed_file.csv' +COPY (SELECT 42 i) TO '{TEMP_DIR}/not_allowed_file.csv' ---- Permission Error diff --git a/test/sql/settings/test_disabled_file_systems.test b/test/sql/settings/test_disabled_file_systems.test index 228a251046c8..d2453afe5ef3 100644 --- a/test/sql/settings/test_disabled_file_systems.test +++ b/test/sql/settings/test_disabled_file_systems.test @@ -24,7 +24,7 @@ SET disabled_filesystems=''; # we can read from the local file system statement ok -SELECT * FROM read_csv_auto('data/csv/auto/skip_row.csv') +SELECT * FROM read_csv_auto('{DATA_DIR}/csv/auto/skip_row.csv') statement ok SET disabled_filesystems='LocalFileSystem'; @@ -35,7 +35,7 @@ RESET disabled_filesystems; File system "LocalFileSystem" has been disabled previously statement error -SELECT * FROM read_csv_auto('data/csv/auto/skip_row.csv') +SELECT * FROM read_csv_auto('{DATA_DIR}/csv/auto/skip_row.csv') ---- File system LocalFileSystem has been disabled by configuration diff --git a/test/sql/storage/compact_block_size/compact_block_size.test b/test/sql/storage/compact_block_size/compact_block_size.test index 62e383b41edb..00fb832df1b5 100644 --- a/test/sql/storage/compact_block_size/compact_block_size.test +++ b/test/sql/storage/compact_block_size/compact_block_size.test @@ -5,11 +5,11 @@ require exact_vector_size 2048 statement ok -ATTACH 'data/storage/index_0-9-1.db' (TYPE DUCKDB, READONLY); +ATTACH '{DATA_DIR}/storage/index_0-9-1.db' (TYPE DUCKDB, READONLY); # vector size is 2048, block size is 16KB statement ok -ATTACH 'data/storage/block_size_16kb.db' (TYPE DUCKDB, READONLY); +ATTACH 
'{DATA_DIR}/storage/block_size_16kb.db' (TYPE DUCKDB, READONLY); query I SELECT * FROM block_size_16kb.tbl; diff --git a/test/sql/storage/compact_block_size/compact_vector_size.test b/test/sql/storage/compact_block_size/compact_vector_size.test index bda60255754e..61df9b5b8bc2 100644 --- a/test/sql/storage/compact_block_size/compact_vector_size.test +++ b/test/sql/storage/compact_block_size/compact_vector_size.test @@ -6,12 +6,12 @@ require exact_vector_size 512 # The vector size of this file is 2048. statement error -ATTACH 'data/storage/index_0-9-1.db' (TYPE DUCKDB, READONLY); +ATTACH '{DATA_DIR}/storage/index_0-9-1.db' (TYPE DUCKDB, READONLY); ---- Cannot read database file statement ok -ATTACH 'data/storage/vector_size_512.db' (TYPE DUCKDB, READONLY); +ATTACH '{DATA_DIR}/storage/vector_size_512.db' (TYPE DUCKDB, READONLY); query I SELECT * FROM vector_size_512.tbl; diff --git a/test/sql/storage/compact_block_size/default_block_size.test b/test/sql/storage/compact_block_size/default_block_size.test index ff58d8ce67b2..20b67c70a788 100644 --- a/test/sql/storage/compact_block_size/default_block_size.test +++ b/test/sql/storage/compact_block_size/default_block_size.test @@ -6,10 +6,10 @@ require vector_size 2048 statement ok -ATTACH 'data/storage/block_size_16kb.db' (TYPE DUCKDB, READONLY) +ATTACH '{DATA_DIR}/storage/block_size_16kb.db' (TYPE DUCKDB, READONLY) statement error -ATTACH 'data/storage/vector_size_512.db' (TYPE DUCKDB, READONLY) +ATTACH '{DATA_DIR}/storage/vector_size_512.db' (TYPE DUCKDB, READONLY) ---- Cannot read database file @@ -17,4 +17,4 @@ Cannot read database file require 64bit statement ok -ATTACH 'data/storage/index_0-9-1.db' (TYPE DUCKDB, READONLY); +ATTACH '{DATA_DIR}/storage/index_0-9-1.db' (TYPE DUCKDB, READONLY); diff --git a/test/sql/storage/compression/dict_fsst/test_dict_fsst_with_smaller_block_size.test b/test/sql/storage/compression/dict_fsst/test_dict_fsst_with_smaller_block_size.test index 0ff93645afbe..8cbf9c88c99a 100644 --- a/test/sql/storage/compression/dict_fsst/test_dict_fsst_with_smaller_block_size.test +++ b/test/sql/storage/compression/dict_fsst/test_dict_fsst_with_smaller_block_size.test @@ -6,16 +6,16 @@ statement ok SET storage_compatibility_version='latest'; statement ok -ATTACH '__TEST_DIR__/partial_manager.db' AS db (BLOCK_SIZE 16384); +ATTACH '{TEMP_DIR}/partial_manager.db' AS db (BLOCK_SIZE 16384); statement ok -CREATE TABLE db.t AS FROM read_csv('data/csv/rabo-anon.csv.gz', strict_mode=FALSE); +CREATE TABLE db.t AS FROM read_csv('{DATA_DIR}/csv/rabo-anon.csv.gz', strict_mode=FALSE); statement ok DETACH db; statement ok -ATTACH '__TEST_DIR__/partial_manager.db' AS db; +ATTACH '{TEMP_DIR}/partial_manager.db' AS db; query I SELECT COUNT("XXX XXX/XXX") FROM db.t WHERE "XXX XXX/XXX" IS NOT NULL; diff --git a/test/sql/storage/compression/dict_fsst/test_null_filter_pushdown.test b/test/sql/storage/compression/dict_fsst/test_null_filter_pushdown.test new file mode 100644 index 000000000000..2da9af0b19ae --- /dev/null +++ b/test/sql/storage/compression/dict_fsst/test_null_filter_pushdown.test @@ -0,0 +1,18 @@ +# name: test/sql/storage/compression/dict_fsst/test_null_filter_pushdown.test +# group: [dict_fsst] + +load __TEST_DIR__/null_filter_dict_fsst.db readwrite v1.4.2 + +statement ok +pragma force_compression='DICT_FSST'; + +statement ok +CREATE OR REPLACE TABLE t1(type VARCHAR, id VARCHAR, problem VARCHAR); + +statement ok +INSERT INTO t1(type,id,problem) select 'events', 'test', NULL from range(40) + +query I +SELECT COUNT(*) FROM t1 WHERE 
problem IS NULL; +---- +40 diff --git a/test/sql/storage/compression/dict_fsst/test_null_update.test b/test/sql/storage/compression/dict_fsst/test_null_update.test new file mode 100644 index 000000000000..c0b3020cb985 --- /dev/null +++ b/test/sql/storage/compression/dict_fsst/test_null_update.test @@ -0,0 +1,25 @@ +# name: test/sql/storage/compression/dict_fsst/test_null_update.test +# group: [dict_fsst] + +load __TEST_DIR__/missing_null.db readwrite v1.4.2 + +statement ok +CREATE OR REPLACE TABLE t( + compressed VARCHAR USING COMPRESSION 'DICT_FSST' +); + +statement ok +INSERT INTO t VALUES + ('Error3'); + +statement ok +UPDATE t SET compressed = NULL; + +# Only the validity is changed, no data changes occurred +statement ok +CHECKPOINT; + +query I +SELECT * FROM t AS e WHERE e.compressed IS NULL; +---- +NULL diff --git a/test/sql/storage/compression/rle/rle_select_list.test b/test/sql/storage/compression/rle/rle_select_list.test new file mode 100644 index 000000000000..0bb572fc0acc --- /dev/null +++ b/test/sql/storage/compression/rle/rle_select_list.test @@ -0,0 +1,38 @@ +# name: test/sql/storage/compression/rle/rle_select_list.test +# description: Test selecting from RLE compression with a list comparison pushed down +# group: [rle] + +load __TEST_DIR__/rle_select_bug.db + +statement ok +pragma force_compression='rle'; + +statement ok +create table tbl as SELECT * FROM ( + VALUES + (['first name', 'last name', 'username'], 60), + (['first name'], 0), + (['username'], 0), + (['first name', 'last name', 'username'], 0), + (['first name', 'last name', 'username'], 0), + (['username'], 0), + (['username'], 0) +) AS t(attributes, minutes_duration); + +statement ok +checkpoint + +query I +SELECT + "minutes_duration" +FROM + tbl +WHERE NOT list_sort(['first name']) = tbl."attributes" +ORDER BY ALL +---- +0 +0 +0 +0 +0 +60 diff --git a/test/sql/storage/compression/test_using_compression.test b/test/sql/storage/compression/test_using_compression.test index 547ac458070e..89b4413aeaf4 100644 --- a/test/sql/storage/compression/test_using_compression.test +++ b/test/sql/storage/compression/test_using_compression.test @@ -24,3 +24,8 @@ statement ok CREATE OR REPLACE TABLE t( x VARCHAR USING COMPRESSION Dictionary ); + +statement error +create table foo (str VARCHAR USING COMPRESSION 'dict_fsst'); +---- +Binder Error: Can't compress using user-provided compression type 'DICT_FSST', that type is not available yet diff --git a/test/sql/storage/encryption/temp_files/encrypt_asof_join_merge.test_slow b/test/sql/storage/encryption/temp_files/encrypt_asof_join_merge.test_slow index 1c705ae1c66c..e79f371d4259 100644 --- a/test/sql/storage/encryption/temp_files/encrypt_asof_join_merge.test_slow +++ b/test/sql/storage/encryption/temp_files/encrypt_asof_join_merge.test_slow @@ -2,6 +2,9 @@ # description: Test merge queue and repartitioning with encrypted temporary files # group: [temp_files] +# We need httpfs to do encrypted writes +require httpfs + foreach cipher GCM CTR statement ok @@ -41,4 +44,4 @@ FROM probe ASOF JOIN build USING(k, t) restart -endloop \ No newline at end of file +endloop diff --git a/test/sql/storage/encryption/temp_files/encrypted_offloading_block_files.test_slow b/test/sql/storage/encryption/temp_files/encrypted_offloading_block_files.test_slow index 280803a839bc..e0325361931e 100644 --- a/test/sql/storage/encryption/temp_files/encrypted_offloading_block_files.test_slow +++ b/test/sql/storage/encryption/temp_files/encrypted_offloading_block_files.test_slow @@ -1,8 +1,10 @@ # name: 
test/sql/storage/encryption/temp_files/encrypted_offloading_block_files.test_slow # group: [temp_files] -foreach cipher GCM CTR +# We need httpfs to do encrypted writes +require httpfs +foreach cipher GCM CTR require block_size 262144 @@ -40,4 +42,4 @@ SELECT * FROM tbl ORDER BY random_value; restart -endloop \ No newline at end of file +endloop diff --git a/test/sql/storage/encryption/temp_files/encrypted_tmp_file_setting.test b/test/sql/storage/encryption/temp_files/encrypted_tmp_file_setting.test index 4c402d78465b..0f29ba99dfc1 100644 --- a/test/sql/storage/encryption/temp_files/encrypted_tmp_file_setting.test +++ b/test/sql/storage/encryption/temp_files/encrypted_tmp_file_setting.test @@ -1,6 +1,9 @@ # name: test/sql/storage/encryption/temp_files/encrypted_tmp_file_setting.test # group: [temp_files] +# httpfs is required to write encrypted data +require httpfs + foreach cipher GCM CTR require block_size 262144 @@ -49,4 +52,4 @@ Permission Error: Existing temporary files found: Modifying the temp_file_encryp restart -endloop \ No newline at end of file +endloop diff --git a/test/sql/storage/encryption/temp_files/encrypted_tpch_join.test_slow b/test/sql/storage/encryption/temp_files/encrypted_tpch_join.test_slow index 2491d644956f..96da86ca53e5 100644 --- a/test/sql/storage/encryption/temp_files/encrypted_tpch_join.test_slow +++ b/test/sql/storage/encryption/temp_files/encrypted_tpch_join.test_slow @@ -5,6 +5,8 @@ foreach cipher GCM CTR require tpch +require httpfs + statement ok SET threads = 8; @@ -32,4 +34,4 @@ JOIN lineitem2 l2 USING (l_orderkey , l_linenumber); restart -endloop \ No newline at end of file +endloop diff --git a/test/sql/storage/encryption/temp_files/temp_directory_enable_external_access.test b/test/sql/storage/encryption/temp_files/temp_directory_enable_external_access.test index 1a9a97017460..22b6ad7c573f 100644 --- a/test/sql/storage/encryption/temp_files/temp_directory_enable_external_access.test +++ b/test/sql/storage/encryption/temp_files/temp_directory_enable_external_access.test @@ -1,8 +1,10 @@ # name: test/sql/storage/encryption/temp_files/temp_directory_enable_external_access.test # group: [temp_files] -foreach cipher GCM CTR +# httpfs is required to write encrypted data +require httpfs +foreach cipher GCM CTR require block_size 262144 @@ -38,4 +40,4 @@ CREATE TEMPORARY TABLE tbl AS FROM range(10_000_000) restart -endloop \ No newline at end of file +endloop diff --git a/test/sql/storage/encryption/wal/encrypted_wal_blob_storage.test b/test/sql/storage/encryption/wal/encrypted_wal_blob_storage.test index d92c055832cc..ae29847af68d 100644 --- a/test/sql/storage/encryption/wal/encrypted_wal_blob_storage.test +++ b/test/sql/storage/encryption/wal/encrypted_wal_blob_storage.test @@ -2,8 +2,10 @@ # description: Test BLOB with persistent storage with an encrypted WAL # group: [wal] -foreach cipher GCM CTR +# httpfs is required to write encrypted data +require httpfs +foreach cipher GCM CTR load __TEST_DIR__/any_file.db @@ -28,7 +30,7 @@ statement ok DETACH enc statement ok -ATTACH '__TEST_DIR__/encrypted_blob_storage_${cipher}.db' AS enc (ENCRYPTION_KEY 'asdf'); +ATTACH '__TEST_DIR__/encrypted_blob_storage_${cipher}.db' AS enc (ENCRYPTION_KEY 'asdf', ENCRYPTION_CIPHER '${cipher}'); query I SELECT * FROM enc.blobs diff --git a/test/sql/storage/encryption/wal/encrypted_wal_lazy_creation.test b/test/sql/storage/encryption/wal/encrypted_wal_lazy_creation.test index a704efb73c28..cbb404785cf1 100644 --- a/test/sql/storage/encryption/wal/encrypted_wal_lazy_creation.test +++ 
b/test/sql/storage/encryption/wal/encrypted_wal_lazy_creation.test @@ -13,6 +13,9 @@ require noforcestorage require skip_reload +# We need httpfs to do encrypted writes +require httpfs + statement ok ATTACH '__TEST_DIR__/attach_no_wal_${cipher}.db' AS attach_no_wal (ENCRYPTION_KEY 'asdf', ENCRYPTION_CIPHER '${cipher}'); @@ -42,4 +45,4 @@ SELECT COUNT(*) FROM attach_no_wal.integers; restart -endloop \ No newline at end of file +endloop diff --git a/test/sql/storage/encryption/wal/encrypted_wal_pragmas.test b/test/sql/storage/encryption/wal/encrypted_wal_pragmas.test index d42b1b73d99c..f6b31638f7ac 100644 --- a/test/sql/storage/encryption/wal/encrypted_wal_pragmas.test +++ b/test/sql/storage/encryption/wal/encrypted_wal_pragmas.test @@ -2,6 +2,9 @@ # description: test encrypted wal debug PRAGMAS # group: [wal] +# We need httpfs to do encrypted writes +require httpfs + foreach cipher GCM CTR @@ -41,7 +44,7 @@ DETACH enc # WAL replay succeeds statement ok -ATTACH '__TEST_DIR__/encrypted_wal_restart_${cipher}.db' as enc (ENCRYPTION_KEY 'asdf'); +ATTACH '__TEST_DIR__/encrypted_wal_restart_${cipher}.db' as enc (ENCRYPTION_KEY 'asdf', ENCRYPTION_CIPHER '${cipher}'); statement ok DETACH enc @@ -75,7 +78,7 @@ SELECT * FROM enc.test ORDER BY 1 restart statement ok -ATTACH '__TEST_DIR__/encrypted_wal_restart_new_${cipher}.db' as enc (ENCRYPTION_KEY 'asdf'); +ATTACH '__TEST_DIR__/encrypted_wal_restart_new_${cipher}.db' as enc (ENCRYPTION_KEY 'asdf', ENCRYPTION_CIPHER '${cipher}'); query IT SELECT * FROM enc.test ORDER BY 1 @@ -130,4 +133,4 @@ SELECT * FROM enc.test ORDER BY 1 restart -endloop \ No newline at end of file +endloop diff --git a/test/sql/storage/encryption/wal/encrypted_wal_restart.test b/test/sql/storage/encryption/wal/encrypted_wal_restart.test index 197990dfa63a..f9a9cf87144a 100644 --- a/test/sql/storage/encryption/wal/encrypted_wal_restart.test +++ b/test/sql/storage/encryption/wal/encrypted_wal_restart.test @@ -2,6 +2,9 @@ # description: test wal restart # group: [wal] +# We need httpfs to do encrypted writes +require httpfs + foreach cipher GCM CTR load __TEST_DIR__/any_wal_db.db @@ -73,4 +76,4 @@ SELECT * FROM enc.test ORDER BY 1 endloop -restart \ No newline at end of file +restart diff --git a/test/sql/storage/external_file_cache/external_file_cache_parquet.test_slow b/test/sql/storage/external_file_cache/external_file_cache_parquet.test_slow index 8b9711996537..7c5275f72402 100644 --- a/test/sql/storage/external_file_cache/external_file_cache_parquet.test_slow +++ b/test/sql/storage/external_file_cache/external_file_cache_parquet.test_slow @@ -110,3 +110,69 @@ select i = ${i} from '__TEST_DIR__/test_efc_i.parquet'; true endloop + +# internal issue #5732 +statement ok +CREATE TABLE t1("time" BIGINT, "value" DOUBLE, "year" BIGINT, "month" BIGINT, "day" BIGINT); + +statement ok +INSERT INTO t1 VALUES(1704067200000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t1 VALUES(1704067201000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t1 VALUES(1704067202000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t1 VALUES(1704067203000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t1 VALUES(1704067204000000000,42.0,2024,1,1); + +statement ok +CREATE TABLE t2("time" BIGINT, "value" DOUBLE, "year" BIGINT, "month" BIGINT, "day" BIGINT); + +statement ok +INSERT INTO t2 VALUES(1704067200000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t2 VALUES(1704067201000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t2 VALUES(1704067202000000000,42.0,2024,1,1); + +statement ok 
+INSERT INTO t2 VALUES(1704067203000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t2 VALUES(1704067204000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t2 VALUES(1704069000000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t2 VALUES(1704069001000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t2 VALUES(1704069002000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t2 VALUES(1704069003000000000,42.0,2024,1,1); + +statement ok +INSERT INTO t2 VALUES(1704069004000000000,42.0,2024,1,1); + +statement ok +COPY t1 TO '__TEST_DIR__/violations.parquet' (FORMAT PARQUET, OVERWRITE_OR_IGNORE 1, USE_TMP_FILE 1); + +statement ok +SELECT * FROM read_parquet('__TEST_DIR__/violations.*parquet', hive_partitioning = true, union_by_name = true); + +statement ok +COPY t2 TO '__TEST_DIR__/violations.parquet' (FORMAT PARQUET, OVERWRITE_OR_IGNORE 1, USE_TMP_FILE 1); + +sleep 10 seconds + +statement ok +SELECT * FROM read_parquet('__TEST_DIR__/violations.*parquet', hive_partitioning = true, union_by_name = true); diff --git a/test/sql/storage/memory/in_memory_disabled_zstd.test b/test/sql/storage/memory/in_memory_disabled_zstd.test new file mode 100644 index 000000000000..33b71c7d9c78 --- /dev/null +++ b/test/sql/storage/memory/in_memory_disabled_zstd.test @@ -0,0 +1,28 @@ +# name: test/sql/storage/memory/in_memory_disabled_zstd.test +# group: [memory] + +statement ok +attach ':memory:' as db2 (compress); + +statement ok +use db2; + +statement ok +pragma force_compression='zstd'; + +statement ok +create table tbl as +select + i // 5_000 as num, + num::varchar || list_reduce([uuid()::varchar for x in range(10)], lambda x, y: concat(x, y)) str +from range(20_000) t(i) order by num; + +# Because we're running in In-Memory mode, we have explicitly disabled ZSTD +# Since the InMemoryBlockManager doesn't support the methods required by ZSTD +statement ok +force checkpoint; + +query I +select distinct compression = 'Uncompressed' from pragma_storage_info('tbl') where segment_type = 'VARCHAR' +---- +true diff --git a/test/sql/storage/storage_versions.test b/test/sql/storage/storage_versions.test index c20a27457788..84807f62cd08 100644 --- a/test/sql/storage/storage_versions.test +++ b/test/sql/storage/storage_versions.test @@ -8,16 +8,16 @@ require vector_size 2048 ## Files created via `duckdb file_name -c "CHECKPOINT;" statement ok -ATTACH 'data/storage/empty64.db' (READ_ONLY); +ATTACH '{DATA_DIR}/storage/empty64.db' (READ_ONLY); statement ok -ATTACH 'data/storage/empty65.db' (READ_ONLY); +ATTACH '{DATA_DIR}/storage/empty65.db' (READ_ONLY); statement ok -ATTACH 'data/storage/empty66.db' (READ_ONLY); +ATTACH '{DATA_DIR}/storage/empty66.db' (READ_ONLY); # statement error -# ATTACH 'data/storage/empty67.db' (READ_ONLY); +# ATTACH '{DATA_DIR}/storage/empty67.db' (READ_ONLY); # ---- # IO Error: Trying to read a database file with version number 67, but we can only read versions between 64 and 66. 
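+# NOTE: the version-67 check above stays commented out, presumably until a matching empty67.db fixture (written by a newer storage format) is checked in.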
diff --git a/test/sql/storage/test_restart_extension.test b/test/sql/storage/test_restart_extension.test index bb4384112d03..823557396d91 100644 --- a/test/sql/storage/test_restart_extension.test +++ b/test/sql/storage/test_restart_extension.test @@ -9,27 +9,27 @@ require parquet # we can load a parquet file query ITIIIIRRTTT nosort parquet_scan_result -SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_plain.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet') ---- # now load a DB from disk -load __TEST_DIR__/test_extension_restart.db +load {TEMP_DIR}/test_extension_restart.db # we can still load the parquet file query ITIIIIRRTTT nosort parquet_scan_result -SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_plain.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet') ---- # store it in the db statement ok -CREATE TABLE t1 AS SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_plain.parquet') +CREATE TABLE t1 AS SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet') # now restart the DB restart # we can still load the parquet file query ITIIIIRRTTT nosort parquet_scan_result -SELECT * FROM parquet_scan('data/parquet-testing/arrow/alltypes_plain.parquet') +SELECT * FROM parquet_scan('{DATA_DIR}/parquet-testing/arrow/alltypes_plain.parquet') # the file also exists in the database now query ITIIIIRRTTT nosort parquet_scan_result diff --git a/test/sql/storage/types/struct/struct_of_empty_list.test b/test/sql/storage/types/struct/struct_of_empty_list.test new file mode 100644 index 000000000000..392e05b07e2b --- /dev/null +++ b/test/sql/storage/types/struct/struct_of_empty_list.test @@ -0,0 +1,10 @@ +# name: test/sql/storage/types/struct/struct_of_empty_list.test +# group: [struct] + +load __TEST_DIR__/empty_list_in_struct.db + +statement ok +create table tbl (col STRUCT(a VARCHAR[])) + +statement ok +insert into tbl SELECT {'a': []} from range(122881) diff --git a/test/sql/storage/wal/wal_index_delete.test b/test/sql/storage/wal/wal_index_delete.test new file mode 100644 index 000000000000..fbd2e484ee74 --- /dev/null +++ b/test/sql/storage/wal/wal_index_delete.test @@ -0,0 +1,73 @@ +# name: test/sql/storage/wal/wal_index_delete.test +# description: Test index delete replays. 
+# group: [wal] + +load __TEST_DIR__/index_delete_test.db + +statement ok +SET index_scan_max_count = 1; + +statement ok +PRAGMA disable_checkpoint_on_shutdown + +statement ok +PRAGMA wal_autocheckpoint='1TB'; + +statement ok +CREATE TABLE tbl(a INTEGER, b VARCHAR, c DOUBLE, d TIMESTAMP); + +statement ok +CREATE INDEX idx_ab ON tbl(a, b); + +statement ok +CREATE INDEX idx_a ON tbl(a); + +statement ok +INSERT INTO tbl SELECT range, 'value_' || range, range * 1.5, '2023-01-01 10:00:00'::TIMESTAMP + INTERVAL (range) DAY FROM range(10); + +statement ok +DELETE FROM tbl WHERE a % 5 = 0; + +query II +EXPLAIN ANALYZE SELECT a, b, c, d FROM tbl WHERE (a) = 1; +---- +analyzed_plan :.*Type: Index Scan.* + +restart + +query II +EXPLAIN ANALYZE SELECT a, b, c, d FROM tbl WHERE (a) = 1; +---- +analyzed_plan :.*Type: Index Scan.* + +query IIII +SELECT a, b, c, d FROM tbl WHERE (a) = 1; +---- +1 value_1 1.5 2023-01-02 10:00:00 + +query II +EXPLAIN ANALYZE SELECT a, b, c, d FROM tbl WHERE (a) = 5; +---- +analyzed_plan :.*Type: Index Scan.* + +query IIII +SELECT a, b, c, d FROM tbl WHERE (a) = 5; +---- + +statement ok +INSERT INTO tbl VALUES (5, 'value_5', 7.5, '2023-01-06 10:00:00'); + +query IIII +SELECT a, b, c, d FROM tbl WHERE (a) = 5; +---- +5 value_5 7.5 2023-01-06 10:00:00 + +query II +EXPLAIN ANALYZE SELECT a, b, c, d FROM tbl WHERE (a) = 2; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT COUNT(*) FROM tbl where (a) = 2; +---- +1 \ No newline at end of file diff --git a/test/sql/storage/wal/wal_index_delete_gen.test b/test/sql/storage/wal/wal_index_delete_gen.test new file mode 100644 index 000000000000..9c4cbd1ec1d1 --- /dev/null +++ b/test/sql/storage/wal/wal_index_delete_gen.test @@ -0,0 +1,57 @@ +# name: test/sql/storage/wal/wal_index_delete_gen.test +# description: Test index delete replays with generated columns. 
+# group: [wal] + +# load the DB from disk +load __TEST_DIR__/index_delete_gen.db + +statement ok +PRAGMA disable_checkpoint_on_shutdown + +statement ok +PRAGMA wal_autocheckpoint='1TB'; + +statement ok +CREATE TABLE tbl(a BIGINT, b INT AS (2*a), c VARCHAR, d DOUBLE, e as (d + 2), f TIMESTAMP); + +statement ok +CREATE INDEX idx_cd ON tbl(c,d); + +statement ok +CREATE INDEX idx_df ON tbl(d, f) + +statement ok +INSERT INTO tbl VALUES (1, 'foo', 10.5, '2023-01-01 10:00:00'), (2, 'bar', 20.5, '2023-02-01 11:00:00'), (3, 'baz', 30.5, '2023-03-01 12:00:00'); + +query IIIIII +SELECT a, b, c, d, e, f FROM tbl ORDER BY a; +---- +1 2 foo 10.5 12.5 2023-01-01 10:00:00 +2 4 bar 20.5 22.5 2023-02-01 11:00:00 +3 6 baz 30.5 32.5 2023-03-01 12:00:00 + +statement ok +DELETE FROM tbl WHERE a in (2); + +restart + +statement ok +INSERT INTO tbl VALUES (1, 'foo', 10.5, '2023-01-01 10:00:00') + +query II +SELECT b, e FROM tbl WHERE (c,d) = ('baz', 30.5) +---- +6 +32.5 + +query IIIIII +SELECT a, b, c, d, e, f FROM tbl ORDER BY a; +---- +1 2 foo 10.5 12.5 2023-01-01 10:00:00 +1 2 foo 10.5 12.5 2023-01-01 10:00:00 +3 6 baz 30.5 32.5 2023-03-01 12:00:00 + +query I +SELECT COUNT(*) FROM tbl; +---- +3 \ No newline at end of file diff --git a/test/sql/storage/wal/wal_index_interleaved.test b/test/sql/storage/wal/wal_index_interleaved.test new file mode 100644 index 000000000000..c74192008e57 --- /dev/null +++ b/test/sql/storage/wal/wal_index_interleaved.test @@ -0,0 +1,132 @@ +# name: test/sql/storage/wal/wal_index_interleaved.test +# description: Test WAL replay with interleaved inserts and deletes on a single column index +# group: [wal] + +load __TEST_DIR__/index_interleaved_test.db + +statement ok +SET index_scan_percentage = 1.0; + +statement ok +SET index_scan_max_count = 1; + +statement ok +PRAGMA disable_checkpoint_on_shutdown + +statement ok +PRAGMA wal_autocheckpoint='1TB'; + +statement ok +CREATE TABLE tbl(a INTEGER); + +statement ok +CREATE INDEX idx_a ON tbl(a); + +loop i 0 15 + +statement ok +INSERT INTO tbl VALUES (${i} * 10); + +statement ok +DELETE FROM tbl WHERE a = ${i} * 10 AND (${i} * 10) % 30 = 0; + +endloop + +query I +SELECT COUNT(*) FROM tbl; +---- +10 + +query II +EXPLAIN ANALYZE SELECT * FROM tbl WHERE a = 10; +---- +analyzed_plan :.*Type: Index Scan.* + +query II +EXPLAIN ANALYZE SELECT * FROM tbl WHERE a = 0; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT * FROM tbl WHERE a = 0; +---- + +restart + +statement ok +SET index_scan_percentage = 1.0; + +statement ok +SET index_scan_max_count = 1; + +query I +SELECT COUNT(*) FROM tbl; +---- +10 + +loop i 0 15 + +query II +EXPLAIN ANALYZE SELECT * FROM tbl WHERE a = ${i} * 10; +---- +analyzed_plan :.*Type: Index Scan.* + +endloop + +query I +SELECT * FROM tbl WHERE a = 10; +---- +10 + +query I +SELECT * FROM tbl WHERE a = 70; +---- +70 + +query I +SELECT * FROM tbl WHERE a = 140; +---- +140 + +query I +SELECT * FROM tbl WHERE a = 0; +---- + +query I +SELECT * FROM tbl WHERE a = 30; +---- + +query I +SELECT * FROM tbl WHERE a = 90; +---- + +statement ok +INSERT INTO tbl VALUES (150); + +query II +EXPLAIN ANALYZE SELECT * FROM tbl WHERE a = 150; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT * FROM tbl WHERE a = 150; +---- +150 + +statement ok +INSERT INTO tbl VALUES (0); + +query II +EXPLAIN ANALYZE SELECT * FROM tbl WHERE a = 0; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT * FROM tbl WHERE a = 0; +---- +0 + +query I +SELECT COUNT(*) FROM tbl; +---- +12 \ No newline at end of file diff --git 
a/test/sql/storage/wal/wal_index_large_batch_interleaved.test b/test/sql/storage/wal/wal_index_large_batch_interleaved.test new file mode 100644 index 000000000000..273671c36790 --- /dev/null +++ b/test/sql/storage/wal/wal_index_large_batch_interleaved.test @@ -0,0 +1,61 @@ +# name: test/sql/storage/wal/wal_index_large_batch_interleaved.test +# description: Test WAL replay with large interleaved insert/delete batches on indexed column +# group: [wal] + +load __TEST_DIR__/wal_index_large_batch_interleaved.test.db + +statement ok +SET index_scan_percentage = 1.0; + +statement ok +SET index_scan_max_count = 1; + +statement ok +PRAGMA disable_checkpoint_on_shutdown + +statement ok +SET checkpoint_threshold='1TB' + +statement ok +CREATE TABLE tbl(a INTEGER); + +statement ok +CREATE INDEX idx_a ON tbl(a); + +loop i 0 3 + +statement ok +INSERT INTO tbl SELECT r FROM range(${i}*10000, ${i}*10000 + 5120) t(r); + +statement ok +DELETE FROM tbl WHERE a >= ${i}*10000 AND a <= ${i}*10000 + 5000; + +endloop + +restart + +statement ok +PRAGMA disable_checkpoint_on_shutdown + +statement ok +SET checkpoint_threshold='1TB' + +statement ok +SET index_scan_percentage = 1.0; + +statement ok +SET index_scan_max_count = 1; + +query II +EXPLAIN ANALYZE SELECT * FROM tbl WHERE a = 25010; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT * FROM tbl WHERE a = 25010; +---- +25010 + +query I +SELECT * FROM tbl WHERE a = 24999; +---- \ No newline at end of file diff --git a/test/sql/storage/wal/wal_index_replay.test b/test/sql/storage/wal/wal_index_replay.test new file mode 100644 index 000000000000..69af2f9f933f --- /dev/null +++ b/test/sql/storage/wal/wal_index_replay.test @@ -0,0 +1,74 @@ +# name: test/sql/storage/wal/wal_index_replay.test +# description: Test insert and delete replays with indexes +# group: [wal] + +statement ok +SET index_scan_max_count = 1; + +load __TEST_DIR__/index_replay_test.db + +statement ok +PRAGMA disable_checkpoint_on_shutdown + +statement ok +PRAGMA wal_autocheckpoint='1TB'; + +statement ok +CREATE TABLE tbl(a INTEGER); + +statement ok +CREATE INDEX idx_a ON tbl(a); + +statement ok +INSERT INTO tbl SELECT range FROM range(100); + +statement ok +DELETE FROM tbl WHERE a % 5 = 0; + +statement ok +INSERT INTO tbl SELECT range + 100 FROM range(50); + +query I +SELECT COUNT(*) FROM tbl; +---- +130 + +restart + +query I +SELECT COUNT(*) FROM tbl; +---- +130 + +query II +EXPLAIN ANALYZE SELECT * FROM tbl WHERE a = 1; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT * FROM tbl WHERE a = 1; +---- +1 + +query II +EXPLAIN ANALYZE SELECT * FROM tbl WHERE a = 5; +---- +analyzed_plan :.*Type: Index Scan.* + +query I +SELECT * FROM tbl WHERE a = 5; +---- + +statement ok +INSERT INTO tbl VALUES (5); + +query I +SELECT * FROM tbl WHERE a = 5; +---- +5 + +query I +SELECT COUNT(*) FROM tbl; +---- +131 + diff --git a/test/sql/subquery/lateral/pg_lateral.test b/test/sql/subquery/lateral/pg_lateral.test index a570e76a0a65..3ce5a09a9647 100644 --- a/test/sql/subquery/lateral/pg_lateral.test +++ b/test/sql/subquery/lateral/pg_lateral.test @@ -192,7 +192,7 @@ CREATE TABLE tenk1 ( ); statement ok -COPY tenk1 FROM 'data/csv/tenk.tsv.gz' (DELIMITER '\t') +COPY tenk1 FROM '{DATA_DIR}/csv/tenk.tsv.gz' (DELIMITER '\t') query II rowsort select * from diff --git a/test/sql/table_function/read_text_and_blob.test b/test/sql/table_function/read_text_and_blob.test index 8aada1310a57..1b0d3d1eddb6 100644 --- a/test/sql/table_function/read_text_and_blob.test +++ 
b/test/sql/table_function/read_text_and_blob.test @@ -54,13 +54,29 @@ true # test parsing hive partitioning scheme query IIII -select parse_path(filename), size, part, date from read_blob('data/parquet-testing/hive-partitioning/simple/*/*/test.parquet') order by filename +select parse_path(filename)[-6:], size, part, date from read_blob('{DATA_DIR}/parquet-testing/hive-partitioning/simple/*/*/test.parquet') order by filename ---- -[data, parquet-testing, hive-partitioning, simple, 'part=a', 'date=2012-01-01', test.parquet] 266 a 2012-01-01 -[data, parquet-testing, hive-partitioning, simple, 'part=b', 'date=2013-01-01', test.parquet] 266 b 2013-01-01 +[parquet-testing, hive-partitioning, simple, 'part=a', 'date=2012-01-01', test.parquet] 266 a 2012-01-01 +[parquet-testing, hive-partitioning, simple, 'part=b', 'date=2013-01-01', test.parquet] 266 b 2013-01-01 query IIII -select parse_path(filename), size, part, date from read_text('data/parquet-testing/hive-partitioning/simple/*/*/test.parquet') order by filename +select parse_path(filename)[-6:], size, part, date from read_text('{DATA_DIR}/parquet-testing/hive-partitioning/simple/*/*/test.parquet') order by filename +---- +[parquet-testing, hive-partitioning, simple, 'part=a', 'date=2012-01-01', test.parquet] 266 a 2012-01-01 +[parquet-testing, hive-partitioning, simple, 'part=b', 'date=2013-01-01', test.parquet] 266 b 2013-01-01 + + +# Union by name is not supported +statement error +select filename from read_blob('.*', union_by_name := true); +---- +Binder Error: Invalid named parameter "union_by_name" for function read_blob + + +# Searching for a non-existing remote file should return an empty result set (like local files) +# This is probably wrong behavior in our httpfs implementation, but at least it's consistent in this case. +require httpfs + +query I +select filename from read_blob('s3://does-not-exist1144/date=2025-10-11/file.parquet'); ---- -[data, parquet-testing, hive-partitioning, simple, 'part=a', 'date=2012-01-01', test.parquet] 266 a 2012-01-01 -[data, parquet-testing, hive-partitioning, simple, 'part=b', 'date=2013-01-01', test.parquet] 266 b 2013-01-01 diff --git a/test/sql/timezone/test_icu_timezone.test index a1c775e4c934..56a03a0f19ff 100644 --- a/test/sql/timezone/test_icu_timezone.test +++ b/test/sql/timezone/test_icu_timezone.test @@ -196,6 +196,58 @@ infinity infinity 20:15:37.123456+00 NULL NULL NULL NULL NULL NULL +# +# Verify UTC±NNNN translation +# Note sign inversion (see https://en.wikipedia.org/wiki/Tz_database#Area) +# +foreach utc UTC-0800 UTC-08 UTC-8 UTC-08:00 + +statement ok +set TimeZone = '${utc}'; + +query I +select value from duckdb_settings() where name = 'TimeZone'; +---- +Etc/GMT+8 + +endloop + +foreach utc UTC+0500 UTC+05 UTC+5 UTC+05:00 + +statement ok +set TimeZone = '${utc}'; + +query I +select value from duckdb_settings() where name = 'TimeZone'; +---- +Etc/GMT-5 + +endloop + +foreach utc UTC+1400 UTC+14 UTC+14:00 + +statement ok +set TimeZone = '${utc}'; + +query I +select value from duckdb_settings() where name = 'TimeZone'; +---- +Etc/GMT-14 + +endloop + +foreach utc UTC-0 UTC-00 UTC-000 UTC-0000 UTC+0 UTC+00 UTC+000 UTC+0000 + +statement ok +set TimeZone = '${utc}'; + +query I +select value from duckdb_settings() where name = 'TimeZone'; +---- +Etc/GMT+0 + +endloop + # # Verify local time functions are implemented. 
# diff --git a/test/sql/topn/tpcds_q14_topn.test b/test/sql/topn/tpcds_q14_topn.test index e1de6b0b5f8e..1810dcefe5c7 100644 --- a/test/sql/topn/tpcds_q14_topn.test +++ b/test/sql/topn/tpcds_q14_topn.test @@ -9,7 +9,7 @@ statement ok CREATE TABLE final(channel VARCHAR, i_brand_id INTEGER, i_class_id INTEGER, i_category_id INTEGER, sum_sales DECIMAL(38,2), number_sales HUGEINT); statement ok -COPY final FROM 'data/csv/tpcds_14.csv' (FORMAT CSV, DELIMITER '|', HEADER); +COPY final FROM '{DATA_DIR}/csv/tpcds_14.csv' (FORMAT CSV, DELIMITER '|', HEADER); query IIIIII SELECT * FROM final diff --git a/test/sql/topn/tpcds_q59_topn.test b/test/sql/topn/tpcds_q59_topn.test index 32e348a9bb84..5553ab7d5ee9 100644 --- a/test/sql/topn/tpcds_q59_topn.test +++ b/test/sql/topn/tpcds_q59_topn.test @@ -9,7 +9,7 @@ statement ok CREATE TABLE final(s_store_name1 VARCHAR, s_store_id1 VARCHAR, d_week_seq1 INTEGER, "sun_sales1 / sun_sales2" DOUBLE, "mon_sales1 / mon_sales2" DOUBLE, "tue_sales1 / tue_sales2" DOUBLE, "wed_sales1 / wed_sales2" DOUBLE, "thu_sales1 / thu_sales2" DOUBLE, "fri_sales1 / fri_sales2" DOUBLE, "sat_sales1 / sat_sales2" DOUBLE); statement ok -COPY final FROM 'data/csv/tpcds_59.csv' (FORMAT CSV, DELIMITER '|', HEADER); +COPY final FROM '{DATA_DIR}/csv/tpcds_59.csv' (FORMAT CSV, DELIMITER '|', HEADER); query IIIIIIIIII SELECT * diff --git a/test/sql/types/list/const_struct_null_bug.test_slow b/test/sql/types/list/const_struct_null_bug.test_slow index 282d718ca38c..54547bfdfac6 100644 --- a/test/sql/types/list/const_struct_null_bug.test_slow +++ b/test/sql/types/list/const_struct_null_bug.test_slow @@ -10,7 +10,7 @@ query III SELECT hits_0.access.page."pageTitle" as "pageTitle", COUNT(DISTINCT CONCAT(ga_sessions."__distinct_key", 'x', hits_0.__row_id)) as "hits_count", COUNT(DISTINCT CASE WHEN product_0.access."productQuantity">0 THEN CONCAT(ga_sessions."__distinct_key", 'x', hits_0."__row_id") END) as "sold_count" -FROM (SELECT GEN_RANDOM_UUID() as __distinct_key, * FROM 'data/parquet-testing/issue_6013.parquet' as x) as ga_sessions, +FROM (SELECT GEN_RANDOM_UUID() as __distinct_key, * FROM '{DATA_DIR}/parquet-testing/issue_6013.parquet' as x) as ga_sessions, (SELECT GEN_RANDOM_UUID() as __row_id, x.access FROM (SELECT UNNEST(ga_sessions.hits)) as x(access)) as hits_0, (SELECT GEN_RANDOM_UUID() as __row_id, x.access FROM (SELECT UNNEST(hits_0.access.product)) as x(access)) as product_0 GROUP BY 1 ORDER BY ALL LIMIT 2; diff --git a/test/sql/types/nested/list/list_aggregate_dict.test b/test/sql/types/nested/list/list_aggregate_dict.test index 68012ec82da3..4499af75080e 100644 --- a/test/sql/types/nested/list/list_aggregate_dict.test +++ b/test/sql/types/nested/list/list_aggregate_dict.test @@ -2,7 +2,7 @@ # description: Test lists with aggregations on a table with dictionary compression # group: [list] -load __TEST_DIR__/store_dict.db +load {TEMP_DIR}/store_dict.db statement ok pragma force_compression='dictionary'; @@ -11,7 +11,7 @@ statement ok CREATE TABLE Hosts (ips varchar[]); statement ok -INSERT INTO Hosts SELECT * FROM 'data/csv/ips.csv.gz'; +INSERT INTO Hosts SELECT * FROM '{DATA_DIR}/csv/ips.csv.gz'; query I SELECT min(list_string_agg(ips)) FROM Hosts diff --git a/test/sql/types/nested/unnest_range_plan.test b/test/sql/types/nested/unnest_range_plan.test new file mode 100644 index 000000000000..565f0d327c2f --- /dev/null +++ b/test/sql/types/nested/unnest_range_plan.test @@ -0,0 +1,13 @@ +# name: test/sql/types/nested/unnest_range_plan.test +# group: [nested] + +# Fix issue #19645 + 
+query I rowsort +SELECT * +FROM UNNEST(ARRAY[6]) AS x +UNION ALL +SELECT 2 FROM generate_series(1, 1); +---- +2 +6 \ No newline at end of file diff --git a/test/sql/update/string_update_issue_2471.test_slow b/test/sql/update/string_update_issue_2471.test_slow index e18caefafeba..0a1e6a0fc43f 100644 --- a/test/sql/update/string_update_issue_2471.test_slow +++ b/test/sql/update/string_update_issue_2471.test_slow @@ -3,7 +3,7 @@ # group: [update] statement ok -CREATE VIEW test_table_view AS SELECT * FROM 'data/csv/issue2471.csv' +CREATE VIEW test_table_view AS SELECT * FROM '{DATA_DIR}/csv/issue2471.csv' statement ok create table test_table (isin VARCHAR(20), value VARCHAR(1)); diff --git a/test/sql/upsert/upsert_duplicates_issue.test b/test/sql/upsert/upsert_duplicates_issue.test index 6202aa69f635..8fa65085a156 100644 --- a/test/sql/upsert/upsert_duplicates_issue.test +++ b/test/sql/upsert/upsert_duplicates_issue.test @@ -11,7 +11,7 @@ CREATE TABLE tmp_edges(from_v VARCHAR, to_v VARCHAR, PRIMARY KEY(from_v, to_v)); statement ok INSERT INTO tmp_edges - from 'data/parquet-testing/upsert_bug.parquet' + from '{DATA_DIR}/parquet-testing/upsert_bug.parquet' ON CONFLICT DO UPDATE SET from_v = excluded.from_v ; diff --git a/test/sql/window/test_streaming_window.test b/test/sql/window/test_streaming_window.test index 5f2955b60c64..c41272c4aab6 100644 --- a/test/sql/window/test_streaming_window.test +++ b/test/sql/window/test_streaming_window.test @@ -311,22 +311,22 @@ physical_plan :.*STREAMING_WINDOW.* query TT explain select first_value(i) over (), last_value(i) over () from integers ---- -physical_plan :.*STREAMING_WINDOW.* WINDOW .* +physical_plan :.* WINDOW .*STREAMING_WINDOW.* query TT explain select last_value(i) over (), first_value(i) over () from integers ---- -physical_plan :.*STREAMING_WINDOW.* WINDOW .* +physical_plan :.* WINDOW .*STREAMING_WINDOW.* query TT explain select first_value(i) over (), last_value(i) over (order by j) from integers ---- -physical_plan :.*STREAMING_WINDOW.* WINDOW .* +physical_plan :.* WINDOW .*STREAMING_WINDOW.* query TT explain select last_value(i) over (order by j), first_value(i) over () from integers ---- -physical_plan :.*STREAMING_WINDOW.* WINDOW .* +physical_plan :.* WINDOW .*STREAMING_WINDOW.* # # Global state tests from #3275 diff --git a/test/sql/window/test_thread_count.test b/test/sql/window/test_thread_count.test index 517521fd3313..09ead4f1796e 100644 --- a/test/sql/window/test_thread_count.test +++ b/test/sql/window/test_thread_count.test @@ -8,8 +8,8 @@ select row_number() over () , sum(count_ch1_ch2) over () as bigram_count_all , count_ch1_ch2 / bigram_count_all as actual_bigram_frequency , ch1.actual_frequency * ch2.actual_frequency as expected_bigram_frequency -from read_csv('data/csv/issue_13525/stat_bigrams.csv') as stat_bigrams -inner join read_csv('data/csv/issue_13525/stat_stat_chars.csv') as ch1 +from read_csv('{DATA_DIR}/csv/issue_13525/stat_bigrams.csv') as stat_bigrams +inner join read_csv('{DATA_DIR}/csv/issue_13525/stat_stat_chars.csv') as ch1 on stat_bigrams.ascii_upper_ch1 = ch1.ascii_upper_ch -inner join read_csv('data/csv/issue_13525/stat_stat_chars.csv') as ch2 +inner join read_csv('{DATA_DIR}/csv/issue_13525/stat_stat_chars.csv') as ch2 on stat_bigrams.ascii_upper_ch2 = ch2.ascii_upper_ch; diff --git a/test/sql/window/test_tpcc_results.test b/test/sql/window/test_tpcc_results.test index 4a7b52627363..856c0eddc022 100644 --- a/test/sql/window/test_tpcc_results.test +++ b/test/sql/window/test_tpcc_results.test @@ -18,7 +18,7 
@@ select first_value(dbsystem order by tps desc) over w AS best_system, lead(tps order by tps desc) over w AS second_performance, lead(dbsystem order by tps desc, dbsystem) over w AS second_system, -from 'data/csv/tpcc_results.csv' +from '{DATA_DIR}/csv/tpcc_results.csv' window w as ( order by submission_date range between unbounded preceding and current row diff --git a/test/sql/window/window_valid_end.test_slow index 276d8220ab24..17963a0a4eac 100644 --- a/test/sql/window/window_valid_end.test_slow +++ b/test/sql/window/window_valid_end.test_slow @@ -26,7 +26,7 @@ select * from range between 1 following and 1 following ) ) as recompute_next_year, - from 'data/csv/issue_16098.csv' + from '{DATA_DIR}/csv/issue_16098.csv' ) where next_year is distinct from recompute_next_year order by all; diff --git a/test/sqlite/sqllogic_test_runner.cpp index 72559950dfa3..f6d51bfd49d2 100644 --- a/test/sqlite/sqllogic_test_runner.cpp +++ b/test/sqlite/sqllogic_test_runner.cpp @@ -224,24 +224,20 @@ string SQLLogicTestRunner::LoopReplacement(string text, const vector #include +#include #include using namespace duckdb; @@ -37,18 +38,18 @@ template <bool VERIFICATION> static void testRunner() { // this is an ugly hack that uses the test case name to pass the script file // name if someone has a better idea... - auto name = Catch::getResultCapture().getCurrentTestName(); - + const auto name = Catch::getResultCapture().getCurrentTestName(); + const auto test_dir_path = TestDirectoryPath(); // can vary between tests, and does IO auto &test_config = TestConfiguration::Get(); string initial_dbpath = test_config.GetInitialDBPath(); test_config.ProcessPath(initial_dbpath, name); if (!initial_dbpath.empty()) { - auto test_path = StringUtil::Replace(initial_dbpath, TestDirectoryPath(), string()); + auto test_path = StringUtil::Replace(initial_dbpath, test_dir_path, string()); test_path = StringUtil::Replace(test_path, "\\", "/"); auto components = StringUtil::Split(test_path, "/"); components.pop_back(); - string total_path = TestDirectoryPath(); + string total_path = test_dir_path; for (auto &component : components) { if (component.empty()) { continue; @@ -61,11 +62,6 @@ static void testRunner() { runner.output_sql = Catch::getCurrentContext().getConfig()->outputSQL(); runner.enable_verification = VERIFICATION; - // Copy configured env vars - for (auto &kv : test_config.GetTestEnvMap()) { - runner.environment_variables[kv.first] = kv.second; - } - string prev_directory; // We assume the test working dir for extensions to be one dir above the test/sql. Note that this is very hacky. 
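The hunk below re-adds the env-var copy removed above, but in a deliberate order: the configured env map is applied first, then per-runner values (WORKING_DIR, TEST_NAME, TEST_NAME__NO_SLASH) overwrite it, so the per-runner values always win. A minimal sketch of that layering follows; the RunnerSketch type and ApplyEnv helper are hypothetical stand-ins, only the ordering mirrors the patch.

#include <map>
#include <string>

// Hypothetical stand-in for the sqllogictest runner; only the
// environment_variables map matters for this sketch.
struct RunnerSketch {
	std::map<std::string, std::string> environment_variables;
};

// Config-based env vars go in first, then per-runner values override
// them - the same layering the patch establishes.
static void ApplyEnv(RunnerSketch &runner, const std::map<std::string, std::string> &config_env,
                     const std::string &test_name, const std::string &working_dir) {
	for (auto &kv : config_env) {
		runner.environment_variables[kv.first] = kv.second;
	}
	runner.environment_variables["WORKING_DIR"] = working_dir;
	runner.environment_variables["TEST_NAME"] = test_name;
	// slashes are replaced so the value stays usable in file names
	std::string no_slash = test_name;
	for (auto &c : no_slash) {
		if (c == '/') {
			c = '_';
		}
	}
	runner.environment_variables["TEST_NAME__NO_SLASH"] = no_slash;
}

int main() {
	RunnerSketch runner;
	ApplyEnv(runner, {{"WORKING_DIR", "from_config"}}, "test/sql/example.test", "/tmp/work");
	// runner.environment_variables["WORKING_DIR"] is now "/tmp/work": per-runner wins
	return 0;
}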
@@ -74,16 +70,24 @@ static void testRunner() { if (AUTO_SWITCH_TEST_DIR) { prev_directory = TestGetCurrentDirectory(); - std::size_t found = name.rfind("test/sql"); + std::size_t found = name.rfind("/test/sql"); if (found == std::string::npos) { throw InvalidInputException("Failed to auto detect working dir for test '" + name + "' because a non-standard path was used!"); } auto test_working_dir = name.substr(0, found); + test_config.ChangeWorkingDirectory(test_working_dir); + } - // Parse the test dir automatically - TestChangeDirectory(test_working_dir); + // setup this test runner with Config-based env, then override with ephemerals (only WORKING_DIR at this point) + for (auto &kv : test_config.GetTestEnvMap()) { + runner.environment_variables[kv.first] = kv.second; } + // Per runner vars + runner.environment_variables["WORKING_DIR"] = TestGetCurrentDirectory(); + runner.environment_variables["TEST_NAME"] = name; + runner.environment_variables["TEST_NAME__NO_SLASH"] = StringUtil::Replace(name, "/", "_"); + try { runner.ExecuteFile(name); } catch (...) { @@ -91,7 +95,7 @@ static void testRunner() { } if (AUTO_SWITCH_TEST_DIR) { - TestChangeDirectory(prev_directory); + test_config.ChangeWorkingDirectory(prev_directory); } auto on_cleanup = test_config.OnCleanupCommand(); diff --git a/test/unittest.cpp b/test/unittest.cpp index ad0d0dc5c4c6..6db4c7a41938 100644 --- a/test/unittest.cpp +++ b/test/unittest.cpp @@ -39,8 +39,8 @@ int main(int argc_in, char *argv[]) { new_argc++; } } + test_config.ChangeWorkingDirectory(test_directory); - TestChangeDirectory(test_directory); // delete the testing directory if it exists auto dir = TestCreatePath(""); try { diff --git a/third_party/catch/catch.hpp b/third_party/catch/catch.hpp index 23334dffa683..8a738b70c80a 100644 --- a/third_party/catch/catch.hpp +++ b/third_party/catch/catch.hpp @@ -16428,18 +16428,21 @@ class ConsoleAssertionPrinter { bool printInfoMessages; }; -std::size_t makeRatio(std::size_t number, std::size_t total) { - std::size_t ratio = total > 0 ? CATCH_CONFIG_CONSOLE_WIDTH * number / total : 0; - return (ratio == 0 && number > 0) ? 1 : ratio; +std::size_t makeRatio(std::uint64_t number, std::uint64_t total) { + const auto ratio = total > 0 ? CATCH_CONFIG_CONSOLE_WIDTH * number / total : 0; + return (ratio == 0 && number > 0) ? 
1 : static_cast<std::size_t>(ratio); } -std::size_t& findMax(std::size_t& i, std::size_t& j, std::size_t& k) { - if (i > j && i > k) +std::size_t& +findMax( std::size_t& i, std::size_t& j, std::size_t& k, std::size_t& l ) { + if (i > j && i > k && i > l) return i; - else if (j > k) + else if (j > k && j > l) return j; - else + else if (k > l) return k; + else + return l; } struct ColumnInfo { @@ -16924,13 +16927,15 @@ void ConsoleReporter::printSummaryRow(std::string const& label, std::vector<SummaryColumn> const& cols, std::size_t row) { void ConsoleReporter::printTotalsDivider(Totals const& totals) { if (totals.testCases.total() > 0) { - std::size_t failedRatio = makeRatio(totals.testCases.failed, totals.testCases.total()); - std::size_t failedButOkRatio = makeRatio(totals.testCases.failedButOk + totals.skippedTests, totals.testCases.total()); - std::size_t passedRatio = makeRatio(totals.testCases.passed - totals.skippedTests, totals.testCases.total()); - while (failedRatio + failedButOkRatio + passedRatio < CATCH_CONFIG_CONSOLE_WIDTH - 1) - findMax(failedRatio, failedButOkRatio, passedRatio)++; - while (failedRatio + failedButOkRatio + passedRatio > CATCH_CONFIG_CONSOLE_WIDTH - 1) - findMax(failedRatio, failedButOkRatio, passedRatio)--; + const std::size_t total = totals.testCases.total() + totals.skippedTests; + std::size_t failedRatio = makeRatio(totals.testCases.failed, total); + std::size_t failedButOkRatio = makeRatio(totals.testCases.failedButOk, total); + std::size_t passedRatio = makeRatio(totals.testCases.passed, total); + std::size_t skippedRatio = makeRatio(totals.skippedTests, total); + while (failedRatio + failedButOkRatio + passedRatio + skippedRatio < CATCH_CONFIG_CONSOLE_WIDTH - 1) + findMax(failedRatio, failedButOkRatio, passedRatio, skippedRatio)++; + while (failedRatio + failedButOkRatio + passedRatio + skippedRatio > CATCH_CONFIG_CONSOLE_WIDTH - 1) + findMax(failedRatio, failedButOkRatio, passedRatio, skippedRatio)--; stream << Colour(Colour::Error) << std::string(failedRatio, '='); stream << Colour(Colour::ResultExpectedFailure) << std::string(failedButOkRatio, '='); diff --git a/third_party/mbedtls/include/mbedtls_wrapper.hpp index d9f8111d8095..e6e97a71791c 100644 --- a/third_party/mbedtls/include/mbedtls_wrapper.hpp +++ b/third_party/mbedtls/include/mbedtls_wrapper.hpp @@ -81,6 +81,7 @@ class AESStateMBEDTLS : public duckdb::EncryptionState { DUCKDB_API void GenerateRandomData(duckdb::data_ptr_t data, duckdb::idx_t len) override; DUCKDB_API void FinalizeGCM(duckdb::data_ptr_t tag, duckdb::idx_t tag_len); DUCKDB_API const mbedtls_cipher_info_t *GetCipher(size_t key_len); + DUCKDB_API static void SecureClearData(duckdb::data_ptr_t data, duckdb::idx_t len); private: DUCKDB_API void InitializeInternal(duckdb::const_data_ptr_t iv, duckdb::idx_t iv_len, duckdb::const_data_ptr_t aad, duckdb::idx_t aad_len); @@ -98,6 +99,10 @@ class AESStateMBEDTLS : public duckdb::EncryptionState { } ~AESStateMBEDTLSFactory() override {} // + + DUCKDB_API bool SupportsEncryption() override { + return false; + } }; }; diff --git a/third_party/mbedtls/mbedtls_wrapper.cpp index 7dc0af7fd199..3a6ce981ed9a 100644 --- a/third_party/mbedtls/mbedtls_wrapper.cpp +++ b/third_party/mbedtls/mbedtls_wrapper.cpp @@ -271,6 +271,10 @@ const mbedtls_cipher_info_t *MbedTlsWrapper::AESStateMBEDTLS::GetCipher(size_t k } } +void MbedTlsWrapper::AESStateMBEDTLS::SecureClearData(duckdb::data_ptr_t data, duckdb::idx_t len) { + mbedtls_platform_zeroize(data, len); +} + MbedTlsWrapper::AESStateMBEDTLS::AESStateMBEDTLS(duckdb::EncryptionTypes::CipherType cipher_p, 
duckdb::idx_t key_len) : EncryptionState(cipher_p, key_len), context(duckdb::make_uniq<mbedtls_cipher_context_t>()) { mbedtls_cipher_init(context.get()); @@ -296,20 +300,12 @@ MbedTlsWrapper::AESStateMBEDTLS::~AESStateMBEDTLS() { } } -void MbedTlsWrapper::AESStateMBEDTLS::GenerateRandomDataStatic(duckdb::data_ptr_t data, duckdb::idx_t len) { - duckdb::RandomEngine random_engine; - - while (len) { - const auto random_integer = random_engine.NextRandomInteger(); - const auto next = duckdb::MinValue<duckdb::idx_t>(len, sizeof(random_integer)); - memcpy(data, duckdb::const_data_ptr_cast(&random_integer), next); - data += next; - len -= next; - } +static void ThrowInsecureRNG() { + throw duckdb::InvalidConfigurationException("DuckDB requires a secure random engine to be loaded to enable secure crypto. Normally, this will be handled automatically by DuckDB by autoloading the `httpfs` Extension, but that seems to have failed. Please ensure the httpfs extension is loaded manually using `LOAD httpfs`."); } void MbedTlsWrapper::AESStateMBEDTLS::GenerateRandomData(duckdb::data_ptr_t data, duckdb::idx_t len) { - GenerateRandomDataStatic(data, len); + ThrowInsecureRNG(); } void MbedTlsWrapper::AESStateMBEDTLS::InitializeInternal(duckdb::const_data_ptr_t iv, duckdb::idx_t iv_len, duckdb::const_data_ptr_t aad, duckdb::idx_t aad_len){ @@ -325,16 +321,7 @@ void MbedTlsWrapper::AESStateMBEDTLS::InitializeInternal(duckdb::const_data_ptr_ } void MbedTlsWrapper::AESStateMBEDTLS::InitializeEncryption(duckdb::const_data_ptr_t iv, duckdb::idx_t iv_len, duckdb::const_data_ptr_t key, duckdb::idx_t key_len_p, duckdb::const_data_ptr_t aad, duckdb::idx_t aad_len) { - mode = duckdb::EncryptionTypes::ENCRYPT; - - if (key_len_p != key_len) { - throw duckdb::InternalException("Invalid encryption key length, expected %llu, got %llu", key_len, key_len_p); - } - if (mbedtls_cipher_setkey(context.get(), key, key_len * 8, MBEDTLS_ENCRYPT)) { - throw runtime_error("Failed to set AES key for encryption"); - } - - InitializeInternal(iv, iv_len, aad, aad_len); + ThrowInsecureRNG(); } void MbedTlsWrapper::AESStateMBEDTLS::InitializeDecryption(duckdb::const_data_ptr_t iv, duckdb::idx_t iv_len, duckdb::const_data_ptr_t key, duckdb::idx_t key_len_p, duckdb::const_data_ptr_t aad, duckdb::idx_t aad_len) { diff --git a/third_party/parquet/generate.sh index c658827f239d..d0ecc8e449ff 100755 --- a/third_party/parquet/generate.sh +++ b/third_party/parquet/generate.sh @@ -3,6 +3,25 @@ rm -rf gen-cpp thrift --gen "cpp:moveable_types,no_default_operators=true" parquet.thrift cp gen-cpp/* . 
sed -i .bak -e "s/std::vector/duckdb::vector/" parquet_types.* +sed -i .bak -e "s/namespace parquet/namespace duckdb_parquet/" parquet_types.* sed -i .bak -e 's/namespace duckdb_parquet {/#include "windows_compatibility.h"\nnamespace apache = duckdb_apache;\n\nnamespace duckdb_parquet {/' parquet_types.h +sed -i .bak '/namespace duckdb_parquet {/a\ +\ +template \ +static typename ENUM::type SafeEnumCast(const std::map &values_to_names, const int &ecast) {\ + if (values_to_names.find(ecast) == values_to_names.end()) {\ + throw duckdb_apache::thrift::protocol::TProtocolException(duckdb_apache::thrift::protocol::TProtocolException::INVALID_DATA);\ + }\ + return static_cast(ecast);\ +}\ +' parquet_types.cpp +sed -i .bak -e 's/static_cast(/SafeEnumCast(_Type_VALUES_TO_NAMES, /' parquet_types.* +sed -i .bak -e 's/static_cast(/SafeEnumCast(_ConvertedType_VALUES_TO_NAMES, /' parquet_types.* +sed -i .bak -e 's/static_cast(/SafeEnumCast(_FieldRepetitionType_VALUES_TO_NAMES, /' parquet_types.* +sed -i .bak -e 's/static_cast(/SafeEnumCast(_EdgeInterpolationAlgorithm_VALUES_TO_NAMES, /' parquet_types.* +sed -i .bak -e 's/static_cast(/SafeEnumCast(_Encoding_VALUES_TO_NAMES, /' parquet_types.* +sed -i .bak -e 's/static_cast(/SafeEnumCast(_CompressionCodec_VALUES_TO_NAMES, /' parquet_types.* +sed -i .bak -e 's/static_cast(/SafeEnumCast(_PageType_VALUES_TO_NAMES, /' parquet_types.* +sed -i .bak -e 's/static_cast(/SafeEnumCast(_BoundaryOrder_VALUES_TO_NAMES, /' parquet_types.* rm *.bak rm -rf gen-cpp \ No newline at end of file diff --git a/third_party/parquet/parquet.thrift b/third_party/parquet/parquet.thrift index 41b5dc912d33..c88ab1e03a8b 100644 --- a/third_party/parquet/parquet.thrift +++ b/third_party/parquet/parquet.thrift @@ -175,6 +175,14 @@ enum ConvertedType { * particular timezone or date. */ INTERVAL = 21; + + /** + * Non-standard NULL value + * + * This was written by old writers - it is kept here for compatibility purposes. + * See https://github.com/duckdb/duckdb/pull/11774 + */ + PARQUET_NULL = 24; } /** diff --git a/third_party/parquet/parquet_types.cpp b/third_party/parquet/parquet_types.cpp index 95cfbc3f7eb3..a508a69f2bd1 100644 --- a/third_party/parquet/parquet_types.cpp +++ b/third_party/parquet/parquet_types.cpp @@ -1,5 +1,5 @@ /** - * Autogenerated by Thrift Compiler (0.21.0) + * Autogenerated by Thrift Compiler (0.22.0) * * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING * @generated @@ -13,6 +13,14 @@ namespace duckdb_parquet { +template +static typename ENUM::type SafeEnumCast(const std::map &values_to_names, const int &ecast) { + if (values_to_names.find(ecast) == values_to_names.end()) { + throw duckdb_apache::thrift::protocol::TProtocolException(duckdb_apache::thrift::protocol::TProtocolException::INVALID_DATA); + } + return static_cast(ecast); +} + int _kTypeValues[] = { Type::BOOLEAN, Type::INT32, @@ -176,7 +184,14 @@ int _kConvertedTypeValues[] = { * the provided duration. This duration of time is independent of any * particular timezone or date. */ - ConvertedType::INTERVAL + ConvertedType::INTERVAL, + /** + * Non-standard NULL value + * + * This was written by old writers - it is kept here for compatibility purposes. + * See https://github.com/duckdb/duckdb/pull/11774 + */ + ConvertedType::PARQUET_NULL }; const char* _kConvertedTypeNames[] = { /** @@ -300,9 +315,16 @@ const char* _kConvertedTypeNames[] = { * the provided duration. This duration of time is independent of any * particular timezone or date. 
*/ - "INTERVAL" + "INTERVAL", + /** + * Non-standard NULL value + * + * This was written by old writers - it is kept here for compatibility purposes. + * See https://github.com/duckdb/duckdb/pull/11774 + */ + "PARQUET_NULL" }; -const std::map<int, const char*> _ConvertedType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(22, _kConvertedTypeValues, _kConvertedTypeNames), ::apache::thrift::TEnumIterator(-1, nullptr, nullptr)); +const std::map<int, const char*> _ConvertedType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(23, _kConvertedTypeValues, _kConvertedTypeNames), ::apache::thrift::TEnumIterator(-1, nullptr, nullptr)); std::ostream& operator<<(std::ostream& out, const ConvertedType::type& val) { std::map<int, const char*>::const_iterator it = _ConvertedType_VALUES_TO_NAMES.find(val); @@ -3446,7 +3468,7 @@ GeographyType::~GeographyType() noexcept { GeographyType::GeographyType() noexcept : crs(), - algorithm(static_cast<EdgeInterpolationAlgorithm::type>(0)) { + algorithm(SafeEnumCast<EdgeInterpolationAlgorithm>(_EdgeInterpolationAlgorithm_VALUES_TO_NAMES, 0)) { } void GeographyType::__set_crs(const std::string& val) { @@ -3498,7 +3520,7 @@ uint32_t GeographyType::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast114; xfer += iprot->readI32(ecast114); - this->algorithm = static_cast<EdgeInterpolationAlgorithm::type>(ecast114); + this->algorithm = SafeEnumCast<EdgeInterpolationAlgorithm>(_EdgeInterpolationAlgorithm_VALUES_TO_NAMES, ecast114); this->__isset.algorithm = true; } else { xfer += iprot->skip(ftype); @@ -4067,12 +4089,12 @@ SchemaElement::~SchemaElement() noexcept { } SchemaElement::SchemaElement() noexcept - : type(static_cast<Type::type>(0)), + : type(SafeEnumCast<Type>(_Type_VALUES_TO_NAMES, 0)), type_length(0), - repetition_type(static_cast<FieldRepetitionType::type>(0)), + repetition_type(SafeEnumCast<FieldRepetitionType>(_FieldRepetitionType_VALUES_TO_NAMES, 0)), name(), num_children(0), - converted_type(static_cast<ConvertedType::type>(0)), + converted_type(SafeEnumCast<ConvertedType>(_ConvertedType_VALUES_TO_NAMES, 0)), scale(0), precision(0), field_id(0) { @@ -4159,7 +4181,7 @@ uint32_t SchemaElement::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast123; xfer += iprot->readI32(ecast123); - this->type = static_cast<Type::type>(ecast123); + this->type = SafeEnumCast<Type>(_Type_VALUES_TO_NAMES, ecast123); this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -4177,7 +4199,7 @@ uint32_t SchemaElement::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast124; xfer += iprot->readI32(ecast124); - this->repetition_type = static_cast<FieldRepetitionType::type>(ecast124); + this->repetition_type = SafeEnumCast<FieldRepetitionType>(_FieldRepetitionType_VALUES_TO_NAMES, ecast124); this->__isset.repetition_type = true; } else { xfer += iprot->skip(ftype); @@ -4203,7 +4225,7 @@ uint32_t SchemaElement::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast125; xfer += iprot->readI32(ecast125); - this->converted_type = static_cast<ConvertedType::type>(ecast125); + this->converted_type = SafeEnumCast<ConvertedType>(_ConvertedType_VALUES_TO_NAMES, ecast125); this->__isset.converted_type = true; } else { xfer += iprot->skip(ftype); @@ -4405,9 +4427,9 @@ DataPageHeader::~DataPageHeader() noexcept { DataPageHeader::DataPageHeader() noexcept : num_values(0), - encoding(static_cast<Encoding::type>(0)), - definition_level_encoding(static_cast<Encoding::type>(0)), - repetition_level_encoding(static_cast<Encoding::type>(0)) { + encoding(SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, 0)), + definition_level_encoding(SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, 0)), + repetition_level_encoding(SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, 0)) { } void 
DataPageHeader::__set_num_values(const int32_t val) { @@ -4474,7 +4496,7 @@ uint32_t DataPageHeader::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast130; xfer += iprot->readI32(ecast130); - this->encoding = static_cast<Encoding::type>(ecast130); + this->encoding = SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, ecast130); isset_encoding = true; } else { xfer += iprot->skip(ftype); @@ -4484,7 +4506,7 @@ uint32_t DataPageHeader::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast131; xfer += iprot->readI32(ecast131); - this->definition_level_encoding = static_cast<Encoding::type>(ecast131); + this->definition_level_encoding = SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, ecast131); isset_definition_level_encoding = true; } else { xfer += iprot->skip(ftype); @@ -4494,7 +4516,7 @@ uint32_t DataPageHeader::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast132; xfer += iprot->readI32(ecast132); - this->repetition_level_encoding = static_cast<Encoding::type>(ecast132); + this->repetition_level_encoding = SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, ecast132); isset_repetition_level_encoding = true; } else { xfer += iprot->skip(ftype); @@ -4697,7 +4719,7 @@ DictionaryPageHeader::~DictionaryPageHeader() noexcept { DictionaryPageHeader::DictionaryPageHeader() noexcept : num_values(0), - encoding(static_cast<Encoding::type>(0)), + encoding(SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, 0)), is_sorted(0) { } @@ -4755,7 +4777,7 @@ uint32_t DictionaryPageHeader::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast141; xfer += iprot->readI32(ecast141); - this->encoding = static_cast<Encoding::type>(ecast141); + this->encoding = SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, ecast141); isset_encoding = true; } else { xfer += iprot->skip(ftype); @@ -4859,7 +4881,7 @@ DataPageHeaderV2::DataPageHeaderV2() noexcept : num_values(0), num_nulls(0), num_rows(0), - encoding(static_cast<Encoding::type>(0)), + encoding(SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, 0)), definition_levels_byte_length(0), repetition_levels_byte_length(0), is_compressed(true) { @@ -4960,7 +4982,7 @@ uint32_t DataPageHeaderV2::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast146; xfer += iprot->readI32(ecast146); - this->encoding = static_cast<Encoding::type>(ecast146); + this->encoding = SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, ecast146); isset_encoding = true; } else { xfer += iprot->skip(ftype); @@ -5867,7 +5889,7 @@ PageHeader::~PageHeader() noexcept { } PageHeader::PageHeader() noexcept - : type(static_cast<PageType::type>(0)), + : type(SafeEnumCast<PageType>(_PageType_VALUES_TO_NAMES, 0)), uncompressed_page_size(0), compressed_page_size(0), crc(0) { @@ -5944,7 +5966,7 @@ uint32_t PageHeader::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast179; xfer += iprot->readI32(ecast179); - this->type = static_cast<PageType::type>(ecast179); + this->type = SafeEnumCast<PageType>(_PageType_VALUES_TO_NAMES, ecast179); isset_type = true; } else { xfer += iprot->skip(ftype); @@ -6435,8 +6457,8 @@ PageEncodingStats::~PageEncodingStats() noexcept { } PageEncodingStats::PageEncodingStats() noexcept - : page_type(static_cast<PageType::type>(0)), - encoding(static_cast<Encoding::type>(0)), + : page_type(SafeEnumCast<PageType>(_PageType_VALUES_TO_NAMES, 0)), + encoding(SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, 0)), count(0) { } @@ -6486,7 +6508,7 @@ uint32_t 
PageEncodingStats::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast192; xfer += iprot->readI32(ecast192); - this->page_type = static_cast<PageType::type>(ecast192); + this->page_type = SafeEnumCast<PageType>(_PageType_VALUES_TO_NAMES, ecast192); isset_page_type = true; } else { xfer += iprot->skip(ftype); @@ -6496,7 +6518,7 @@ uint32_t PageEncodingStats::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast193; xfer += iprot->readI32(ecast193); - this->encoding = static_cast<Encoding::type>(ecast193); + this->encoding = SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, ecast193); isset_encoding = true; } else { xfer += iprot->skip(ftype); @@ -6593,8 +6615,8 @@ ColumnMetaData::~ColumnMetaData() noexcept { } ColumnMetaData::ColumnMetaData() noexcept - : type(static_cast<Type::type>(0)), - codec(static_cast<CompressionCodec::type>(0)), + : type(SafeEnumCast<Type>(_Type_VALUES_TO_NAMES, 0)), + codec(SafeEnumCast<CompressionCodec>(_CompressionCodec_VALUES_TO_NAMES, 0)), num_values(0), total_uncompressed_size(0), total_compressed_size(0), @@ -6721,7 +6743,7 @@ uint32_t ColumnMetaData::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast198; xfer += iprot->readI32(ecast198); - this->type = static_cast<Type::type>(ecast198); + this->type = SafeEnumCast<Type>(_Type_VALUES_TO_NAMES, ecast198); isset_type = true; } else { xfer += iprot->skip(ftype); @@ -6740,7 +6762,7 @@ uint32_t ColumnMetaData::read(::apache::thrift::protocol::TProtocol* iprot) { { int32_t ecast204; xfer += iprot->readI32(ecast204); - this->encodings[_i203] = static_cast<Encoding::type>(ecast204); + this->encodings[_i203] = SafeEnumCast<Encoding>(_Encoding_VALUES_TO_NAMES, ecast204); } xfer += iprot->readListEnd(); } @@ -6773,7 +6795,7 @@ uint32_t ColumnMetaData::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast210; xfer += iprot->readI32(ecast210); - this->codec = static_cast<CompressionCodec::type>(ecast210); + this->codec = SafeEnumCast<CompressionCodec>(_CompressionCodec_VALUES_TO_NAMES, ecast210); isset_codec = true; } else { xfer += iprot->skip(ftype); @@ -8651,7 +8673,7 @@ ColumnIndex::~ColumnIndex() noexcept { } ColumnIndex::ColumnIndex() noexcept - : boundary_order(static_cast<BoundaryOrder::type>(0)) { + : boundary_order(SafeEnumCast<BoundaryOrder>(_BoundaryOrder_VALUES_TO_NAMES, 0)) { } void ColumnIndex::__set_null_pages(const duckdb::vector<bool> & val) { @@ -8780,7 +8802,7 @@ uint32_t ColumnIndex::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_I32) { int32_t ecast310; xfer += iprot->readI32(ecast310); - this->boundary_order = static_cast<BoundaryOrder::type>(ecast310); + this->boundary_order = SafeEnumCast<BoundaryOrder>(_BoundaryOrder_VALUES_TO_NAMES, ecast310); isset_boundary_order = true; } else { xfer += iprot->skip(ftype); diff --git a/third_party/parquet/parquet_types.h index a872a3d6b0dc..762d3533aaf9 100644 --- a/third_party/parquet/parquet_types.h +++ b/third_party/parquet/parquet_types.h @@ -1,5 +1,5 @@ /** - * Autogenerated by Thrift Compiler (0.21.0) + * Autogenerated by Thrift Compiler (0.22.0) * * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING * @generated @@ -178,7 +178,14 @@ struct ConvertedType { * the provided duration. This duration of time is independent of any * particular timezone or date. */ - INTERVAL = 21 + INTERVAL = 21, + /** + * Non-standard NULL value + * + * This was written by old writers - it is kept here for compatibility purposes. 
+ * See https://github.com/duckdb/duckdb/pull/11774 + */ + PARQUET_NULL = 24 }; };
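The SafeEnumCast shim threaded through the generated readers above turns an out-of-range enum value in a corrupt or hostile Parquet file into a protocol error instead of an undefined enum constant. A minimal self-contained sketch of the same guard follows; the Color enum, the color_names map, and std::runtime_error are illustrative stand-ins, while the generated code uses the thrift _X_VALUES_TO_NAMES maps and TProtocolException.

#include <map>
#include <stdexcept>

// Illustrative enum in the thrift-generated style: a struct wrapping
// a nested `type` enum.
struct Color {
	enum type { RED = 0, GREEN = 1, BLUE = 2 };
};

// Same guard as the patch: only cast ints that appear in the
// values-to-names map built for the enum; reject everything else.
template <typename ENUM>
static typename ENUM::type SafeEnumCast(const std::map<int, const char *> &values_to_names, const int &ecast) {
	if (values_to_names.find(ecast) == values_to_names.end()) {
		throw std::runtime_error("invalid enum value in serialized data");
	}
	return static_cast<typename ENUM::type>(ecast);
}

int main() {
	const std::map<int, const char *> color_names {{0, "RED"}, {1, "GREEN"}, {2, "BLUE"}};
	Color::type ok = SafeEnumCast<Color>(color_names, 1); // GREEN
	(void)ok;
	// SafeEnumCast<Color>(color_names, 42) would throw instead of
	// silently producing an out-of-range enum value.
	return 0;
}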