Commit

Merge branch 'main' into feature/cli-command-improvements
Brijeshthummar02 authored Mar 6, 2025
2 parents df82021 + 6451189 commit c7bb843
Showing 14 changed files with 244 additions and 236 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/docker-image.yml
@@ -106,13 +106,13 @@ jobs:
         uses: docker/setup-qemu-action@v3

       - name: Login to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ github.event.inputs.username }}
           password: ${{ secrets.DOCKER_REPOSITORY_PASSWORD }}

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3

       - uses: actions/checkout@v4
2 changes: 1 addition & 1 deletion .github/workflows/python-integration-test.yml
@@ -48,7 +48,7 @@ jobs:
     needs: changes
     if: needs.changes.outputs.source_changes == 'true'
     runs-on: ubuntu-latest
-    timeout-minutes: 45
+    timeout-minutes: 60
     strategy:
       matrix:
         # Integration test for AMD64 architecture
3 changes: 2 additions & 1 deletion .github/workflows/trino-integration-test.yml
@@ -87,7 +87,8 @@ jobs:
         run: |
           ./gradlew -PskipTests -PtestMode=embedded -PjdkVersion=${{ matrix.java-version }} -PskipDockerTests=false :trino-connector:integration-test:test
           ./gradlew -PskipTests -PtestMode=deploy -PjdkVersion=${{ matrix.java-version }} -PskipDockerTests=false :trino-connector:integration-test:test
-          trino-connector/integration-test/trino-test-tools/run_test.sh
+          # Disable the Trino cascading query integration test, because the connector jars are private now.
+          #trino-connector/integration-test/trino-test-tools/run_test.sh
       - name: Upload integration test reports
         uses: actions/upload-artifact@v4

@@ -56,16 +56,14 @@ public class OSSFileSystemProvider implements FileSystemProvider, SupportsCreden

   @Override
   public FileSystem getFileSystem(Path path, Map<String, String> config) throws IOException {
-    Configuration configuration = new Configuration();
-
     Map<String, String> hadoopConfMap =
         FileSystemUtils.toHadoopConfigMap(config, GRAVITINO_KEY_TO_OSS_HADOOP_KEY);
     // OSS does not use the service loader to load the file system, so we need to set the impl class
     if (!hadoopConfMap.containsKey(OSS_FILESYSTEM_IMPL)) {
       hadoopConfMap.put(OSS_FILESYSTEM_IMPL, AliyunOSSFileSystem.class.getCanonicalName());
     }

-    hadoopConfMap.forEach(configuration::set);
+    Configuration configuration = FileSystemUtils.createConfiguration(hadoopConfMap);

     return AliyunOSSFileSystem.newInstance(path.toUri(), configuration);
   }

@@ -62,18 +62,17 @@ public class S3FileSystemProvider implements FileSystemProvider, SupportsCredent

   @Override
   public FileSystem getFileSystem(Path path, Map<String, String> config) throws IOException {
-    Configuration configuration = new Configuration();
     Map<String, String> hadoopConfMap =
         FileSystemUtils.toHadoopConfigMap(config, GRAVITINO_KEY_TO_S3_HADOOP_KEY);

-    hadoopConfMap.forEach(configuration::set);
     if (!hadoopConfMap.containsKey(S3_CREDENTIAL_KEY)) {
-      configuration.set(S3_CREDENTIAL_KEY, S3_SIMPLE_CREDENTIAL);
+      hadoopConfMap.put(S3_CREDENTIAL_KEY, S3_SIMPLE_CREDENTIAL);
     }

     // Hadoop-aws 2 does not support IAMInstanceCredentialsProvider
-    checkAndSetCredentialProvider(configuration);
+    checkAndSetCredentialProvider(hadoopConfMap);

+    Configuration configuration = FileSystemUtils.createConfiguration(hadoopConfMap);
     return S3AFileSystem.newInstance(path.toUri(), configuration);
   }

@@ -89,8 +88,8 @@ public Map<String, String> getFileSystemCredentialConf(Credential[] credentials)
     return result;
   }

-  private void checkAndSetCredentialProvider(Configuration configuration) {
-    String provides = configuration.get(S3_CREDENTIAL_KEY);
+  private void checkAndSetCredentialProvider(Map<String, String> configs) {
+    String provides = configs.get(S3_CREDENTIAL_KEY);
     if (provides == null) {
       return;
     }

@@ -115,15 +114,15 @@ private void checkAndSetCredentialProvider(Configuration configuration) {
         LOG.warn(
             "Credential provider {} not found in the Hadoop runtime, falling back to default",
             provider);
-        configuration.set(S3_CREDENTIAL_KEY, S3_SIMPLE_CREDENTIAL);
+        configs.put(S3_CREDENTIAL_KEY, S3_SIMPLE_CREDENTIAL);
         return;
       }
     }

     if (validProviders.isEmpty()) {
-      configuration.set(S3_CREDENTIAL_KEY, S3_SIMPLE_CREDENTIAL);
+      configs.put(S3_CREDENTIAL_KEY, S3_SIMPLE_CREDENTIAL);
     } else {
-      configuration.set(S3_CREDENTIAL_KEY, joiner.join(validProviders));
+      configs.put(S3_CREDENTIAL_KEY, joiner.join(validProviders));
     }
   }
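To make the refactor concrete: the hunks above only change where the credential list is staged (a plain map instead of a live Configuration); the fallback semantics are unchanged. Below is a minimal, self-contained sketch of that fallback, assuming S3_CREDENTIAL_KEY and S3_SIMPLE_CREDENTIAL carry the stock hadoop-aws values shown in the comments; the diff itself does not display them.

import java.util.HashMap;
import java.util.Map;

public class CredentialFallbackSketch {
  public static void main(String[] args) {
    // Assumed value of S3_CREDENTIAL_KEY (the standard hadoop-aws property name).
    String credentialKey = "fs.s3a.aws.credentials.provider";
    Map<String, String> configs = new HashMap<>();
    // Hypothetical provider class that is not on the classpath.
    configs.put(credentialKey, "com.example.MissingProvider");

    // Mirrors what checkAndSetCredentialProvider does when a class lookup fails:
    // fall back to the simple access-key provider (assumed S3_SIMPLE_CREDENTIAL value).
    try {
      Class.forName(configs.get(credentialKey));
    } catch (ClassNotFoundException e) {
      configs.put(credentialKey, "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider");
    }
    System.out.println(configs.get(credentialKey)); // prints the simple provider
  }
}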

@@ -54,7 +54,6 @@ public class AzureFileSystemProvider implements FileSystemProvider, SupportsCred
   @Override
   public FileSystem getFileSystem(@Nonnull Path path, @Nonnull Map<String, String> config)
       throws IOException {
-    Configuration configuration = new Configuration();

     Map<String, String> hadoopConfMap =
         FileSystemUtils.toHadoopConfigMap(config, ImmutableMap.of());

@@ -69,11 +68,10 @@ public FileSystem getFileSystem(@Nonnull Path path, @Nonnull Map<String, String>
     }

     if (!hadoopConfMap.containsKey(ABFS_IMPL_KEY)) {
-      configuration.set(ABFS_IMPL_KEY, ABFS_IMPL);
+      hadoopConfMap.put(ABFS_IMPL_KEY, ABFS_IMPL);
     }

-    hadoopConfMap.forEach(configuration::set);
-
+    Configuration configuration = FileSystemUtils.createConfiguration(hadoopConfMap);
     return FileSystem.newInstance(path.toUri(), configuration);
   }

@@ -45,10 +45,9 @@ public class GCSFileSystemProvider implements FileSystemProvider, SupportsCreden

   @Override
   public FileSystem getFileSystem(Path path, Map<String, String> config) throws IOException {
-    Configuration configuration = new Configuration();
-    FileSystemUtils.toHadoopConfigMap(config, GRAVITINO_KEY_TO_GCS_HADOOP_KEY)
-        .forEach(configuration::set);
-
+    Map<String, String> hadoopConfMap =
+        FileSystemUtils.toHadoopConfigMap(config, GRAVITINO_KEY_TO_GCS_HADOOP_KEY);
+    Configuration configuration = FileSystemUtils.createConfiguration(hadoopConfMap);
     return FileSystem.newInstance(path.toUri(), configuration);
   }

@@ -22,7 +22,9 @@
 import com.google.common.collect.ImmutableMap;
 import java.util.Map;
 import java.util.stream.Stream;
+import org.apache.hadoop.conf.Configuration;
 import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.Arguments;
 import org.junit.jupiter.params.provider.MethodSource;

@@ -38,6 +40,21 @@ void testToHadoopConfigMap(
     Assertions.assertEquals(toHadoopConf, result);
   }

+  @Test
+  void testCreateConfiguration() {
+    Map<String, String> confMap =
+        ImmutableMap.of(
+            "s3a-endpoint", "v1",
+            "fs.s3a.impl", "v2",
+            "fs.s3a.endpoint", "v3",
+            "gravitino.bypass.fs.s3a.endpoint", "v4");
+    Configuration configuration = FileSystemUtils.createConfiguration(confMap);
+    Assertions.assertEquals("v1", configuration.get("s3a-endpoint"));
+    Assertions.assertEquals("v2", configuration.get("fs.s3a.impl"));
+    Assertions.assertEquals("v3", configuration.get("fs.s3a.endpoint"));
+    Assertions.assertEquals("v4", configuration.get("gravitino.bypass.fs.s3a.endpoint"));
+  }
+
   private static Stream<Arguments> mapArguments() {
     return Stream.of(
         Arguments.of(

@@ -25,16 +25,26 @@
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.collect.Streams;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.util.Arrays;
 import java.util.Locale;
 import java.util.Map;
 import java.util.ServiceLoader;
 import java.util.Set;
 import java.util.stream.Collectors;
+import javax.xml.stream.XMLOutputFactory;
+import javax.xml.stream.XMLStreamWriter;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;

 public class FileSystemUtils {

+  private static final String CONFIG_ROOT = "configuration";
+  private static final String PROPERTY_TAG = "property";
+  private static final String NAME_TAG = "name";
+  private static final String VALUE_TAG = "value";
+
   private FileSystemUtils() {}

   public static Map<String, FileSystemProvider> getFileSystemProviders(String fileSystemProviders) {

@@ -183,4 +193,62 @@ public static GravitinoFileSystemCredentialsProvider getGvfsCredentialProvider(
       throw new RuntimeException("Failed to create GravitinoFileSystemCredentialProvider", e);
     }
   }
+
+  /**
+   * Create a configuration from the config map.
+   *
+   * @param config properties map.
+   * @return a Hadoop {@link Configuration} holding every entry of the map.
+   */
+  public static Configuration createConfiguration(Map<String, String> config) {
+    return createConfiguration(null, config);
+  }
+
+  /**
+   * Create a configuration from the config map, stripping a bypass prefix from the keys.
+   *
+   * @param bypass prefix to remove from the config keys.
+   * @param config properties map.
+   * @return a Hadoop {@link Configuration} holding the entries under their stripped keys.
+   */
+  public static Configuration createConfiguration(String bypass, Map<String, String> config) {
+    try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
+      XMLStreamWriter writer = XMLOutputFactory.newInstance().createXMLStreamWriter(out);
+      writer.writeStartDocument();
+      writer.writeStartElement(CONFIG_ROOT);
+
+      config.forEach(
+          (k, v) ->
+              writeProperty(writer, StringUtils.isNotBlank(bypass) ? k.replace(bypass, "") : k, v));
+      writer.writeEndElement();
+      writer.writeEndDocument();
+      writer.close();
+
+      return new Configuration() {
+        {
+          addResource(new ByteArrayInputStream(out.toByteArray()));
+        }
+      };
+    } catch (Exception e) {
+      throw new RuntimeException("Failed to create configuration", e);
+    }
+  }
+
+  private static void writeProperty(XMLStreamWriter writer, String key, String value) {
+    try {
+      writer.writeStartElement(PROPERTY_TAG);
+      writeElement(writer, NAME_TAG, key);
+      writeElement(writer, VALUE_TAG, value);
+      writer.writeEndElement();
+    } catch (Exception e) {
+      throw new RuntimeException("Failed to write property: " + key, e);
+    }
+  }
+
+  private static void writeElement(XMLStreamWriter writer, String tag, String content)
+      throws Exception {
+    writer.writeStartElement(tag);
+    writer.writeCharacters(content);
+    writer.writeEndElement();
+  }
 }
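Since createConfiguration is the pivot of this commit, a short usage sketch may help. It is illustrative only: the config keys are made up, the "gravitino.bypass." literal is an assumption about what GRAVITINO_BYPASS resolves to, and FileSystemUtils is assumed to be importable from the caller's package.

import com.google.common.collect.ImmutableMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class CreateConfigurationDemo {
  public static void main(String[] args) {
    // Made-up example keys; the bypass prefix value is an assumption.
    Map<String, String> props =
        ImmutableMap.of(
            "fs.defaultFS", "hdfs://nn:8020",
            "gravitino.bypass.fs.s3a.endpoint", "http://localhost:9000");

    // Without a bypass prefix, every key lands in the Configuration verbatim.
    Configuration plain = FileSystemUtils.createConfiguration(props);
    System.out.println(plain.get("gravitino.bypass.fs.s3a.endpoint")); // http://localhost:9000

    // With a bypass prefix, matching keys are stripped before they are written,
    // so callers read them back under their plain Hadoop names.
    Configuration stripped = FileSystemUtils.createConfiguration("gravitino.bypass.", props);
    System.out.println(stripped.get("fs.s3a.endpoint")); // http://localhost:9000
  }
}

Note the design choice: rather than calling configuration.set() per entry, the helper serializes the map to an in-memory Hadoop XML resource and registers it via addResource(), which keeps the provider implementations uniform across HDFS, local, S3, OSS, GCS, and Azure.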

@@ -32,11 +32,7 @@ public class HDFSFileSystemProvider implements FileSystemProvider {
   @Override
   public FileSystem getFileSystem(@Nonnull Path path, @Nonnull Map<String, String> config)
       throws IOException {
-    Configuration configuration = new Configuration();
-    config.forEach(
-        (k, v) -> {
-          configuration.set(k.replace(GRAVITINO_BYPASS, ""), v);
-        });
+    Configuration configuration = FileSystemUtils.createConfiguration(GRAVITINO_BYPASS, config);
     return FileSystem.newInstance(path.toUri(), configuration);
   }

@@ -31,12 +31,8 @@ public class LocalFileSystemProvider implements FileSystemProvider {

   @Override
   public FileSystem getFileSystem(Path path, Map<String, String> config) throws IOException {
-    Configuration configuration = new Configuration();
-    config.forEach(
-        (k, v) -> {
-          configuration.set(k.replace(BUILTIN_HDFS_FS_PROVIDER, ""), v);
-        });
-
+    Configuration configuration =
+        FileSystemUtils.createConfiguration(BUILTIN_HDFS_FS_PROVIDER, config);
     return FileSystem.newInstance(path.toUri(), configuration);
  }
1 change: 0 additions & 1 deletion web/web/package.json
@@ -25,7 +25,6 @@
     "@mui/lab": "5.0.0-alpha.170",
     "@mui/material": "^5.16.14",
     "@mui/x-data-grid": "^6.20.3",
-    "@mui/x-tree-view": "^6.17.0",
     "@reduxjs/toolkit": "^1.9.7",
     "antd": "^5.24.2",
     "axios": "^1.7.4",