fix: ClientBuilder auth info drop before connect #258

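The change itself lives in the crate, not in this workflow: per the title, auth info set on ClientBuilder (user, Kerberos ticket cache) could be dropped before the underlying connect call, presumably causing connections to fall back to the default identity. As a rough illustration, here is a minimal sketch of the affected flow, modeled on the crate's README-style usage; with_user, with_kerberos_ticket_cache_path, and the exact signatures are assumptions here, not lines from this patch:

use hdrs::ClientBuilder;

fn main() -> std::io::Result<()> {
    // Auth info is configured on the builder. The bug this PR fixes, as the
    // title describes it, is that this info could be dropped before the
    // actual connect happened. (Sketch only; method names assumed.)
    let fs = ClientBuilder::new("default")
        .with_user("hadoop")
        .with_kerberos_ticket_cache_path("/tmp/krb5cc_1000")
        .connect()?;

    // If the builder state survives until connect, this runs as "hadoop".
    println!("is_dir: {}", fs.metadata("/tmp/hdrs/")?.is_dir());
    Ok(())
}

With the fix, whatever is set on the builder should persist until the connection is actually established, which is what the CI runs below exercise.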
Workflow file for this run

name: CI
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
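# One active run per workflow/ref/event; a newer run cancels the in-progress one.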
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }}
  cancel-in-progress: true
jobs:
  check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Format
        run: cargo fmt --all -- --check
      - name: Clippy with all features
        run: cargo clippy --all-features --all-targets -- -D warnings
  test-default:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup java env
        uses: actions/setup-java@v3
        with:
          distribution: temurin
          java-version: "11"
      - name: Setup hadoop env
        shell: bash
        run: |
          curl -LsSf https://dlcdn.apache.org/hadoop/common/hadoop-3.3.5/hadoop-3.3.5.tar.gz | tar zxf - -C /home/runner
          echo "HADOOP_HOME=/home/runner/hadoop-3.3.5" >> $GITHUB_ENV
      - name: Test
        shell: bash
        run: |
          export CLASSPATH=$(${HADOOP_HOME}/bin/hadoop classpath --glob)
          cargo test --features async_file -- --nocapture
        env:
          LD_LIBRARY_PATH: ${{ env.JAVA_HOME }}/lib/server:${{ env.HADOOP_HOME }}/lib/native
          HDRS_TEST: on
          HDRS_NAMENODE: default
          HDRS_WORKDIR: /tmp/hdrs/
  test-cluster:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Configure Hdfs
        # namenode will use ports: 9870, 9000, 8020
        # datanode will use ports: 9864
        run: |
          docker run -d \
            --name namenode \
            --network host \
            -e CLUSTER_NAME=test \
            -e WEBHDFS_CONF_dfs_webhdfs_enabled=true \
            -e CORE_CONF_hadoop_http_staticuser_user=root \
            -e HDFS_CONF_dfs_permissions_enabled=false \
            bde2020/hadoop-namenode:2.0.0-hadoop3.1.3-java8
          docker run -d \
            --name datanode \
            --network host \
            -e CLUSTER_NAME=test \
            -e WEBHDFS_CONF_dfs_webhdfs_enabled=true \
            -e CORE_CONF_hadoop_http_staticuser_user=root \
            -e HDFS_CONF_dfs_permissions_enabled=false \
            bde2020/hadoop-datanode:2.0.0-hadoop3.1.3-java8
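          # The containers come up asynchronously; block until the namenode web UI answers.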
          curl --retry 30 --retry-delay 1 --retry-connrefused http://localhost:9870
      - name: Setup java env
        uses: actions/setup-java@v3
        with:
          distribution: temurin
          java-version: "11"
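      # Use the same Hadoop client version as the dockerized cluster (3.1.3).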
      - name: Setup hadoop env
        shell: bash
        run: |
          curl -LsSf https://archive.apache.org/dist/hadoop/common/hadoop-3.1.3/hadoop-3.1.3.tar.gz | tar zxf - -C /home/runner
          echo "HADOOP_HOME=/home/runner/hadoop-3.1.3" >> $GITHUB_ENV
      - name: Test
        shell: bash
        run: |
          export CLASSPATH=$(${HADOOP_HOME}/bin/hadoop classpath --glob)
          cargo test --features async_file -- --nocapture
        env:
          LD_LIBRARY_PATH: ${{ env.JAVA_HOME }}/lib/server:${{ env.HADOOP_HOME }}/lib/native
          HDRS_TEST: on
          HDRS_INTEGRATED_TEST: on
          HDRS_NAMENODE: hdfs://localhost:8020
          HDRS_WORKDIR: /tmp/hdrs/
  test-vendored:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup java env
        uses: actions/setup-java@v3
        with:
          distribution: temurin
          java-version: "11"
      - name: Setup hadoop env
        shell: bash
        run: |
          curl -LsSf https://dlcdn.apache.org/hadoop/common/hadoop-3.3.5/hadoop-3.3.5.tar.gz | tar zxf - -C /home/runner
          echo "HADOOP_HOME=/home/runner/hadoop-3.3.5" >> $GITHUB_ENV
      - name: Test
        shell: bash
        run: |
          export CLASSPATH=$(${HADOOP_HOME}/bin/hadoop classpath --glob)
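          # With vendored enabled, libhdfs is built into the binary; only libjvm is loaded at runtime.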
          cargo test --features async_file,vendored -- --nocapture
        env:
          # If vendored has been enabled, we don't need to load native libs
          LD_LIBRARY_PATH: ${{ env.JAVA_HOME }}/lib/server
          HDRS_TEST: on
          HDRS_NAMENODE: default
          HDRS_WORKDIR: /tmp/hdrs/