
Commit a16e338

Merge pull request #325 from ClickHouse/no-no-verbose
Remove `--no-verbose` flag from wget
2 parents: 4ecf226 + 1c7b16f
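The change is mechanical: the same one-line edit repeated across every benchmark script. As a minimal before/after sketch of the wget behavior involved (flag semantics as documented by wget; the URL is one used throughout the repo):

# Before: --no-verbose (-nv) suppresses wget's normal output,
# printing only a single summary line per retrieval
wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'

# After: wget's default verbose output, including the progress bar;
# --continue (-c) still resumes a partially downloaded file
wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'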

64 files changed (+74 -74 lines). Not all files in this large commit are shown below.


aurora-mysql/README.md (+1 -1)

aurora-postgresql/README.md (+1 -1)

bigquery/README.md (+1 -1)

byconity/benchmark.sh (+1 -1)

@@ -12,7 +12,7 @@ function byconity()
 export -f byconity

 byconity --time -n < create.sql
-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 pigz -fkd hits.tsv.gz
 byconity --database bench --query "INSERT INTO hits FORMAT TSV" < hits.tsv

bytehouse/NOTES.md (+1 -1)

bytehouse/README.md (+1 -1)

chdb-dataframe/benchmark.sh (+1 -1)

@@ -8,7 +8,7 @@ pip install --break-system-packages pandas
 pip install --break-system-packages chdb==2.2.0b1

 # Download the data
-wget --no-verbose --continue https://datasets.clickhouse.com/hits_compatible/athena/hits.parquet
+wget --continue https://datasets.clickhouse.com/hits_compatible/athena/hits.parquet

 # Run the queries

chdb-parquet/benchmark.sh (+1 -1)

@@ -8,7 +8,7 @@ pip install --break-system-packages psutil
 pip install --break-system-packages chdb==2.2.0b1

 # Load the data
-seq 0 99 | xargs -P100 -I{} bash -c 'wget --no-verbose --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'
+seq 0 99 | xargs -P100 -I{} bash -c 'wget --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'

 # Run the queries
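Several scripts below use the same fan-out download for the 100 partitioned Parquet files. A commented sketch of that pattern, as it appears in these diffs:

# seq emits the partition indices 0..99, one per line.
# xargs -P100 runs up to 100 wget processes in parallel; -I{} substitutes
# each index into the command, fetching hits_0.parquet .. hits_99.parquet.
# --continue lets a re-run resume any partially downloaded part.
seq 0 99 | xargs -P100 -I{} bash -c 'wget --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'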

chdb/benchmark.sh (+1 -1)

@@ -7,7 +7,7 @@ pip install --break-system-packages psutil
 pip install --break-system-packages chdb==2.2.0b1

 # Load the data
-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
 gzip -d hits.csv.gz
 ./load.py

citus/benchmark.sh (+1 -1)

@@ -6,7 +6,7 @@ sudo apt-get install -y postgresql-client

 sudo docker run -d --name citus -p 5432:5432 -e POSTGRES_PASSWORD=mypass citusdata/citus:11.0

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz

 echo "*:*:*:*:mypass" > .pgpass

clickhouse-parquet/benchmark.sh (+2 -2)

@@ -5,10 +5,10 @@
 curl https://clickhouse.com/ | sh

 # Use for ClickHouse (Parquet, single)
-# wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.parquet'
+# wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.parquet'

 # Use for ClickHouse (Parquet, partitioned)
-seq 0 99 | xargs -P100 -I{} bash -c 'wget --no-verbose --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'
+seq 0 99 | xargs -P100 -I{} bash -c 'wget --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'

 # Run the queries

clickhouse-parquet/cloud-init.sh (+2 -2)

@@ -12,12 +12,12 @@ chmod +x *.sh

 curl https://clickhouse.com/ | sh

-seq 0 99 | xargs -P100 -I{} bash -c 'wget --no-verbose --continue https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/athena_partitioned/hits_{}.parquet'
+seq 0 99 | xargs -P100 -I{} bash -c 'wget --continue https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/athena_partitioned/hits_{}.parquet'

 echo "Partitioned:" > log
 ./run.sh >> log

-wget --no-verbose --continue 'https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/hits.parquet'
+wget --continue 'https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/hits.parquet'

 sed -i 's/hits_\*\.parquet/hits.parquet/' create.sql

clickhouse/benchmark.sh (+1 -1)

@@ -35,7 +35,7 @@ fi

 clickhouse-client < create"$SUFFIX".sql

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz

 clickhouse-client --time --query "INSERT INTO hits FORMAT TSV" < hits.tsv

cratedb/benchmark.sh (+1 -1)

@@ -34,7 +34,7 @@ do
     sleep 1
 done

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz' -O /tmp/hits.tsv.gz
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz' -O /tmp/hits.tsv.gz
 gzip -d /tmp/hits.tsv.gz
 chmod 444 /tmp/hits.tsv

databend/benchmark.sh (+1 -1)

@@ -26,7 +26,7 @@ CONF
 # Docs: https://databend.rs/doc/use-cases/analyze-hits-dataset-with-databend
 curl 'http://default@localhost:8124/' --data-binary @create.sql

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz

 ## Aws gp2 write performance is not stable, we must load the data when disk's write around ~500MB/s (Don't know much about the rules of gp2)

datafusion/README.md (+1 -1)

datafusion/benchmark.sh (+2 -2)

@@ -21,11 +21,11 @@ cd ../..


 # Download benchmark target data, single file
-wget --no-verbose --continue https://datasets.clickhouse.com/hits_compatible/hits.parquet
+wget --continue https://datasets.clickhouse.com/hits_compatible/hits.parquet

 # Download benchmark target data, partitioned
 mkdir -p partitioned
-seq 0 99 | xargs -P100 -I{} bash -c 'wget --no-verbose --directory-prefix partitioned --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'
+seq 0 99 | xargs -P100 -I{} bash -c 'wget --directory-prefix partitioned --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'

 # Run benchmarks for single parquet and partitioned
 ./run.sh single

doris/benchmark.sh (+2 -2)

@@ -15,7 +15,7 @@ fi
 file_name="$(basename ${url})"
 if [[ "$url" == "http"* ]]; then
     if [[ ! -f $file_name ]]; then
-        wget --no-verbose --continue ${url}
+        wget --continue ${url}
     else
         echo "$file_name already exists, no need to download."
     fi

@@ -88,7 +88,7 @@ mysql -h 127.0.0.1 -P9030 -uroot hits <"$ROOT"/create.sql

 # Download data
 if [[ ! -f hits.tsv.gz ]] && [[ ! -f hits.tsv ]]; then
-    wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+    wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
     gzip -d hits.tsv.gz
 fi
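The Doris script is the only one here that guards the download with an existence check. A minimal sketch of that idiom, with names as in the hunk above; note that the guard skips wget whenever a file of that name exists, complete or not, so --continue only comes into play on invocations where wget actually runs.

file_name="$(basename ${url})"      # strip the URL down to the bare file name
if [[ ! -f $file_name ]]; then
    wget --continue ${url}          # fetch only when the file is absent
else
    echo "$file_name already exists, no need to download."
fi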

drill/benchmark.sh (+1 -1)

@@ -3,7 +3,7 @@
 sudo apt-get update
 sudo apt-get install -y docker.io

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.parquet'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.parquet'

 ./run.sh 2>&1 | tee log.txt

druid/benchmark.sh (+1 -1)

@@ -26,7 +26,7 @@ echo "druid.query.groupBy.maxMergingDictionarySize=5000000000" >> apache-druid-$

 # Load the data

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz

 ./apache-druid-${VERSION}/bin/post-index-task --file ingest.json --url http://localhost:8081

duckdb-dataframe/benchmark.sh (+1 -1)

@@ -7,7 +7,7 @@ sudo apt-get install -y python3-pip
 pip install --break-system-packages pandas duckdb==1.1.3

 # Download the data
-wget --no-verbose --continue https://datasets.clickhouse.com/hits_compatible/athena/hits.parquet
+wget --continue https://datasets.clickhouse.com/hits_compatible/athena/hits.parquet

 # Run the queries

duckdb-memory/benchmark.sh (+1 -1)

@@ -8,7 +8,7 @@ pip install --break-system-packages duckdb==1.1.3 psutil

 # Load the data

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
 gzip -d hits.csv.gz

 # Run the queries

duckdb-parquet/benchmark.sh (+1 -1)

@@ -14,7 +14,7 @@ export PATH="$PATH:`pwd`/build/release/"
 cd ..

 # Load the data
-seq 0 99 | xargs -P100 -I{} bash -c 'wget --no-verbose --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'
+seq 0 99 | xargs -P100 -I{} bash -c 'wget --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'

 time duckdb hits.db -f create.sql

duckdb/benchmark.sh (+1 -1)

@@ -14,7 +14,7 @@ export PATH="$PATH:`pwd`/build/release/"
 cd ..

 # Load the data
-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz

 time duckdb hits.db -f create.sql -c "COPY hits FROM 'hits.tsv' (QUOTE '')"

heavyai/benchmark.sh (+1 -1)

@@ -27,7 +27,7 @@ sudo systemctl enable heavydb

 # Load the data

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
 gzip -d hits.csv.gz
 chmod 777 ~ hits.csv

hydra/benchmark.sh (+1 -1)

@@ -7,7 +7,7 @@ sudo ./install.sh

 # download hits.tsv if we dont already have it
 if [ ! -e hits.tsv ]; then
-    wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+    wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
     gzip -d hits.tsv.gz
 fi

hyper-parquet/benchmark.sh (+1 -1)

@@ -4,7 +4,7 @@ sudo apt-get update
 sudo apt-get install -y python3-pip
 pip install --break-system-packages tableauhyperapi

-seq 0 99 | xargs -P100 -I{} bash -c 'wget --no-verbose --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'
+seq 0 99 | xargs -P100 -I{} bash -c 'wget --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'

 ./run.sh | tee log.txt

hyper/benchmark.sh (+1 -1)

@@ -4,7 +4,7 @@ sudo apt-get update
 sudo apt-get install -y python3-pip
 pip install --break-system-packages tableauhyperapi

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
 gzip -d hits.csv.gz

 ./load.py

infobright/benchmark.sh (+1 -1)

@@ -13,7 +13,7 @@ sudo docker run -it --rm --network host mysql:5 mysql --host 127.0.0.1 --port 50

 # Load the data

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz

 # ERROR 2 (HY000) at line 1: Wrong data or column definition. Row: 93557187, field: 100.

kinetica/benchmark.sh (+1 -1)

@@ -13,7 +13,7 @@ export KI_PWD=admin
 CLI="./kisql --host localhost --user admin"

 # download the ds
-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
 gzip -d hits.csv.gz

 # prepare the ds for ingestion; bigger files cause out of memory error

locustdb/benchmark.sh (+1 -1)

@@ -14,7 +14,7 @@ sudo apt-get install -y g++ capnproto libclang-14-dev

 cargo build --features "enable_rocksdb" --features "enable_lz4" --release

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
 gzip -d hits.csv.gz

 target/release/repl --load hits.csv --db-path db

mariadb-columnstore/benchmark.sh (+1 -1)

@@ -16,7 +16,7 @@ mysql --password="${PASSWORD}" --host 127.0.0.1 test < create.sql

 # Load the data

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz

 time mysql --password="${PASSWORD}" --host 127.0.0.1 test -e "

mariadb/benchmark.sh (+1 -1)

@@ -14,7 +14,7 @@ sudo service mariadb restart

 # Load the data

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz

 sudo mariadb -e "CREATE DATABASE test"

monetdb/benchmark.sh (+1 -1)

@@ -22,7 +22,7 @@ sudo apt-get install -y expect

 ./query.expect "$(cat create.sql)"

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz
 chmod 777 ~ hits.tsv

mongodb/benchmark.sh (+1 -1)

@@ -55,7 +55,7 @@ time mongosh --quiet --eval 'db.hits.createIndex({"ClientIP": 1, "WatchID": 1, "

 #################################
 # Load data and import
-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz

 # Use mongo import to load data into mongo. By default numInsertionWorkers is 1 so change to half of VM where it would be run

mysql-myisam/benchmark.sh (+1 -1)

@@ -9,7 +9,7 @@ sudo service mysql restart

 # Load the data

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz

 sudo mysql -e "CREATE DATABASE test"

mysql/benchmark.sh (+1 -1)

@@ -9,7 +9,7 @@ sudo service mysql restart

 # Load the data

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz'
 gzip -d hits.tsv.gz

 sudo mysql -e "CREATE DATABASE test"

octosql/benchmark.sh (+1 -1)

@@ -3,7 +3,7 @@
 wget https://github.com/cube2222/octosql/releases/download/v0.13.0/octosql_0.13.0_linux_amd64.tar.gz
 tar xf octosql_0.13.0_linux_amd64.tar.gz

-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.parquet'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.parquet'

 ./run.sh 2>&1 | tee log.txt

opteryx/benchmark.sh (+1 -1)

@@ -19,7 +19,7 @@ source ~/opteryx_venv/bin/activate

 # Download benchmark target data, partitioned
 mkdir -p hits
-seq 0 99 | xargs -P100 -I{} bash -c 'wget --no-verbose --directory-prefix hits --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'
+seq 0 99 | xargs -P100 -I{} bash -c 'wget --directory-prefix hits --continue https://datasets.clickhouse.com/hits_compatible/athena_partitioned/hits_{}.parquet'

 # Run a simple query to check the installation
 ~/opteryx_venv/bin/python -m opteryx "SELECT version()" 2>&1

oxla/benchmark.sh (+2 -2)

@@ -9,7 +9,7 @@ sudo DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential

 # download dataset
 echo "Download dataset."
-wget --no-verbose --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
+wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.csv.gz'
 echo "Unpack dataset."
 gzip -d hits.csv.gz
 mkdir data

@@ -37,4 +37,4 @@ sleep 60

 # run benchmark
 echo "running benchmark..."
-./run.sh
+./run.sh
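(In the second oxla hunk the removed and added `./run.sh` lines are textually identical; in a unified diff that pattern normally indicates an invisible change, such as trailing whitespace or a missing newline at the end of the file being fixed.)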

pandas/benchmark.sh (+1 -1)

@@ -7,7 +7,7 @@ sudo apt-get install -y python3-pip
 pip install --break-system-packages pandas

 # Download the data
-wget --no-verbose --continue https://datasets.clickhouse.com/hits_compatible/athena/hits.parquet
+wget --continue https://datasets.clickhouse.com/hits_compatible/athena/hits.parquet

 # Run the queries
