raw_data/get_download_links.py
import os
# Base URL for the links
base_url = "https://huggingface.co/datasets/rulins/raw_data/resolve/main"
# List to store the generated URLs
urls = []
# Walk through the directory
for root, dirs, files in os.walk('.'):
    for file in files:
        if file.endswith('.jsonl'):
            # Get the relative path
            relative_path = os.path.relpath(os.path.join(root, file), '.')
            # Replace backslashes with forward slashes for URLs
            relative_path = relative_path.replace(os.sep, '/')
            # Generate the URL
            url = f"{base_url}/{relative_path}?download=true"
            urls.append((url, relative_path))
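# As an illustration, a hypothetical file at "subdir/data.jsonl" would map to
# "https://huggingface.co/datasets/rulins/raw_data/resolve/main/subdir/data.jsonl?download=true".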
# Create a bash script
with open('download_files.sh', 'w') as bash_script:
    bash_script.write("#!/bin/bash\n\n")
    for url, relative_path in urls:
        # Extract directory path
        dir_path = os.path.dirname(relative_path)
        # Add command to create directory if it doesn't exist
        if dir_path:
            bash_script.write(f"mkdir -p \"{dir_path}\"\n")
        bash_script.write(f"if [ ! -f \"{relative_path}\" ]; then\n")
        bash_script.write(f"  wget -O \"{relative_path}\" \"{url}\"\n")
        bash_script.write("fi\n\n")
print("Bash script 'download_files.sh' has been created.")
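# Usage sketch (assumes this script is run from the root of a local checkout of
# the rulins/raw_data dataset, so os.walk('.') can see the .jsonl files):
#   python get_download_links.py
#   bash download_files.sh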