While downloading data for a paper recently, I ran into a dataset that was a real headache, as shown in the screenshot below:

[screenshot: NEMO archive directory listing]

As you can see, there are lots of folders, and clicking into each one reveals yet more files. There are probably over a hundred in total, so downloading them by hand would clearly take a great deal of time and effort. The BICCN files I've been working with are basically all organized this way, so for my own convenience I wrote a small crawler that can directly fetch every file under a target directory.

One more caveat to fill in later: I haven't implemented resumable downloads here. I may add that next time, maybe; a rough sketch of one way to do it follows the main script below.

import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

# Target URL (an Apache-style directory listing)
base_url = "https://data.nemoarchive.org/biccn/grant/u01_devhu/kriegstein/transcriptome/scell/10x_v2/human/processed/counts/?C=N;O=D"

# Local root directory for saving files
save_dir = r"D:/project/developing_brain/datas/"
os.makedirs(save_dir, exist_ok=True)

# Request headers that mimic a browser
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}

def download_file(file_url, file_path):
    """Download a file and save it to the given path."""
    try:
        with requests.get(file_url, stream=True, headers=headers) as r:
            r.raise_for_status()
            with open(file_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
        print(f"Downloaded: {file_url} -> {file_path}")
    except Exception as e:
        print(f"Failed to download {file_url}: {e}")

def process_directory(url, local_path):
    """Recursively download every file and subdirectory of a directory listing."""
    # Fetch the directory listing
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')

    # Create the matching local directory
    os.makedirs(local_path, exist_ok=True)

    # Skip the first few links (column-sorting and parent-directory links)
    links = soup.find_all('a')
    links = links[5:]

    # Walk every remaining link in the listing
    for link in links:
        href = link.get('href')
        if not href:
            continue
        full_url = urljoin(url, href)
        if href.endswith('/'):  # Subdirectory
            subdir_name = href.rstrip('/')
            subdir_local_path = os.path.join(local_path, subdir_name)
            print(f"Entering directory: {full_url}")
            process_directory(full_url, subdir_local_path)  # Recurse into the subdirectory
        else:  # File
            file_name = os.path.basename(href)
            # Skip sorting links such as "?C=N;O=D"
            if '=' in file_name:
                continue
            file_local_path = os.path.join(local_path, file_name)
            print(f"Downloading file: {full_url}")
            download_file(full_url, file_local_path)

# Start from the root directory
process_directory(base_url, save_dir)
print("All files and directories have been downloaded.")