Simplify change detection using ETags

This commit is contained in:
Deeman
2025-07-26 22:08:35 +02:00
parent bd65ddcac8
commit 4fd1b96114
3 changed files with 55 additions and 22 deletions

View File

@@ -9,6 +9,7 @@ authors = [
requires-python = ">=3.13"
dependencies = [
"niquests>=3.14.1",
"pendulum>=3.1.0",
]
[build-system]

View File

@@ -3,6 +3,7 @@ import pathlib
import logging
import sys
from datetime import datetime
import pendulum
# NOTE(review): rendered diff hunk — the old setup (logger "PSD Extraction",
# lowercase `output_dir`) and the new setup (logger "PSDOnline Extractor",
# `OUTPUT_DIR` constant) appear interleaved below; only one of each pair
# exists in the actual file.
logging.basicConfig(
level=logging.INFO,
@@ -12,37 +13,39 @@ logging.basicConfig(
logging.StreamHandler(sys.stdout)
]
)
logger = logging.getLogger("PSD Extraction")
output_dir = pathlib.Path(__file__).parent / "data"
output_dir.mkdir(parents=True, exist_ok=True)
logger.info(f"Output dir: {output_dir}")
logger = logging.getLogger("PSDOnline Extractor")
# Local output directory for downloaded archives, created eagerly at import time.
OUTPUT_DIR = pathlib.Path(__file__).parent / "data"
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
logger.info(f"Output dir: {OUTPUT_DIR}")
#TODO: adapt to environment values, so this writes to s3 in prod
PSD_HISTORICAL_URL = "https://apps.fas.usda.gov/psdonline/downloads/archives/{year}/{month:02d}/psd_alldata_csv.zip"
PSD_LATEST_URL = "https://apps.fas.usda.gov/psdonline/downloads/psd_alldata_csv.zip"
# First month the USDA PSD archive provides data for (2006-08).
FIRST_YEAR = 2006
FIRST_MONTH = 8
def extract_psd_file(url: str, extract_to_path: pathlib.Path, http_session: niquests.Session):
    """Download the zip at *url* into *extract_to_path*, deduplicated by ETag.

    A HEAD request is issued first; the response's ETag is compared against
    the stems of zips already stored under OUTPUT_DIR, so an unchanged file
    is never downloaded twice. The stored file is named ``<etag>.zip``.

    On a 404, any other non-200 status, or a missing ETag header the function
    logs an error and returns without raising.
    """
    logger.info(f"Requesting file {url} ...")
    # Stems of previously stored zips are the (sanitized) ETags of past downloads.
    extracted_etags = [file.stem for file in OUTPUT_DIR.rglob("*.zip")]
    response = http_session.head(url)
    if response.status_code == 404:
        logger.error("File doesn't exist on server, received status code 404 Not Found")
        return
    elif response.status_code != 200:
        logger.error(f"Status code not ok, STATUS={response.status_code}")
        return
    # Servers are not required to send an ETag; .get() returning None would
    # otherwise crash on .replace().
    raw_etag = response.headers.get("etag")
    if raw_etag is None:
        logger.error("No ETag header in response, cannot deduplicate; skipping download")
        return
    # Strip quotes and replace ':' (present in some ETags, unsafe in filenames).
    etag = raw_etag.replace('"', "").replace(":", "_")
    if etag in extracted_etags:
        # Already downloaded this exact version — nothing to do.
        return
    response = http_session.get(url)
    extract_to_path = extract_to_path / f"{etag}.zip"
    logger.info(f"Storing file to {extract_to_path}")
    extract_to_path.parent.mkdir(parents=True, exist_ok=True)
    extract_to_path.write_bytes(response.content)
    logger.info("Download done.")
def extraction_status():
    """Return a list of (year, month) tuples for every zip already stored under output_dir."""
    months = []
    for zip_path in output_dir.rglob("*.zip"):
        # Path layout is .../<year>/<month>/<name>.zip — take the two
        # directory components directly above the file.
        year_part, month_part = str(zip_path).split("/")[-3:-1]
        months.append((int(year_part), int(month_part)))
    return months
# NOTE(review): rendered diff — `extract_historical_psd_dataset` (which
# pre-filtered months via `extraction_status()` and wrapped the download in
# try/except) is renamed/simplified to `extract_psd_dataset`; removed and
# added lines are interleaved below, and the `@@ -54,18 +57,20 @@` marker
# hides the middle of the function, so the full new body is not visible here.
def extract_historical_psd_dataset():
status = extraction_status()
def extract_psd_dataset():
today = datetime.now()
years = list(range(FIRST_YEAR, today.year+1))
for year in years:
@@ -54,18 +57,20 @@ def extract_historical_psd_dataset():
months = list(range(1,13))
for month in months:
if (year, month) in status:
continue
url = PSD_HISTORICAL_URL.format(year=year, month=month)
target_path = output_dir / f"{year}"/f"{month:02d}" / "psd_alldata_csv.zip"
target_dir = OUTPUT_DIR / f"{year}"/f"{month:02d}"
with niquests.Session() as session:
logger.info(f"Downloading psd_alldata_csv.zip for {year}/{month:02d}")
try:
extract_psd_file(url=url, http_session=session, extract_to_path=target_path)
except Exception as e:
logger.error("Error trying to download file. Likely the file does not exist", e)
extract_psd_file(url=url, http_session=session, extract_to_path=target_dir)
def parse_last_modified(last_modified: str) -> pendulum.DateTime:
    """Parse an HTTP Last-Modified header into a timezone-aware DateTime.

    Expects the RFC 7231 IMF-fixdate form, e.g.
    ``"Sat, 26 Jul 2025 20:08:35 GMT"``.

    Fix: the return annotation was ``pendulum.datetime`` — that is the
    factory *function*; the type is ``pendulum.DateTime``.
    """
    # Drop the leading weekday ("Sat,"), which is redundant for parsing.
    date_part = last_modified.split(",")[1].strip()
    day, month, year, time, timezone_name = date_part.split(" ")
    # Month is the abbreviated English name ("Jul"), hence the MMM token below.
    stamp = f"{year}-{month}-{day}T{time}"
    return pendulum.from_format(stamp, fmt="YYYY-MMM-DDTHH:mm:ss", tz=timezone_name)
# NOTE(review): rendered diff — the entry point call is renamed from
# extract_historical_psd_dataset() (removed) to extract_psd_dataset()
# (added); only one call exists in the actual file.
if __name__ == "__main__":
extract_historical_psd_dataset()
extract_psd_dataset()