# JobSpy/jobspy/ziprecruiter/__init__.py

from __future__ import annotations
import json
import math
import re
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from bs4 import BeautifulSoup
from jobspy.ziprecruiter.constant import headers, get_cookie_data
from jobspy.util import (
    extract_emails_from_text,
    create_session,
    markdown_converter,
    remove_attributes,
    create_logger,
)
from jobspy.model import (
    JobPost,
    Compensation,
    Location,
    JobResponse,
    Country,
    DescriptionFormat,
    Scraper,
    ScraperInput,
    Site,
)
from jobspy.ziprecruiter.util import get_job_type_enum, add_params

log = create_logger("ZipRecruiter")


class ZipRecruiter(Scraper):
    base_url = "https://www.ziprecruiter.com"
    api_url = "https://api.ziprecruiter.com"

    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
"""
2023-10-30 17:57:36 -07:00
Initializes ZipRecruiterScraper with the ZipRecruiter job search url
"""
super().__init__(Site.ZIP_RECRUITER, proxies=proxies)
2024-02-14 14:04:23 -08:00
self.scraper_input = None
self.session = create_session(proxies=proxies, ca_cert=ca_cert)
2024-10-19 16:01:59 -07:00
self.session.headers.update(headers)
2024-02-14 14:04:23 -08:00
self._get_cookies()
2023-07-08 04:57:36 -07:00
2024-02-14 14:04:23 -08:00
self.delay = 5
2023-07-11 08:49:36 -07:00
self.jobs_per_page = 20
self.seen_urls = set()
2023-07-08 04:57:36 -07:00
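
    # Illustrative (hypothetical) construction with a proxy; per the type hint
    # above, create_session accepts a list of proxy URLs or a single string:
    #
    #     scraper = ZipRecruiter(proxies=["http://user:pass@proxy.example:8080"])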

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes ZipRecruiter for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        self.scraper_input = scraper_input
        job_list: list[JobPost] = []
        continue_token = None

        max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)
        for page in range(1, max_pages + 1):
            if len(job_list) >= scraper_input.results_wanted:
                break
            if page > 1:
                time.sleep(self.delay)
            log.info(f"search page: {page} / {max_pages}")
            jobs_on_page, continue_token = self._find_jobs_in_page(
                scraper_input, continue_token
            )
            if jobs_on_page:
                job_list.extend(jobs_on_page)
            else:
                break
            if not continue_token:
                break
        return JobResponse(jobs=job_list[: scraper_input.results_wanted])
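
    # Pagination note: the jobs-app endpoint pages with an opaque "continue"
    # token rather than page numbers; _find_jobs_in_page threads the token
    # back in via the continue_from query parameter until the API stops
    # returning one.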
    def _find_jobs_in_page(
        self, scraper_input: ScraperInput, continue_token: str | None = None
    ) -> tuple[list[JobPost], str | None]:
        """
        Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
        :param scraper_input:
        :param continue_token:
        :return: jobs found on page
        """
        jobs_list = []
        params = add_params(scraper_input)
        if continue_token:
            params["continue_from"] = continue_token
        try:
            res = self.session.get(f"{self.api_url}/jobs-app/jobs", params=params)
            if res.status_code not in range(200, 400):
                if res.status_code == 429:
                    err = "429 Response - Blocked by ZipRecruiter for too many requests"
                else:
                    err = f"ZipRecruiter response status code {res.status_code}"
                    err += f" with response: {res.text}"  # ZipRecruiter likely not available in EU
                log.error(err)
                return jobs_list, ""
        except Exception as e:
            if "Proxy responded with" in str(e):
                log.error("ZipRecruiter: Bad proxy")
            else:
                log.error(f"ZipRecruiter: {str(e)}")
            return jobs_list, ""
        res_data = res.json()
        jobs_list = res_data.get("jobs", [])
        next_continue_token = res_data.get("continue", None)
        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
            job_results = [executor.submit(self._process_job, job) for job in jobs_list]

        job_list = list(filter(None, (result.result() for result in job_results)))
        return job_list, next_continue_token
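
    # Illustrative shape of the payload consumed above and of each job dict
    # handed to _process_job (field names inferred from the keys this module
    # reads; not an official schema):
    #
    #     {
    #         "jobs": [
    #             {
    #                 "name": "...",
    #                 "listing_key": "...",
    #                 "job_description": "<p>...</p>",
    #                 "buyer_type": "...",
    #                 "hiring_company": {"name": "..."},
    #                 "job_city": "...", "job_state": "...", "job_country": "US",
    #                 "employment_type": "full_time",
    #                 "posted_time": "2024-01-01T00:00:00Z",
    #                 "compensation_interval": "annual",
    #                 "compensation_min": 100000, "compensation_max": 150000,
    #                 "compensation_currency": "USD",
    #             },
    #         ],
    #         "continue": "<opaque pagination token>",
    #     }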
    def _process_job(self, job: dict) -> JobPost | None:
        """
        Processes an individual job dict from the response
        """
        title = job.get("name")
        job_url = f"{self.base_url}/jobs//j?lvk={job['listing_key']}"
        if job_url in self.seen_urls:
            return
        self.seen_urls.add(job_url)

        description = job.get("job_description", "").strip()
        listing_type = job.get("buyer_type", "")
        description = (
            markdown_converter(description)
            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN
            else description
        )
        company = job.get("hiring_company", {}).get("name")
        country_value = "usa" if job.get("job_country") == "US" else "canada"
        country_enum = Country.from_string(country_value)
        location = Location(
            city=job.get("job_city"), state=job.get("job_state"), country=country_enum
        )
        job_type = get_job_type_enum(
            job.get("employment_type", "").replace("_", "").lower()
        )
        date_posted = datetime.fromisoformat(job["posted_time"].rstrip("Z")).date()
        comp_interval = job.get("compensation_interval")
        comp_interval = "yearly" if comp_interval == "annual" else comp_interval
        comp_min = int(job["compensation_min"]) if "compensation_min" in job else None
        comp_max = int(job["compensation_max"]) if "compensation_max" in job else None
        comp_currency = job.get("compensation_currency")
        description_full, job_url_direct = self._get_descr(job_url)
        return JobPost(
            id=f'zr-{job["listing_key"]}',
            title=title,
            company_name=company,
            location=location,
            job_type=job_type,
            compensation=Compensation(
                interval=comp_interval,
                min_amount=comp_min,
                max_amount=comp_max,
                currency=comp_currency,
            ),
            date_posted=date_posted,
            job_url=job_url,
            description=description_full if description_full else description,
            emails=extract_emails_from_text(description) if description else None,
            job_url_direct=job_url_direct,
            listing_type=listing_type,
        )
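
    # _get_descr fetches the public job page to recover a fuller HTML
    # description and, when present, a direct (non-ZipRecruiter) apply URL
    # embedded in the page's application/json script tag.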
    def _get_descr(self, job_url):
        res = self.session.get(job_url, allow_redirects=True)
        description_full = job_url_direct = None
        if res.ok:
            soup = BeautifulSoup(res.text, "html.parser")
            job_descr_div = soup.find("div", class_="job_description")
            company_descr_section = soup.find("section", class_="company_description")
            job_description_clean = (
                remove_attributes(job_descr_div).prettify(formatter="html")
                if job_descr_div
                else ""
            )
            company_description_clean = (
                remove_attributes(company_descr_section).prettify(formatter="html")
                if company_descr_section
                else ""
            )
            description_full = job_description_clean + company_description_clean

            try:
                script_tag = soup.find("script", type="application/json")
                if script_tag:
                    job_json = json.loads(script_tag.string)
                    job_url_val = job_json["model"].get("saveJobURL", "")
                    m = re.search(r"job_url=(.+)", job_url_val)
                    if m:
                        job_url_direct = m.group(1)
            except Exception:
                job_url_direct = None

            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
                description_full = markdown_converter(description_full)
        return description_full, job_url_direct
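
    # Illustrative (assumed) saveJobURL value that the regex above targets:
    #
    #     /candidate/save-job?job_url=https://careers.example.com/jobs/123
    #
    # in which case job_url_direct becomes everything after "job_url=".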

    def _get_cookies(self):
        """
        Sends a session event to the API with device properties.
        """
        url = f"{self.api_url}/jobs-app/event"
        self.session.post(url, data=get_cookie_data)
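

# Minimal smoke-test sketch (an illustrative assumption, not part of the
# original module); assumes ScraperInput in jobspy.model exposes the field
# names used below.
if __name__ == "__main__":
    result = ZipRecruiter().scrape(
        ScraperInput(
            site_type=[Site.ZIP_RECRUITER],
            search_term="software engineer",
            results_wanted=20,
        )
    )
    log.info(f"scraped {len(result.jobs)} jobs")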