Compare commits

...

5 Commits

Author         SHA1        Message                     Date
Cullen         e2f6885d61  chore: format               2023-10-28 16:52:05 -05:00
Cullen         8d65d1b652  [chore] version             2023-10-28 16:43:44 -05:00
Cullen         216d3fd39f  ziprecruiter: 5s delay      2023-10-28 16:41:32 -05:00
Cullen Watson  d3bfdc0a6e  ziprecruiter api (#63)      2023-10-28 16:17:28 -05:00
Cullen Watson  ba5ed803ca  use ziprecuriter api (#62)  2023-10-28 15:51:29 -05:00
4 changed files with 968 additions and 1118 deletions

poetry.lock (generated): 1793 changed lines; diff suppressed because it is too large.

pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.14"
+version = "1.1.17"
 description = "Job scraper for LinkedIn, Indeed & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"


@@ -177,8 +177,8 @@ class CompensationInterval(Enum):
 class Compensation(BaseModel):
     interval: Optional[CompensationInterval] = None
-    min_amount: int = None
-    max_amount: int = None
+    min_amount: int | None = None
+    max_amount: int | None = None
     currency: Optional[str] = "USD"
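The model change above matters for the new ZipRecruiter path: the API can omit compensation_min and compensation_max, so the scraper now passes None for those fields. A minimal sketch of why the annotation needs to allow None, using a hypothetical standalone model that mirrors only the changed fields (assumes Pydantic and Python 3.10+):

from typing import Optional

from pydantic import BaseModel


class Compensation(BaseModel):
    interval: Optional[str] = None   # an enum in the real model
    min_amount: int | None = None    # was `min_amount: int = None` before this change
    max_amount: int | None = None
    currency: Optional[str] = "USD"


# Under the old `int = None` annotation, an explicit None would typically fail
# validation; `int | None` lets jobs without a reported salary range pass through.
print(Compensation(min_amount=None, max_amount=None, currency="USD"))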


@@ -5,7 +5,7 @@ jobspy.scrapers.ziprecruiter
 This module contains routines to scrape ZipRecruiter.
 """
 import math
-import json
+import time
 import re
 from datetime import datetime, date
 from typing import Optional, Tuple, Any
@@ -42,23 +42,22 @@ class ZipRecruiterScraper(Scraper):
         self.jobs_per_page = 20
         self.seen_urls = set()
 
-    def find_jobs_in_page(
-        self, scraper_input: ScraperInput, page: int
-    ) -> list[JobPost]:
+    def find_jobs_in_page(self, scraper_input: ScraperInput, continue_token: Optional[str] = None) -> Tuple[list[JobPost], Optional[str]]:
         """
         Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
         :param scraper_input:
-        :param page:
         :return: jobs found on page
         """
-        session = create_session(self.proxy)
+        params = self.add_params(scraper_input)
+        if continue_token:
+            params['continue'] = continue_token
         try:
-            response = session.get(
-                f"{self.url}/jobs-search",
+            response = requests.get(
+                f"https://api.ziprecruiter.com/jobs-app/jobs",
                 headers=self.headers(),
-                params=self.add_params(scraper_input, page),
+                params=self.add_params(scraper_input),
                 allow_redirects=True,
-                timeout_seconds=10,
+                timeout=10,
             )
             if response.status_code != 200:
                 raise ZipRecruiterException(
@@ -68,118 +67,66 @@ class ZipRecruiterScraper(Scraper):
if "Proxy responded with non 200 code" in str(e): if "Proxy responded with non 200 code" in str(e):
raise ZipRecruiterException("bad proxy") raise ZipRecruiterException("bad proxy")
raise ZipRecruiterException(str(e)) raise ZipRecruiterException(str(e))
else:
soup = BeautifulSoup(response.text, "html.parser")
js_tag = soup.find("script", {"id": "js_variables"})
if js_tag: time.sleep(5)
page_json = json.loads(js_tag.string) response_data = response.json()
jobs_list = page_json.get("jobList") jobs_list = response_data.get("jobs", [])
if jobs_list: next_continue_token = response_data.get('continue', None)
page_variant = "javascript"
# print('type javascript', len(jobs_list))
else:
page_variant = "html_2"
jobs_list = soup.find_all("div", {"class": "job_content"})
# print('type 2 html', len(jobs_list))
else:
page_variant = "html_1"
jobs_list = soup.find_all("li", {"class": "job-listing"})
# print('type 1 html', len(jobs_list))
with ThreadPoolExecutor(max_workers=10) as executor: with ThreadPoolExecutor(max_workers=10) as executor:
if page_variant == "javascript":
job_results = [ job_results = [
executor.submit(self.process_job_javascript, job) executor.submit(self.process_job, job)
for job in jobs_list for job in jobs_list
] ]
elif page_variant == "html_1":
job_results = [
executor.submit(self.process_job_html_1, job) for job in jobs_list
]
elif page_variant == "html_2":
job_results = [
executor.submit(self.process_job_html_2, job) for job in jobs_list
]
job_list = [result.result() for result in job_results if result.result()] job_list = [result.result() for result in job_results if result.result()]
return job_list return job_list, next_continue_token
def scrape(self, scraper_input: ScraperInput) -> JobResponse: def scrape(self, scraper_input: ScraperInput) -> JobResponse:
""" """
Scrapes ZipRecruiter for jobs with scraper_input criteria Scrapes ZipRecruiter for jobs with scraper_input criteria.
:param scraper_input: :param scraper_input: Information about job search criteria.
:return: job_response :return: JobResponse containing a list of jobs.
""" """
start_page = ( job_list: list[JobPost] = []
(scraper_input.offset // self.jobs_per_page) + 1 continue_token = None
if scraper_input.offset
else 1
)
#: get first page to initialize session
job_list: list[JobPost] = self.find_jobs_in_page(scraper_input, start_page)
pages_to_process = max(
3, math.ceil(scraper_input.results_wanted / self.jobs_per_page)
)
with ThreadPoolExecutor(max_workers=10) as executor: max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)
futures: list[Future] = [
executor.submit(self.find_jobs_in_page, scraper_input, page)
for page in range(start_page + 1, start_page + pages_to_process + 2)
]
for future in futures: for page in range(1, max_pages + 1):
jobs = future.result() if len(job_list) >= scraper_input.results_wanted:
break
job_list += jobs jobs_on_page, continue_token = self.find_jobs_in_page(scraper_input, continue_token)
if jobs_on_page:
job_list.extend(jobs_on_page)
if not continue_token:
break
if len(job_list) > scraper_input.results_wanted:
job_list = job_list[:scraper_input.results_wanted]
job_list = job_list[: scraper_input.results_wanted]
return JobResponse(jobs=job_list) return JobResponse(jobs=job_list)
def process_job_javascript(self, job: dict) -> JobPost: def process_job(self, job: dict) -> JobPost:
"""the most common type of jobs page on ZR""" """the most common type of jobs page on ZR"""
title = job.get("Title") title = job.get("name")
job_url = job.get("JobURL") job_url = job.get("job_url")
description, updated_job_url = self.get_description(job_url)
# job_url = updated_job_url if updated_job_url else job_url
if description is None:
description = BeautifulSoup( description = BeautifulSoup(
job.get("Snippet", "").strip(), "html.parser" job.get("job_description", "").strip(), "html.parser"
).get_text() ).get_text()
company = job.get("OrgName") company = job['hiring_company'].get("name") if "hiring_company" in job else None
location = Location( location = Location(
city=job.get("City"), state=job.get("State"), country=Country.US_CANADA city=job.get("job_city"), state=job.get("job_state"), country='usa' if job.get("job_country") == 'US' else 'canada'
) )
job_type = ZipRecruiterScraper.get_job_type_enum( job_type = ZipRecruiterScraper.get_job_type_enum(
job.get("EmploymentType", "").replace("-", "").lower() job.get("employment_type", "").replace("_", "").lower()
) )
formatted_salary = job.get("FormattedSalaryShort", "")
salary_parts = formatted_salary.split(" ")
min_salary_str = salary_parts[0][1:].replace(",", "")
if "." in min_salary_str:
min_amount = int(float(min_salary_str) * 1000)
else:
min_amount = int(min_salary_str.replace("K", "000"))
if len(salary_parts) >= 3 and salary_parts[2].startswith("$"):
max_salary_str = salary_parts[2][1:].replace(",", "")
if "." in max_salary_str:
max_amount = int(float(max_salary_str) * 1000)
else:
max_amount = int(max_salary_str.replace("K", "000"))
else:
max_amount = 0
compensation = Compensation(
interval=CompensationInterval.YEARLY,
min_amount=min_amount,
max_amount=max_amount,
currency="USD/CAD",
)
save_job_url = job.get("SaveJobURL", "") save_job_url = job.get("SaveJobURL", "")
posted_time_match = re.search( posted_time_match = re.search(
r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url
@@ -196,7 +143,12 @@ class ZipRecruiterScraper(Scraper):
             company_name=company,
             location=location,
             job_type=job_type,
-            compensation=compensation,
+            compensation=Compensation(
+                interval="yearly" if job.get("compensation_interval") == "annual" else job.get("compensation_interval"),
+                min_amount=int(job["compensation_min"]) if "compensation_min" in job else None,
+                max_amount=int(job["compensation_max"]) if "compensation_max" in job else None,
+                currency=job.get("compensation_currency"),
+            ),
             date_posted=date_posted,
             job_url=job_url,
             description=description,
@@ -204,95 +156,6 @@ class ZipRecruiterScraper(Scraper):
             num_urgent_words=count_urgent_words(description) if description else None,
         )
 
-    def process_job_html_2(self, job: Tag) -> Optional[JobPost]:
-        """
-        second most common type of jobs page on ZR after process_job_javascript()
-        Parses a job from the job content tag for a second variat of HTML that ZR uses
-        :param job: BeautifulSoup Tag for one job post
-        :return JobPost
-        """
-        job_url = job.find("a", class_="job_link")["href"]
-        title = job.find("h2", class_="title").text
-        company = job.find("a", class_="company_name").text.strip()
-
-        description, updated_job_url = self.get_description(job_url)
-        # job_url = updated_job_url if updated_job_url else job_url
-        if description is None:
-            description = job.find("p", class_="job_snippet").get_text().strip()
-
-        job_type_text = job.find("li", class_="perk_item perk_type")
-        job_type = None
-        if job_type_text:
-            job_type_text = (
-                job_type_text.get_text()
-                .strip()
-                .lower()
-                .replace("-", "")
-                .replace(" ", "")
-            )
-            job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
-
-        date_posted = ZipRecruiterScraper.get_date_posted(job)
-
-        job_post = JobPost(
-            title=title,
-            company_name=company,
-            location=ZipRecruiterScraper.get_location(job),
-            job_type=job_type,
-            compensation=ZipRecruiterScraper.get_compensation(job),
-            date_posted=date_posted,
-            job_url=job_url,
-            description=description,
-            emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
-        )
-        return job_post
-
-    def process_job_html_1(self, job: Tag) -> Optional[JobPost]:
-        """
-        TODO this method isnt finished due to not encountering this type of html often
-        least common type of jobs page on ZR (rarely found)
-        Parses a job from the job content tag
-        :param job: BeautifulSoup Tag for one job post
-        :return JobPost
-        """
-        job_url = job.find("a", {"class": "job_link"})["href"]
-        # job_url = self.cleanurl(job.find("a", {"class": "job_link"})["href"])
-        if job_url in self.seen_urls:
-            return None
-
-        title = job.find("h2", {"class": "title"}).text
-        company = job.find("a", {"class": "company_name"}).text.strip()
-
-        description, _ = self.get_description(job_url)
-        # job_url = updated_job_url if updated_job_url else job_url
-        # get description from jobs listing page if get_description from the specific job page fails
-        if description is None:
-            description = job.find("p", {"class": "job_snippet"}).text.strip()
-
-        job_type_element = job.find("li", {"class": "perk_item perk_type"})
-        job_type = None
-        if job_type_element:
-            job_type_text = (
-                job_type_element.text.strip().lower().replace("_", "").replace(" ", "")
-            )
-            job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
-
-        date_posted = ZipRecruiterScraper.get_date_posted(job)
-
-        job_post = JobPost(
-            title=title,
-            description=description,
-            company_name=company,
-            location=ZipRecruiterScraper.get_location(job),
-            job_type=job_type,
-            compensation=ZipRecruiterScraper.get_compensation(job),
-            date_posted=date_posted,
-            job_url=job_url,
-            emails=extract_emails_from_text(description),
-            num_urgent_words=count_urgent_words(description),
-        )
-        return job_post
-
     @staticmethod
     def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
         for job_type in JobType:
@@ -300,39 +163,11 @@ class ZipRecruiterScraper(Scraper):
                 return [job_type]
         return None
 
-    def get_description(self, job_page_url: str) -> Tuple[str | None, str | None]:
-        """
-        Retrieves job description by going to the job page url
-        :param job_page_url:
-        :return: description or None, response url
-        """
-        try:
-            session = create_session(self.proxy)
-            response = session.get(
-                job_page_url,
-                headers=self.headers(),
-                allow_redirects=True,
-                timeout_seconds=5,
-            )
-            if response.status_code not in range(200, 400):
-                return None, None
-        except Exception as e:
-            return None, None
-
-        html_string = response.content
-        soup_job = BeautifulSoup(html_string, "html.parser")
-
-        job_description_div = soup_job.find("div", {"class": "job_description"})
-        if job_description_div:
-            return job_description_div.text.strip(), response.url
-        return None, response.url
-
     @staticmethod
-    def add_params(scraper_input, page) -> dict[str, str | Any]:
+    def add_params(scraper_input) -> dict[str, str | Any]:
         params = {
             "search": scraper_input.search_term,
             "location": scraper_input.location,
-            "page": page,
             "form": "jobs-landing",
         }
         job_type_value = None
@@ -465,11 +300,13 @@ class ZipRecruiterScraper(Scraper):
         :return: dict - Dictionary containing headers
         """
         return {
-            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
+            'Host': 'api.ziprecruiter.com',
+            'Cookie': 'ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38; SplitSV=2016-10-19%3AU2FsdGVkX19f9%2Bx70knxc%2FeR3xXR8lWoTcYfq5QjmLU%3D%0A; __cf_bm=qXim3DtLPbOL83GIp.ddQEOFVFTc1OBGPckiHYxcz3o-1698521532-0-AfUOCkgCZyVbiW1ziUwyefCfzNrJJTTKPYnif1FZGQkT60dMowmSU/Y/lP+WiygkFPW/KbYJmyc+MQSkkad5YygYaARflaRj51abnD+SyF9V; zglobalid=68d49bd5-0326-428e-aba8-8a04b64bc67c.af2d99ff7c03.653d61bb; ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38',
+            'accept': '*/*',
+            'x-zr-zva-override': '100000000;vid:ZT1huzm_EQlDTVEc',
+            'x-pushnotificationid': '0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0',
+            'x-deviceid': 'D77B3A92-E589-46A4-8A39-6EF6F1D86006',
+            'user-agent': 'Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)',
+            'authorization': 'Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==',
+            'accept-language': 'en-US,en;q=0.9'
         }
-
-    # @staticmethod
-    # def cleanurl(url) -> str:
-    #     parsed_url = urlparse(url)
-    #
-    #     return urlunparse((parsed_url.scheme, parsed_url.netloc, parsed_url.path, parsed_url.params, '', ''))
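Taken together, this compare replaces ZipRecruiter HTML scraping (three page variants plus a per-job description fetch) with a single JSON endpoint paginated by a continue token, adding a 5-second pause after each page. A hypothetical end-to-end check through the package's public helper; it assumes the scrape_jobs function exported by python-jobspy 1.1.17, the "zip_recruiter" site name, and the commonly documented parameter names, none of which are shown in this diff:

from jobspy import scrape_jobs

# 40 results at 20 jobs per API page means two requests, each followed by
# the 5-second pause added in commit 216d3fd39f.
jobs = scrape_jobs(
    site_name=["zip_recruiter"],
    search_term="software engineer",
    location="Austin, TX",
    results_wanted=40,
)
print(len(jobs))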