mirror of https://github.com/Bunsly/JobSpy
parent ff1eb0f7b0
commit ba5ed803ca
pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.14"
+version = "1.1.15"
 description = "Job scraper for LinkedIn, Indeed & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"

@@ -42,23 +42,22 @@ class ZipRecruiterScraper(Scraper):
         self.jobs_per_page = 20
         self.seen_urls = set()
 
-    def find_jobs_in_page(
-        self, scraper_input: ScraperInput, page: int
-    ) -> list[JobPost]:
+    def find_jobs_in_page(self, scraper_input: ScraperInput, continue_token: Optional[str] = None) -> Tuple[list[JobPost], Optional[str]]:
         """
         Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
         :param scraper_input:
-        :param page:
         :return: jobs found on page
         """
-        session = create_session(self.proxy)
+        params = self.add_params(scraper_input)
+        if continue_token:
+            params['continue'] = continue_token
         try:
-            response = session.get(
-                f"{self.url}/jobs-search",
+            response = requests.get(
+                f"https://api.ziprecruiter.com/jobs-app/jobs",
                 headers=self.headers(),
-                params=self.add_params(scraper_input, page),
+                params=self.add_params(scraper_input),
                 allow_redirects=True,
-                timeout_seconds=10,
+                timeout=10,
             )
             if response.status_code != 200:
                 raise ZipRecruiterException(
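
The hunk above is the heart of the commit: instead of fetching and parsing ZipRecruiter's HTML search pages, find_jobs_in_page now calls the mobile-app API, which pages with an opaque 'continue' cursor rather than numbered pages. A minimal sketch of that request pattern (the endpoint, the 'continue' parameter, and the timeout come from the diff; fetch_page is a hypothetical helper, and real requests are rejected without the app headers built in headers()):

    from typing import Optional
    import requests

    def fetch_page(search: str, location: str,
                   continue_token: Optional[str] = None) -> tuple[list[dict], Optional[str]]:
        # Cursor pagination: echo back the token from the previous response.
        params = {"search": search, "location": location}
        if continue_token:
            params["continue"] = continue_token
        response = requests.get(
            "https://api.ziprecruiter.com/jobs-app/jobs",
            params=params,
            timeout=10,
        )
        response.raise_for_status()
        data = response.json()
        # Each page carries its jobs plus the cursor for the next page
        # (absent on the last page).
        return data.get("jobs", []), data.get("continue")
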
@@ -68,118 +67,65 @@ class ZipRecruiterScraper(Scraper):
             if "Proxy responded with non 200 code" in str(e):
                 raise ZipRecruiterException("bad proxy")
             raise ZipRecruiterException(str(e))
         else:
-            soup = BeautifulSoup(response.text, "html.parser")
-            js_tag = soup.find("script", {"id": "js_variables"})
-
-            if js_tag:
-                page_json = json.loads(js_tag.string)
-                jobs_list = page_json.get("jobList")
-                if jobs_list:
-                    page_variant = "javascript"
-                    # print('type javascript', len(jobs_list))
-                else:
-                    page_variant = "html_2"
-                    jobs_list = soup.find_all("div", {"class": "job_content"})
-                    # print('type 2 html', len(jobs_list))
-            else:
-                page_variant = "html_1"
-                jobs_list = soup.find_all("li", {"class": "job-listing"})
-                # print('type 1 html', len(jobs_list))
+            response_data = response.json()
+            jobs_list = response_data.get("jobs", [])
+            next_continue_token = response_data.get('continue', None)
 
         with ThreadPoolExecutor(max_workers=10) as executor:
-            if page_variant == "javascript":
-                job_results = [
-                    executor.submit(self.process_job_javascript, job)
-                    for job in jobs_list
-                ]
-            elif page_variant == "html_1":
-                job_results = [
-                    executor.submit(self.process_job_html_1, job) for job in jobs_list
-                ]
-            elif page_variant == "html_2":
-                job_results = [
-                    executor.submit(self.process_job_html_2, job) for job in jobs_list
-                ]
+            job_results = [
+                executor.submit(self.process_job, job)
+                for job in jobs_list
+            ]
 
         job_list = [result.result() for result in job_results if result.result()]
-        return job_list
+        return job_list, next_continue_token
 
     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
-        Scrapes ZipRecruiter for jobs with scraper_input criteria
-        :param scraper_input:
-        :return: job_response
+        Scrapes ZipRecruiter for jobs with scraper_input criteria.
+        :param scraper_input: Information about job search criteria.
+        :return: JobResponse containing a list of jobs.
         """
-        start_page = (
-            (scraper_input.offset // self.jobs_per_page) + 1
-            if scraper_input.offset
-            else 1
-        )
-        #: get first page to initialize session
-        job_list: list[JobPost] = self.find_jobs_in_page(scraper_input, start_page)
-        pages_to_process = max(
-            3, math.ceil(scraper_input.results_wanted / self.jobs_per_page)
-        )
+        job_list: list[JobPost] = []
+        continue_token = None
 
-        with ThreadPoolExecutor(max_workers=10) as executor:
-            futures: list[Future] = [
-                executor.submit(self.find_jobs_in_page, scraper_input, page)
-                for page in range(start_page + 1, start_page + pages_to_process + 2)
-            ]
+        max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)
 
-        for future in futures:
-            jobs = future.result()
-            job_list += jobs
+        for page in range(1, max_pages + 1):
+            if len(job_list) >= scraper_input.results_wanted:
+                break
+
+            jobs_on_page, continue_token = self.find_jobs_in_page(scraper_input, continue_token)
+            if jobs_on_page:
+                job_list.extend(jobs_on_page)
+
+            if not continue_token:
+                break
 
-        if len(job_list) > scraper_input.results_wanted:
-            job_list = job_list[:scraper_input.results_wanted]
+
+        job_list = job_list[: scraper_input.results_wanted]
         return JobResponse(jobs=job_list)
 
-    def process_job_javascript(self, job: dict) -> JobPost:
-        """the most common type of jobs page on ZR"""
-        title = job.get("Title")
-        job_url = job.get("JobURL")
+    def process_job(self, job: dict) -> JobPost:
+        title = job.get("name")
+        job_url = job.get("job_url")
 
         description, updated_job_url = self.get_description(job_url)
         # job_url = updated_job_url if updated_job_url else job_url
         if description is None:
-            description = BeautifulSoup(
-                job.get("Snippet", "").strip(), "html.parser"
-            ).get_text()
+            description = BeautifulSoup(
+                job.get("job_description", "").strip(), "html.parser"
+            ).get_text()
 
-        company = job.get("OrgName")
+        company = job.get("source")
         location = Location(
-            city=job.get("City"), state=job.get("State"), country=Country.US_CANADA
+            city=job.get("job_city"), state=job.get("job_state"), country='usa' if job.get("job_country") == 'US' else 'canada'
         )
         job_type = ZipRecruiterScraper.get_job_type_enum(
-            job.get("EmploymentType", "").replace("-", "").lower()
+            job.get("employment_type", "").replace("_", "").lower()
         )
 
-        formatted_salary = job.get("FormattedSalaryShort", "")
-        salary_parts = formatted_salary.split(" ")
-
-        min_salary_str = salary_parts[0][1:].replace(",", "")
-        if "." in min_salary_str:
-            min_amount = int(float(min_salary_str) * 1000)
-        else:
-            min_amount = int(min_salary_str.replace("K", "000"))
-
-        if len(salary_parts) >= 3 and salary_parts[2].startswith("$"):
-            max_salary_str = salary_parts[2][1:].replace(",", "")
-            if "." in max_salary_str:
-                max_amount = int(float(max_salary_str) * 1000)
-            else:
-                max_amount = int(max_salary_str.replace("K", "000"))
-        else:
-            max_amount = 0
-
-        compensation = Compensation(
-            interval=CompensationInterval.YEARLY,
-            min_amount=min_amount,
-            max_amount=max_amount,
-            currency="USD/CAD",
-        )
         save_job_url = job.get("SaveJobURL", "")
         posted_time_match = re.search(
             r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url
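
Because each request now depends on the cursor returned by the previous one, the rewritten scrape() fetches pages sequentially instead of fanning out one thread per page; threading survives only inside find_jobs_in_page, where the jobs of a single page are processed concurrently. The loop pattern in isolation (a sketch reusing the hypothetical fetch_page above):

    import math

    def collect_jobs(search: str, location: str,
                     results_wanted: int, jobs_per_page: int = 20) -> list[dict]:
        job_list: list[dict] = []
        continue_token = None
        max_pages = math.ceil(results_wanted / jobs_per_page)

        for _ in range(max_pages):
            if len(job_list) >= results_wanted:
                break
            jobs, continue_token = fetch_page(search, location, continue_token)
            job_list.extend(jobs)
            if not continue_token:  # the API returns no cursor on the last page
                break

        return job_list[:results_wanted]
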
@@ -196,7 +142,7 @@ class ZipRecruiterScraper(Scraper):
             company_name=company,
             location=location,
             job_type=job_type,
-            compensation=compensation,
+            # compensation=compensation,
             date_posted=date_posted,
             job_url=job_url,
             description=description,
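
compensation is commented out above because its value came from the HTML-era FormattedSalaryShort string, which the API payload no longer provides (the parser is deleted earlier in this diff). For reference, that parser turned strings like "$40K to $60K" into yearly amounts; a condensed sketch of the removed logic:

    def parse_short_salary(formatted: str) -> tuple[int, int]:
        # "$40K to $60K" -> (40000, 60000); a lone "$60K" -> (60000, 0)
        parts = formatted.split(" ")

        def to_amount(part: str) -> int:
            value = part[1:].replace(",", "")  # drop "$" and thousands separators
            if "." in value:
                return int(float(value) * 1000)
            return int(value.replace("K", "000"))

        min_amount = to_amount(parts[0])
        if len(parts) >= 3 and parts[2].startswith("$"):
            max_amount = to_amount(parts[2])
        else:
            max_amount = 0
        return min_amount, max_amount

    assert parse_short_salary("$40K to $60K") == (40000, 60000)
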
@@ -204,95 +150,6 @@ class ZipRecruiterScraper(Scraper):
             num_urgent_words=count_urgent_words(description) if description else None,
         )
 
-    def process_job_html_2(self, job: Tag) -> Optional[JobPost]:
-        """
-        second most common type of jobs page on ZR after process_job_javascript()
-        Parses a job from the job content tag for a second variat of HTML that ZR uses
-        :param job: BeautifulSoup Tag for one job post
-        :return JobPost
-        """
-        job_url = job.find("a", class_="job_link")["href"]
-        title = job.find("h2", class_="title").text
-        company = job.find("a", class_="company_name").text.strip()
-
-        description, updated_job_url = self.get_description(job_url)
-        # job_url = updated_job_url if updated_job_url else job_url
-        if description is None:
-            description = job.find("p", class_="job_snippet").get_text().strip()
-
-        job_type_text = job.find("li", class_="perk_item perk_type")
-        job_type = None
-        if job_type_text:
-            job_type_text = (
-                job_type_text.get_text()
-                .strip()
-                .lower()
-                .replace("-", "")
-                .replace(" ", "")
-            )
-            job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
-        date_posted = ZipRecruiterScraper.get_date_posted(job)
-
-        job_post = JobPost(
-            title=title,
-            company_name=company,
-            location=ZipRecruiterScraper.get_location(job),
-            job_type=job_type,
-            compensation=ZipRecruiterScraper.get_compensation(job),
-            date_posted=date_posted,
-            job_url=job_url,
-            description=description,
-            emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
-        )
-        return job_post
-
-    def process_job_html_1(self, job: Tag) -> Optional[JobPost]:
-        """
-        TODO this method isnt finished due to not encountering this type of html often
-        least common type of jobs page on ZR (rarely found)
-        Parses a job from the job content tag
-        :param job: BeautifulSoup Tag for one job post
-        :return JobPost
-        """
-        job_url = job.find("a", {"class": "job_link"})["href"]
-        # job_url = self.cleanurl(job.find("a", {"class": "job_link"})["href"])
-        if job_url in self.seen_urls:
-            return None
-
-        title = job.find("h2", {"class": "title"}).text
-        company = job.find("a", {"class": "company_name"}).text.strip()
-
-        description, _ = self.get_description(job_url)
-        # job_url = updated_job_url if updated_job_url else job_url
-        # get description from jobs listing page if get_description from the specific job page fails
-        if description is None:
-            description = job.find("p", {"class": "job_snippet"}).text.strip()
-
-        job_type_element = job.find("li", {"class": "perk_item perk_type"})
-        job_type = None
-        if job_type_element:
-            job_type_text = (
-                job_type_element.text.strip().lower().replace("_", "").replace(" ", "")
-            )
-            job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
-
-        date_posted = ZipRecruiterScraper.get_date_posted(job)
-
-        job_post = JobPost(
-            title=title,
-            description=description,
-            company_name=company,
-            location=ZipRecruiterScraper.get_location(job),
-            job_type=job_type,
-            compensation=ZipRecruiterScraper.get_compensation(job),
-            date_posted=date_posted,
-            job_url=job_url,
-            emails=extract_emails_from_text(description),
-            num_urgent_words=count_urgent_words(description),
-        )
-        return job_post
-
     @staticmethod
     def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
         for job_type in JobType:
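
With both HTML processors deleted above, every job flows through the single process_job, which reads the API's snake_case payload. The field names it expects, shown as an illustrative dict (the keys come from the diff; the values are made up):

    sample_job = {
        "name": "Software Engineer",                    # -> JobPost.title
        "job_url": "https://www.ziprecruiter.com/...",  # -> JobPost.job_url
        "job_description": "<p>HTML description</p>",   # stripped with BeautifulSoup
        "source": "Example Corp",                       # -> company_name
        "job_city": "Austin",
        "job_state": "TX",
        "job_country": "US",             # 'US' maps to 'usa', anything else to 'canada'
        "employment_type": "full_time",  # underscores removed before the enum lookup
    }
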
@@ -300,39 +157,11 @@ class ZipRecruiterScraper(Scraper):
                 return [job_type]
         return None
 
-    def get_description(self, job_page_url: str) -> Tuple[str | None, str | None]:
-        """
-        Retrieves job description by going to the job page url
-        :param job_page_url:
-        :return: description or None, response url
-        """
-        try:
-            session = create_session(self.proxy)
-            response = session.get(
-                job_page_url,
-                headers=self.headers(),
-                allow_redirects=True,
-                timeout_seconds=5,
-            )
-            if response.status_code not in range(200, 400):
-                return None, None
-        except Exception as e:
-            return None, None
-
-        html_string = response.content
-        soup_job = BeautifulSoup(html_string, "html.parser")
-
-        job_description_div = soup_job.find("div", {"class": "job_description"})
-        if job_description_div:
-            return job_description_div.text.strip(), response.url
-        return None, response.url
-
     @staticmethod
-    def add_params(scraper_input, page) -> dict[str, str | Any]:
+    def add_params(scraper_input) -> dict[str, str | Any]:
         params = {
             "search": scraper_input.search_term,
             "location": scraper_input.location,
-            "page": page,
             "form": "jobs-landing",
         }
         job_type_value = None
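
add_params loses its page argument to match: paging is carried entirely by the continue token that find_jobs_in_page layers on top of these base parameters. A typical base query now looks like this (a sketch; the rest of the method, which starts at job_type_value above, adds job-type and other filters):

    params = {
        "search": "software engineer",
        "location": "Austin, TX",
        "form": "jobs-landing",
    }
    # find_jobs_in_page() adds the cursor for every page after the first:
    # params["continue"] = "<token from the previous response>"
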
@@ -465,11 +294,13 @@ class ZipRecruiterScraper(Scraper):
         :return: dict - Dictionary containing headers
         """
         return {
-            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
+            'Host': 'api.ziprecruiter.com',
+            'Cookie': 'ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38; SplitSV=2016-10-19%3AU2FsdGVkX19f9%2Bx70knxc%2FeR3xXR8lWoTcYfq5QjmLU%3D%0A; __cf_bm=qXim3DtLPbOL83GIp.ddQEOFVFTc1OBGPckiHYxcz3o-1698521532-0-AfUOCkgCZyVbiW1ziUwyefCfzNrJJTTKPYnif1FZGQkT60dMowmSU/Y/lP+WiygkFPW/KbYJmyc+MQSkkad5YygYaARflaRj51abnD+SyF9V; zglobalid=68d49bd5-0326-428e-aba8-8a04b64bc67c.af2d99ff7c03.653d61bb; ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38',
+            'accept': '*/*',
+            'x-zr-zva-override': '100000000;vid:ZT1huzm_EQlDTVEc',
+            'x-pushnotificationid': '0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0',
+            'x-deviceid': 'D77B3A92-E589-46A4-8A39-6EF6F1D86006',
+            'user-agent': 'Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)',
+            'authorization': 'Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==',
+            'accept-language': 'en-US,en;q=0.9'
         }
-
-        # @staticmethod
-        # def cleanurl(url) -> str:
-        #     parsed_url = urlparse(url)
-        #
-        #     return urlunparse((parsed_url.scheme, parsed_url.netloc, parsed_url.path, parsed_url.params, '', ''))
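
The replacement headers impersonate the ZipRecruiter iOS app ('Job Search/87.0') rather than a desktop browser. The authorization header is plain HTTP Basic auth; decoding the base64 value from the diff shows it is an app client id with an empty password:

    import base64

    token = "YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg=="
    print(base64.b64decode(token).decode())
    # a0ef32d6-7b4c-41ed-a283-a25403324a72: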