Proxy support (#44)

* add proxy support

* return as data frame
Cullen Watson 2023-09-07 11:28:17 -05:00 committed by GitHub
parent a37e7f235e
commit 59f739018a
10 changed files with 366 additions and 319 deletions
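
Taken together: scrape_jobs() now accepts an optional proxy argument and returns a single pandas DataFrame, replacing the old ScrapeResults tuple and its errors frame. A minimal sketch of the new call shape (the proxy URL is a placeholder):

from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed", "linkedin", "zip_recruiter"],
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=10,
    country_indeed="USA",  # only needed for indeed
    proxy="http://user:pass@proxy.example.com:20001",  # placeholder; socks5://, http://, https:// all accepted
)
print(jobs.head())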

View File

@@ -33,15 +33,20 @@
"outputs": [],
"source": [
"# example 1 (no hyperlinks, USA)\n",
"result = scrape_jobs(\n",
"jobs = scrape_jobs(\n",
" site_name=[\"linkedin\", \"zip_recruiter\"],\n",
" location='san francisco',\n",
" search_term=\"engineer\",\n",
" results_wanted=5, \n",
" results_wanted=5,\n",
"\n",
" # use if you want to use a proxy\n",
" # proxy=\"socks5://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001\",\n",
" # proxy=\"http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001\",\n",
" # proxy=\"https://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001\",\n",
"\n",
")\n",
"\n",
"display(result.jobs)\n",
"display(result.errors)"
"display(jobs)"
]
},
{
@@ -52,7 +57,7 @@
"outputs": [],
"source": [
"# example 2 - remote USA & hyperlinks\n",
"result = scrape_jobs(\n",
"jobs = scrape_jobs(\n",
" site_name=[\"linkedin\", \"zip_recruiter\", \"indeed\"],\n",
" # location='san francisco',\n",
" search_term=\"software engineer\",\n",
@@ -71,11 +76,10 @@
"outputs": [],
"source": [
"# use if hyperlinks=True\n",
"html = result.jobs.to_html(escape=False)\n",
"html = jobs.to_html(escape=False)\n",
"# change max-width: 200px to show more or less of the content\n",
"truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
"display(HTML(truncate_width))\n",
"display(result.errors)"
"display(HTML(truncate_width))"
]
},
{
@@ -86,13 +90,16 @@
"outputs": [],
"source": [
"# example 3 - with hyperlinks, international - linkedin (no zip_recruiter)\n",
"result = scrape_jobs(\n",
"jobs = scrape_jobs(\n",
" site_name=[\"linkedin\"],\n",
" location='berlin',\n",
" search_term=\"engineer\",\n",
" hyperlinks=True,\n",
" results_wanted=5,\n",
" easy_apply=True\n",
"\n",
"\n",
"\n",
")"
]
},
@@ -104,11 +111,10 @@
"outputs": [],
"source": [
"# use if hyperlinks=True\n",
"html = result.jobs.to_html(escape=False)\n",
"html = jobs.to_html(escape=False)\n",
"# change max-width: 200px to show more or less of the content\n",
"truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
"display(HTML(truncate_width))\n",
"display(result.errors)"
"display(HTML(truncate_width))"
]
},
{
@@ -136,11 +142,10 @@
"outputs": [],
"source": [
"# use if hyperlinks=True\n",
"html = result.jobs.to_html(escape=False)\n",
"html = jobs.to_html(escape=False)\n",
"# change max-width: 200px to show more or less of the content\n",
"truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
"display(HTML(truncate_width))\n",
"display(result.errors)"
"display(HTML(truncate_width))"
]
}
],

View File

@@ -26,13 +26,18 @@ pip install python-jobspy
from jobspy import scrape_jobs
import pandas as pd
result: pd.DataFrame = scrape_jobs(
jobs: pd.DataFrame = scrape_jobs(
site_name=["indeed", "linkedin", "zip_recruiter"],
search_term="software engineer",
location="Dallas, TX",
results_wanted=10,
country_indeed='USA' # only needed for indeed
# optional: route requests through a proxy
# proxy="socks5://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
# proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
# proxy="https://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
)
pd.set_option('display.max_columns', None)
@@ -41,12 +46,12 @@ pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', 50) # set to 0 to see full job url / desc
#1 output
print(result.jobs)
print(result.errors)
print(jobs)
#2 display in Jupyter Notebook
#display(result.jobs)
#display(result.errors)
#display(jobs)
#3 output to .csv
#jobs.to_csv('jobs.csv', index=False)

View File

@@ -1,6 +1,6 @@
[tool.poetry]
name = "python-jobspy"
version = "1.1.2"
version = "1.1.3"
description = "Job scraper for LinkedIn, Indeed & ZipRecruiter"
authors = ["Zachary Hampton <zachary@zacharysproducts.com>", "Cullen Watson <cullen@cullen.ai>"]
readme = "README.md"

View File

@@ -1,13 +1,19 @@
import pandas as pd
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from typing import List, Tuple, NamedTuple, Dict
from typing import List, Tuple, NamedTuple, Dict, Optional
import traceback
from .jobs import JobType, Location
from .scrapers.indeed import IndeedScraper
from .scrapers.ziprecruiter import ZipRecruiterScraper
from .scrapers.linkedin import LinkedInScraper
from .scrapers import ScraperInput, Site, JobResponse, Country
from .scrapers.exceptions import (
LinkedInException,
IndeedException,
ZipRecruiterException,
)
SCRAPER_MAPPING = {
Site.LINKEDIN: LinkedInScraper,
@@ -16,11 +22,6 @@ SCRAPER_MAPPING = {
}
class ScrapeResults(NamedTuple):
jobs: pd.DataFrame
errors: pd.DataFrame
def _map_str_to_site(site_name: str) -> Site:
return Site[site_name.upper()]
@@ -35,17 +36,21 @@ def scrape_jobs(
easy_apply: bool = False, # linkedin
results_wanted: int = 15,
country_indeed: str = "usa",
hyperlinks: bool = False
) -> ScrapeResults:
hyperlinks: bool = False,
proxy: Optional[str] = None,
) -> pd.DataFrame:
"""
Asynchronously scrapes job data from multiple job sites.
Concurrently scrapes job data from multiple job sites.
:return: pandas DataFrame containing job data
"""
if type(site_name) == str:
site_type = [_map_str_to_site(site_name)]
else: #: if type(site_name) == list
site_type = [_map_str_to_site(site) if type(site) == str else site_name for site in site_name]
site_type = [
_map_str_to_site(site) if type(site) == str else site
for site in site_name
]
country_enum = Country.from_string(country_indeed)
@@ -62,99 +67,95 @@ def scrape_jobs(
)
def scrape_site(site: Site) -> Tuple[str, JobResponse]:
try:
scraper_class = SCRAPER_MAPPING[site]
scraper = scraper_class()
scraper = scraper_class(proxy=proxy)
try:
scraped_data: JobResponse = scraper.scrape(scraper_input)
except (LinkedInException, IndeedException, ZipRecruiterException) as lie:
raise lie
except Exception as e:
scraped_data = JobResponse(jobs=[], error=str(e), success=False)
# unhandled exceptions
if site == Site.LINKEDIN:
raise LinkedInException()
if site == Site.INDEED:
raise IndeedException()
if site == Site.ZIP_RECRUITER:
raise ZipRecruiterException()
else:
raise e
return site.value, scraped_data
results, errors = {}, {}
site_to_jobs_dict = {}
def worker(site):
site_value, scraped_data = scrape_site(site)
return site_value, scraped_data
with ThreadPoolExecutor() as executor:
future_to_site = {executor.submit(worker, site): site for site in scraper_input.site_type}
future_to_site = {
executor.submit(worker, site): site for site in scraper_input.site_type
}
for future in concurrent.futures.as_completed(future_to_site):
site_value, scraped_data = future.result()
results[site_value] = scraped_data
if scraped_data.error:
errors[site_value] = scraped_data.error
site_to_jobs_dict[site_value] = scraped_data
dfs = []
jobs_dfs: List[pd.DataFrame] = []
for site, job_response in results.items():
for site, job_response in site_to_jobs_dict.items():
for job in job_response.jobs:
data = job.dict()
data["job_url_hyper"] = f'<a href="{data["job_url"]}">{data["job_url"]}</a>'
data["site"] = site
data["company"] = data["company_name"]
if data["job_type"]:
job_data = job.dict()
job_data[
"job_url_hyper"
] = f'<a href="{job_data["job_url"]}">{job_data["job_url"]}</a>'
job_data["site"] = site
job_data["company"] = job_data["company_name"]
if job_data["job_type"]:
# Take the first value from the job type tuple
data["job_type"] = data["job_type"].value[0]
job_data["job_type"] = job_data["job_type"].value[0]
else:
data["job_type"] = None
job_data["job_type"] = None
data["location"] = Location(**data["location"]).display_location()
job_data["location"] = Location(**job_data["location"]).display_location()
compensation_obj = data.get("compensation")
compensation_obj = job_data.get("compensation")
if compensation_obj and isinstance(compensation_obj, dict):
data["interval"] = (
job_data["interval"] = (
compensation_obj.get("interval").value
if compensation_obj.get("interval")
else None
)
data["min_amount"] = compensation_obj.get("min_amount")
data["max_amount"] = compensation_obj.get("max_amount")
data["currency"] = compensation_obj.get("currency", "USD")
job_data["min_amount"] = compensation_obj.get("min_amount")
job_data["max_amount"] = compensation_obj.get("max_amount")
job_data["currency"] = compensation_obj.get("currency", "USD")
else:
data["interval"] = None
data["min_amount"] = None
data["max_amount"] = None
data["currency"] = None
job_data["interval"] = None
job_data["min_amount"] = None
job_data["max_amount"] = None
job_data["currency"] = None
job_df = pd.DataFrame([data])
dfs.append(job_df)
job_df = pd.DataFrame([job_data])
jobs_dfs.append(job_df)
errors_list = [(key, value) for key, value in errors.items()]
errors_df = pd.DataFrame(errors_list, columns=["Site", "Error"])
if dfs:
df = pd.concat(dfs, ignore_index=True)
if hyperlinks:
desired_order = [
if jobs_dfs:
jobs_df = pd.concat(jobs_dfs, ignore_index=True)
desired_order: List[str] = [
"site",
"title",
"company",
"location",
"date_posted",
"job_type",
"interval",
"min_amount",
"max_amount",
"currency",
"job_url_hyper",
"job_url_hyper" if hyperlinks else "job_url",
"description",
]
jobs_formatted_df = jobs_df[desired_order]
else:
desired_order = [
"site",
"title",
"company",
"location",
"job_type",
"interval",
"min_amount",
"max_amount",
"currency",
"job_url",
"description",
]
df = df[desired_order]
else:
df = pd.DataFrame()
jobs_formatted_df = pd.DataFrame()
return ScrapeResults(jobs=df, errors=errors_df)
return jobs_formatted_df
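
Because scrape_jobs() now returns a plain DataFrame in the column order above, callers can use ordinary pandas operations on the result. A small illustrative sketch (the "fulltime" value follows the JobType-enum mapping above):

from jobspy import scrape_jobs

jobs = scrape_jobs(site_name=["indeed"], search_term="engineer", location="Dallas, TX")

# filter, sort, and export with plain pandas
full_time = jobs[jobs["job_type"] == "fulltime"]
full_time.sort_values("date_posted", ascending=False).to_csv("jobs.csv", index=False)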

View File

@@ -189,22 +189,4 @@ class JobPost(BaseModel):
class JobResponse(BaseModel):
success: bool
error: str = None
total_results: Optional[int] = None
jobs: list[JobPost] = []
returned_results: int = None
@validator("returned_results", pre=True, always=True)
def set_returned_results(cls, v, values):
jobs_list = values.get("jobs")
if v is None:
if jobs_list is not None:
return len(jobs_list)
else:
return 0
return v
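
The hunk arithmetic (22 lines down to 4) matches keeping only the class line and the two surviving fields, so the slimmed model presumably reads:

class JobResponse(BaseModel):
    total_results: Optional[int] = None
    jobs: list[JobPost] = []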

View File

@@ -2,11 +2,6 @@ from ..jobs import Enum, BaseModel, JobType, JobResponse, Country
from typing import List, Optional, Any
class StatusException(Exception):
def __init__(self, status_code: int):
self.status_code = status_code
class Site(Enum):
LINKEDIN = "linkedin"
INDEED = "indeed"
@@ -28,8 +23,9 @@ class ScraperInput(BaseModel):
class Scraper:
def __init__(self, site: Site):
def __init__(self, site: Site, proxy: Optional[str] = None):
self.site = site
self.proxy = {"http": proxy, "https": proxy} if proxy else None
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
...
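
The conditional assigned to self.proxy simply expands one proxy URL into the per-scheme mapping that requests-style clients accept. A standalone sketch of the same idea (build_proxies is an illustrative name, not part of the commit):

from typing import Optional

def build_proxies(proxy: Optional[str]) -> Optional[dict]:
    # reuse a single proxy URL for both schemes; None disables proxying
    return {"http": proxy, "https": proxy} if proxy else None

assert build_proxies(None) is None
assert build_proxies("http://user:pass@host:20001") == {
    "http": "http://user:pass@host:20001",
    "https": "http://user:pass@host:20001",
}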

View File

@@ -0,0 +1,18 @@
"""
jobspy.scrapers.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Scrapers' exceptions.
"""
class LinkedInException(Exception):
"""Failed to scrape LinkedIn"""
class IndeedException(Exception):
"""Failed to scrape Indeed"""
class ZipRecruiterException(Exception):
"""Failed to scrape ZipRecruiter"""

View File

@@ -1,3 +1,9 @@
"""
jobspy.scrapers.indeed
~~~~~~~~~~~~~~~~~~~
This module contains routines to scrape Indeed.
"""
import re
import math
import io
@@ -12,6 +18,7 @@ from bs4 import BeautifulSoup
from bs4.element import Tag
from concurrent.futures import ThreadPoolExecutor, Future
from ..exceptions import IndeedException
from ...jobs import (
JobPost,
Compensation,
@@ -20,20 +27,16 @@ from ...jobs import (
JobResponse,
JobType,
)
from .. import Scraper, ScraperInput, Site, Country, StatusException
class ParsingException(Exception):
pass
from .. import Scraper, ScraperInput, Site, Country
class IndeedScraper(Scraper):
def __init__(self):
def __init__(self, proxy: Optional[str] = None):
"""
Initializes IndeedScraper with the Indeed job search url
"""
site = Site(Site.INDEED)
super().__init__(site)
super().__init__(site, proxy=proxy)
self.jobs_per_page = 15
self.seen_urls = set()
@@ -52,7 +55,7 @@ class IndeedScraper(Scraper):
domain = self.country.domain_value
self.url = f"https://{domain}.indeed.com"
job_list = []
job_list: list[JobPost] = []
params = {
"q": scraper_input.search_term,
@@ -71,15 +74,26 @@
if sc_values:
params["sc"] = "0kf:" + "".join(sc_values) + ";"
response = session.get(self.url + "/jobs", params=params, allow_redirects=True)
# print(response.status_code)
try:
response = session.get(
self.url + "/jobs",
params=params,
allow_redirects=True,
proxy=self.proxy,
timeout_seconds=10,
)
if response.status_code not in range(200, 400):
raise StatusException(response.status_code)
raise IndeedException(
f"bad response with status code: {response.status_code}"
)
except Exception as e:
if "Proxy responded with" in str(e):
raise IndeedException("bad proxy")
raise IndeedException(str(e))
soup = BeautifulSoup(response.content, "html.parser")
if "did not match any jobs" in response.text:
raise ParsingException("Search did not match any jobs")
raise IndeedException("Parsing exception: Search did not match any jobs")
jobs = IndeedScraper.parse_jobs(
soup
@@ -91,7 +105,7 @@
.get("mosaicProviderJobCardsModel", {})
.get("results")
):
raise Exception("No jobs found.")
raise IndeedException("No jobs found.")
def process_job(job) -> Optional[JobPost]:
job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}'
@@ -169,7 +183,6 @@
math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
)
try:
#: get first page to initialize session
job_list, total_results = self.scrape_page(scraper_input, 0, session)
@@ -183,28 +196,11 @@
jobs, _ = future.result()
job_list += jobs
except StatusException as e:
return JobResponse(
success=False,
error=f"Indeed returned status code {e.status_code}",
)
except ParsingException as e:
return JobResponse(
success=False,
error=f"Indeed failed to parse response: {e}",
)
except Exception as e:
return JobResponse(
success=False,
error=f"Indeed failed to scrape: {e}",
)
if len(job_list) > scraper_input.results_wanted:
job_list = job_list[: scraper_input.results_wanted]
job_response = JobResponse(
success=True,
jobs=job_list,
total_results=total_results,
)
@@ -224,9 +220,9 @@
try:
response = session.get(
formatted_url, allow_redirects=True, timeout_seconds=5
formatted_url, allow_redirects=True, timeout_seconds=5, proxy=self.proxy
)
except requests.exceptions.Timeout:
except Exception as e:
return None
if response.status_code not in range(200, 400):
@@ -253,7 +249,6 @@
label = taxonomy["attributes"][0].get("label")
if label:
job_type_str = label.replace("-", "").replace(" ", "").lower()
# print(f"Debug: job_type_str = {job_type_str}")
return IndeedScraper.get_enum_from_value(job_type_str)
return None
@@ -299,9 +294,9 @@
jobs = json.loads(m.group(1).strip())
return jobs
else:
raise ParsingException("Could not find mosaic provider job cards data")
raise IndeedException("Could not find mosaic provider job cards data")
else:
raise ParsingException(
raise IndeedException(
"Could not find a script tag containing mosaic provider data"
)
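
One practical consequence of raising IndeedException("bad proxy") instead of returning an error field: a caller can rotate through proxies until one answers. An illustrative sketch (the proxy URLs are placeholders, not part of this commit):

from jobspy import scrape_jobs
from jobspy.scrapers.exceptions import IndeedException

candidate_proxies = [
    "http://user:pass@proxy-a.example.com:20001",
    "http://user:pass@proxy-b.example.com:20002",
]

jobs = None
for proxy in candidate_proxies:
    try:
        jobs = scrape_jobs(site_name=["indeed"], search_term="engineer", proxy=proxy)
        break  # first proxy that scrapes cleanly wins
    except IndeedException as exc:
        print(f"{proxy} failed ({exc}); trying the next one")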

View File

@@ -1,13 +1,20 @@
"""
jobspy.scrapers.linkedin
~~~~~~~~~~~~~~~~~~~
This module contains routines to scrape LinkedIn.
"""
from typing import Optional, Tuple
from datetime import datetime
import traceback
import requests
from requests.exceptions import Timeout
from requests.exceptions import Timeout, ProxyError
from bs4 import BeautifulSoup
from bs4.element import Tag
from .. import Scraper, ScraperInput, Site
from ..exceptions import LinkedInException
from ...jobs import (
JobPost,
Location,
@@ -19,13 +26,13 @@ from ...jobs import (
class LinkedInScraper(Scraper):
def __init__(self):
def __init__(self, proxy: Optional[str] = None):
"""
Initializes LinkedInScraper with the LinkedIn job search url
"""
site = Site(Site.LINKEDIN)
self.url = "https://www.linkedin.com"
super().__init__(site)
super().__init__(site, proxy=proxy)
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
"""
@@ -64,18 +71,23 @@
}
params = {k: v for k, v in params.items() if v is not None}
try:
response = session.get(
f"{self.url}/jobs/search", params=params, allow_redirects=True
f"{self.url}/jobs/search",
params=params,
allow_redirects=True,
proxies=self.proxy,
timeout=10,
)
if response.status_code != 200:
reason = ' (too many requests)' if response.status_code == 429 else ''
return JobResponse(
success=False,
error=f"LinkedIn returned {response.status_code} {reason}",
jobs=job_list,
total_results=job_count,
response.raise_for_status()
except requests.HTTPError as e:
raise LinkedInException(
f"bad response status code: {response.status_code}"
)
except ProxyError as e:
raise LinkedInException("bad proxy")
except Exception as e:
raise LinkedInException(str(e))
soup = BeautifulSoup(response.text, "html.parser")
@@ -115,7 +127,7 @@
datetime_tag = metadata_card.find(
"time", class_="job-search-card__listdate"
)
description, job_type = LinkedInScraper.get_description(job_url)
description, job_type = self.get_description(job_url)
if datetime_tag:
datetime_str = datetime_tag["datetime"]
try:
@@ -150,26 +162,18 @@
page += 1
job_list = job_list[: scraper_input.results_wanted]
job_response = JobResponse(
success=True,
jobs=job_list,
total_results=job_count,
)
return job_response
return JobResponse(jobs=job_list)
@staticmethod
def get_description(job_page_url: str) -> Optional[str]:
def get_description(self, job_page_url: str) -> Tuple[Optional[str], Optional[str]]:
"""
Retrieves job description by going to the job page url
:param job_page_url:
:return: description or None
"""
try:
response = requests.get(job_page_url, timeout=5)
except Timeout:
return None, None
if response.status_code not in range(200, 400):
response = requests.get(job_page_url, timeout=5, proxies=self.proxy)
response.raise_for_status()
except Exception as e:
return None, None
soup = BeautifulSoup(response.text, "html.parser")

View File

@@ -1,3 +1,9 @@
"""
jobspy.scrapers.ziprecruiter
~~~~~~~~~~~~~~~~~~~
This module contains routines to scrape ZipRecruiter.
"""
import math
import json
import re
@@ -7,11 +13,13 @@ from typing import Optional, Tuple
from urllib.parse import urlparse, parse_qs
import tls_client
import requests
from bs4 import BeautifulSoup
from bs4.element import Tag
from concurrent.futures import ThreadPoolExecutor, Future
from .. import Scraper, ScraperInput, Site, StatusException
from .. import Scraper, ScraperInput, Site
from ..exceptions import ZipRecruiterException
from ...jobs import (
JobPost,
Compensation,
@@ -24,13 +32,13 @@ from ...jobs import (
class ZipRecruiterScraper(Scraper):
def __init__(self):
def __init__(self, proxy: Optional[str] = None):
"""
Initializes ZipRecruiterScraper with the ZipRecruiter job search url
"""
site = Site(Site.ZIP_RECRUITER)
self.url = "https://www.ziprecruiter.com"
super().__init__(site)
super().__init__(site, proxy=proxy)
self.jobs_per_page = 20
self.seen_urls = set()
@@ -38,7 +46,7 @@
client_identifier="chrome112", random_tls_extension_order=True
)
def scrape_page(
def find_jobs_in_page(
self, scraper_input: ScraperInput, page: int
) -> list[JobPost]:
"""
@ -48,73 +56,62 @@ class ZipRecruiterScraper(Scraper):
:return: jobs found on page
"""
job_list = []
job_type_value = None
if scraper_input.job_type:
if scraper_input.job_type.value == "fulltime":
job_type_value = "full_time"
elif scraper_input.job_type.value == "parttime":
job_type_value = "part_time"
else:
job_type_value = scraper_input.job_type.value
params = {
"search": scraper_input.search_term,
"location": scraper_input.location,
"page": page,
"form": "jobs-landing",
}
if scraper_input.is_remote:
params["refine_by_location_type"] = "only_remote"
if scraper_input.distance:
params["radius"] = scraper_input.distance
if job_type_value:
params[
"refine_by_employment"
] = f"employment_type:employment_type:{job_type_value}"
job_list: list[JobPost] = []
try:
response = self.session.get(
self.url + "/jobs-search",
headers=ZipRecruiterScraper.headers(),
params=params,
params=ZipRecruiterScraper.add_params(scraper_input, page),
allow_redirects=True,
proxy=self.proxy,
timeout_seconds=10,
)
# print(response.status_code)
if response.status_code != 200:
raise StatusException(response.status_code)
html_string = response.text
soup = BeautifulSoup(html_string, "html.parser")
script_tag = soup.find("script", {"id": "js_variables"})
data = json.loads(script_tag.string)
if page == 1:
job_count = int(data["totalJobCount"].replace(",", ""))
raise ZipRecruiterException(
f"bad response status code: {response.status_code}"
)
except Exception as e:
if "Proxy responded with non 200 code" in str(e):
raise ZipRecruiterException("bad proxy")
raise ZipRecruiterException(str(e))
else:
job_count = None
soup = BeautifulSoup(response.text, "html.parser")
js_tag = soup.find("script", {"id": "js_variables"})
if js_tag:
page_json = json.loads(js_tag.string)
jobs_list = page_json.get("jobList")
if jobs_list:
page_variant = "javascript"
else:
page_variant = "html_2"
jobs_list = soup.find_all("div", {"class": "job_content"})
else:
page_variant = "html_1"
jobs_list = soup.find_all("li", {"class": "job-listing"})
with ThreadPoolExecutor(max_workers=10) as executor:
if "jobList" in data and data["jobList"]:
jobs_js = data["jobList"]
if page_variant == "javascript":
job_results = [
executor.submit(self.process_job_js, job) for job in jobs_js
executor.submit(self.process_job_javascript, job)
for job in jobs_list
]
else:
jobs_html = soup.find_all("div", {"class": "job_content"})
elif page_variant == "html_1":
job_results = [
executor.submit(self.process_job_html, job) for job in jobs_html
executor.submit(self.process_job_html_1, job) for job in jobs_list
]
elif page_variant == "html_2":
job_results = [
executor.submit(self.process_job_html_2, job) for job in jobs_list
]
job_list = [result.result() for result in job_results if result.result()]
return job_list, job_count
return job_list
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
"""
@@ -122,50 +119,27 @@
:param scraper_input:
:return: job_response
"""
#: get first page to initialize session
job_list: list[JobPost] = self.find_jobs_in_page(scraper_input, 1)
pages_to_process = max(
3, math.ceil(scraper_input.results_wanted / self.jobs_per_page)
)
try:
#: get first page to initialize session
job_list, total_results = self.scrape_page(scraper_input, 1)
with ThreadPoolExecutor(max_workers=10) as executor:
futures: list[Future] = [
executor.submit(self.scrape_page, scraper_input, page)
executor.submit(self.find_jobs_in_page, scraper_input, page)
for page in range(2, pages_to_process + 1)
]
for future in futures:
jobs, _ = future.result()
jobs = future.result()
job_list += jobs
except StatusException as e:
return JobResponse(
success=False,
error=f"ZipRecruiter returned status code {e.status_code}",
)
except Exception as e:
return JobResponse(
success=False,
error=f"ZipRecruiter failed to scrape: {e}",
)
#: note: fewer results than results_wanted may be returned; only overages are trimmed below
if len(job_list) > scraper_input.results_wanted:
job_list = job_list[: scraper_input.results_wanted]
return JobResponse(jobs=job_list)
job_response = JobResponse(
success=True,
jobs=job_list,
total_results=total_results,
)
return job_response
def process_job_html(self, job: Tag) -> Optional[JobPost]:
def process_job_html_1(self, job: Tag) -> Optional[JobPost]:
"""
Parses a job from the job content tag
:param job: BeautifulSoup Tag for one job post
@@ -179,8 +153,7 @@
company = job.find("a", {"class": "company_name"}).text.strip()
description, updated_job_url = self.get_description(job_url)
if updated_job_url is not None:
job_url = updated_job_url
job_url = updated_job_url if updated_job_url else job_url
if description is None:
description = job.find("p", {"class": "job_snippet"}).text.strip()
@@ -188,7 +161,7 @@
job_type = None
if job_type_element:
job_type_text = (
job_type_element.text.strip().lower().replace("-", "").replace(" ", "")
job_type_element.text.strip().lower().replace("_", "").replace(" ", "")
)
job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
@@ -206,8 +179,53 @@
)
return job_post
def process_job_js(self, job: dict) -> JobPost:
def process_job_html_2(self, job: Tag) -> Optional[JobPost]:
"""
Parses a job from the job content tag for a second variant of HTML that ZR uses
:param job: BeautifulSoup Tag for one job post
:return: JobPost
"""
job_url = job.find("a", class_="job_link")["href"]
title = job.find("h2", class_="title").text
company = job.find("a", class_="company_name").text.strip()
description, updated_job_url = self.get_description(job_url)
job_url = updated_job_url if updated_job_url else job_url
if description is None:
description = job.find("p", class_="job_snippet").get_text().strip()
job_type_text = job.find("li", class_="perk_item perk_type")
job_type = None
if job_type_text:
job_type_text = (
job_type_text.get_text()
.strip()
.lower()
.replace("-", "")
.replace(" ", "")
)
job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
date_posted = ZipRecruiterScraper.get_date_posted(job)
job_post = JobPost(
title=title,
description=description,
company_name=company,
location=ZipRecruiterScraper.get_location(job),
job_type=job_type,
compensation=ZipRecruiterScraper.get_compensation(job),
date_posted=date_posted,
job_url=job_url,
)
return job_post
def process_job_javascript(self, job: dict) -> JobPost:
title = job.get("Title")
job_url = job.get("JobURL")
description, updated_job_url = self.get_description(job_url)
job_url = updated_job_url if updated_job_url else job_url
if description is None:
description = BeautifulSoup(
job.get("Snippet", "").strip(), "html.parser"
).get_text()
@@ -216,13 +234,9 @@
location = Location(
city=job.get("City"), state=job.get("State"), country=Country.US_CANADA
)
try:
job_type = ZipRecruiterScraper.get_job_type_enum(
job.get("EmploymentType", "").replace("-", "_").lower()
job.get("EmploymentType", "").replace("-", "").lower()
)
except ValueError:
# print(f"Skipping job due to unrecognized job type: {job.get('EmploymentType')}")
return None
formatted_salary = job.get("FormattedSalaryShort", "")
salary_parts = formatted_salary.split(" ")
@@ -272,17 +286,11 @@
)
return job_post
@staticmethod
def get_enum_from_value(value_str):
for job_type in JobType:
if value_str in job_type.value:
return job_type
return None
@staticmethod
def get_job_type_enum(job_type_str: str) -> Optional[JobType]:
for job_type in JobType:
if job_type_str in job_type.value:
return job_type
return None
@@ -294,14 +302,17 @@
:return: description or None, response url
"""
try:
response = self.session.get(
response = requests.get(
job_page_url,
headers=ZipRecruiterScraper.headers(),
allow_redirects=True,
timeout_seconds=5,
timeout=5,
proxies=self.proxy,
)
except requests.exceptions.Timeout:
return None
if response.status_code not in range(200, 400):
return None, None
except Exception as e:
return None, None
html_string = response.content
soup_job = BeautifulSoup(html_string, "html.parser")
@@ -311,6 +322,36 @@
return job_description_div.text.strip(), response.url
return None, response.url
@staticmethod
def add_params(scraper_input: ScraperInput, page: int) -> dict:
params = {
"search": scraper_input.search_term,
"location": scraper_input.location,
"page": page,
"form": "jobs-landing",
}
job_type_value = None
if scraper_input.job_type:
if scraper_input.job_type.value == "fulltime":
job_type_value = "full_time"
elif scraper_input.job_type.value == "parttime":
job_type_value = "part_time"
else:
job_type_value = scraper_input.job_type.value
if job_type_value:
params[
"refine_by_employment"
] = f"employment_type:employment_type:{job_type_value}"
if scraper_input.is_remote:
params["refine_by_location_type"] = "only_remote"
if scraper_input.distance:
params["radius"] = scraper_input.distance
return params
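
To make the mapping concrete: for a full-time, remote search within 25 miles, the helper returns roughly the following (the doubled employment_type prefix is copied verbatim from the code above; the inputs are illustrative):

params = ZipRecruiterScraper.add_params(scraper_input, page=2)
# with search_term="engineer", location="Dallas, TX", a job_type whose value is
# "fulltime", is_remote=True, and distance=25, this evaluates to:
# {
#     "search": "engineer",
#     "location": "Dallas, TX",
#     "page": 2,
#     "form": "jobs-landing",
#     "refine_by_employment": "employment_type:employment_type:full_time",
#     "refine_by_location_type": "only_remote",
#     "radius": 25,
# }
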
@staticmethod
def get_interval(interval_str: str):
"""