Proxy support (#44)

* add proxy support

* return as data frame
Branches/tags: pull/51/head, v1.1.3
Cullen Watson, 2023-09-07 11:28:17 -05:00, committed by GitHub
parent a37e7f235e
commit 59f739018a
10 changed files with 366 additions and 319 deletions


@ -33,15 +33,20 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# example 1 (no hyperlinks, USA)\n", "# example 1 (no hyperlinks, USA)\n",
"result = scrape_jobs(\n", "jobs = scrape_jobs(\n",
" site_name=[\"linkedin\", \"zip_recruiter\"],\n", " site_name=[\"linkedin\", \"zip_recruiter\"],\n",
" location='san francisco',\n", " location='san francisco',\n",
" search_term=\"engineer\",\n", " search_term=\"engineer\",\n",
" results_wanted=5, \n", " results_wanted=5,\n",
"\n",
" # use if you want to use a proxy\n",
" # proxy=\"socks5://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001\",\n",
" # proxy=\"http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001\",\n",
" # proxy=\"https://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001\",\n",
"\n",
")\n", ")\n",
"\n", "\n",
"display(result.jobs)\n", "display(jobs)"
"display(result.errors)"
] ]
}, },
{ {
@ -52,7 +57,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# example 2 - remote USA & hyperlinks\n", "# example 2 - remote USA & hyperlinks\n",
"result = scrape_jobs(\n", "jobs = scrape_jobs(\n",
" site_name=[\"linkedin\", \"zip_recruiter\", \"indeed\"],\n", " site_name=[\"linkedin\", \"zip_recruiter\", \"indeed\"],\n",
" # location='san francisco',\n", " # location='san francisco',\n",
" search_term=\"software engineer\",\n", " search_term=\"software engineer\",\n",
@ -71,11 +76,10 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# use if hyperlinks=True\n", "# use if hyperlinks=True\n",
"html = result.jobs.to_html(escape=False)\n", "html = jobs.to_html(escape=False)\n",
"# change max-width: 200px to show more or less of the content\n", "# change max-width: 200px to show more or less of the content\n",
"truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n", "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
"display(HTML(truncate_width))\n", "display(HTML(truncate_width))"
"display(result.errors)"
] ]
}, },
{ {
@ -86,13 +90,16 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# example 3 - with hyperlinks, international - linkedin (no zip_recruiter)\n", "# example 3 - with hyperlinks, international - linkedin (no zip_recruiter)\n",
"result = scrape_jobs(\n", "jobs = scrape_jobs(\n",
" site_name=[\"linkedin\"],\n", " site_name=[\"linkedin\"],\n",
" location='berlin',\n", " location='berlin',\n",
" search_term=\"engineer\",\n", " search_term=\"engineer\",\n",
" hyperlinks=True,\n", " hyperlinks=True,\n",
" results_wanted=5,\n", " results_wanted=5,\n",
" easy_apply=True\n", " easy_apply=True\n",
"\n",
"\n",
"\n",
")" ")"
] ]
}, },
@ -104,11 +111,10 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# use if hyperlinks=True\n", "# use if hyperlinks=True\n",
"html = result.jobs.to_html(escape=False)\n", "html = jobs.to_html(escape=False)\n",
"# change max-width: 200px to show more or less of the content\n", "# change max-width: 200px to show more or less of the content\n",
"truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n", "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
"display(HTML(truncate_width))\n", "display(HTML(truncate_width))"
"display(result.errors)"
] ]
}, },
{ {
@ -136,11 +142,10 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# use if hyperlinks=True\n", "# use if hyperlinks=True\n",
"html = result.jobs.to_html(escape=False)\n", "html = jobs.to_html(escape=False)\n",
"# change max-width: 200px to show more or less of the content\n", "# change max-width: 200px to show more or less of the content\n",
"truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n", "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
"display(HTML(truncate_width))\n", "display(HTML(truncate_width))"
"display(result.errors)"
] ]
} }
], ],
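The hyperlink-rendering cell is repeated after every example in this notebook; as a small sketch (the helper name and default width are illustrative, not part of the notebook), it could be factored into one function:

from IPython.display import HTML, display
import pandas as pd

def display_jobs_with_links(jobs: pd.DataFrame, max_width: int = 200) -> None:
    """Render a jobs DataFrame in Jupyter while keeping job_url_hyper anchors clickable."""
    html = jobs.to_html(escape=False)  # escape=False preserves the <a href=...> tags
    style = (
        "<style>.dataframe td { max-width: " + str(max_width) + "px; "
        "overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }</style>"
    )
    display(HTML(style + html))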


@ -26,13 +26,18 @@ pip install python-jobspy
from jobspy import scrape_jobs from jobspy import scrape_jobs
import pandas as pd import pandas as pd
result: pd.DataFrame = scrape_jobs( jobs: pd.DataFrame = scrape_jobs(
site_name=["indeed", "linkedin", "zip_recruiter"], site_name=["indeed", "linkedin", "zip_recruiter"],
search_term="software engineer", search_term="software engineer",
location="Dallas, TX", location="Dallas, TX",
results_wanted=10, results_wanted=10,
country_indeed='USA' # only needed for indeed country_indeed='USA' # only needed for indeed
# use if you want to use a proxy
# proxy="socks5://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
# proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
# proxy="https://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
) )
pd.set_option('display.max_columns', None) pd.set_option('display.max_columns', None)
@ -41,12 +46,12 @@ pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', 50) # set to 0 to see full job url / desc pd.set_option('display.max_colwidth', 50) # set to 0 to see full job url / desc
#1 output #1 output
print(result.jobs) print(jobs)
print(result.errors) print(errors)
#2 display in Jupyter Notebook #2 display in Jupyter Notebook
#display(result.jobs) #display(jobs)
#display(result.errors) #display(errors)
#3 output to .csv #3 output to .csv
#result.jobs.to_csv('result.jobs.csv', index=False) #result.jobs.to_csv('result.jobs.csv', index=False)
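Since the proxy URL embeds credentials, a hedged variant of the snippet above keeps it out of source control by reading it from an environment variable (the variable name JOBSPY_PROXY is illustrative; when it is unset, proxy stays None and no proxy is used):

import os
import pandas as pd
from jobspy import scrape_jobs

jobs: pd.DataFrame = scrape_jobs(
    site_name=["indeed", "linkedin", "zip_recruiter"],
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=10,
    country_indeed="USA",
    proxy=os.environ.get("JOBSPY_PROXY"),  # e.g. "http://user:pass@host:port"
)
jobs.to_csv("jobs.csv", index=False)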


@ -1,6 +1,6 @@
[tool.poetry] [tool.poetry]
name = "python-jobspy" name = "python-jobspy"
version = "1.1.2" version = "1.1.3"
description = "Job scraper for LinkedIn, Indeed & ZipRecruiter" description = "Job scraper for LinkedIn, Indeed & ZipRecruiter"
authors = ["Zachary Hampton <zachary@zacharysproducts.com>", "Cullen Watson <cullen@cullen.ai>"] authors = ["Zachary Hampton <zachary@zacharysproducts.com>", "Cullen Watson <cullen@cullen.ai>"]
readme = "README.md" readme = "README.md"


@ -1,13 +1,19 @@
import pandas as pd import pandas as pd
import concurrent.futures import concurrent.futures
from concurrent.futures import ThreadPoolExecutor from concurrent.futures import ThreadPoolExecutor
from typing import List, Tuple, NamedTuple, Dict from typing import List, Tuple, NamedTuple, Dict, Optional
import traceback
from .jobs import JobType, Location from .jobs import JobType, Location
from .scrapers.indeed import IndeedScraper from .scrapers.indeed import IndeedScraper
from .scrapers.ziprecruiter import ZipRecruiterScraper from .scrapers.ziprecruiter import ZipRecruiterScraper
from .scrapers.linkedin import LinkedInScraper from .scrapers.linkedin import LinkedInScraper
from .scrapers import ScraperInput, Site, JobResponse, Country from .scrapers import ScraperInput, Site, JobResponse, Country
from .scrapers.exceptions import (
LinkedInException,
IndeedException,
ZipRecruiterException,
)
SCRAPER_MAPPING = { SCRAPER_MAPPING = {
Site.LINKEDIN: LinkedInScraper, Site.LINKEDIN: LinkedInScraper,
@ -16,11 +22,6 @@ SCRAPER_MAPPING = {
} }
class ScrapeResults(NamedTuple):
jobs: pd.DataFrame
errors: pd.DataFrame
def _map_str_to_site(site_name: str) -> Site: def _map_str_to_site(site_name: str) -> Site:
return Site[site_name.upper()] return Site[site_name.upper()]
@@ -35,17 +36,21 @@ def scrape_jobs(
     easy_apply: bool = False,  # linkedin
     results_wanted: int = 15,
     country_indeed: str = "usa",
-    hyperlinks: bool = False
-) -> ScrapeResults:
+    hyperlinks: bool = False,
+    proxy: Optional[str] = None,
+) -> pd.DataFrame:
     """
-    Asynchronously scrapes job data from multiple job sites.
+    Simultaneously scrapes job data from multiple job sites.
     :return: results_wanted: pandas dataframe containing job data
     """
     if type(site_name) == str:
         site_type = [_map_str_to_site(site_name)]
     else:  #: if type(site_name) == list
-        site_type = [_map_str_to_site(site) if type(site) == str else site_name for site in site_name]
+        site_type = [
+            _map_str_to_site(site) if type(site) == str else site_name
+            for site in site_name
+        ]
country_enum = Country.from_string(country_indeed) country_enum = Country.from_string(country_indeed)
@ -62,99 +67,95 @@ def scrape_jobs(
) )
     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
+        scraper_class = SCRAPER_MAPPING[site]
+        scraper = scraper_class(proxy=proxy)
         try:
-            scraper_class = SCRAPER_MAPPING[site]
-            scraper = scraper_class()
             scraped_data: JobResponse = scraper.scrape(scraper_input)
+        except (LinkedInException, IndeedException, ZipRecruiterException) as lie:
+            raise lie
         except Exception as e:
-            scraped_data = JobResponse(jobs=[], error=str(e), success=False)
+            # unhandled exceptions
+            if site == Site.LINKEDIN:
+                raise LinkedInException()
+            if site == Site.INDEED:
+                raise IndeedException()
+            if site == Site.ZIP_RECRUITER:
+                raise ZipRecruiterException()
+            else:
+                raise e
         return site.value, scraped_data

-    results, errors = {}, {}
+    site_to_jobs_dict = {}

     def worker(site):
         site_value, scraped_data = scrape_site(site)
         return site_value, scraped_data

     with ThreadPoolExecutor() as executor:
-        future_to_site = {executor.submit(worker, site): site for site in scraper_input.site_type}
+        future_to_site = {
+            executor.submit(worker, site): site for site in scraper_input.site_type
+        }
         for future in concurrent.futures.as_completed(future_to_site):
             site_value, scraped_data = future.result()
-            results[site_value] = scraped_data
-            if scraped_data.error:
-                errors[site_value] = scraped_data.error
+            site_to_jobs_dict[site_value] = scraped_data
dfs = [] jobs_dfs: List[pd.DataFrame] = []
for site, job_response in results.items(): for site, job_response in site_to_jobs_dict.items():
for job in job_response.jobs: for job in job_response.jobs:
data = job.dict() job_data = job.dict()
data["job_url_hyper"] = f'<a href="{data["job_url"]}">{data["job_url"]}</a>' job_data[
data["site"] = site "job_url_hyper"
data["company"] = data["company_name"] ] = f'<a href="{job_data["job_url"]}">{job_data["job_url"]}</a>'
if data["job_type"]: job_data["site"] = site
job_data["company"] = job_data["company_name"]
if job_data["job_type"]:
# Take the first value from the job type tuple # Take the first value from the job type tuple
data["job_type"] = data["job_type"].value[0] job_data["job_type"] = job_data["job_type"].value[0]
else: else:
data["job_type"] = None job_data["job_type"] = None
data["location"] = Location(**data["location"]).display_location() job_data["location"] = Location(**job_data["location"]).display_location()
compensation_obj = data.get("compensation") compensation_obj = job_data.get("compensation")
if compensation_obj and isinstance(compensation_obj, dict): if compensation_obj and isinstance(compensation_obj, dict):
data["interval"] = ( job_data["interval"] = (
compensation_obj.get("interval").value compensation_obj.get("interval").value
if compensation_obj.get("interval") if compensation_obj.get("interval")
else None else None
) )
data["min_amount"] = compensation_obj.get("min_amount") job_data["min_amount"] = compensation_obj.get("min_amount")
data["max_amount"] = compensation_obj.get("max_amount") job_data["max_amount"] = compensation_obj.get("max_amount")
data["currency"] = compensation_obj.get("currency", "USD") job_data["currency"] = compensation_obj.get("currency", "USD")
else: else:
data["interval"] = None job_data["interval"] = None
data["min_amount"] = None job_data["min_amount"] = None
data["max_amount"] = None job_data["max_amount"] = None
data["currency"] = None job_data["currency"] = None
job_df = pd.DataFrame([data]) job_df = pd.DataFrame([job_data])
dfs.append(job_df) jobs_dfs.append(job_df)
-    errors_list = [(key, value) for key, value in errors.items()]
-    errors_df = pd.DataFrame(errors_list, columns=["Site", "Error"])
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
-        if hyperlinks:
-            desired_order = [
-                "site",
-                "title",
-                "company",
-                "location",
-                "job_type",
-                "interval",
-                "min_amount",
-                "max_amount",
-                "currency",
-                "job_url_hyper",
-                "description",
-            ]
-        else:
-            desired_order = [
-                "site",
-                "title",
-                "company",
-                "location",
-                "job_type",
-                "interval",
-                "min_amount",
-                "max_amount",
-                "currency",
-                "job_url",
-                "description",
-            ]
-        df = df[desired_order]
+    if jobs_dfs:
+        jobs_df = pd.concat(jobs_dfs, ignore_index=True)
+        desired_order: List[str] = [
+            "site",
+            "title",
+            "company",
+            "location",
+            "date_posted",
+            "job_type",
+            "interval",
+            "min_amount",
+            "max_amount",
+            "currency",
+            "job_url_hyper" if hyperlinks else "job_url",
+            "description",
+        ]
+        jobs_formatted_df = jobs_df[desired_order]
     else:
-        df = pd.DataFrame()
-    return ScrapeResults(jobs=df, errors=errors_df)
+        jobs_formatted_df = pd.DataFrame()
+    return jobs_formatted_df
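So the caller now gets back a single formatted DataFrame (empty when nothing matched) rather than a ScrapeResults tuple; a quick sketch of consuming it:

jobs = scrape_jobs(site_name="indeed", search_term="data engineer", location="Dallas, TX")
if jobs.empty:
    print("no jobs found")
else:
    # column names follow desired_order above (job_url is present when hyperlinks=False)
    print(jobs[["site", "title", "company", "location", "job_url"]].head())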


@ -189,22 +189,4 @@ class JobPost(BaseModel):
class JobResponse(BaseModel): class JobResponse(BaseModel):
success: bool
error: str = None
total_results: Optional[int] = None
jobs: list[JobPost] = [] jobs: list[JobPost] = []
returned_results: int = None
@validator("returned_results", pre=True, always=True)
def set_returned_results(cls, v, values):
jobs_list = values.get("jobs")
if v is None:
if jobs_list is not None:
return len(jobs_list)
else:
return 0
return v


@ -2,11 +2,6 @@ from ..jobs import Enum, BaseModel, JobType, JobResponse, Country
from typing import List, Optional, Any from typing import List, Optional, Any
class StatusException(Exception):
def __init__(self, status_code: int):
self.status_code = status_code
class Site(Enum): class Site(Enum):
LINKEDIN = "linkedin" LINKEDIN = "linkedin"
INDEED = "indeed" INDEED = "indeed"
@ -28,8 +23,9 @@ class ScraperInput(BaseModel):
class Scraper: class Scraper:
def __init__(self, site: Site): def __init__(self, site: Site, proxy: Optional[List[str]] = None):
self.site = site self.site = site
self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)
def scrape(self, scraper_input: ScraperInput) -> JobResponse: def scrape(self, scraper_input: ScraperInput) -> JobResponse:
... ...
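The one-line lambda in the constructor just maps a single proxy URL onto both schemes the way requests expects (note the annotation says Optional[List[str]], but the value is used as one URL string); an equivalent, more explicit sketch:

from typing import Optional

def build_proxies(proxy: Optional[str]) -> Optional[dict]:
    """Turn one proxy URL into a {"http": ..., "https": ...} mapping, or None."""
    if not proxy:
        return None
    return {"http": proxy, "https": proxy}

# build_proxies("socks5://user:pass@host:port")
# -> {"http": "socks5://user:pass@host:port", "https": "socks5://user:pass@host:port"}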


@ -0,0 +1,18 @@
"""
jobspy.scrapers.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Scrapers' exceptions.
"""
class LinkedInException(Exception):
"""Failed to scrape LinkedIn"""
class IndeedException(Exception):
"""Failed to scrape Indeed"""
class ZipRecruiterException(Exception):
"""Failed to scrape ZipRecruiter"""


@ -1,3 +1,9 @@
"""
jobspy.scrapers.indeed
~~~~~~~~~~~~~~~~~~~
This module contains routines to scrape Indeed.
"""
import re import re
import math import math
import io import io
@ -12,6 +18,7 @@ from bs4 import BeautifulSoup
from bs4.element import Tag from bs4.element import Tag
from concurrent.futures import ThreadPoolExecutor, Future from concurrent.futures import ThreadPoolExecutor, Future
from ..exceptions import IndeedException
from ...jobs import ( from ...jobs import (
JobPost, JobPost,
Compensation, Compensation,
@ -20,20 +27,16 @@ from ...jobs import (
JobResponse, JobResponse,
JobType, JobType,
) )
from .. import Scraper, ScraperInput, Site, Country, StatusException from .. import Scraper, ScraperInput, Site, Country
class ParsingException(Exception):
pass
class IndeedScraper(Scraper): class IndeedScraper(Scraper):
def __init__(self): def __init__(self, proxy: Optional[str] = None):
""" """
Initializes IndeedScraper with the Indeed job search url Initializes IndeedScraper with the Indeed job search url
""" """
site = Site(Site.INDEED) site = Site(Site.INDEED)
super().__init__(site) super().__init__(site, proxy=proxy)
self.jobs_per_page = 15 self.jobs_per_page = 15
self.seen_urls = set() self.seen_urls = set()
@ -52,7 +55,7 @@ class IndeedScraper(Scraper):
domain = self.country.domain_value domain = self.country.domain_value
self.url = f"https://{domain}.indeed.com" self.url = f"https://{domain}.indeed.com"
job_list = [] job_list: list[JobPost] = []
params = { params = {
"q": scraper_input.search_term, "q": scraper_input.search_term,
@ -71,15 +74,26 @@ class IndeedScraper(Scraper):
if sc_values: if sc_values:
params["sc"] = "0kf:" + "".join(sc_values) + ";" params["sc"] = "0kf:" + "".join(sc_values) + ";"
-        response = session.get(self.url + "/jobs", params=params, allow_redirects=True)
-        # print(response.status_code)
-        if response.status_code not in range(200, 400):
-            raise StatusException(response.status_code)
+        try:
+            response = session.get(
+                self.url + "/jobs",
+                params=params,
+                allow_redirects=True,
+                proxy=self.proxy,
+                timeout_seconds=10,
+            )
+            if response.status_code not in range(200, 400):
+                raise IndeedException(
+                    f"bad response with status code: {response.status_code}"
+                )
+        except Exception as e:
+            if "Proxy responded with" in str(e):
+                raise IndeedException("bad proxy")
+            raise IndeedException(str(e))

         soup = BeautifulSoup(response.content, "html.parser")
         if "did not match any jobs" in response.text:
-            raise ParsingException("Search did not match any jobs")
+            raise IndeedException("Parsing exception: Search did not match any jobs")
jobs = IndeedScraper.parse_jobs( jobs = IndeedScraper.parse_jobs(
soup soup
@ -91,7 +105,7 @@ class IndeedScraper(Scraper):
.get("mosaicProviderJobCardsModel", {}) .get("mosaicProviderJobCardsModel", {})
.get("results") .get("results")
): ):
raise Exception("No jobs found.") raise IndeedException("No jobs found.")
def process_job(job) -> Optional[JobPost]: def process_job(job) -> Optional[JobPost]:
job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}' job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}'
@ -169,42 +183,24 @@ class IndeedScraper(Scraper):
math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1 math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
) )
-        try:
-            #: get first page to initialize session
-            job_list, total_results = self.scrape_page(scraper_input, 0, session)
-            with ThreadPoolExecutor(max_workers=1) as executor:
-                futures: list[Future] = [
-                    executor.submit(self.scrape_page, scraper_input, page, session)
-                    for page in range(1, pages_to_process + 1)
-                ]
-                for future in futures:
-                    jobs, _ = future.result()
-                    job_list += jobs
-        except StatusException as e:
-            return JobResponse(
-                success=False,
-                error=f"Indeed returned status code {e.status_code}",
-            )
-        except ParsingException as e:
-            return JobResponse(
-                success=False,
-                error=f"Indeed failed to parse response: {e}",
-            )
-        except Exception as e:
-            return JobResponse(
-                success=False,
-                error=f"Indeed failed to scrape: {e}",
-            )
+        #: get first page to initialize session
+        job_list, total_results = self.scrape_page(scraper_input, 0, session)
+        with ThreadPoolExecutor(max_workers=1) as executor:
+            futures: list[Future] = [
+                executor.submit(self.scrape_page, scraper_input, page, session)
+                for page in range(1, pages_to_process + 1)
+            ]
+            for future in futures:
+                jobs, _ = future.result()
+                job_list += jobs

         if len(job_list) > scraper_input.results_wanted:
             job_list = job_list[: scraper_input.results_wanted]

         job_response = JobResponse(
-            success=True,
             jobs=job_list,
             total_results=total_results,
         )
@ -224,9 +220,9 @@ class IndeedScraper(Scraper):
try: try:
response = session.get( response = session.get(
formatted_url, allow_redirects=True, timeout_seconds=5 formatted_url, allow_redirects=True, timeout_seconds=5, proxy=self.proxy
) )
except requests.exceptions.Timeout: except Exception as e:
return None return None
if response.status_code not in range(200, 400): if response.status_code not in range(200, 400):
@ -253,7 +249,6 @@ class IndeedScraper(Scraper):
label = taxonomy["attributes"][0].get("label") label = taxonomy["attributes"][0].get("label")
if label: if label:
job_type_str = label.replace("-", "").replace(" ", "").lower() job_type_str = label.replace("-", "").replace(" ", "").lower()
# print(f"Debug: job_type_str = {job_type_str}")
return IndeedScraper.get_enum_from_value(job_type_str) return IndeedScraper.get_enum_from_value(job_type_str)
return None return None
@ -299,9 +294,9 @@ class IndeedScraper(Scraper):
jobs = json.loads(m.group(1).strip()) jobs = json.loads(m.group(1).strip())
return jobs return jobs
else: else:
raise ParsingException("Could not find mosaic provider job cards data") raise IndeedException("Could not find mosaic provider job cards data")
else: else:
raise ParsingException( raise IndeedException(
"Could not find a script tag containing mosaic provider data" "Could not find a script tag containing mosaic provider data"
) )
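Indeed and ZipRecruiter go through a tls_client session, which takes the proxies mapping via a proxy keyword and a timeout_seconds argument rather than timeout; a standalone sketch of the same call pattern (URL, query, and proxy values are placeholders):

import tls_client

session = tls_client.Session(
    client_identifier="chrome112", random_tls_extension_order=True
)
proxies = {"http": "http://user:pass@host:port", "https": "http://user:pass@host:port"}

response = session.get(
    "https://www.indeed.com/jobs",
    params={"q": "software engineer"},
    allow_redirects=True,
    proxy=proxies,  # same {"http": ..., "https": ...} mapping built by the Scraper base class
    timeout_seconds=10,
)
if response.status_code not in range(200, 400):
    raise RuntimeError(f"bad response with status code: {response.status_code}")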


@ -1,13 +1,20 @@
"""
jobspy.scrapers.linkedin
~~~~~~~~~~~~~~~~~~~
This module contains routines to scrape LinkedIn.
"""
from typing import Optional, Tuple from typing import Optional, Tuple
from datetime import datetime from datetime import datetime
import traceback import traceback
import requests import requests
from requests.exceptions import Timeout from requests.exceptions import Timeout, ProxyError
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from bs4.element import Tag from bs4.element import Tag
from .. import Scraper, ScraperInput, Site from .. import Scraper, ScraperInput, Site
from ..exceptions import LinkedInException
from ...jobs import ( from ...jobs import (
JobPost, JobPost,
Location, Location,
@ -19,13 +26,13 @@ from ...jobs import (
class LinkedInScraper(Scraper): class LinkedInScraper(Scraper):
def __init__(self): def __init__(self, proxy: Optional[str] = None):
""" """
Initializes LinkedInScraper with the LinkedIn job search url Initializes LinkedInScraper with the LinkedIn job search url
""" """
site = Site(Site.LINKEDIN) site = Site(Site.LINKEDIN)
self.url = "https://www.linkedin.com" self.url = "https://www.linkedin.com"
super().__init__(site) super().__init__(site, proxy=proxy)
def scrape(self, scraper_input: ScraperInput) -> JobResponse: def scrape(self, scraper_input: ScraperInput) -> JobResponse:
""" """
@ -64,18 +71,23 @@ class LinkedInScraper(Scraper):
} }
params = {k: v for k, v in params.items() if v is not None} params = {k: v for k, v in params.items() if v is not None}
-            response = session.get(
-                f"{self.url}/jobs/search", params=params, allow_redirects=True
-            )
-            if response.status_code != 200:
-                reason = ' (too many requests)' if response.status_code == 429 else ''
-                return JobResponse(
-                    success=False,
-                    error=f"LinkedIn returned {response.status_code} {reason}",
-                    jobs=job_list,
-                    total_results=job_count,
-                )
+            try:
+                response = session.get(
+                    f"{self.url}/jobs/search",
+                    params=params,
+                    allow_redirects=True,
+                    proxies=self.proxy,
+                    timeout=10,
+                )
+                response.raise_for_status()
+            except requests.HTTPError as e:
+                raise LinkedInException(
+                    f"bad response status code: {response.status_code}"
+                )
+            except ProxyError as e:
+                raise LinkedInException("bad proxy")
+            except (ProxyError, Exception) as e:
+                raise LinkedInException(str(e))
soup = BeautifulSoup(response.text, "html.parser") soup = BeautifulSoup(response.text, "html.parser")
@ -115,7 +127,7 @@ class LinkedInScraper(Scraper):
datetime_tag = metadata_card.find( datetime_tag = metadata_card.find(
"time", class_="job-search-card__listdate" "time", class_="job-search-card__listdate"
) )
description, job_type = LinkedInScraper.get_description(job_url) description, job_type = self.get_description(job_url)
if datetime_tag: if datetime_tag:
datetime_str = datetime_tag["datetime"] datetime_str = datetime_tag["datetime"]
try: try:
@ -150,26 +162,18 @@ class LinkedInScraper(Scraper):
page += 1 page += 1
job_list = job_list[: scraper_input.results_wanted] job_list = job_list[: scraper_input.results_wanted]
-        job_response = JobResponse(
-            success=True,
-            jobs=job_list,
-            total_results=job_count,
-        )
-        return job_response
+        return JobResponse(jobs=job_list)

-    @staticmethod
-    def get_description(job_page_url: str) -> Optional[str]:
+    def get_description(self, job_page_url: str) -> Optional[str]:
         """
         Retrieves job description by going to the job page url
         :param job_page_url:
         :return: description or None
         """
         try:
-            response = requests.get(job_page_url, timeout=5)
-        except Timeout:
-            return None, None
-        if response.status_code not in range(200, 400):
+            response = requests.get(job_page_url, timeout=5, proxies=self.proxy)
+            response.raise_for_status()
+        except Exception as e:
             return None, None
soup = BeautifulSoup(response.text, "html.parser") soup = BeautifulSoup(response.text, "html.parser")
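LinkedIn, by contrast, is fetched with plain requests, which takes the same mapping via proxies= (a socks5 URL additionally needs the PySocks extra, pip install requests[socks]); a minimal sketch with a placeholder proxy:

import requests

proxies = {
    "http": "socks5://user:pass@host:port",
    "https": "socks5://user:pass@host:port",
}

response = requests.get(
    "https://www.linkedin.com/jobs/search",
    proxies=proxies,
    timeout=10,
)
response.raise_for_status()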


@ -1,3 +1,9 @@
"""
jobspy.scrapers.ziprecruiter
~~~~~~~~~~~~~~~~~~~
This module contains routines to scrape ZipRecruiter.
"""
import math import math
import json import json
import re import re
@ -7,11 +13,13 @@ from typing import Optional, Tuple
from urllib.parse import urlparse, parse_qs from urllib.parse import urlparse, parse_qs
import tls_client import tls_client
import requests
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from bs4.element import Tag from bs4.element import Tag
from concurrent.futures import ThreadPoolExecutor, Future from concurrent.futures import ThreadPoolExecutor, Future
from .. import Scraper, ScraperInput, Site, StatusException from .. import Scraper, ScraperInput, Site
from ..exceptions import ZipRecruiterException
from ...jobs import ( from ...jobs import (
JobPost, JobPost,
Compensation, Compensation,
@ -24,13 +32,13 @@ from ...jobs import (
class ZipRecruiterScraper(Scraper): class ZipRecruiterScraper(Scraper):
def __init__(self): def __init__(self, proxy: Optional[str] = None):
""" """
Initializes LinkedInScraper with the ZipRecruiter job search url Initializes LinkedInScraper with the ZipRecruiter job search url
""" """
site = Site(Site.ZIP_RECRUITER) site = Site(Site.ZIP_RECRUITER)
self.url = "https://www.ziprecruiter.com" self.url = "https://www.ziprecruiter.com"
super().__init__(site) super().__init__(site, proxy=proxy)
self.jobs_per_page = 20 self.jobs_per_page = 20
self.seen_urls = set() self.seen_urls = set()
@ -38,7 +46,7 @@ class ZipRecruiterScraper(Scraper):
client_identifier="chrome112", random_tls_extension_order=True client_identifier="chrome112", random_tls_extension_order=True
) )
def scrape_page( def find_jobs_in_page(
self, scraper_input: ScraperInput, page: int self, scraper_input: ScraperInput, page: int
) -> tuple[list[JobPost], int | None]: ) -> tuple[list[JobPost], int | None]:
""" """
@ -48,73 +56,62 @@ class ZipRecruiterScraper(Scraper):
:param session: :param session:
:return: jobs found on page, total number of jobs found for search :return: jobs found on page, total number of jobs found for search
""" """
-        job_list = []
-        job_type_value = None
-        if scraper_input.job_type:
-            if scraper_input.job_type.value == "fulltime":
-                job_type_value = "full_time"
-            elif scraper_input.job_type.value == "parttime":
-                job_type_value = "part_time"
-            else:
-                job_type_value = scraper_input.job_type.value
-        params = {
-            "search": scraper_input.search_term,
-            "location": scraper_input.location,
-            "page": page,
-            "form": "jobs-landing",
-        }
-        if scraper_input.is_remote:
-            params["refine_by_location_type"] = "only_remote"
-        if scraper_input.distance:
-            params["radius"] = scraper_input.distance
-        if job_type_value:
-            params[
-                "refine_by_employment"
-            ] = f"employment_type:employment_type:{job_type_value}"
-        response = self.session.get(
-            self.url + "/jobs-search",
-            headers=ZipRecruiterScraper.headers(),
-            params=params,
-            allow_redirects=True,
-        )
-        # print(response.status_code)
-        if response.status_code != 200:
-            raise StatusException(response.status_code)
-        html_string = response.text
-        soup = BeautifulSoup(html_string, "html.parser")
-        script_tag = soup.find("script", {"id": "js_variables"})
-        data = json.loads(script_tag.string)
-        if page == 1:
-            job_count = int(data["totalJobCount"].replace(",", ""))
-        else:
-            job_count = None
+        job_list: list[JobPost] = []
+        try:
+            response = self.session.get(
+                self.url + "/jobs-search",
+                headers=ZipRecruiterScraper.headers(),
+                params=ZipRecruiterScraper.add_params(scraper_input, page),
+                allow_redirects=True,
+                proxy=self.proxy,
+                timeout_seconds=10,
+            )
+            if response.status_code != 200:
+                raise ZipRecruiterException(
+                    f"bad response status code: {response.status_code}"
+                )
+        except Exception as e:
+            if "Proxy responded with non 200 code" in str(e):
+                raise ZipRecruiterException("bad proxy")
+            raise ZipRecruiterException(str(e))
+        else:
+            soup = BeautifulSoup(response.text, "html.parser")
+            js_tag = soup.find("script", {"id": "js_variables"})
+            if js_tag:
+                page_json = json.loads(js_tag.string)
+                jobs_list = page_json.get("jobList")
+                if jobs_list:
+                    page_variant = "javascript"
+                    # print('type javascript', len(jobs_list))
+                else:
+                    page_variant = "html_2"
+                    jobs_list = soup.find_all("div", {"class": "job_content"})
+                    # print('type 2 html', len(jobs_list))
+            else:
+                page_variant = "html_1"
+                jobs_list = soup.find_all("li", {"class": "job-listing"})
+                # print('type 1 html', len(jobs_list))
+        # with open("zip_method_8.html", "w") as f:
+        #     f.write(soup.prettify())

         with ThreadPoolExecutor(max_workers=10) as executor:
-            if "jobList" in data and data["jobList"]:
-                jobs_js = data["jobList"]
-                job_results = [
-                    executor.submit(self.process_job_js, job) for job in jobs_js
-                ]
-            else:
-                jobs_html = soup.find_all("div", {"class": "job_content"})
-                job_results = [
-                    executor.submit(self.process_job_html, job) for job in jobs_html
-                ]
+            if page_variant == "javascript":
+                job_results = [
+                    executor.submit(self.process_job_javascript, job)
+                    for job in jobs_list
+                ]
+            elif page_variant == "html_1":
+                job_results = [
+                    executor.submit(self.process_job_html_1, job) for job in jobs_list
+                ]
+            elif page_variant == "html_2":
+                job_results = [
+                    executor.submit(self.process_job_html_2, job) for job in jobs_list
+                ]

             job_list = [result.result() for result in job_results if result.result()]
-        return job_list, job_count
+        return job_list
def scrape(self, scraper_input: ScraperInput) -> JobResponse: def scrape(self, scraper_input: ScraperInput) -> JobResponse:
""" """
@ -122,50 +119,27 @@ class ZipRecruiterScraper(Scraper):
:param scraper_input: :param scraper_input:
:return: job_response :return: job_response
""" """
+        #: get first page to initialize session
+        job_list: list[JobPost] = self.find_jobs_in_page(scraper_input, 1)
         pages_to_process = max(
             3, math.ceil(scraper_input.results_wanted / self.jobs_per_page)
         )

-        try:
-            #: get first page to initialize session
-            job_list, total_results = self.scrape_page(scraper_input, 1)
-            with ThreadPoolExecutor(max_workers=10) as executor:
-                futures: list[Future] = [
-                    executor.submit(self.scrape_page, scraper_input, page)
-                    for page in range(2, pages_to_process + 1)
-                ]
-                for future in futures:
-                    jobs, _ = future.result()
-                    job_list += jobs
-        except StatusException as e:
-            return JobResponse(
-                success=False,
-                error=f"ZipRecruiter returned status code {e.status_code}",
-            )
-        except Exception as e:
-            return JobResponse(
-                success=False,
-                error=f"ZipRecruiter failed to scrape: {e}",
-            )
-        #: note: this does not handle if the results are more or less than the results_wanted
-        if len(job_list) > scraper_input.results_wanted:
-            job_list = job_list[: scraper_input.results_wanted]
-        job_response = JobResponse(
-            success=True,
-            jobs=job_list,
-            total_results=total_results,
-        )
-        return job_response
+        with ThreadPoolExecutor(max_workers=10) as executor:
+            futures: list[Future] = [
+                executor.submit(self.find_jobs_in_page, scraper_input, page)
+                for page in range(2, pages_to_process + 1)
+            ]
+            for future in futures:
+                jobs = future.result()
+                job_list += jobs
+
+        job_list = job_list[: scraper_input.results_wanted]
+        return JobResponse(jobs=job_list)

-    def process_job_html(self, job: Tag) -> Optional[JobPost]:
+    def process_job_html_1(self, job: Tag) -> Optional[JobPost]:
""" """
Parses a job from the job content tag Parses a job from the job content tag
:param job: BeautifulSoup Tag for one job post :param job: BeautifulSoup Tag for one job post
@ -179,8 +153,7 @@ class ZipRecruiterScraper(Scraper):
company = job.find("a", {"class": "company_name"}).text.strip() company = job.find("a", {"class": "company_name"}).text.strip()
description, updated_job_url = self.get_description(job_url) description, updated_job_url = self.get_description(job_url)
if updated_job_url is not None: job_url = updated_job_url if updated_job_url else job_url
job_url = updated_job_url
if description is None: if description is None:
description = job.find("p", {"class": "job_snippet"}).text.strip() description = job.find("p", {"class": "job_snippet"}).text.strip()
@ -188,7 +161,7 @@ class ZipRecruiterScraper(Scraper):
job_type = None job_type = None
if job_type_element: if job_type_element:
job_type_text = ( job_type_text = (
job_type_element.text.strip().lower().replace("-", "").replace(" ", "") job_type_element.text.strip().lower().replace("_", "").replace(" ", "")
) )
job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text) job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
@ -206,23 +179,64 @@ class ZipRecruiterScraper(Scraper):
) )
return job_post return job_post
def process_job_js(self, job: dict) -> JobPost: def process_job_html_2(self, job: Tag) -> Optional[JobPost]:
"""
Parses a job from the job content tag for a second variant of HTML that ZipRecruiter uses
:param job: BeautifulSoup Tag for one job post
:return JobPost
"""
job_url = job.find("a", class_="job_link")["href"]
title = job.find("h2", class_="title").text
company = job.find("a", class_="company_name").text.strip()
description, updated_job_url = self.get_description(job_url)
job_url = updated_job_url if updated_job_url else job_url
if description is None:
description = job.find("p", class_="job_snippet").get_text().strip()
job_type_text = job.find("li", class_="perk_item perk_type")
job_type = None
if job_type_text:
job_type_text = (
job_type_text.get_text()
.strip()
.lower()
.replace("-", "")
.replace(" ", "")
)
job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
date_posted = ZipRecruiterScraper.get_date_posted(job)
job_post = JobPost(
title=title,
description=description,
company_name=company,
location=ZipRecruiterScraper.get_location(job),
job_type=job_type,
compensation=ZipRecruiterScraper.get_compensation(job),
date_posted=date_posted,
job_url=job_url,
)
return job_post
def process_job_javascript(self, job: dict) -> JobPost:
title = job.get("Title") title = job.get("Title")
description = BeautifulSoup( job_url = job.get("JobURL")
job.get("Snippet", "").strip(), "html.parser"
).get_text() description, updated_job_url = self.get_description(job_url)
job_url = updated_job_url if updated_job_url else job_url
if description is None:
description = BeautifulSoup(
job.get("Snippet", "").strip(), "html.parser"
).get_text()
company = job.get("OrgName") company = job.get("OrgName")
location = Location( location = Location(
city=job.get("City"), state=job.get("State"), country=Country.US_CANADA city=job.get("City"), state=job.get("State"), country=Country.US_CANADA
) )
try: job_type = ZipRecruiterScraper.get_job_type_enum(
job_type = ZipRecruiterScraper.get_job_type_enum( job.get("EmploymentType", "").replace("-", "").lower()
job.get("EmploymentType", "").replace("-", "_").lower() )
)
except ValueError:
# print(f"Skipping job due to unrecognized job type: {job.get('EmploymentType')}")
return None
formatted_salary = job.get("FormattedSalaryShort", "") formatted_salary = job.get("FormattedSalaryShort", "")
salary_parts = formatted_salary.split(" ") salary_parts = formatted_salary.split(" ")
@ -272,17 +286,11 @@ class ZipRecruiterScraper(Scraper):
) )
return job_post return job_post
@staticmethod
def get_enum_from_value(value_str):
for job_type in JobType:
if value_str in job_type.value:
return job_type
return None
@staticmethod @staticmethod
def get_job_type_enum(job_type_str: str) -> Optional[JobType]: def get_job_type_enum(job_type_str: str) -> Optional[JobType]:
for job_type in JobType: for job_type in JobType:
if job_type_str in job_type.value: if job_type_str in job_type.value:
a = True
return job_type return job_type
return None return None
@ -294,14 +302,17 @@ class ZipRecruiterScraper(Scraper):
:return: description or None, response url :return: description or None, response url
""" """
try: try:
response = self.session.get( response = requests.get(
job_page_url, job_page_url,
headers=ZipRecruiterScraper.headers(), headers=ZipRecruiterScraper.headers(),
allow_redirects=True, allow_redirects=True,
timeout_seconds=5, timeout=5,
proxies=self.proxy,
) )
except requests.exceptions.Timeout: if response.status_code not in range(200, 400):
return None return None, None
except Exception as e:
return None, None
html_string = response.content html_string = response.content
soup_job = BeautifulSoup(html_string, "html.parser") soup_job = BeautifulSoup(html_string, "html.parser")
@ -311,6 +322,36 @@ class ZipRecruiterScraper(Scraper):
return job_description_div.text.strip(), response.url return job_description_div.text.strip(), response.url
return None, response.url return None, response.url
@staticmethod
def add_params(scraper_input, page) -> Optional[str]:
params = {
"search": scraper_input.search_term,
"location": scraper_input.location,
"page": page,
"form": "jobs-landing",
}
job_type_value = None
if scraper_input.job_type:
if scraper_input.job_type.value == "fulltime":
job_type_value = "full_time"
elif scraper_input.job_type.value == "parttime":
job_type_value = "part_time"
else:
job_type_value = scraper_input.job_type.value
if job_type_value:
params[
"refine_by_employment"
] = f"employment_type:employment_type:{job_type_value}"
if scraper_input.is_remote:
params["refine_by_location_type"] = "only_remote"
if scraper_input.distance:
params["radius"] = scraper_input.distance
return params
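For a remote, full-time search within some radius, the builder above would yield query parameters along these lines (a sketch of the expected output, not a captured request; the search values are illustrative):

{
    "search": "software engineer",
    "location": "Dallas, TX",
    "page": 1,
    "form": "jobs-landing",
    "refine_by_employment": "employment_type:employment_type:full_time",
    "refine_by_location_type": "only_remote",
    "radius": 25,
}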
@staticmethod @staticmethod
def get_interval(interval_str: str): def get_interval(interval_str: str):
""" """