Mirror of https://github.com/Bunsly/JobSpy.git (synced 2026-03-04 19:44:30 -08:00)
Compare commits

7 Commits

| SHA1 |
|---|
| ccbea51f3c |
| 6ec7c24f7f |
| 02caf1b38d |
| 8e2ab277da |
| ce3bd84ee5 |
| 1ccf2290fe |
| ec2eefc58a |
@@ -70,6 +70,7 @@ Optional
├── full_description (bool): fetches full description for Indeed / LinkedIn (much slower)
├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
├── easy_apply (bool): filters for jobs that are hosted on the job board site
├── linkedin_company_ids (list[int]): searches for linkedin jobs with specific company ids
├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
```
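A minimal usage sketch of the optional parameters documented above; the search term, company ids, and counts are illustrative values, not taken from the source:

```
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed", "linkedin"],
    search_term="software engineer",       # illustrative term
    results_wanted=25,
    full_description=True,                 # slower: fetches full Indeed / LinkedIn descriptions
    linkedin_company_ids=[1441, 2382910],  # illustrative LinkedIn company ids
    offset=25,                             # start from the 25th result
    country_indeed="usa",
)
print(jobs.head())
```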
@@ -80,6 +81,7 @@ Optional
JobPost
├── title (str)
├── company (str)
├── company_url (str)
├── job_url (str)
├── location (object)
│ ├── country (str)
@@ -158,16 +160,11 @@ persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).

**Q: Received a response code 429?**

**A:** This indicates that you have been blocked by the job board site for sending too many requests. All of the job boards are aggressive about blocking. We recommend:

- Waiting a few seconds between requests.
- Waiting some time between scrapes (site-dependent).
- Trying a VPN or proxy to change your IP address (see the proxy sketch below).
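A minimal sketch of the proxy route using the `proxy` parameter; the URL below is a placeholder, not a working proxy:

```
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name="indeed",
    search_term="data analyst",                  # illustrative term
    results_wanted=15,
    proxy="http://user:pass@203.0.113.10:8080",  # placeholder proxy URL
)
```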
---

**Q: Experiencing a "Segmentation fault: 11" on macOS Catalina?**

**A:** This is due to the `tls_client` dependency not supporting your architecture. Solutions and workarounds include:

- Upgrade to a newer version of macOS
- Reach out to the maintainers of [tls_client](https://github.com/bogdanfinn/tls-client) for fixes
@@ -1,6 +1,6 @@
[tool.poetry]
name = "python-jobspy"
version = "1.1.38"
version = "1.1.42"
description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
homepage = "https://github.com/Bunsly/JobSpy"
@@ -1,7 +1,7 @@
import pandas as pd
from typing import Tuple
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from typing import Tuple, Optional

from .jobs import JobType, Location
from .scrapers.indeed import IndeedScraper
@@ -29,19 +29,20 @@ def _map_str_to_site(site_name: str) -> Site:


def scrape_jobs(
    site_name: str | list[str] | Site | list[Site],
    search_term: str,
    location: str = "",
    distance: int = None,
    site_name: str | list[str] | Site | list[Site] | None = None,
    search_term: str | None = None,
    location: str | None = None,
    distance: int | None = None,
    is_remote: bool = False,
    job_type: str = None,
    easy_apply: bool = False, # linkedin
    job_type: str | None = None,
    easy_apply: bool | None = None,
    results_wanted: int = 15,
    country_indeed: str = "usa",
    hyperlinks: bool = False,
    proxy: Optional[str] = None,
    full_description: Optional[bool] = False,
    offset: Optional[int] = 0,
    proxy: str | None = None,
    full_description: bool | None = False,
    linkedin_company_ids: list[int] | None = None,
    offset: int | None = 0,
) -> pd.DataFrame:
    """
    Simultaneously scrapes job data from multiple job sites.
@@ -56,18 +57,23 @@ def scrape_jobs(

    job_type = get_enum_from_value(job_type) if job_type else None

    if type(site_name) == str:
        site_type = [_map_str_to_site(site_name)]
    else: #: if type(site_name) == list
        site_type = [
            _map_str_to_site(site) if type(site) == str else site_name
    def get_site_type():
        site_types = list(Site)
        if isinstance(site_name, str):
            site_types = [_map_str_to_site(site_name)]
        elif isinstance(site_name, Site):
            site_types = [site_name]
        elif isinstance(site_name, list):
            site_types = [
                _map_str_to_site(site) if isinstance(site, str) else site
                for site in site_name
            ]
        return site_types

    country_enum = Country.from_string(country_indeed)

    scraper_input = ScraperInput(
        site_type=site_type,
        site_type=get_site_type(),
        country=country_enum,
        search_term=search_term,
        location=location,
@@ -77,6 +83,7 @@ def scrape_jobs(
        easy_apply=easy_apply,
        full_description=full_description,
        results_wanted=results_wanted,
        linkedin_company_ids=linkedin_company_ids,
        offset=offset,
    )

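The new get_site_type helper accepts a string, a Site enum member, a list of either, or nothing at all (falling back to every site). A hedged sketch of the call forms this enables; the import path, enum member names, and search terms are illustrative assumptions:

```
from jobspy import scrape_jobs
from jobspy.scrapers import Site  # import path assumed for illustration

scrape_jobs(site_name="indeed", search_term="python developer")
scrape_jobs(site_name=Site.LINKEDIN, search_term="python developer")  # member name assumed
scrape_jobs(site_name=["indeed", Site.ZIP_RECRUITER], search_term="python developer")
scrape_jobs(search_term="python developer")  # site_name omitted: all sites
```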
@@ -1,5 +1,4 @@
from ..jobs import Enum, BaseModel, JobType, JobResponse, Country
from typing import List, Optional, Any


class Site(Enum):
@@ -10,23 +9,24 @@ class Site(Enum):


class ScraperInput(BaseModel):
    site_type: List[Site]
    search_term: str
    site_type: list[Site]
    search_term: str | None = None

    location: str = None
    country: Optional[Country] = Country.USA
    distance: Optional[int] = None
    location: str | None = None
    country: Country | None = Country.USA
    distance: int | None = None
    is_remote: bool = False
    job_type: Optional[JobType] = None
    easy_apply: bool = None # linkedin
    job_type: JobType | None = None
    easy_apply: bool | None = None
    full_description: bool = False
    offset: int = 0
    linkedin_company_ids: list[int] | None = None

    results_wanted: int = 15


class Scraper:
    def __init__(self, site: Site, proxy: Optional[List[str]] = None):
    def __init__(self, site: Site, proxy: list[str] | None = None):
        self.site = site
        self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)
@@ -246,6 +246,8 @@ class GlassdoorScraper(Scraper):
            location_type = "CITY"
        elif location_type == "S":
            location_type = "STATE"
        elif location_type == 'N':
            location_type = "COUNTRY"
        return int(items[0]["locationId"]), location_type

    @staticmethod
@@ -154,8 +154,9 @@ class IndeedScraper(Scraper):
            )
            return job_post

        workers = 10 if scraper_input.full_description else 10 # possibly lessen 10 when fetching desc based on feedback
        jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
        with ThreadPoolExecutor(max_workers=1) as executor:
        with ThreadPoolExecutor(max_workers=workers) as executor:
            job_results: list[Future] = [
                executor.submit(process_job, job) for job in jobs
            ]
@@ -206,7 +207,7 @@ class IndeedScraper(Scraper):
        parsed_url = urllib.parse.urlparse(job_page_url)
        params = urllib.parse.parse_qs(parsed_url.query)
        jk_value = params.get("jk", [None])[0]
        formatted_url = f"{self.url}/viewjob?jk={jk_value}&spa=1"
        formatted_url = f"{self.url}/m/viewjob?jk={jk_value}&spa=1"
        session = create_session(self.proxy)

        try:
@@ -223,10 +224,18 @@ class IndeedScraper(Scraper):
            return None

        try:
            data = json.loads(response.text)
            job_description = data["body"]["jobInfoWrapperModel"]["jobInfoModel"][
                "sanitizedJobDescription"
            ]
            soup = BeautifulSoup(response.text, 'html.parser')
            script_tags = soup.find_all('script')

            job_description = ''
            for tag in script_tags:
                if 'window._initialData' in tag.text:
                    json_str = tag.text
                    json_str = json_str.split('window._initialData=')[1]
                    json_str = json_str.rsplit(';', 1)[0]
                    data = json.loads(json_str)
                    job_description = data["jobInfoWrapperModel"]["jobInfoModel"]["sanitizedJobDescription"]
                    break
        except (KeyError, TypeError, IndexError):
            return None
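The change above swaps direct JSON parsing of the response body for pulling the `window._initialData` payload out of the page's script tags. A minimal standalone sketch of that extraction; the helper name is made up and the payload structure is assumed to match the diff:

```
import json
from bs4 import BeautifulSoup

def extract_description(html: str) -> str | None:
    """Illustrative helper: pull sanitizedJobDescription from an Indeed viewjob page."""
    soup = BeautifulSoup(html, "html.parser")
    for tag in soup.find_all("script"):
        if "window._initialData" in tag.text:
            json_str = tag.text.split("window._initialData=")[1].rsplit(";", 1)[0]
            data = json.loads(json_str)
            return data["jobInfoWrapperModel"]["jobInfoModel"]["sanitizedJobDescription"]
    return None
```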
@@ -339,7 +348,7 @@ class IndeedScraper(Scraper):
    def add_params(scraper_input: ScraperInput, page: int) -> dict[str, str | Any]:
        params = {
            "q": scraper_input.search_term,
            "l": scraper_input.location,
            "l": scraper_input.location if scraper_input.location else scraper_input.country.value[0].split(',')[-1],
            "filter": 0,
            "start": scraper_input.offset + page * 10,
            "sort": "date"
@@ -70,7 +70,9 @@ class LinkedInScraper(Scraper):

            return mapping.get(job_type_enum, "")

        while len(job_list) < scraper_input.results_wanted and page < 1000:
        continue_search = lambda: len(job_list) < scraper_input.results_wanted and page < 1000

        while continue_search():
            session = create_session(is_tls=False, has_retry=True, delay=5)
            params = {
                "keywords": scraper_input.search_term,
@@ -83,6 +85,7 @@ class LinkedInScraper(Scraper):
                "pageNum": 0,
                "start": page + scraper_input.offset,
                "f_AL": "true" if scraper_input.easy_apply else None,
                "f_C": ','.join(map(str, scraper_input.linkedin_company_ids)) if scraper_input.linkedin_company_ids else None
            }

            params = {k: v for k, v in params.items() if v is not None}
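For reference, the new `f_C` filter simply joins the configured LinkedIn company ids with commas; a tiny sketch with made-up ids:

```
linkedin_company_ids = [1441, 2382910]          # illustrative ids
f_c = ",".join(map(str, linkedin_company_ids))  # -> "1441,2382910"
```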
@@ -130,8 +133,9 @@ class LinkedInScraper(Scraper):
            except Exception as e:
                raise LinkedInException("Exception occurred while processing jobs")

            page += 25
            if continue_search():
                time.sleep(random.uniform(LinkedInScraper.DELAY, LinkedInScraper.DELAY + 2))
                page += 25

        job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list)
@@ -6,8 +6,7 @@ This module contains routines to scrape ZipRecruiter.
"""
import math
import time
import re
from datetime import datetime, date
from datetime import datetime, timezone
from typing import Optional, Tuple, Any

from bs4 import BeautifulSoup
@@ -32,6 +31,7 @@ class ZipRecruiterScraper(Scraper):

        self.jobs_per_page = 20
        self.seen_urls = set()
        self.delay = 5

    def find_jobs_in_page(
        self, scraper_input: ScraperInput, continue_token: str | None = None
@@ -44,12 +44,12 @@ class ZipRecruiterScraper(Scraper):
        """
        params = self.add_params(scraper_input)
        if continue_token:
            params["continue"] = continue_token
            params["continue_from"] = continue_token
        try:
            response = self.session.get(
                f"https://api.ziprecruiter.com/jobs-app/jobs",
                headers=self.headers(),
                params=self.add_params(scraper_input),
                params=params
            )
            if response.status_code != 200:
                raise ZipRecruiterException(
@@ -60,7 +60,6 @@ class ZipRecruiterScraper(Scraper):
                raise ZipRecruiterException("bad proxy")
            raise ZipRecruiterException(str(e))

        time.sleep(5)
        response_data = response.json()
        jobs_list = response_data.get("jobs", [])
        next_continue_token = response_data.get("continue", None)
@@ -68,7 +67,7 @@ class ZipRecruiterScraper(Scraper):
        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
            job_results = [executor.submit(self.process_job, job) for job in jobs_list]

        job_list = [result.result() for result in job_results if result.result()]
        job_list = list(filter(None, (result.result() for result in job_results)))
        return job_list, next_continue_token

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
@@ -86,6 +85,9 @@ class ZipRecruiterScraper(Scraper):
            if len(job_list) >= scraper_input.results_wanted:
                break

            if page > 1:
                time.sleep(self.delay)

            jobs_on_page, continue_token = self.find_jobs_in_page(
                scraper_input, continue_token
            )
@@ -95,22 +97,21 @@ class ZipRecruiterScraper(Scraper):
            if not continue_token:
                break

        if len(job_list) > scraper_input.results_wanted:
            job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list[: scraper_input.results_wanted])

        return JobResponse(jobs=job_list)

    @staticmethod
    def process_job(job: dict) -> JobPost:
    def process_job(self, job: dict) -> JobPost | None:
        """Processes an individual job dict from the response"""
        title = job.get("name")
        job_url = job.get("job_url")
        job_url = f"https://www.ziprecruiter.com/jobs//j?lvk={job['listing_key']}"
        if job_url in self.seen_urls:
            return
        self.seen_urls.add(job_url)

        job_description_html = job.get("job_description", "").strip()
        description_soup = BeautifulSoup(job_description_html, "html.parser")
        description = modify_and_get_description(description_soup)

        company = job["hiring_company"].get("name") if "hiring_company" in job else None
        company = job.get("hiring_company", {}).get("name")
        country_value = "usa" if job.get("job_country") == "US" else "canada"
        country_enum = Country.from_string(country_value)
@@ -120,17 +121,7 @@ class ZipRecruiterScraper(Scraper):
        job_type = ZipRecruiterScraper.get_job_type_enum(
            job.get("employment_type", "").replace("_", "").lower()
        )

        save_job_url = job.get("SaveJobURL", "")
        posted_time_match = re.search(
            r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url
        )
        if posted_time_match:
            date_time_str = posted_time_match.group(1)
            date_posted_obj = datetime.strptime(date_time_str, "%Y-%m-%dT%H:%M:%SZ")
            date_posted = date_posted_obj.date()
        else:
            date_posted = date.today()
        date_posted = datetime.fromisoformat(job['posted_time'].rstrip("Z")).date()

        return JobPost(
            title=title,
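The date handling above replaces the regex over SaveJobURL with a direct parse of the job's posted_time field. A minimal sketch of that parse, using a made-up timestamp:

```
from datetime import datetime

posted_time = "2024-01-05T12:30:00Z"  # illustrative value, not from the source
date_posted = datetime.fromisoformat(posted_time.rstrip("Z")).date()
print(date_posted)  # 2024-01-05
```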
@@ -173,7 +164,6 @@ class ZipRecruiterScraper(Scraper):
        params = {
            "search": scraper_input.search_term,
            "location": scraper_input.location,
            "form": "jobs-landing",
        }
        job_type_value = None
        if scraper_input.job_type:
@@ -197,6 +187,8 @@ class ZipRecruiterScraper(Scraper):
        if scraper_input.distance:
            params["radius"] = scraper_input.distance

        params = {k: v for k, v in params.items() if v is not None}

        return params

    @staticmethod