Compare commits

..

5 Commits

Author SHA1 Message Date
Cullen Watson
8e2ab277da fix(ziprecruiter): pagination (#97)
* fix(ziprecruiter): pagination

* chore: version
2024-02-02 20:48:28 -06:00
Cullen Watson
ce3bd84ee5 fix: indeed parse description bug (#96)
* fix(indeed): full descr

* chore: version
2024-02-02 18:21:55 -06:00
Cullen Watson
1ccf2290fe docs: readme 2024-02-02 17:59:24 -06:00
Cullen Watson
ec2eefc58a docs: readme 2024-02-02 17:58:15 -06:00
Cullen Watson
13c7694474 Easy apply (#95)
* enh(glassdoor): easy apply filter

* enh(ziprecruiter): easy apply

* enh(indeed): use mobile headers

* chore: version
2024-02-02 17:47:15 -06:00
4 changed files with 71 additions and 60 deletions

View File

@@ -69,7 +69,7 @@ Optional
├── is_remote (bool)
├── full_description (bool): fetches full description for Indeed / LinkedIn (much slower)
├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
├── easy_apply (bool): filters for jobs that are hosted on LinkedIn, Glassdoor
├── easy_apply (bool): filters for jobs that are hosted on the job board site
├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
```
@@ -80,6 +80,7 @@ Optional
JobPost
├── title (str)
├── company (str)
├── company_url (str)
├── job_url (str)
├── location (object)
│ ├── country (str)
@@ -158,16 +159,11 @@ persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
**Q: Received a response code 429?**
**A:** This indicates that you have been blocked by the job board site for sending too many requests. All of the job board sites are aggressive with blocking. We recommend:
- Waiting a few seconds between requests.
- Waiting some time between scrapes (site-dependent).
- Trying a VPN or proxy to change your IP address.
---
**Q: Experiencing a "Segmentation fault: 11" on macOS Catalina?**
**A:** This is caused by the `tls_client` dependency not supporting your architecture. Solutions and workarounds include:
- Upgrade to a newer version of MacOS
- Reach out to the maintainers of [tls_client](https://github.com/bogdanfinn/tls-client) for fixes

View File

@@ -1,6 +1,6 @@
[tool.poetry]
name = "python-jobspy"
version = "1.1.37"
version = "1.1.40"
description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
homepage = "https://github.com/Bunsly/JobSpy"

View File

@@ -8,6 +8,7 @@ import re
import math
import io
import json
from typing import Any
from datetime import datetime
import urllib.parse
@@ -44,7 +45,7 @@ class IndeedScraper(Scraper):
site = Site(Site.INDEED)
super().__init__(site, proxy=proxy)
self.jobs_per_page = 15
self.jobs_per_page = 25
self.seen_urls = set()
def scrape_page(
@@ -60,30 +61,12 @@ class IndeedScraper(Scraper):
domain = self.country.indeed_domain_value
self.url = f"https://{domain}.indeed.com"
params = {
"q": scraper_input.search_term,
"l": scraper_input.location,
"filter": 0,
"start": scraper_input.offset + page * 10,
"sort": "date"
}
if scraper_input.distance:
params["radius"] = scraper_input.distance
sc_values = []
if scraper_input.is_remote:
sc_values.append("attr(DSQF7)")
if scraper_input.job_type:
sc_values.append("jt({})".format(scraper_input.job_type.value))
if sc_values:
params["sc"] = "0kf:" + "".join(sc_values) + ";"
try:
session = create_session(self.proxy)
response = session.get(
f"{self.url}/jobs",
f"{self.url}/m/jobs",
headers=self.get_headers(),
params=params,
params=self.add_params(scraper_input, page),
allow_redirects=True,
timeout_seconds=10,
)
@@ -112,8 +95,8 @@ class IndeedScraper(Scraper):
):
raise IndeedException("No jobs found.")
def process_job(job) -> JobPost | None:
job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}'
def process_job(job: dict) -> JobPost | None:
job_url = f'{self.url}/m/jobs/viewjob?jk={job["jobkey"]}'
job_url_client = f'{self.url}/viewjob?jk={job["jobkey"]}'
if job_url in self.seen_urls:
return None
@@ -171,8 +154,9 @@ class IndeedScraper(Scraper):
)
return job_post
workers = 10 if scraper_input.full_description else 10 # possibly lessen 10 when fetching desc based on feedback
jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
with ThreadPoolExecutor(max_workers=1) as executor:
with ThreadPoolExecutor(max_workers=workers) as executor:
job_results: list[Future] = [
executor.submit(process_job, job) for job in jobs
]
@@ -194,7 +178,7 @@ class IndeedScraper(Scraper):
#: get first page to initialize session
job_list, total_results = self.scrape_page(scraper_input, 0)
with ThreadPoolExecutor(max_workers=1) as executor:
with ThreadPoolExecutor(max_workers=10) as executor:
futures: list[Future] = [
executor.submit(self.scrape_page, scraper_input, page)
for page in range(1, pages_to_process + 1)
@@ -223,7 +207,7 @@ class IndeedScraper(Scraper):
parsed_url = urllib.parse.urlparse(job_page_url)
params = urllib.parse.parse_qs(parsed_url.query)
jk_value = params.get("jk", [None])[0]
formatted_url = f"{self.url}/viewjob?jk={jk_value}&spa=1"
formatted_url = f"{self.url}/m/viewjob?jk={jk_value}&spa=1"
session = create_session(self.proxy)
try:
@@ -240,10 +224,18 @@ class IndeedScraper(Scraper):
return None
try:
data = json.loads(response.text)
job_description = data["body"]["jobInfoWrapperModel"]["jobInfoModel"][
"sanitizedJobDescription"
]
soup = BeautifulSoup(response.text, 'html.parser')
script_tags = soup.find_all('script')
job_description = ''
for tag in script_tags:
if 'window._initialData' in tag.text:
json_str = tag.text
json_str = json_str.split('window._initialData=')[1]
json_str = json_str.rsplit(';', 1)[0]
data = json.loads(json_str)
job_description = data["jobInfoWrapperModel"]["jobInfoModel"]["sanitizedJobDescription"]
break
except (KeyError, TypeError, IndexError):
return None
@@ -331,17 +323,14 @@ class IndeedScraper(Scraper):
@staticmethod
def get_headers():
return {
"authority": "www.indeed.com",
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"referer": "https://www.indeed.com/viewjob?jk=fe6182337d72c7b1&tk=1hcbfcmd0k62t802&from=serp&vjs=3&advn=8132938064490989&adid=408692607&ad=-6NYlbfkN0A3Osc99MJFDKjquSk4WOGT28ALb_ad4QMtrHreCb9ICg6MiSVy9oDAp3evvOrI7Q-O9qOtQTg1EPbthP9xWtBN2cOuVeHQijxHjHpJC65TjDtftH3AXeINjBvAyDrE8DrRaAXl8LD3Fs1e_xuDHQIssdZ2Mlzcav8m5jHrA0fA64ZaqJV77myldaNlM7-qyQpy4AsJQfvg9iR2MY7qeC5_FnjIgjKIy_lNi9OPMOjGRWXA94CuvC7zC6WeiJmBQCHISl8IOBxf7EdJZlYdtzgae3593TFxbkd6LUwbijAfjax39aAuuCXy3s9C4YgcEP3TwEFGQoTpYu9Pmle-Ae1tHGPgsjxwXkgMm7Cz5mBBdJioglRCj9pssn-1u1blHZM4uL1nK9p1Y6HoFgPUU9xvKQTHjKGdH8d4y4ETyCMoNF4hAIyUaysCKdJKitC8PXoYaWhDqFtSMR4Jys8UPqUV&xkcb=SoDD-_M3JLQfWnQTDh0LbzkdCdPP&xpse=SoBa6_I3JLW9FlWZlB0PbzkdCdPP&sjdu=i6xVERweJM_pVUvgf-MzuaunBTY7G71J5eEX6t4DrDs5EMPQdODrX7Nn-WIPMezoqr5wA_l7Of-3CtoiUawcHw",
"sec-ch-ua": '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
'Host': 'www.indeed.com',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'sec-fetch-site': 'same-origin',
'sec-fetch-dest': 'document',
'accept-language': 'en-US,en;q=0.9',
'sec-fetch-mode': 'navigate',
'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 192.0',
'referer': 'https://www.indeed.com/m/jobs?q=software%20intern&l=Dallas%2C%20TX&from=serpso&rq=1&rsIdx=3',
}
@staticmethod
@@ -354,3 +343,29 @@ class IndeedScraper(Scraper):
if taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0:
return True
return False
@staticmethod
def add_params(scraper_input: ScraperInput, page: int) -> dict[str, str | Any]:
# Build the query-string parameters for one Indeed search-results page.
# Pure function of (scraper_input, page); reads no instance state.
params = {
"q": scraper_input.search_term,
"l": scraper_input.location,
"filter": 0,
# Pagination: each page advances the result offset by 10.
"start": scraper_input.offset + page * 10,
"sort": "date"
}
if scraper_input.distance:
params["radius"] = scraper_input.distance
# "sc" accumulates Indeed filter tokens, joined under the "0kf:" prefix
# (presumably Indeed's search-criteria encoding — TODO confirm).
sc_values = []
if scraper_input.is_remote:
# attr(DSQF7) appears to be Indeed's remote-job attribute code — verify.
sc_values.append("attr(DSQF7)")
if scraper_input.job_type:
sc_values.append("jt({})".format(scraper_input.job_type.value))
if sc_values:
params["sc"] = "0kf:" + "".join(sc_values) + ";"
if scraper_input.easy_apply:
# NOTE(review): iafilter=1 looks like the "Indeed Apply" filter — confirm.
params['iafilter'] = 1
return params

View File

@@ -44,12 +44,12 @@ class ZipRecruiterScraper(Scraper):
"""
params = self.add_params(scraper_input)
if continue_token:
params["continue"] = continue_token
params["continue_from"] = continue_token
try:
response = self.session.get(
f"https://api.ziprecruiter.com/jobs-app/jobs",
headers=self.headers(),
params=self.add_params(scraper_input),
params=params
)
if response.status_code != 200:
raise ZipRecruiterException(
@@ -68,7 +68,7 @@ class ZipRecruiterScraper(Scraper):
with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
job_results = [executor.submit(self.process_job, job) for job in jobs_list]
job_list = [result.result() for result in job_results if result.result()]
job_list = list(filter(None, (result.result() for result in job_results)))
return job_list, next_continue_token
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
@@ -95,16 +95,15 @@ class ZipRecruiterScraper(Scraper):
if not continue_token:
break
if len(job_list) > scraper_input.results_wanted:
job_list = job_list[: scraper_input.results_wanted]
return JobResponse(jobs=job_list[: scraper_input.results_wanted])
return JobResponse(jobs=job_list)
@staticmethod
def process_job(job: dict) -> JobPost:
def process_job(self, job: dict) -> JobPost | None:
"""Processes an individual job dict from the response"""
title = job.get("name")
job_url = job.get("job_url")
job_url = f"https://www.ziprecruiter.com/jobs//j?lvk={job['listing_key']}"
if job_url in self.seen_urls:
return
self.seen_urls.add(job_url)
job_description_html = job.get("job_description", "").strip()
description_soup = BeautifulSoup(job_description_html, "html.parser")
@@ -173,7 +172,6 @@ class ZipRecruiterScraper(Scraper):
params = {
"search": scraper_input.search_term,
"location": scraper_input.location,
"form": "jobs-landing",
}
job_type_value = None
if scraper_input.job_type:
@@ -183,6 +181,8 @@ class ZipRecruiterScraper(Scraper):
job_type_value = "part_time"
else:
job_type_value = scraper_input.job_type.value
if scraper_input.easy_apply:
params['zipapply'] = 1
if job_type_value:
params[