Mirror of https://github.com/Bunsly/JobSpy.git
Synced 2026-03-05 03:54:31 -08:00

Compare commits (20 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 1ffdb1756f | |
| | 1185693422 | |
| | dcd7144318 | |
| | bf73c061bd | |
| | 8dd08ed9fd | |
| | 5d3df732e6 | |
| | 86f858e06d | |
| | 1089d1f0a5 | |
| | 3e93454738 | |
| | 0d150d519f | |
| | cc3497f929 | |
| | 5986f75346 | |
| | 4b7bdb9313 | |
| | 80213f28d2 | |
| | ada38532c3 | |
| | 3b0017964c | |
| | 94d8f555fd | |
| | e8b4b376b8 | |
| | 54ac1bad16 | |
| | 0a669e9ba8 | |

(Author and date columns were not captured in this mirror.)
.pre-commit-config.yaml (new file, 7 lines)

@@ -0,0 +1,7 @@
+repos:
+  - repo: https://github.com/psf/black
+    rev: 24.2.0
+    hooks:
+      - id: black
+        language_version: python
+        args: [--line-length=88, --quiet]
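With the hook config above plus the `black`/`pre-commit` dev dependencies added in `pyproject.toml` further down, enabling the formatter locally is the standard pre-commit flow (ordinary CLI usage, not shown in this diff):

```
pip install pre-commit
pre-commit install            # registers the git hook
pre-commit run --all-files    # one-off run of black over the whole repo
```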
README.md (59 lines changed)

@@ -21,7 +21,7 @@ Updated for release v1.1.3
 ### Installation

 ```
-pip install python-jobspy
+pip install -U python-jobspy
 ```

 _Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_

@@ -37,8 +37,9 @@ jobs = scrape_jobs(
     search_term="software engineer",
     location="Dallas, TX",
     results_wanted=20,
-    hours_old=72, # (only linkedin is hour specific, others round up to days old)
-    country_indeed='USA'  # only needed for indeed / glassdoor
+    hours_old=72, # (only Linkedin/Indeed is hour specific, others round up to days old)
+    country_indeed='USA',  # only needed for indeed / glassdoor
+    # linkedin_fetch_description=True # get full description and direct job url for linkedin (slower)
 )
 print(f"Found {len(jobs)} jobs")
 print(jobs.head())

@@ -48,7 +49,7 @@ jobs.to_csv("jobs.csv", quoting=csv.QUOTE_NONNUMERIC, escapechar="\\", index=False
 ### Output

 ```
-SITE TITLE COMPANY_NAME CITY STATE JOB_TYPE INTERVAL MIN_AMOUNT MAX_AMOUNT JOB_URL DESCRIPTION
+SITE TITLE COMPANY CITY STATE JOB_TYPE INTERVAL MIN_AMOUNT MAX_AMOUNT JOB_URL DESCRIPTION
 indeed Software Engineer AMERICAN SYSTEMS Arlington VA None yearly 200000 150000 https://www.indeed.com/viewjob?jk=5e409e577046... THIS POSITION COMES WITH A 10K SIGNING BONUS!...
 indeed Senior Software Engineer TherapyNotes.com Philadelphia PA fulltime yearly 135000 110000 https://www.indeed.com/viewjob?jk=da39574a40cb... About Us TherapyNotes is the national leader i...
 linkedin Software Engineer - Early Career Lockheed Martin Sunnyvale CA fulltime yearly None None https://www.linkedin.com/jobs/view/3693012711 Description:By bringing together people that u...
@@ -60,23 +61,24 @@ zip_recruiter Software Developer TEKsystems Phoenix
 ### Parameters for `scrape_jobs()`

 ```plaintext
-Required
-├── site_type (List[enum]): linkedin, zip_recruiter, indeed, glassdoor
-└── search_term (str)
 Optional
-├── location (int)
-├── distance (int): in miles
-├── job_type (enum): fulltime, parttime, internship, contract
+├── site_name (list|str): linkedin, zip_recruiter, indeed, glassdoor (default is all four)
+├── search_term (str)
+├── location (str)
+├── distance (int): in miles, default 50
+├── job_type (str): fulltime, parttime, internship, contract
 ├── proxy (str): in format 'http://user:pass@host:port'
 ├── is_remote (bool)
-├── linkedin_fetch_description (bool): fetches full description for LinkedIn (slower)
-├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
-├── easy_apply (bool): filters for jobs that are hosted on the job board site
-├── linkedin_company_ids (list[int): searches for linkedin jobs with specific company ids
-├── description_format (enum): markdown, html (format type of the job descriptions)
-├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
-├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
-├── hours_old (int): filters jobs by the number of hours since the job was posted (all but LinkedIn rounds up to next day)
+├── results_wanted (int): number of job results to retrieve for each site specified in 'site_name'
+├── easy_apply (bool): filters for jobs that are hosted on the job board site (LinkedIn & Indeed do not allow pairing this with hours_old)
+├── linkedin_fetch_description (bool): fetches full description and direct job url for LinkedIn (slower)
+├── linkedin_company_ids (list[int]): searches for linkedin jobs with specific company ids
+├── description_format (str): markdown, html (Format type of the job descriptions. Default is markdown.)
+├── country_indeed (str): filters the country on Indeed (see below for correct spelling)
+├── offset (int): starts the search from an offset (e.g. 25 will start the search from the 25th result)
+├── hours_old (int): filters jobs by the number of hours since the job was posted (ZipRecruiter and Glassdoor round up to next day. If you use this on Indeed, it will not filter by job_type/is_remote/easy_apply)
+├── verbose (int) {0, 1, 2}: Controls the verbosity of the runtime printouts (0 prints only errors, 1 is errors+warnings, 2 is all logs. Default is 2.)
+├── hyperlinks (bool): Whether to turn `job_url`s into hyperlinks. Default is false.
 ```
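Taken together, a call exercising several of the newer parameters might look like this (a sketch; every parameter name comes from the list above, and the values are illustrative):

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed", "linkedin"],
    search_term="software engineer",
    location="Dallas, TX",
    distance=50,                  # miles (the new default)
    results_wanted=20,
    hours_old=72,                 # LinkedIn/Indeed won't pair this with easy_apply
    country_indeed="USA",
    description_format="markdown",
    verbose=1,                    # errors + warnings only
)
print(jobs[["site", "title", "company"]].head())
```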

 ### JobPost Schema

@@ -100,15 +102,26 @@ JobPost
 │ └── currency (enum)
 └── date_posted (date)
 └── emails (str)
 └── num_urgent_words (int)
 └── is_remote (bool)
+
+Indeed specific
+├── company_country (str)
+└── company_addresses (str)
+└── company_industry (str)
+└── company_employees_label (str)
+└── company_revenue_label (str)
+└── company_description (str)
+└── ceo_name (str)
+└── ceo_photo_url (str)
+└── logo_photo_url (str)
+└── banner_photo_url (str)
 ```
 ## Supported Countries for Job Searching

 ### **LinkedIn**

-LinkedIn searches globally & uses only the `location` parameter. You can only fetch 1000 jobs max from the LinkedIn endpoint we're using
+LinkedIn searches globally & uses only the `location` parameter.

 ### **ZipRecruiter**

@@ -141,7 +154,11 @@ You can specify the following countries when searching on Indeed (use the exact
 | Venezuela | Vietnam* | | |


-Glassdoor can only fetch 900 jobs from the endpoint we're using on a given search.
 ## Notes
 * Indeed is the best scraper currently with no rate limiting.
+* All the job board endpoints are capped at around 1000 jobs on a given search.
+* LinkedIn is the most restrictive and usually rate limits around the 10th page.

 ## Frequently Asked Questions

---
@@ -32,17 +32,18 @@ while len(all_jobs) < results_wanted:
         search_term="software engineer",
         # New York, NY
         # Dallas, TX

         # Los Angeles, CA
         location="Los Angeles, CA",
-        results_wanted=min(results_in_each_iteration, results_wanted - len(all_jobs)),
+        results_wanted=min(
+            results_in_each_iteration, results_wanted - len(all_jobs)
+        ),
         country_indeed="USA",
         offset=offset,
         # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
     )

     # Add the scraped jobs to the list
-    all_jobs.extend(jobs.to_dict('records'))
+    all_jobs.extend(jobs.to_dict("records"))

     # Increment the offset for the next page of results
     offset += results_in_each_iteration
poetry.lock (generated, 2184 lines changed)

File diff suppressed because it is too large.
pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.47"
+version = "1.1.52"
 description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"

@@ -19,12 +19,18 @@ NUMPY = "1.24.2"
 pydantic = "^2.3.0"
 tls-client = "^1.0.1"
 markdownify = "^0.11.6"
+regex = "^2024.4.28"


 [tool.poetry.group.dev.dependencies]
 pytest = "^7.4.1"
 jupyter = "^1.0.0"
+black = "*"
+pre-commit = "*"

 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
+
+[tool.black]
+line-length = 88
@@ -1,8 +1,11 @@
+from __future__ import annotations
+
 import pandas as pd
 from typing import Tuple
 from concurrent.futures import ThreadPoolExecutor, as_completed

 from .jobs import JobType, Location
+from .scrapers.utils import logger, set_logger_level
 from .scrapers.indeed import IndeedScraper
 from .scrapers.ziprecruiter import ZipRecruiterScraper
 from .scrapers.glassdoor import GlassdoorScraper

@@ -20,7 +23,7 @@ def scrape_jobs(
     site_name: str | list[str] | Site | list[Site] | None = None,
     search_term: str | None = None,
     location: str | None = None,
-    distance: int | None = None,
+    distance: int | None = 50,
     is_remote: bool = False,
     job_type: str | None = None,
     easy_apply: bool | None = None,

@@ -33,11 +36,12 @@ def scrape_jobs(
     linkedin_company_ids: list[int] | None = None,
     offset: int | None = 0,
     hours_old: int = None,
+    verbose: int = 2,
     **kwargs,
 ) -> pd.DataFrame:
     """
     Simultaneously scrapes job data from multiple job sites.
-    :return: results_wanted: pandas dataframe containing job data
+    :return: pandas dataframe containing job data
     """
     SCRAPER_MAPPING = {
         Site.LINKEDIN: LinkedInScraper,

@@ -45,6 +49,7 @@ def scrape_jobs(
         Site.ZIP_RECRUITER: ZipRecruiterScraper,
         Site.GLASSDOOR: GlassdoorScraper,
     }
+    set_logger_level(verbose)

     def map_str_to_site(site_name: str) -> Site:
         return Site[site_name.upper()]

@@ -69,6 +74,7 @@ def scrape_jobs(
             for site in site_name
         ]
         return site_types
+
     country_enum = Country.from_string(country_indeed)

     scraper_input = ScraperInput(

@@ -85,13 +91,16 @@ def scrape_jobs(
         results_wanted=results_wanted,
         linkedin_company_ids=linkedin_company_ids,
         offset=offset,
-        hours_old=hours_old
+        hours_old=hours_old,
     )

     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
         scraper_class = SCRAPER_MAPPING[site]
         scraper = scraper_class(proxy=proxy)
         scraped_data: JobResponse = scraper.scrape(scraper_input)
+        cap_name = site.value.capitalize()
+        site_name = "ZipRecruiter" if cap_name == "Zip_recruiter" else cap_name
+        logger.info(f"{site_name} finished scraping")
         return site.value, scraped_data

     site_to_jobs_dict = {}

@@ -114,9 +123,8 @@ def scrape_jobs(
     for site, job_response in site_to_jobs_dict.items():
         for job in job_response.jobs:
             job_data = job.dict()
-            job_data[
-                "job_url_hyper"
-            ] = f'<a href="{job_data["job_url"]}">{job_data["job_url"]}</a>'
+            job_url = job_data["job_url"]
+            job_data["job_url_hyper"] = f'<a href="{job_url}">{job_url}</a>'
             job_data["site"] = site
             job_data["company"] = job_data["company_name"]
             job_data["job_type"] = (

@@ -153,18 +161,18 @@ def scrape_jobs(
     if jobs_dfs:
         # Step 1: Filter out all-NA columns from each DataFrame before concatenation
-        filtered_dfs = [df.dropna(axis=1, how='all') for df in jobs_dfs]
+        filtered_dfs = [df.dropna(axis=1, how="all") for df in jobs_dfs]

         # Step 2: Concatenate the filtered DataFrames
         jobs_df = pd.concat(filtered_dfs, ignore_index=True)

         # Desired column order
         desired_order = [
-            "job_url_hyper" if hyperlinks else "job_url",
             "site",
+            "job_url_hyper" if hyperlinks else "job_url",
+            "job_url_direct",
             "title",
             "company",
             "company_url",
             "location",
             "job_type",
             "date_posted",

@@ -173,10 +181,19 @@ def scrape_jobs(
             "max_amount",
             "currency",
             "is_remote",
             "num_urgent_words",
             "benefits",
             "emails",
             "description",
-            "company_url",
+            "company_url_direct",
+            "company_addresses",
+            "company_industry",
+            "company_num_employees",
+            "company_revenue",
+            "company_description",
+            "logo_photo_url",
+            "banner_photo_url",
+            "ceo_name",
+            "ceo_photo_url",
         ]

         # Step 3: Ensure all desired columns are present, adding missing ones as empty

@@ -188,6 +205,6 @@ def scrape_jobs(
         jobs_df = jobs_df[desired_order]

         # Step 4: Sort the DataFrame as required
-        return jobs_df.sort_values(by=['site', 'date_posted'], ascending=[True, False])
+        return jobs_df.sort_values(by=["site", "date_posted"], ascending=[True, False])
     else:
         return pd.DataFrame()
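One practical effect of the `job_url_hyper` column built above: with `hyperlinks=True`, the returned DataFrame can be rendered with clickable links. A sketch using standard pandas/IPython calls (the column names come from `desired_order` in this diff):

```python
from IPython.display import HTML
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name="indeed",
    search_term="software engineer",
    country_indeed="USA",
    results_wanted=5,
    hyperlinks=True,
)
# job_url_hyper holds '<a href="...">...</a>' strings; escape=False keeps them live
HTML(jobs[["site", "title", "job_url_hyper"]].to_html(escape=False))
```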
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from typing import Optional
 from datetime import date
 from enum import Enum

@@ -57,7 +59,7 @@ class JobType(Enum):
 class Country(Enum):
     """
     Gets the subdomain for Indeed and Glassdoor.
-    The second item in the tuple is the subdomain for Indeed
+    The second item in the tuple is the subdomain (and API country code if there's a ':' separator) for Indeed
     The third item in the tuple is the subdomain (and tld if there's a ':' separator) for Glassdoor
     """

@@ -118,8 +120,8 @@ class Country(Enum):
     TURKEY = ("turkey", "tr")
     UKRAINE = ("ukraine", "ua")
     UNITEDARABEMIRATES = ("united arab emirates", "ae")
-    UK = ("uk,united kingdom", "uk", "co.uk")
-    USA = ("usa,us,united states", "www", "com")
+    UK = ("uk,united kingdom", "uk:gb", "co.uk")
+    USA = ("usa,us,united states", "www:us", "com")
     URUGUAY = ("uruguay", "uy")
     VENEZUELA = ("venezuela", "ve")
     VIETNAM = ("vietnam", "vn", "com")
@@ -132,7 +134,10 @@ class Country(Enum):

     @property
     def indeed_domain_value(self):
-        return self.value[1]
+        subdomain, _, api_country_code = self.value[1].partition(":")
+        if subdomain and api_country_code:
+            return subdomain, api_country_code.upper()
+        return self.value[1], self.value[1].upper()

     @property
     def glassdoor_domain_value(self):
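For intuition: with the `:`-separated enum values above, the property now always yields a `(subdomain, api_country_code)` pair. A minimal sketch (the import path is assumed from the package layout):

```python
from jobspy.jobs import Country  # assumed module path

# Entries with a ":" carry an explicit API country code...
print(Country.UK.indeed_domain_value)      # ("uk", "GB")
print(Country.USA.indeed_domain_value)     # ("www", "US")
# ...entries without one fall back to upper-casing the subdomain.
print(Country.TURKEY.indeed_domain_value)  # ("tr", "TR")
```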
@@ -153,7 +158,7 @@ class Country(Enum):
         """Convert a string to the corresponding Country enum."""
         country_str = country_str.strip().lower()
         for country in cls:
-            country_names = country.value[0].split(',')
+            country_names = country.value[0].split(",")
             if country_str in country_names:
                 return country
         valid_countries = [country.value for country in cls]

@@ -163,7 +168,7 @@ class Country(Enum):


 class Location(BaseModel):
-    country: Country | None = None
+    country: Country | str | None = None
     city: Optional[str] = None
     state: Optional[str] = None

@@ -173,7 +178,12 @@ class Location(BaseModel):
         location_parts.append(self.city)
         if self.state:
             location_parts.append(self.state)
-        if self.country and self.country not in (Country.US_CANADA, Country.WORLDWIDE):
+        if isinstance(self.country, str):
+            location_parts.append(self.country)
+        elif self.country and self.country not in (
+            Country.US_CANADA,
+            Country.WORLDWIDE,
+        ):
             country_name = self.country.value[0]
             if "," in country_name:
                 country_name = country_name.split(",")[0]
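Since `country` can now be a bare string (Indeed's API returns a `countryCode` rather than an enum member), the new `isinstance` branch appends it verbatim. A small sketch, assuming the surrounding method is the repo's location-display helper (`display_location` here is an assumed name, and joining the parts with ", " is likewise assumed):

```python
from jobspy.jobs import Location  # assumed module path

# String country: appended as-is by the new isinstance branch
print(Location(city="Berlin", state="BE", country="DE").display_location())
# -> "Berlin, BE, DE"
```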
@@ -217,21 +227,31 @@ class DescriptionFormat(Enum):

 class JobPost(BaseModel):
     title: str
-    company_name: str
+    company_name: str | None
     job_url: str
+    job_url_direct: str | None = None
     location: Optional[Location]

     description: str | None = None
     company_url: str | None = None
+    company_url_direct: str | None = None

     job_type: list[JobType] | None = None
     compensation: Compensation | None = None
     date_posted: date | None = None
     benefits: str | None = None
     emails: list[str] | None = None
     num_urgent_words: int | None = None
     is_remote: bool | None = None
-    # company_industry: str | None = None
+
+    # indeed specific
+    company_addresses: str | None = None
+    company_industry: str | None = None
+    company_num_employees: str | None = None
+    company_revenue: str | None = None
+    company_description: str | None = None
+    ceo_name: str | None = None
+    ceo_photo_url: str | None = None
+    logo_photo_url: str | None = None
+    banner_photo_url: str | None = None


 class JobResponse(BaseModel):
@@ -1,10 +1,14 @@
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+
 from ..jobs import (
     Enum,
     BaseModel,
     JobType,
     JobResponse,
     Country,
-    DescriptionFormat
+    DescriptionFormat,
 )

@@ -34,9 +38,10 @@ class ScraperInput(BaseModel):
     hours_old: int | None = None


-class Scraper:
+class Scraper(ABC):
     def __init__(self, site: Site, proxy: list[str] | None = None):
         self.site = site
         self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)

+    @abstractmethod
     def scrape(self, scraper_input: ScraperInput) -> JobResponse: ...
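Making `Scraper` an ABC means a subclass that forgets `scrape()` now fails at instantiation instead of silently inheriting the stub. A hypothetical minimal subclass (class name and import paths are illustrative, not from the diff):

```python
from jobspy.scrapers import Scraper, ScraperInput, Site  # assumed paths
from jobspy.jobs import JobResponse

class NullScraper(Scraper):
    """Hypothetical scraper returning no jobs; shows the required contract."""

    def __init__(self, proxy: str | None = None):
        super().__init__(Site(Site.INDEED), proxy=proxy)

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        return JobResponse(jobs=[])

# Without the scrape() override, NullScraper() would raise TypeError.
```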
@@ -4,21 +4,23 @@ jobspy.scrapers.glassdoor

 This module contains routines to scrape Glassdoor.
 """
-import json
-import re

+from __future__ import annotations
+
+import re
+import json
 import requests
-from typing import Optional
+from typing import Optional, Tuple
 from datetime import datetime, timedelta
 from concurrent.futures import ThreadPoolExecutor, as_completed
-from ..utils import count_urgent_words, extract_emails_from_text

 from .. import Scraper, ScraperInput, Site
+from ..utils import extract_emails_from_text
 from ..exceptions import GlassdoorException
 from ..utils import (
     create_session,
     markdown_converter,
-    logger
+    logger,
 )

@@ -27,7 +29,7 @@ from ...jobs import (
     Location,
     JobResponse,
     JobType,
-    DescriptionFormat
+    DescriptionFormat,
 )

@@ -59,25 +61,22 @@ class GlassdoorScraper(Scraper):
         self.session = create_session(self.proxy, is_tls=True, has_retry=True)
         token = self._get_csrf_token()
-        self.headers['gd-csrf-token'] = token if token else self.fallback_token
+        self.headers["gd-csrf-token"] = token if token else self.fallback_token

         location_id, location_type = self._get_location(
             scraper_input.location, scraper_input.is_remote
         )
         if location_type is None:
-            logger.error('Glassdoor: location not parsed')
+            logger.error("Glassdoor: location not parsed")
             return JobResponse(jobs=[])
         all_jobs: list[JobPost] = []
         cursor = None

-        for page in range(
-            1 + (scraper_input.offset // self.jobs_per_page),
-            min(
-                (scraper_input.results_wanted // self.jobs_per_page) + 2,
-                self.max_pages + 1,
-            ),
-        ):
-            logger.info(f'Glassdoor search page: {page}')
+        range_start = 1 + (scraper_input.offset // self.jobs_per_page)
+        tot_pages = (scraper_input.results_wanted // self.jobs_per_page) + 2
+        range_end = min(tot_pages, self.max_pages + 1)
+        for page in range(range_start, range_end):
+            logger.info(f"Glassdoor search page: {page}")
             try:
                 jobs, cursor = self._fetch_jobs_page(
                     scraper_input, location_id, location_type, page, cursor

@@ -87,7 +86,7 @@ class GlassdoorScraper(Scraper):
                 all_jobs = all_jobs[: scraper_input.results_wanted]
                 break
             except Exception as e:
-                logger.error(f'Glassdoor: {str(e)}')
+                logger.error(f"Glassdoor: {str(e)}")
                 break
         return JobResponse(jobs=all_jobs)

@@ -98,39 +97,48 @@ class GlassdoorScraper(Scraper):
         location_type: str,
         page_num: int,
         cursor: str | None,
-    ) -> (list[JobPost], str | None):
+    ) -> Tuple[list[JobPost], str | None]:
         """
         Scrapes a page of Glassdoor for jobs with scraper_input criteria
         """
         jobs = []
         self.scraper_input = scraper_input
         try:
-            payload = self._add_payload(
-                location_id, location_type, page_num, cursor
-            )
+            payload = self._add_payload(location_id, location_type, page_num, cursor)
             response = self.session.post(
-                f"{self.base_url}/graph", headers=self.headers, timeout_seconds=15, data=payload
+                f"{self.base_url}/graph",
+                headers=self.headers,
+                timeout_seconds=15,
+                data=payload,
             )
             if response.status_code != 200:
-                raise GlassdoorException(f"bad response status code: {response.status_code}")
+                exc_msg = f"bad response status code: {response.status_code}"
+                raise GlassdoorException(exc_msg)
             res_json = response.json()[0]
             if "errors" in res_json:
                 raise ValueError("Error encountered in API response")
-        except (requests.exceptions.ReadTimeout, GlassdoorException, ValueError, Exception) as e:
-            logger.error(f'Glassdoor: {str(e)}')
+        except (
+            requests.exceptions.ReadTimeout,
+            GlassdoorException,
+            ValueError,
+            Exception,
+        ) as e:
+            logger.error(f"Glassdoor: {str(e)}")
             return jobs, None

        jobs_data = res_json["data"]["jobListings"]["jobListings"]

         with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
-            future_to_job_data = {executor.submit(self._process_job, job): job for job in jobs_data}
+            future_to_job_data = {
+                executor.submit(self._process_job, job): job for job in jobs_data
+            }
             for future in as_completed(future_to_job_data):
                 try:
                     job_post = future.result()
                     if job_post:
                         jobs.append(job_post)
                 except Exception as exc:
-                    raise GlassdoorException(f'Glassdoor generated an exception: {exc}')
+                    raise GlassdoorException(f"Glassdoor generated an exception: {exc}")

         return jobs, self.get_cursor_for_page(
             res_json["data"]["jobListings"]["paginationCursors"], page_num + 1

@@ -140,7 +148,9 @@ class GlassdoorScraper(Scraper):
         """
         Fetches csrf token needed for API by visiting a generic page
         """
-        res = self.session.get(f'{self.base_url}/Job/computer-science-jobs.htm', headers=self.headers)
+        res = self.session.get(
+            f"{self.base_url}/Job/computer-science-jobs.htm", headers=self.headers
+        )
         pattern = r'"token":\s*"([^"]+)"'
         matches = re.findall(pattern, res.text)
         token = None

@@ -153,19 +163,20 @@ class GlassdoorScraper(Scraper):
         Processes a single job and fetches its description.
         """
         job_id = job_data["jobview"]["job"]["listingId"]
-        job_url = f'{self.base_url}job-listing/j?jl={job_id}'
+        job_url = f"{self.base_url}job-listing/j?jl={job_id}"
         if job_url in self.seen_urls:
             return None
         self.seen_urls.add(job_url)
         job = job_data["jobview"]
         title = job["job"]["jobTitleText"]
         company_name = job["header"]["employerNameFromSearch"]
-        company_id = job_data['jobview']['header']['employer']['id']
+        company_id = job_data["jobview"]["header"]["employer"]["id"]
         location_name = job["header"].get("locationName", "")
         location_type = job["header"].get("locationType", "")
         age_in_days = job["header"].get("ageInDays")
         is_remote, location = False, None
-        date_posted = (datetime.now() - timedelta(days=age_in_days)).date() if age_in_days is not None else None
+        date_diff = (datetime.now() - timedelta(days=age_in_days)).date()
+        date_posted = date_diff if age_in_days is not None else None

         if location_type == "S":
             is_remote = True

@@ -177,9 +188,10 @@ class GlassdoorScraper(Scraper):
             description = self._fetch_job_description(job_id)
         except:
             description = None
+        company_url = f"{self.base_url}Overview/W-EI_IE{company_id}.htm"
         return JobPost(
             title=title,
-            company_url=f"{self.base_url}Overview/W-EI_IE{company_id}.htm" if company_id else None,
+            company_url=company_url if company_id else None,
             company_name=company_name,
             date_posted=date_posted,
             job_url=job_url,

@@ -188,7 +200,6 @@ class GlassdoorScraper(Scraper):
             is_remote=is_remote,
             description=description,
             emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
         )

     def _fetch_job_description(self, job_id):

@@ -202,7 +213,7 @@ class GlassdoorScraper(Scraper):
                 "variables": {
                     "jl": job_id,
                     "queryString": "q",
-                    "pageTypeEnum": "SERP"
+                    "pageTypeEnum": "SERP",
                 },
                 "query": """
                 query JobDetailQuery($jl: Long!, $queryString: String, $pageTypeEnum: PageTypeEnum) {

@@ -217,15 +228,17 @@ class GlassdoorScraper(Scraper):
                         __typename
                     }
                 }
-                """
+                """,
             }
         ]
         res = requests.post(url, json=body, headers=self.headers)
         if res.status_code != 200:
             return None
         data = res.json()[0]
-        desc = data['data']['jobview']['job']['description']
-        return markdown_converter(desc) if self.scraper_input.description_format == DescriptionFormat.MARKDOWN else desc
+        desc = data["data"]["jobview"]["job"]["description"]
+        if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
+            desc = markdown_converter(desc)
+        return desc

     def _get_location(self, location: str, is_remote: bool) -> (int, str):
         if not location or is_remote:

@@ -235,10 +248,13 @@ class GlassdoorScraper(Scraper):
         res = self.session.get(url, headers=self.headers)
         if res.status_code != 200:
             if res.status_code == 429:
-                logger.error(f'429 Response - Blocked by Glassdoor for too many requests')
+                err = f"429 Response - Blocked by Glassdoor for too many requests"
+                logger.error(err)
                 return None, None
             else:
-                logger.error(f'Glassdoor response status code {res.status_code}')
+                err = f"Glassdoor response status code {res.status_code}"
+                err += f" - {res.text}"
+                logger.error(f"Glassdoor response status code {res.status_code}")
                 return None, None
         items = res.json()

@@ -249,7 +265,7 @@ class GlassdoorScraper(Scraper):
             location_type = "CITY"
         elif location_type == "S":
             location_type = "STATE"
-        elif location_type == 'N':
+        elif location_type == "N":
             location_type = "COUNTRY"
         return int(items[0]["locationId"]), location_type

@@ -260,7 +276,9 @@ class GlassdoorScraper(Scraper):
         page_num: int,
         cursor: str | None = None,
     ) -> str:
-        fromage = max(self.scraper_input.hours_old // 24, 1) if self.scraper_input.hours_old else None
+        fromage = None
+        if self.scraper_input.hours_old:
+            fromage = max(self.scraper_input.hours_old // 24, 1)
         filter_params = []
         if self.scraper_input.easy_apply:
             filter_params.append({"filterKey": "applicationType", "values": "1"})

@@ -279,9 +297,9 @@ class GlassdoorScraper(Scraper):
                 "pageNumber": page_num,
                 "pageCursor": cursor,
                 "fromage": fromage,
-                "sort": "date"
+                "sort": "date",
             },
-            "query": self.query_template
+            "query": self.query_template,
         }
         if self.scraper_input.job_type:
             payload["variables"]["filterParams"].append(
@@ -4,24 +4,22 @@ jobspy.scrapers.indeed

 This module contains routines to scrape Indeed.
 """
-import re
-import math
-import json
-import requests
-from typing import Any
-from datetime import datetime
-
-from bs4 import BeautifulSoup
-from bs4.element import Tag
+from __future__ import annotations
+
+import math
+from typing import Tuple
+from datetime import datetime
+from concurrent.futures import ThreadPoolExecutor, Future
+
+import requests
+
+from .. import Scraper, ScraperInput, Site
 from ..utils import (
-    count_urgent_words,
     extract_emails_from_text,
     create_session,
     get_enum_from_job_type,
     markdown_converter,
-    logger
+    logger,
 )

@@ -30,20 +28,21 @@ from ...jobs import (
     Location,
     JobResponse,
     JobType,
-    DescriptionFormat
+    DescriptionFormat,
 )
-from .. import Scraper, ScraperInput, Site


 class IndeedScraper(Scraper):
     def __init__(self, proxy: str | None = None):
         """
-        Initializes IndeedScraper with the Indeed job search url
+        Initializes IndeedScraper with the Indeed API url
         """
         self.scraper_input = None
-        self.jobs_per_page = 25
+        self.jobs_per_page = 100
         self.num_workers = 10
         self.seen_urls = set()
+        self.headers = None
+        self.api_country_code = None
+        self.base_url = None
+        self.api_url = "https://apis.indeed.com/graphql"
         site = Site(Site.INDEED)
@@ -56,284 +55,282 @@ class IndeedScraper(Scraper):
         :return: job_response
         """
         self.scraper_input = scraper_input
-        job_list = self._scrape_page()
-        pages_processed = 1
+        domain, self.api_country_code = self.scraper_input.country.indeed_domain_value
+        self.base_url = f"https://{domain}.indeed.com"
+        self.headers = self.api_headers.copy()
+        self.headers["indeed-co"] = self.scraper_input.country.indeed_domain_value
+        job_list = []
+        page = 1

+        cursor = None
+        offset_pages = math.ceil(self.scraper_input.offset / 100)
+        for _ in range(offset_pages):
+            logger.info(f"Indeed skipping search page: {page}")
+            __, cursor = self._scrape_page(cursor)
+            if not __:
+                logger.info(f"Indeed found no jobs on page: {page}")
+                break

         while len(self.seen_urls) < scraper_input.results_wanted:
-            pages_to_process = math.ceil((scraper_input.results_wanted - len(self.seen_urls)) / self.jobs_per_page)
-            new_jobs = False
-            with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
-                futures: list[Future] = [
-                    executor.submit(self._scrape_page, page + pages_processed)
-                    for page in range(pages_to_process)
-                ]
-
-                for future in futures:
-                    jobs = future.result()
-                    if jobs:
-                        job_list += jobs
-                        new_jobs = True
-                        if len(self.seen_urls) >= scraper_input.results_wanted:
-                            break
-
-            pages_processed += pages_to_process
-            if not new_jobs:
-                break
-
-        if len(self.seen_urls) > scraper_input.results_wanted:
-            job_list = job_list[:scraper_input.results_wanted]
-
-        return JobResponse(jobs=job_list)
+            logger.info(f"Indeed search page: {page}")
+            jobs, cursor = self._scrape_page(cursor)
+            if not jobs:
+                logger.info(f"Indeed found no jobs on page: {page}")
+                break
+            job_list += jobs
+            page += 1
+        return JobResponse(jobs=job_list[: scraper_input.results_wanted])

-    def _scrape_page(self, page: int=0) -> list[JobPost]:
+    def _scrape_page(self, cursor: str | None) -> Tuple[list[JobPost], str | None]:
         """
         Scrapes a page of Indeed for jobs with scraper_input criteria
-        :param page:
-        :return: jobs found on page, total number of jobs found for search
+        :param cursor:
+        :return: jobs found on page, next page cursor
         """
-        logger.info(f'Indeed search page: {page + 1}')
-        job_list = []
-        domain = self.scraper_input.country.indeed_domain_value
-        self.base_url = f"https://{domain}.indeed.com"
-
-        try:
-            session = create_session(self.proxy)
-            response = session.get(
-                f"{self.base_url}/m/jobs",
-                headers=self.headers,
-                params=self._add_params(page),
-            )
-            if response.status_code not in range(200, 400):
-                if response.status_code == 429:
-                    logger.error(f'429 Response - Blocked by Indeed for too many requests')
-                else:
-                    logger.error(f'Indeed response status code {response.status_code}')
-                return job_list
-        except Exception as e:
-            if "Proxy responded with" in str(e):
-                logger.error(f'Indeed: Bad proxy')
-            else:
-                logger.error(f'Indeed: {str(e)}')
-            return job_list
-
-        soup = BeautifulSoup(response.content, "html.parser")
-        if "did not match any jobs" in response.text:
-            return job_list
-
-        jobs = IndeedScraper._parse_jobs(soup)
-        if not jobs:
-            return []
-        if (
-            not jobs.get("metaData", {})
-            .get("mosaicProviderJobCardsModel", {})
-            .get("results")
-        ):
-            logger.error("Indeed - No jobs found.")
-            return []
-
-        jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
-        job_keys = [job['jobkey'] for job in jobs]
-        jobs_detailed = self._get_job_details(job_keys)
+        jobs = []
+        new_cursor = None
+        filters = self._build_filters()
+        search_term = self.scraper_input.search_term.replace('"', '\\"') if self.scraper_input.search_term else ""
+        query = self.job_search_query.format(
+            what=(
+                f'what: "{search_term}"'
+                if search_term
+                else ""
+            ),
+            location=(
+                f'location: {{where: "{self.scraper_input.location}", radius: {self.scraper_input.distance}, radiusUnit: MILES}}'
+                if self.scraper_input.location
+                else ""
+            ),
+            dateOnIndeed=self.scraper_input.hours_old,
+            cursor=f'cursor: "{cursor}"' if cursor else "",
+            filters=filters,
+        )
+        payload = {
+            "query": query,
+        }
+        api_headers = self.api_headers.copy()
+        api_headers["indeed-co"] = self.api_country_code
+        response = requests.post(
+            self.api_url,
+            headers=api_headers,
+            json=payload,
+            proxies=self.proxy,
+            timeout=10,
+        )
+        if response.status_code != 200:
+            logger.info(
+                f"Indeed responded with status code: {response.status_code} (submit GitHub issue if this appears to be a bug)"
+            )
+            return jobs, new_cursor
+        data = response.json()
+        jobs = data["data"]["jobSearch"]["results"]
+        new_cursor = data["data"]["jobSearch"]["pageInfo"]["nextCursor"]

         with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
             job_results: list[Future] = [
-                executor.submit(self._process_job, job, job_detailed['job']) for job, job_detailed in zip(jobs, jobs_detailed)
+                executor.submit(self._process_job, job["job"]) for job in jobs
             ]
             job_list = [result.result() for result in job_results if result.result()]
-
-        return job_list
+        return job_list, new_cursor
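Concretely, after `.format(...)` the request body posted to `apis.indeed.com/graphql` looks roughly like this: an illustrative fill-in of the `job_search_query` template defined further below, with most result fields elided:

```
query GetJobData {
    jobSearch(
      what: "software engineer"
      location: {where: "Dallas, TX", radius: 50, radiusUnit: MILES}
      includeSponsoredResults: NONE
      limit: 100
      sort: DATE
    ) {
      pageInfo { nextCursor }
      results { trackingKey job { key title datePublished } }
    }
}
```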
-    def _process_job(self, job: dict, job_detailed: dict) -> JobPost | None:
-        job_url = f'{self.base_url}/m/jobs/viewjob?jk={job["jobkey"]}'
-        job_url_client = f'{self.base_url}/viewjob?jk={job["jobkey"]}'
+    def _build_filters(self):
+        """
+        Builds the filters dict for job type/is_remote. If hours_old is provided, composite filter for job_type/is_remote is not possible.
+        IndeedApply: filters: { keyword: { field: "indeedApplyScope", keys: ["DESKTOP"] } }
+        """
+        filters_str = ""
+        if self.scraper_input.hours_old:
+            filters_str = """
+            filters: {{
+                date: {{
+                  field: "dateOnIndeed",
+                  start: "{start}h"
+                }}
+            }}
+            """.format(
+                start=self.scraper_input.hours_old
+            )
+        elif self.scraper_input.easy_apply:
+            filters_str = """
+            filters: {
+                keyword: {
+                  field: "indeedApplyScope",
+                  keys: ["DESKTOP"]
+                }
+            }
+            """
+        elif self.scraper_input.job_type or self.scraper_input.is_remote:
+            job_type_key_mapping = {
+                JobType.FULL_TIME: "CF3CP",
+                JobType.PART_TIME: "75GKK",
+                JobType.CONTRACT: "NJXCK",
+                JobType.INTERNSHIP: "VDTG7",
+            }
+
+            keys = []
+            if self.scraper_input.job_type:
+                key = job_type_key_mapping[self.scraper_input.job_type]
+                keys.append(key)
+
+            if self.scraper_input.is_remote:
+                keys.append("DSQF7")
+
+            if keys:
+                keys_str = '", "'.join(keys)  # Prepare your keys string
+                filters_str = f"""
+                filters: {{
+                  composite: {{
+                    filters: [{{
+                      keyword: {{
+                        field: "attributes",
+                        keys: ["{keys_str}"]
+                      }}
+                    }}]
+                  }}
+                }}
+                """
+        return filters_str
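For example, a hypothetical search with `job_type=JobType.FULL_TIME` and `is_remote=True` (keys `CF3CP` and `DSQF7` from the mapping above) would make `_build_filters()` return roughly:

```
filters: {
  composite: {
    filters: [{
      keyword: {
        field: "attributes",
        keys: ["CF3CP", "DSQF7"]
      }
    }]
  }
}
```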
+    def _process_job(self, job: dict) -> JobPost | None:
+        """
+        Parses the job dict into JobPost model
+        :param job: dict to parse
+        :return: JobPost if it's a new job
+        """
+        job_url = f'{self.base_url}/viewjob?jk={job["key"]}'
         if job_url in self.seen_urls:
-            return None
+            return
         self.seen_urls.add(job_url)
-        description = job_detailed['description']['html']
-        description = markdown_converter(description) if self.scraper_input.description_format == DescriptionFormat.MARKDOWN else description
-        job_type = self._get_job_type(job)
-        timestamp_seconds = job["pubDate"] / 1000
-        date_posted = datetime.fromtimestamp(timestamp_seconds)
-        date_posted = date_posted.strftime("%Y-%m-%d")
+        description = job["description"]["html"]
+        if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
+            description = markdown_converter(description)
+
+        job_type = self._get_job_type(job["attributes"])
+        timestamp_seconds = job["datePublished"] / 1000
+        date_posted = datetime.fromtimestamp(timestamp_seconds).strftime("%Y-%m-%d")
+        employer = job["employer"].get("dossier") if job["employer"] else None
+        employer_details = employer.get("employerDetails", {}) if employer else {}
+        rel_url = job["employer"]["relativeCompanyPageUrl"] if job["employer"] else None
         return JobPost(
-            title=job["normTitle"],
+            title=job["title"],
             description=description,
-            company_name=job["company"],
-            company_url=f"{self.base_url}{job_detailed['employer']['relativeCompanyPageUrl']}" if job_detailed[
-                'employer'] else None,
+            company_name=job["employer"].get("name") if job.get("employer") else None,
+            company_url=(f"{self.base_url}{rel_url}" if job["employer"] else None),
+            company_url_direct=(
+                employer["links"]["corporateWebsite"] if employer else None
+            ),
             location=Location(
-                city=job.get("jobLocationCity"),
-                state=job.get("jobLocationState"),
-                country=self.scraper_input.country,
+                city=job.get("location", {}).get("city"),
+                state=job.get("location", {}).get("admin1Code"),
+                country=job.get("location", {}).get("countryCode"),
             ),
             job_type=job_type,
-            compensation=self._get_compensation(job, job_detailed),
+            compensation=self._get_compensation(job),
             date_posted=date_posted,
-            job_url=job_url_client,
+            job_url=job_url,
+            job_url_direct=(
+                job["recruit"].get("viewJobUrl") if job.get("recruit") else None
+            ),
             emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
-            is_remote=self._is_job_remote(job, job_detailed, description)
+            is_remote=self._is_job_remote(job, description),
+            company_addresses=(
+                employer_details["addresses"][0]
+                if employer_details.get("addresses")
+                else None
+            ),
+            company_industry=(
+                employer_details["industry"]
+                .replace("Iv1", "")
+                .replace("_", " ")
+                .title()
+                if employer_details.get("industry")
+                else None
+            ),
+            company_num_employees=employer_details.get("employeesLocalizedLabel"),
+            company_revenue=employer_details.get("revenueLocalizedLabel"),
+            company_description=employer_details.get("briefDescription"),
+            ceo_name=employer_details.get("ceoName"),
+            ceo_photo_url=employer_details.get("ceoPhotoUrl"),
+            logo_photo_url=(
+                employer["images"].get("squareLogoUrl")
+                if employer and employer.get("images")
+                else None
+            ),
+            banner_photo_url=(
+                employer["images"].get("headerImageUrl")
+                if employer and employer.get("images")
+                else None
+            ),
         )

-    def _get_job_details(self, job_keys: list[str]) -> dict:
-        """
-        Queries the GraphQL endpoint for detailed job information for the given job keys.
-        """
-        job_keys_gql = '[' + ', '.join(f'"{key}"' for key in job_keys) + ']'
-        payload = dict(self.api_payload)
-        payload["query"] = self.api_payload["query"].format(job_keys_gql=job_keys_gql)
-        response = requests.post(self.api_url, headers=self.api_headers, json=payload, proxies=self.proxy)
-        if response.status_code == 200:
-            return response.json()['data']['jobData']['results']
-        else:
-            return {}
-
-    def _add_params(self, page: int) -> dict[str, str | Any]:
-        fromage = max(self.scraper_input.hours_old // 24, 1) if self.scraper_input.hours_old else None
-        params = {
-            "q": self.scraper_input.search_term,
-            "l": self.scraper_input.location if self.scraper_input.location else self.scraper_input.country.value[0].split(',')[-1],
-            "filter": 0,
-            "start": self.scraper_input.offset + page * 10,
-            "sort": "date",
-            "fromage": fromage,
-        }
-        if self.scraper_input.distance:
-            params["radius"] = self.scraper_input.distance
-
-        sc_values = []
-        if self.scraper_input.is_remote:
-            sc_values.append("attr(DSQF7)")
-        if self.scraper_input.job_type:
-            sc_values.append("jt({})".format(self.scraper_input.job_type.value[0]))
-
-        if sc_values:
-            params["sc"] = "0kf:" + "".join(sc_values) + ";"
-
-        if self.scraper_input.easy_apply:
-            params['iafilter'] = 1
-
-        return params
-
     @staticmethod
-    def _get_job_type(job: dict) -> list[JobType] | None:
+    def _get_job_type(attributes: list) -> list[JobType]:
         """
-        Parses the job to get list of job types
-        :param job:
-        :return:
+        Parses the attributes to get list of job types
+        :param attributes:
+        :return: list of JobType
         """
         job_types: list[JobType] = []
-        for taxonomy in job["taxonomyAttributes"]:
-            if taxonomy["label"] == "job-types":
-                for i in range(len(taxonomy["attributes"])):
-                    label = taxonomy["attributes"][i].get("label")
-                    if label:
-                        job_type_str = label.replace("-", "").replace(" ", "").lower()
-                        job_type = get_enum_from_job_type(job_type_str)
-                        if job_type:
-                            job_types.append(job_type)
+        for attribute in attributes:
+            job_type_str = attribute["label"].replace("-", "").replace(" ", "").lower()
+            job_type = get_enum_from_job_type(job_type_str)
+            if job_type:
+                job_types.append(job_type)
         return job_types

     @staticmethod
-    def _get_compensation(job: dict, job_detailed: dict) -> Compensation:
+    def _get_compensation(job: dict) -> Compensation | None:
         """
-        Parses the job to get
+        Parses the job to get compensation
         :param job:
-        :param job_detailed:
         :return: compensation object
         """
-        comp = job_detailed['compensation']['baseSalary']
-        if comp:
-            interval = IndeedScraper._get_correct_interval(comp['unitOfWork'])
-            if interval:
-                return Compensation(
-                    interval=interval,
-                    min_amount=round(comp['range'].get('min'), 2) if comp['range'].get('min') is not None else None,
-                    max_amount=round(comp['range'].get('max'), 2) if comp['range'].get('max') is not None else None,
-                    currency=job_detailed['compensation']['currencyCode']
-                )
-
-        extracted_salary = job.get("extractedSalary")
-        compensation = None
-        if extracted_salary:
-            salary_snippet = job.get("salarySnippet")
-            currency = salary_snippet.get("currency") if salary_snippet else None
-            interval = (extracted_salary.get("type"),)
-            if isinstance(interval, tuple):
-                interval = interval[0]
-
-            interval = interval.upper()
-            if interval in CompensationInterval.__members__:
-                compensation = Compensation(
-                    interval=CompensationInterval[interval],
-                    min_amount=int(extracted_salary.get("min")),
-                    max_amount=int(extracted_salary.get("max")),
-                    currency=currency,
-                )
-        return compensation
+        comp = job["compensation"]["baseSalary"]
+        if not comp:
+            return None
+        interval = IndeedScraper._get_compensation_interval(comp["unitOfWork"])
+        if not interval:
+            return None
+        min_range = comp["range"].get("min")
+        max_range = comp["range"].get("max")
+        return Compensation(
+            interval=interval,
+            min_amount=round(min_range, 2) if min_range is not None else None,
+            max_amount=round(max_range, 2) if max_range is not None else None,
+            currency=job["compensation"]["currencyCode"],
+        )
     @staticmethod
-    def _parse_jobs(soup: BeautifulSoup) -> dict:
-        """
-        Parses the jobs from the soup object
-        :param soup:
-        :return: jobs
-        """
-
-        def find_mosaic_script() -> Tag | None:
-            script_tags = soup.find_all("script")
-
-            for tag in script_tags:
-                if (
-                    tag.string
-                    and "mosaic.providerData" in tag.string
-                    and "mosaic-provider-jobcards" in tag.string
-                ):
-                    return tag
-            return None
-
-        script_tag = find_mosaic_script()
-        if script_tag:
-            script_str = script_tag.string
-            pattern = r'window.mosaic.providerData\["mosaic-provider-jobcards"\]\s*=\s*({.*?});'
-            p = re.compile(pattern, re.DOTALL)
-            m = p.search(script_str)
-            if m:
-                jobs = json.loads(m.group(1).strip())
-                return jobs
-            else:
-                logger.warning(f'Indeed: Could not find mosaic provider job cards data')
-                return {}
-        else:
-            logger.warning(f"Indeed: Could not parse any jobs on the page")
-            return {}
-
-    @staticmethod
-    def _is_job_remote(job: dict, job_detailed: dict, description: str) -> bool:
-        remote_keywords = ['remote', 'work from home', 'wfh']
+    def _is_job_remote(job: dict, description: str) -> bool:
+        """
+        Searches the description, location, and attributes to check if job is remote
+        """
+        remote_keywords = ["remote", "work from home", "wfh"]
         is_remote_in_attributes = any(
-            any(keyword in attr['label'].lower() for keyword in remote_keywords)
-            for attr in job_detailed['attributes']
+            any(keyword in attr["label"].lower() for keyword in remote_keywords)
+            for attr in job["attributes"]
         )
-        is_remote_in_description = any(keyword in description.lower() for keyword in remote_keywords)
+        is_remote_in_description = any(
+            keyword in description.lower() for keyword in remote_keywords
+        )
         is_remote_in_location = any(
-            keyword in job_detailed['location']['formatted']['long'].lower()
+            keyword in job["location"]["formatted"]["long"].lower()
             for keyword in remote_keywords
         )
-        is_remote_in_taxonomy = any(
-            taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0
-            for taxonomy in job.get("taxonomyAttributes", [])
-        )
-        return is_remote_in_attributes or is_remote_in_description or is_remote_in_location or is_remote_in_taxonomy
+        return (
+            is_remote_in_attributes or is_remote_in_description or is_remote_in_location
+        )

     @staticmethod
-    def _get_correct_interval(interval: str) -> CompensationInterval:
+    def _get_compensation_interval(interval: str) -> CompensationInterval:
         interval_mapping = {
             "DAY": "DAILY",
             "YEAR": "YEARLY",
             "HOUR": "HOURLY",
             "WEEK": "WEEKLY",
-            "MONTH": "MONTHLY"
+            "MONTH": "MONTHLY",
         }
         mapped_interval = interval_mapping.get(interval.upper(), None)
         if mapped_interval and mapped_interval in CompensationInterval.__members__:

@@ -341,43 +338,44 @@ class IndeedScraper(Scraper):
         else:
             raise ValueError(f"Unsupported interval: {interval}")

-    headers = {
-        'Host': 'www.indeed.com',
-        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
-        'sec-fetch-site': 'same-origin',
-        'sec-fetch-dest': 'document',
-        'accept-language': 'en-US,en;q=0.9',
-        'sec-fetch-mode': 'navigate',
-        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 192.0',
-        'referer': 'https://www.indeed.com/m/jobs?q=software%20intern&l=Dallas%2C%20TX&from=serpso&rq=1&rsIdx=3',
-    }
     api_headers = {
-        'Host': 'apis.indeed.com',
-        'content-type': 'application/json',
-        'indeed-api-key': '161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8',
-        'accept': 'application/json',
-        'indeed-locale': 'en-US',
-        'accept-language': 'en-US,en;q=0.9',
-        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1',
-        'indeed-app-info': 'appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone',
-        'indeed-co': 'US',
+        "Host": "apis.indeed.com",
+        "content-type": "application/json",
+        "indeed-api-key": "161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8",
+        "accept": "application/json",
+        "indeed-locale": "en-US",
+        "accept-language": "en-US,en;q=0.9",
+        "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1",
+        "indeed-app-info": "appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone",
     }
-    api_payload = {
-        "query": """
+    job_search_query = """
         query GetJobData {{
-            jobData(input: {{
-                jobKeys: {job_keys_gql}
-            }}) {{
+            jobSearch(
+              {what}
+              {location}
+              includeSponsoredResults: NONE
+              limit: 100
+              sort: DATE
+              {cursor}
+              {filters}
+            ) {{
+              pageInfo {{
+                nextCursor
+              }}
               results {{
+                trackingKey
                 job {{
+                  key
                   title
                   datePublished
+                  dateOnIndeed
                   description {{
                     html
                   }}
                   location {{
                     countryName
                     countryCode
                     admin1Code
                     city
                     postalCode
                     streetAddress

@@ -399,10 +397,30 @@ class IndeedScraper(Scraper):
                   currencyCode
                 }}
                 attributes {{
+                  key
                   label
                 }}
                 employer {{
                   relativeCompanyPageUrl
+                  name
+                  dossier {{
+                    employerDetails {{
+                      addresses
+                      industry
+                      employeesLocalizedLabel
+                      revenueLocalizedLabel
+                      briefDescription
+                      ceoName
+                      ceoPhotoUrl
+                    }}
+                    images {{
+                      headerImageUrl
+                      squareLogoUrl
+                    }}
+                    links {{
+                      corporateWebsite
+                    }}
+                  }}
                 }}
                 recruit {{
                   viewJobUrl

@@ -414,4 +432,3 @@ class IndeedScraper(Scraper):
             }}
         }}
         """
-    }
@@ -4,8 +4,13 @@ jobspy.scrapers.linkedin

 This module contains routines to scrape LinkedIn.
 """
+
+from __future__ import annotations
+
+import time
 import random
+import regex as re
 import urllib.parse
 from typing import Optional
 from datetime import datetime

@@ -24,15 +29,14 @@ from ...jobs import (
     JobType,
     Country,
     Compensation,
-    DescriptionFormat
+    DescriptionFormat,
 )
 from ..utils import (
     logger,
-    count_urgent_words,
     extract_emails_from_text,
     get_enum_from_job_type,
     currency_parser,
-    markdown_converter
+    markdown_converter,
 )

@@ -49,6 +53,7 @@ class LinkedInScraper(Scraper):
         super().__init__(Site(Site.LINKEDIN), proxy=proxy)
         self.scraper_input = None
         self.country = "worldwide"
+        self.job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')

     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """

@@ -62,26 +67,32 @@ class LinkedInScraper(Scraper):
         url_lock = Lock()
         page = scraper_input.offset // 25 + 25 if scraper_input.offset else 0
         seconds_old = (
-            scraper_input.hours_old * 3600
-            if scraper_input.hours_old
-            else None
+            scraper_input.hours_old * 3600 if scraper_input.hours_old else None
         )
-        continue_search = lambda: len(job_list) < scraper_input.results_wanted and page < 1000
+        continue_search = (
+            lambda: len(job_list) < scraper_input.results_wanted and page < 1000
+        )
         while continue_search():
-            logger.info(f'LinkedIn search page: {page // 25 + 1}')
+            logger.info(f"LinkedIn search page: {page // 25 + 1}")
             session = create_session(is_tls=False, has_retry=True, delay=5)
             params = {
                 "keywords": scraper_input.search_term,
                 "location": scraper_input.location,
                 "distance": scraper_input.distance,
                 "f_WT": 2 if scraper_input.is_remote else None,
-                "f_JT": self.job_type_code(scraper_input.job_type)
-                if scraper_input.job_type
-                else None,
+                "f_JT": (
+                    self.job_type_code(scraper_input.job_type)
+                    if scraper_input.job_type
+                    else None
+                ),
                 "pageNum": 0,
                 "start": page + scraper_input.offset,
                 "f_AL": "true" if scraper_input.easy_apply else None,
-                "f_C": ','.join(map(str, scraper_input.linkedin_company_ids)) if scraper_input.linkedin_company_ids else None,
+                "f_C": (
+                    ",".join(map(str, scraper_input.linkedin_company_ids))
+                    if scraper_input.linkedin_company_ids
+                    else None
+                ),
             }
             if seconds_old is not None:
                 params["f_TPR"] = f"r{seconds_old}"
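As a worked example of the hour filter above (simple arithmetic on the parameters shown):

```python
hours_old = 72
seconds_old = hours_old * 3600         # 259200
params = {"f_TPR": f"r{seconds_old}"}  # LinkedIn receives f_TPR=r259200
```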
@@ -98,15 +109,19 @@ class LinkedInScraper(Scraper):
|
||||
)
|
||||
if response.status_code not in range(200, 400):
|
||||
if response.status_code == 429:
|
||||
logger.error(f'429 Response - Blocked by LinkedIn for too many requests')
|
||||
err = (
|
||||
f"429 Response - Blocked by LinkedIn for too many requests"
|
||||
)
|
||||
else:
|
||||
logger.error(f'LinkedIn response status code {response.status_code}')
|
||||
err = f"LinkedIn response status code {response.status_code}"
|
||||
err += f" - {response.text}"
|
||||
logger.error(err)
|
||||
return JobResponse(jobs=job_list)
|
||||
except Exception as e:
|
||||
if "Proxy responded with" in str(e):
|
||||
logger.error(f'LinkedIn: Bad proxy')
|
||||
logger.error(f"LinkedIn: Bad proxy")
|
||||
else:
|
||||
logger.error(f'LinkedIn: {str(e)}')
|
||||
logger.error(f"LinkedIn: {str(e)}")
|
||||
return JobResponse(jobs=job_list)
|
||||
|
||||
soup = BeautifulSoup(response.text, "html.parser")
|
||||
@@ -127,7 +142,8 @@ class LinkedInScraper(Scraper):
                     continue
                 seen_urls.add(job_url)
                 try:
-                    job_post = self._process_job(job_card, job_url, scraper_input.linkedin_fetch_description)
+                    fetch_desc = scraper_input.linkedin_fetch_description
+                    job_post = self._process_job(job_card, job_url, fetch_desc)
                 if job_post:
                     job_list.append(job_post)
                 if not continue_search():
@@ -142,8 +158,10 @@ class LinkedInScraper(Scraper):
         job_list = job_list[: scraper_input.results_wanted]
         return JobResponse(jobs=job_list)
 
-    def _process_job(self, job_card: Tag, job_url: str, full_descr: bool) -> Optional[JobPost]:
-        salary_tag = job_card.find('span', class_='job-search-card__salary-info')
+    def _process_job(
+        self, job_card: Tag, job_url: str, full_descr: bool
+    ) -> Optional[JobPost]:
+        salary_tag = job_card.find("span", class_="job-search-card__salary-info")
 
         compensation = None
         if salary_tag:
@@ -179,17 +197,16 @@ class LinkedInScraper(Scraper):
             if metadata_card
             else None
         )
-        date_posted = description = job_type = None
+        date_posted = None
         if datetime_tag and "datetime" in datetime_tag.attrs:
             datetime_str = datetime_tag["datetime"]
             try:
                 date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
             except:
                 date_posted = None
         benefits_tag = job_card.find("span", class_="result-benefits__text")
         benefits = " ".join(benefits_tag.get_text().split()) if benefits_tag else None
+        job_details = {}
         if full_descr:
-            description, job_type = self._get_job_description(job_url)
+            job_details = self._get_job_details(job_url)
 
         return JobPost(
             title=title,
@@ -199,29 +216,29 @@ class LinkedInScraper(Scraper):
             date_posted=date_posted,
             job_url=job_url,
             compensation=compensation,
             benefits=benefits,
-            job_type=job_type,
-            description=description,
-            emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
+            job_type=job_details.get("job_type"),
+            description=job_details.get("description"),
+            job_url_direct=job_details.get("job_url_direct"),
+            emails=extract_emails_from_text(job_details.get("description")),
+            logo_photo_url=job_details.get("logo_photo_url"),
         )
 
-    def _get_job_description(
-        self, job_page_url: str
-    ) -> tuple[None, None] | tuple[str | None, tuple[str | None, JobType | None]]:
+    def _get_job_details(self, job_page_url: str) -> dict:
         """
-        Retrieves job description by going to the job page url
+        Retrieves job description and other job details by going to the job page url
         :param job_page_url:
-        :return: description or None
+        :return: dict
         """
         try:
             session = create_session(is_tls=False, has_retry=True)
-            response = session.get(job_page_url, headers=self.headers, timeout=5, proxies=self.proxy)
+            response = session.get(
+                job_page_url, headers=self.headers, timeout=5, proxies=self.proxy
+            )
             response.raise_for_status()
         except:
-            return None, None
+            return {}
         if response.url == "https://www.linkedin.com/signup":
-            return None, None
+            return {}
 
         soup = BeautifulSoup(response.text, "html.parser")
         div_content = soup.find(
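
A quick illustration of the new contract between `_process_job` and `_get_job_details`: a failed fetch now returns an empty dict instead of a `(None, None)` tuple, so every `.get()` above resolves to None and the optional JobPost fields stay unset without unpacking errors:

```
job_details = {}  # simulates a failed or skipped detail-page fetch
print(job_details.get("description"))  # None
print(job_details.get("job_url_direct"))  # None
```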
@@ -229,15 +246,24 @@ class LinkedInScraper(Scraper):
         )
         description = None
         if div_content is not None:
+
+            def remove_attributes(tag):
+                for attr in list(tag.attrs):
+                    del tag[attr]
+                return tag
+
+            div_content = remove_attributes(div_content)
             description = div_content.prettify(formatter="html")
             if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
                 description = markdown_converter(description)
-        return description, self._parse_job_type(soup)
+        return {
+            "description": description,
+            "job_type": self._parse_job_type(soup),
+            "job_url_direct": self._parse_job_url_direct(soup),
+            "logo_photo_url": soup.find("img", {"class": "artdeco-entity-image"}).get(
+                "data-delayed-url"
+            ),
+        }
 
     def _get_location(self, metadata_card: Optional[Tag]) -> Location:
         """
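
The `remove_attributes` helper above strips LinkedIn's tracking classes and inline attributes before `prettify()`, so stored descriptions are plain HTML. A standalone check of that behavior (the sample markup is invented for the demo):

```
from bs4 import BeautifulSoup

html = '<div class="show-more-less-html__markup" dir="ltr"><p>Build things.</p></div>'
div = BeautifulSoup(html, "html.parser").find("div")
for attr in list(div.attrs):  # same loop as remove_attributes
    del div[attr]
print(div.prettify(formatter="html"))
# <div>
#  <p>
#   Build things.
#  </p>
# </div>
```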
@@ -261,11 +287,8 @@ class LinkedInScraper(Scraper):
             )
         elif len(parts) == 3:
             city, state, country = parts
-            location = Location(
-                city=city,
-                state=state,
-                country=Country.from_string(country)
-            )
+            country = Country.from_string(country)
+            location = Location(city=city, state=state, country=country)
         return location
 
     @staticmethod
@@ -293,6 +316,23 @@
 
         return [get_enum_from_job_type(employment_type)] if employment_type else []
 
+    def _parse_job_url_direct(self, soup: BeautifulSoup) -> str | None:
+        """
+        Gets the job url direct from job page
+        :param soup:
+        :return: str
+        """
+        job_url_direct = None
+        job_url_direct_content = soup.find("code", id="applyUrl")
+        if job_url_direct_content:
+            job_url_direct_match = self.job_url_direct_regex.search(
+                job_url_direct_content.decode_contents().strip()
+            )
+            if job_url_direct_match:
+                job_url_direct = urllib.parse.unquote(job_url_direct_match.group())
+
+        return job_url_direct
+
     @staticmethod
     def job_type_code(job_type_enum: JobType) -> str:
         return {
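
How the new `_parse_job_url_direct` behaves on a representative `<code id="applyUrl">` payload: the lookbehind regex grabs everything after `?url=` up to the closing quote, then `unquote` decodes it (the sample URL here is made up):

```
import re
import urllib.parse

job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')
contents = '"https://www.linkedin.com/external?url=https%3A%2F%2Fjobs.example.com%2F123"'
match = job_url_direct_regex.search(contents)
if match:
    print(urllib.parse.unquote(match.group()))
# https://jobs.example.com/123
```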
@@ -1,9 +1,10 @@
-import logging
-import re
+from __future__ import annotations
 
-import numpy as np
+import re
+import logging
 import requests
 import tls_client
+import numpy as np
 from markdownify import markdownify as md
 from requests.adapters import HTTPAdapter, Retry
@@ -14,23 +15,27 @@ logger.propagate = False
 if not logger.handlers:
     logger.setLevel(logging.INFO)
     console_handler = logging.StreamHandler()
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    formatter = logging.Formatter(format)
     console_handler.setFormatter(formatter)
     logger.addHandler(console_handler)
 
 
-def count_urgent_words(description: str) -> int:
+def set_logger_level(verbose: int = 2):
     """
-    Count the number of urgent words or phrases in a job description.
-    """
-    urgent_patterns = re.compile(
-        r"\burgen(t|cy)|\bimmediate(ly)?\b|start asap|\bhiring (now|immediate(ly)?)\b",
-        re.IGNORECASE,
-    )
-    matches = re.findall(urgent_patterns, description)
-    count = len(matches)
+    Adjusts the logger's level. This function allows the logging level to be changed at runtime.
 
-    return count
+    Parameters:
+    - verbose: int {0, 1, 2} (default=2, all logs)
+    """
+    if verbose is None:
+        return
+    level_name = {2: "INFO", 1: "WARNING", 0: "ERROR"}.get(verbose, "INFO")
+    level = getattr(logging, level_name.upper(), None)
+    if level is not None:
+        logger.setLevel(level)
+    else:
+        raise ValueError(f"Invalid log level: {level_name}")
 
 
 def markdown_converter(description_html: str):
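
In the shared utils module, `count_urgent_words` (and with it the `num_urgent_words` output field) gives way to runtime verbosity control. A usage sketch — the import path mirrors this repo's package layout and is an assumption:

```
from jobspy.scrapers.utils import set_logger_level  # assumed import path

set_logger_level(2)  # INFO: default, all logs
set_logger_level(1)  # WARNING: hide per-page INFO lines
set_logger_level(0)  # ERROR: errors only
```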
@@ -47,7 +52,12 @@ def extract_emails_from_text(text: str) -> list[str] | None:
     return email_regex.findall(text)
 
 
-def create_session(proxy: dict | None = None, is_tls: bool = True, has_retry: bool = False, delay: int = 1) -> requests.Session:
+def create_session(
+    proxy: dict | None = None,
+    is_tls: bool = True,
+    has_retry: bool = False,
+    delay: int = 1,
+) -> requests.Session:
     """
     Creates a requests session with optional tls, proxy, and retry settings.
     :return: A session object
@@ -61,15 +71,17 @@ def create_session(proxy: dict | None = None, is_tls: bool = True, has_retry: bool = False, delay: int = 1) -> requests.Session:
     if proxy:
         session.proxies.update(proxy)
     if has_retry:
-        retries = Retry(total=3,
+        retries = Retry(
+            total=3,
             connect=3,
             status=3,
             status_forcelist=[500, 502, 503, 504, 429],
-            backoff_factor=delay)
+            backoff_factor=delay,
+        )
         adapter = HTTPAdapter(max_retries=retries)
 
-        session.mount('http://', adapter)
-        session.mount('https://', adapter)
+        session.mount("http://", adapter)
+        session.mount("https://", adapter)
     return session
 
 
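
With `has_retry=True`, the mounted HTTPAdapter retries 429 and 5xx responses up to three times, sleeping roughly `backoff_factor * 2**(n-1)` seconds between attempts per urllib3's backoff formula, so `delay=5` gives pauses on the order of 5, 10, and 20 seconds. A usage sketch, assuming the function is importable from this module:

```
from jobspy.scrapers.utils import create_session  # assumed import path

session = create_session(is_tls=False, has_retry=True, delay=5)
response = session.get("https://www.linkedin.com", timeout=10)
print(response.status_code)
```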
@@ -87,17 +99,15 @@ def get_enum_from_job_type(job_type_str: str) -> JobType | None:
 def currency_parser(cur_str):
     # Remove any non-numerical characters
     # except for ',' '.' or '-' (e.g. EUR)
-    cur_str = re.sub("[^-0-9.,]", '', cur_str)
+    cur_str = re.sub("[^-0-9.,]", "", cur_str)
     # Remove any 000s separators (either , or .)
-    cur_str = re.sub("[.,]", '', cur_str[:-3]) + cur_str[-3:]
+    cur_str = re.sub("[.,]", "", cur_str[:-3]) + cur_str[-3:]
 
-    if '.' in list(cur_str[-3:]):
+    if "." in list(cur_str[-3:]):
         num = float(cur_str)
-    elif ',' in list(cur_str[-3:]):
-        num = float(cur_str.replace(',', '.'))
+    elif "," in list(cur_str[-3:]):
+        num = float(cur_str.replace(",", "."))
     else:
         num = float(cur_str)
 
     return np.round(num, 2)
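
Worked examples for `currency_parser`, whose logic is unchanged by the quote-style edits above: the last three characters decide whether ',' or '.' is the decimal mark:

```
from jobspy.scrapers.utils import currency_parser  # assumed import path

currency_parser("$1,200.50")  # -> 1200.5 (',' treated as a thousands separator)
currency_parser("1.200,50 €")  # -> 1200.5 ('.' stripped, ',' read as the decimal point)
currency_parser("1200")  # -> 1200.0 (no separator in the last three characters)
```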
@@ -4,6 +4,9 @@ jobspy.scrapers.ziprecruiter
 
 This module contains routines to scrape ZipRecruiter.
 """
 
+from __future__ import annotations
+
 import math
 import time
 from datetime import datetime
@@ -14,10 +17,9 @@ from concurrent.futures import ThreadPoolExecutor
 from .. import Scraper, ScraperInput, Site
 from ..utils import (
     logger,
-    count_urgent_words,
     extract_emails_from_text,
     create_session,
-    markdown_converter
+    markdown_converter,
 )
 from ...jobs import (
     JobPost,
@@ -26,7 +28,7 @@ from ...jobs import (
     JobResponse,
     JobType,
     Country,
-    DescriptionFormat
+    DescriptionFormat,
 )
 
 
@@ -63,7 +65,7 @@ class ZipRecruiterScraper(Scraper):
                 break
             if page > 1:
                 time.sleep(self.delay)
-            logger.info(f'ZipRecruiter search page: {page}')
+            logger.info(f"ZipRecruiter search page: {page}")
             jobs_on_page, continue_token = self._find_jobs_in_page(
                 scraper_input, continue_token
             )
@@ -89,25 +91,24 @@ class ZipRecruiterScraper(Scraper):
         if continue_token:
             params["continue_from"] = continue_token
         try:
-            res= self.session.get(
-                f"{self.api_url}/jobs-app/jobs",
-                headers=self.headers,
-                params=params
+            res = self.session.get(
+                f"{self.api_url}/jobs-app/jobs", headers=self.headers, params=params
             )
             if res.status_code not in range(200, 400):
                 if res.status_code == 429:
-                    logger.error(f'429 Response - Blocked by ZipRecruiter for too many requests')
+                    err = "429 Response - Blocked by ZipRecruiter for too many requests"
                 else:
-                    logger.error(f'ZipRecruiter response status code {res.status_code}')
+                    err = f"ZipRecruiter response status code {res.status_code}"
+                    err += f" with response: {res.text}"  # ZipRecruiter likely not available in EU
+                logger.error(err)
                 return jobs_list, ""
         except Exception as e:
             if "Proxy responded with" in str(e):
-                logger.error(f'Indeed: Bad proxy')
+                logger.error(f"Indeed: Bad proxy")
             else:
-                logger.error(f'Indeed: {str(e)}')
+                logger.error(f"Indeed: {str(e)}")
             return jobs_list, ""
 
 
         res_data = res.json()
         jobs_list = res_data.get("jobs", [])
         next_continue_token = res_data.get("continue", None)
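
(Note the log prefix in this hunk still reads "Indeed:" inside the ZipRecruiter scraper — a copy-paste leftover that the reformat preserves.) For orientation, the surrounding method implements token-based pagination: each /jobs-app/jobs response carries a "continue" token that seeds the next request. A sketch of that loop; `fetch_page` is a hypothetical helper standing in for the `session.get` call above:

```
params = {"search": "software engineer", "location": "Dallas, TX"}
continue_token = None
while True:
    if continue_token:
        params["continue_from"] = continue_token
    res_data = fetch_page(params)  # hypothetical wrapper around session.get(...).json()
    jobs = res_data.get("jobs", [])
    continue_token = res_data.get("continue")
    if not jobs or not continue_token:
        break
```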
@@ -128,7 +129,11 @@ class ZipRecruiterScraper(Scraper):
         self.seen_urls.add(job_url)
 
         description = job.get("job_description", "").strip()
-        description = markdown_converter(description) if self.scraper_input.description_format == DescriptionFormat.MARKDOWN else description
+        description = (
+            markdown_converter(description)
+            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN
+            else description
+        )
         company = job.get("hiring_company", {}).get("name")
         country_value = "usa" if job.get("job_country") == "US" else "canada"
         country_enum = Country.from_string(country_value)
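
The reflowed conditional keeps markdown conversion opt-in via DescriptionFormat. Judging by the utils imports, `markdown_converter` wraps markdownify, so the conversion behaves roughly like:

```
from markdownify import markdownify as md

print(md("<p>Great <b>pay</b>, start <i>now</i></p>").strip())
# Great **pay**, start *now*
```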
@@ -139,34 +144,33 @@ class ZipRecruiterScraper(Scraper):
         job_type = self._get_job_type_enum(
             job.get("employment_type", "").replace("_", "").lower()
         )
-        date_posted = datetime.fromisoformat(job['posted_time'].rstrip("Z")).date()
+        date_posted = datetime.fromisoformat(job["posted_time"].rstrip("Z")).date()
+        comp_interval = job.get("compensation_interval")
+        comp_interval = "yearly" if comp_interval == "annual" else comp_interval
+        comp_min = int(job["compensation_min"]) if "compensation_min" in job else None
+        comp_max = int(job["compensation_max"]) if "compensation_max" in job else None
+        comp_currency = job.get("compensation_currency")
         return JobPost(
             title=title,
             company_name=company,
             location=location,
             job_type=job_type,
             compensation=Compensation(
-                interval="yearly"
-                if job.get("compensation_interval") == "annual"
-                else job.get("compensation_interval"),
-                min_amount=int(job["compensation_min"])
-                if "compensation_min" in job
-                else None,
-                max_amount=int(job["compensation_max"])
-                if "compensation_max" in job
-                else None,
-                currency=job.get("compensation_currency"),
+                interval=comp_interval,
+                min_amount=comp_min,
+                max_amount=comp_max,
+                currency=comp_currency,
             ),
             date_posted=date_posted,
             job_url=job_url,
             description=description,
             emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
         )
 
     def _get_cookies(self):
-        data="event_type=session&logged_in=false&number_of_retry=1&property=model%3AiPhone&property=os%3AiOS&property=locale%3Aen_us&property=app_build_number%3A4734&property=app_version%3A91.0&property=manufacturer%3AApple&property=timestamp%3A2024-01-12T12%3A04%3A42-06%3A00&property=screen_height%3A852&property=os_version%3A16.6.1&property=source%3Ainstall&property=screen_width%3A393&property=device_model%3AiPhone%2014%20Pro&property=brand%3AApple"
-        self.session.post(f"{self.api_url}/jobs-app/event", data=data, headers=self.headers)
+        data = "event_type=session&logged_in=false&number_of_retry=1&property=model%3AiPhone&property=os%3AiOS&property=locale%3Aen_us&property=app_build_number%3A4734&property=app_version%3A91.0&property=manufacturer%3AApple&property=timestamp%3A2024-01-12T12%3A04%3A42-06%3A00&property=screen_height%3A852&property=os_version%3A16.6.1&property=source%3Ainstall&property=screen_width%3A393&property=device_model%3AiPhone%2014%20Pro&property=brand%3AApple"
+        url = f"{self.api_url}/jobs-app/event"
+        self.session.post(url, data=data, headers=self.headers)
 
     @staticmethod
     def _get_job_type_enum(job_type_str: str) -> list[JobType] | None:
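
For context on `_get_cookies` above: it replays a mobile-app analytics event so the API issues session cookies to an anonymous client. A sketch under stated assumptions — the base URL is inferred from the f-string and is not shown in this diff:

```
import requests

api_url = "https://api.ziprecruiter.com"  # assumed value of self.api_url
session = requests.Session()
session.post(f"{api_url}/jobs-app/event", data="event_type=session&logged_in=false")
print(session.cookies.get_dict())
```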
@@ -182,16 +186,13 @@ class ZipRecruiterScraper(Scraper):
             "location": scraper_input.location,
         }
         if scraper_input.hours_old:
-            fromage = max(scraper_input.hours_old // 24, 1) if scraper_input.hours_old else None
-            params['days'] = fromage
-        job_type_map = {
-            JobType.FULL_TIME: 'full_time',
-            JobType.PART_TIME: 'part_time'
-        }
+            params["days"] = max(scraper_input.hours_old // 24, 1)
+        job_type_map = {JobType.FULL_TIME: "full_time", JobType.PART_TIME: "part_time"}
         if scraper_input.job_type:
-            params['employment_type'] = job_type_map[scraper_input.job_type] if scraper_input.job_type in job_type_map else scraper_input.job_type.value[0]
+            job_type = scraper_input.job_type
+            params["employment_type"] = job_type_map.get(job_type, job_type.value[0])
         if scraper_input.easy_apply:
-            params['zipapply'] = 1
+            params["zipapply"] = 1
         if scraper_input.is_remote:
             params["remote"] = 1
         if scraper_input.distance:
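
The dict.get refactor above is behavior-preserving: `.get` falls back to the enum's first value exactly as the old inline conditional did. A self-contained check with a trimmed stand-in for jobspy's JobType enum:

```
from enum import Enum

class JobType(Enum):  # trimmed stand-in; the real enum lives in jobspy's jobs module
    FULL_TIME = ("fulltime",)
    CONTRACT = ("contract",)

job_type_map = {JobType.FULL_TIME: "full_time"}
for jt in (JobType.FULL_TIME, JobType.CONTRACT):
    print(job_type_map.get(jt, jt.value[0]))
# full_time
# contract
```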