Compare commits

...

24 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Cullen Watson | 65d2e5e707 | Update pyproject.toml | 2024-05-20 11:46:36 -05:00 |
| fasih hussain | 08d63a87a2 | chore: id added for JobPost schema (#152) | 2024-05-20 11:45:52 -05:00 |
| Cullen | 1ffdb1756f | fix: dup line | 2024-04-30 12:11:48 -05:00 |
| Cullen Watson | 1185693422 | delete empty file | 2024-04-30 12:06:20 -05:00 |
| Lluís Salord Quetglas | dcd7144318 | FIX: Allow Indeed search term with complex syntax (#139) | 2024-04-30 12:05:43 -05:00 |
| Cullen Watson | bf73c061bd | enh: linkedin company logo (#141) | 2024-04-30 12:03:10 -05:00 |
| Lluís Salord Quetglas | 8dd08ed9fd | FEAT: Allow LinkedIn scraper to get external job apply url (#140) | 2024-04-30 11:36:01 -05:00 |
| Cullen Watson | 5d3df732e6 | docs: readme | 2024-03-12 20:46:25 -05:00 |
| Kellen Mace | 86f858e06d | Update scrape_jobs() parameters info in readme (#130) | 2024-03-12 20:45:13 -05:00 |
| Cullen | 1089d1f0a5 | docs: readme | 2024-03-11 21:30:57 -05:00 |
| Cullen | 3e93454738 | fix(indeed): readd param | 2024-03-11 21:23:20 -05:00 |
| Cullen Watson | 0d150d519f | docs: readme | 2024-03-11 14:52:20 -05:00 |
| Cullen Watson | cc3497f929 | docs: readme | 2024-03-11 14:45:17 -05:00 |
| Cullen Watson | 5986f75346 | docs: readme | 2024-03-11 14:41:12 -05:00 |
| VitaminB16 | 4b7bdb9313 | feat: Adjust log verbosity via verbose arg (#128) | 2024-03-11 14:38:44 -05:00 |
| Cullen Watson | 80213f28d2 | chore: version | 2024-03-11 09:43:12 -05:00 |
| Cullen Watson | ada38532c3 | fix: indeed empty location term | 2024-03-11 09:42:43 -05:00 |
| Cullen Watson | 3b0017964c | fix: indeed empty search term | 2024-03-11 09:21:11 -05:00 |
| VitaminB16 | 94d8f555fd | format: Apply Black formatter to the codebase (#127) | 2024-03-10 23:36:27 -05:00 |
| Cullen Watson | e8b4b376b8 | docs: readme | 2024-03-09 13:40:34 -06:00 |
| Cullen Watson | 54ac1bad16 | docs: readme | 2024-03-09 01:49:05 -06:00 |
| Cullen Watson | 0a669e9ba8 | enh: indeed more fields (#126) | 2024-03-09 01:40:01 -06:00 |
| gigaSec | a4f6851c32 | Fix GlassDoor Country Vietnam(#122) | 2024-03-04 17:35:57 -06:00 |
| troy-conte | db01bc6bbb | log search updates, fix glassdoor (#120) | 2024-03-04 16:39:38 -06:00 |
14 changed files with 2061 additions and 1503 deletions

.pre-commit-config.yaml (new file, +7)

@@ -0,0 +1,7 @@
+repos:
+  - repo: https://github.com/psf/black
+    rev: 24.2.0
+    hooks:
+      - id: black
+        language_version: python
+        args: [--line-length=88, --quiet]

README.md

@@ -21,7 +21,7 @@ Updated for release v1.1.3
 ### Installation
 ```
-pip install python-jobspy
+pip install -U python-jobspy
 ```
 _Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_
@@ -37,8 +37,9 @@ jobs = scrape_jobs(
     search_term="software engineer",
     location="Dallas, TX",
     results_wanted=20,
-    hours_old=72, # (only linkedin is hour specific, others round up to days old)
-    country_indeed='USA'  # only needed for indeed / glassdoor
+    hours_old=72, # (only Linkedin/Indeed is hour specific, others round up to days old)
+    country_indeed='USA',  # only needed for indeed / glassdoor
+    # linkedin_fetch_description=True # get full description and direct job url for linkedin (slower)
 )
 print(f"Found {len(jobs)} jobs")
 print(jobs.head())
@@ -48,7 +49,7 @@ jobs.to_csv("jobs.csv", quoting=csv.QUOTE_NONNUMERIC, escapechar="\\", index=False)
 ### Output
 ```
-SITE TITLE COMPANY_NAME CITY STATE JOB_TYPE INTERVAL MIN_AMOUNT MAX_AMOUNT JOB_URL DESCRIPTION
+SITE TITLE COMPANY CITY STATE JOB_TYPE INTERVAL MIN_AMOUNT MAX_AMOUNT JOB_URL DESCRIPTION
 indeed Software Engineer AMERICAN SYSTEMS Arlington VA None yearly 200000 150000 https://www.indeed.com/viewjob?jk=5e409e577046... THIS POSITION COMES WITH A 10K SIGNING BONUS!...
 indeed Senior Software Engineer TherapyNotes.com Philadelphia PA fulltime yearly 135000 110000 https://www.indeed.com/viewjob?jk=da39574a40cb... About Us TherapyNotes is the national leader i...
 linkedin Software Engineer - Early Career Lockheed Martin Sunnyvale CA fulltime yearly None None https://www.linkedin.com/jobs/view/3693012711 Description:By bringing together people that u...
@@ -60,23 +61,24 @@ zip_recruiter Software Developer TEKsystems Phoenix
 ### Parameters for `scrape_jobs()`
 ```plaintext
-Required
-├── site_type (List[enum]): linkedin, zip_recruiter, indeed, glassdoor
-└── search_term (str)
 Optional
-├── location (int)
-├── distance (int): in miles
-├── job_type (enum): fulltime, parttime, internship, contract
+├── site_name (list|str): linkedin, zip_recruiter, indeed, glassdoor (default is all four)
+├── search_term (str)
+├── location (str)
+├── distance (int): in miles, default 50
+├── job_type (str): fulltime, parttime, internship, contract
 ├── proxy (str): in format 'http://user:pass@host:port'
 ├── is_remote (bool)
-├── linkedin_fetch_description (bool): fetches full description for LinkedIn (slower)
-├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
-├── easy_apply (bool): filters for jobs that are hosted on the job board site
-├── linkedin_company_ids (list[int): searches for linkedin jobs with specific company ids
-├── description_format (enum): markdown, html (format type of the job descriptions)
-├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
-├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
-├── hours_old (int): filters jobs by the number of hours since the job was posted (all but LinkedIn rounds up to next day)
+├── results_wanted (int): number of job results to retrieve for each site specified in 'site_name'
+├── easy_apply (bool): filters for jobs that are hosted on the job board site (LinkedIn & Indeed do not allow pairing this with hours_old)
+├── linkedin_fetch_description (bool): fetches full description and direct job url for LinkedIn (slower)
+├── linkedin_company_ids (list[int]): searches for linkedin jobs with specific company ids
+├── description_format (str): markdown, html (Format type of the job descriptions. Default is markdown.)
+├── country_indeed (str): filters the country on Indeed (see below for correct spelling)
+├── offset (int): starts the search from an offset (e.g. 25 will start the search from the 25th result)
+├── hours_old (int): filters jobs by the number of hours since the job was posted (ZipRecruiter and Glassdoor round up to next day. If you use this on Indeed, it will not filter by job_type/is_remote/easy_apply)
+├── verbose (int) {0, 1, 2}: Controls the verbosity of the runtime printouts (0 prints only errors, 1 is errors+warnings, 2 is all logs. Default is 2.)
+├── hyperlinks (bool): Whether to turn `job_url`s into hyperlinks. Default is false.
 ```
 ### JobPost Schema
@@ -100,24 +102,26 @@ JobPost
 │ └── currency (enum)
 └── date_posted (date)
 └── emails (str)
-└── num_urgent_words (int)
 └── is_remote (bool)
+Indeed specific
+├── company_country (str)
+└── company_addresses (str)
+└── company_industry (str)
+└── company_employees_label (str)
+└── company_revenue_label (str)
+└── company_description (str)
+└── ceo_name (str)
+└── ceo_photo_url (str)
+└── logo_photo_url (str)
+└── banner_photo_url (str)
 ```
-### Exceptions
-The following exceptions may be raised when using JobSpy:
-* `LinkedInException`
-* `IndeedException`
-* `ZipRecruiterException`
-* `GlassdoorException`
 ## Supported Countries for Job Searching
 ### **LinkedIn**
-LinkedIn searches globally & uses only the `location` parameter. You can only fetch 1000 jobs max from the LinkedIn endpoint we're using
+LinkedIn searches globally & uses only the `location` parameter.
 ### **ZipRecruiter**
@@ -147,10 +151,14 @@ You can specify the following countries when searching on Indeed (use the exact
 | South Korea | Spain* | Sweden | Switzerland* |
 | Taiwan | Thailand | Turkey | Ukraine |
 | United Arab Emirates | UK* | USA* | Uruguay |
-| Venezuela | Vietnam | | |
-Glassdoor can only fetch 900 jobs from the endpoint we're using on a given search.
+| Venezuela | Vietnam* | | |
+## Notes
+* Indeed is the best scraper currently with no rate limiting.
+* All the job board endpoints are capped at around 1000 jobs on a given search.
+* LinkedIn is the most restrictive and usually rate limits around the 10th page.
 ## Frequently Asked Questions
 ---
@@ -168,7 +176,3 @@ persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
 - Trying a VPN or proxy to change your IP address.
 ---
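Taken together, the README changes above settle on the new call surface. A minimal usage sketch exercising the newly documented parameters (`verbose`, `hyperlinks`, `linkedin_fetch_description`); the parameter names come from the diff, while the surrounding values are illustrative only:

```python
# Hedged usage sketch based on the parameters documented in the README diff above.
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed", "linkedin"],  # default is all four boards
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=20,
    hours_old=72,            # ZipRecruiter and Glassdoor round up to next day
    country_indeed="USA",    # only needed for indeed / glassdoor
    verbose=1,               # 0 = errors only, 1 = errors + warnings, 2 = all logs
    hyperlinks=False,        # keep job_url as plain text rather than <a> tags
    # linkedin_fetch_description=True,  # slower; full description + direct url
)
print(f"Found {len(jobs)} jobs")
```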

examples/JobSpy_Demo.py

@@ -27,4 +27,4 @@ print("outputted to jobs.csv")
 # jobs.to_xlsx('jobs.xlsx', index=False)
 # 4: display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
 # display(jobs)

examples/JobSpy_LongScrape.py

@@ -32,17 +32,18 @@ while len(all_jobs) < results_wanted:
         search_term="software engineer",
         # New York, NY
         # Dallas, TX
         # Los Angeles, CA
         location="Los Angeles, CA",
-        results_wanted=min(results_in_each_iteration, results_wanted - len(all_jobs)),
+        results_wanted=min(
+            results_in_each_iteration, results_wanted - len(all_jobs)
+        ),
         country_indeed="USA",
         offset=offset,
         # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
     )
     # Add the scraped jobs to the list
-    all_jobs.extend(jobs.to_dict('records'))
+    all_jobs.extend(jobs.to_dict("records"))
     # Increment the offset for the next page of results
     offset += results_in_each_iteration
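The example above pages through results by advancing `offset` on each call. A condensed, self-contained version of the same pattern (batch sizes here are illustrative):

```python
# Condensed sketch of the offset-pagination loop shown in the diff above.
from jobspy import scrape_jobs

results_wanted, batch, offset, all_jobs = 60, 20, 0, []
while len(all_jobs) < results_wanted:
    jobs = scrape_jobs(
        site_name=["indeed"],
        search_term="software engineer",
        location="Los Angeles, CA",
        results_wanted=min(batch, results_wanted - len(all_jobs)),
        country_indeed="USA",
        offset=offset,
    )
    if jobs.empty:
        break  # no more results to page through
    all_jobs.extend(jobs.to_dict("records"))
    offset += batch
print(f"collected {len(all_jobs)} jobs")
```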

poetry.lock (generated, 2210 lines changed)

File diff suppressed because it is too large

pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.46"
+version = "1.1.53"
 description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"
@@ -17,14 +17,20 @@ beautifulsoup4 = "^4.12.2"
 pandas = "^2.1.0"
 NUMPY = "1.24.2"
 pydantic = "^2.3.0"
-html2text = "^2020.1.16"
 tls-client = "^1.0.1"
+markdownify = "^0.11.6"
+regex = "^2024.4.28"
 [tool.poetry.group.dev.dependencies]
 pytest = "^7.4.1"
 jupyter = "^1.0.0"
+black = "*"
+pre-commit = "*"
 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
+[tool.black]
+line-length = 88

src/jobspy/__init__.py

@@ -1,8 +1,11 @@
+from __future__ import annotations
 import pandas as pd
 from typing import Tuple
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from .jobs import JobType, Location
+from .scrapers.utils import logger, set_logger_level
 from .scrapers.indeed import IndeedScraper
 from .scrapers.ziprecruiter import ZipRecruiterScraper
 from .scrapers.glassdoor import GlassdoorScraper
@@ -20,7 +23,7 @@ def scrape_jobs(
     site_name: str | list[str] | Site | list[Site] | None = None,
     search_term: str | None = None,
     location: str | None = None,
-    distance: int | None = None,
+    distance: int | None = 50,
     is_remote: bool = False,
     job_type: str | None = None,
     easy_apply: bool | None = None,
@@ -33,11 +36,12 @@ def scrape_jobs(
     linkedin_company_ids: list[int] | None = None,
     offset: int | None = 0,
     hours_old: int = None,
+    verbose: int = 2,
     **kwargs,
 ) -> pd.DataFrame:
     """
     Simultaneously scrapes job data from multiple job sites.
-    :return: results_wanted: pandas dataframe containing job data
+    :return: pandas dataframe containing job data
     """
     SCRAPER_MAPPING = {
         Site.LINKEDIN: LinkedInScraper,
@@ -45,6 +49,7 @@ def scrape_jobs(
         Site.ZIP_RECRUITER: ZipRecruiterScraper,
         Site.GLASSDOOR: GlassdoorScraper,
     }
+    set_logger_level(verbose)
     def map_str_to_site(site_name: str) -> Site:
         return Site[site_name.upper()]
@@ -69,6 +74,7 @@ def scrape_jobs(
             for site in site_name
         ]
         return site_types
     country_enum = Country.from_string(country_indeed)
     scraper_input = ScraperInput(
@@ -85,13 +91,16 @@ def scrape_jobs(
         results_wanted=results_wanted,
         linkedin_company_ids=linkedin_company_ids,
         offset=offset,
-        hours_old=hours_old
+        hours_old=hours_old,
     )
     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
         scraper_class = SCRAPER_MAPPING[site]
         scraper = scraper_class(proxy=proxy)
         scraped_data: JobResponse = scraper.scrape(scraper_input)
+        cap_name = site.value.capitalize()
+        site_name = "ZipRecruiter" if cap_name == "Zip_recruiter" else cap_name
+        logger.info(f"{site_name} finished scraping")
         return site.value, scraped_data
     site_to_jobs_dict = {}
@@ -114,9 +123,8 @@ def scrape_jobs(
     for site, job_response in site_to_jobs_dict.items():
         for job in job_response.jobs:
             job_data = job.dict()
-            job_data[
-                "job_url_hyper"
-            ] = f'<a href="{job_data["job_url"]}">{job_data["job_url"]}</a>'
+            job_url = job_data["job_url"]
+            job_data["job_url_hyper"] = f'<a href="{job_url}">{job_url}</a>'
             job_data["site"] = site
             job_data["company"] = job_data["company_name"]
             job_data["job_type"] = (
@@ -153,18 +161,19 @@ def scrape_jobs(
     if jobs_dfs:
         # Step 1: Filter out all-NA columns from each DataFrame before concatenation
-        filtered_dfs = [df.dropna(axis=1, how='all') for df in jobs_dfs]
+        filtered_dfs = [df.dropna(axis=1, how="all") for df in jobs_dfs]
         # Step 2: Concatenate the filtered DataFrames
         jobs_df = pd.concat(filtered_dfs, ignore_index=True)
         # Desired column order
         desired_order = [
-            "job_url_hyper" if hyperlinks else "job_url",
+            "id",
             "site",
+            "job_url_hyper" if hyperlinks else "job_url",
+            "job_url_direct",
             "title",
             "company",
-            "company_url",
             "location",
             "job_type",
             "date_posted",
@@ -173,21 +182,30 @@ def scrape_jobs(
             "max_amount",
             "currency",
             "is_remote",
-            "num_urgent_words",
-            "benefits",
             "emails",
             "description",
+            "company_url",
+            "company_url_direct",
+            "company_addresses",
+            "company_industry",
+            "company_num_employees",
+            "company_revenue",
+            "company_description",
+            "logo_photo_url",
+            "banner_photo_url",
+            "ceo_name",
+            "ceo_photo_url",
         ]
         # Step 3: Ensure all desired columns are present, adding missing ones as empty
         for column in desired_order:
             if column not in jobs_df.columns:
                 jobs_df[column] = None  # Add missing columns as empty
         # Reorder the DataFrame according to the desired order
         jobs_df = jobs_df[desired_order]
         # Step 4: Sort the DataFrame as required
-        return jobs_df.sort_values(by=['site', 'date_posted'], ascending=[True, False])
+        return jobs_df.sort_values(by=["site", "date_posted"], ascending=[True, False])
     else:
         return pd.DataFrame()
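The post-processing in `scrape_jobs` above is plain pandas: drop all-NA columns per site, concatenate, backfill any missing columns, reorder, then sort. The same pattern in isolation, under toy column names rather than the library's full list:

```python
# Standalone illustration of the DataFrame post-processing pattern above.
import pandas as pd

dfs = [
    pd.DataFrame({"site": ["indeed"], "title": ["SWE"], "min_amount": [None]}),
    pd.DataFrame({"site": ["linkedin"], "title": ["Dev"], "date_posted": ["2024-03-11"]}),
]
filtered = [df.dropna(axis=1, how="all") for df in dfs]  # drop all-NA columns first
combined = pd.concat(filtered, ignore_index=True)

desired_order = ["site", "title", "date_posted", "min_amount"]
for column in desired_order:
    if column not in combined.columns:
        combined[column] = None  # backfill missing columns as empty
combined = combined[desired_order]
print(combined.sort_values(by=["site", "date_posted"], ascending=[True, False]))
```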

src/jobspy/jobs.py

@@ -1,3 +1,5 @@
+from __future__ import annotations
 from typing import Optional
 from datetime import date
 from enum import Enum
@@ -57,7 +59,7 @@ class JobType(Enum):
 class Country(Enum):
     """
     Gets the subdomain for Indeed and Glassdoor.
-    The second item in the tuple is the subdomain for Indeed
+    The second item in the tuple is the subdomain (and API country code if there's a ':' separator) for Indeed
     The third item in the tuple is the subdomain (and tld if there's a ':' separator) for Glassdoor
     """
@@ -118,11 +120,11 @@ class Country(Enum):
     TURKEY = ("turkey", "tr")
     UKRAINE = ("ukraine", "ua")
     UNITEDARABEMIRATES = ("united arab emirates", "ae")
-    UK = ("uk,united kingdom", "uk", "co.uk")
-    USA = ("usa,us,united states", "www", "com")
+    UK = ("uk,united kingdom", "uk:gb", "co.uk")
+    USA = ("usa,us,united states", "www:us", "com")
     URUGUAY = ("uruguay", "uy")
     VENEZUELA = ("venezuela", "ve")
-    VIETNAM = ("vietnam", "vn")
+    VIETNAM = ("vietnam", "vn", "com")
     # internal for ziprecruiter
     US_CANADA = ("usa/ca", "www")
@@ -132,7 +134,10 @@ class Country(Enum):
     @property
     def indeed_domain_value(self):
-        return self.value[1]
+        subdomain, _, api_country_code = self.value[1].partition(":")
+        if subdomain and api_country_code:
+            return subdomain, api_country_code.upper()
+        return self.value[1], self.value[1].upper()
     @property
     def glassdoor_domain_value(self):
@@ -145,7 +150,7 @@ class Country(Enum):
         else:
             raise Exception(f"Glassdoor is not available for {self.name}")
-    def get_url(self):
+    def get_glassdoor_url(self):
         return f"https://{self.glassdoor_domain_value}/"
     @classmethod
@@ -153,7 +158,7 @@ class Country(Enum):
         """Convert a string to the corresponding Country enum."""
         country_str = country_str.strip().lower()
         for country in cls:
-            country_names = country.value[0].split(',')
+            country_names = country.value[0].split(",")
             if country_str in country_names:
                 return country
         valid_countries = [country.value for country in cls]
@@ -163,7 +168,7 @@ class Country(Enum):
 class Location(BaseModel):
-    country: Country | None = None
+    country: Country | str | None = None
     city: Optional[str] = None
     state: Optional[str] = None
@@ -173,7 +178,12 @@ class Location(BaseModel):
             location_parts.append(self.city)
         if self.state:
             location_parts.append(self.state)
-        if self.country and self.country not in (Country.US_CANADA, Country.WORLDWIDE):
+        if isinstance(self.country, str):
+            location_parts.append(self.country)
+        elif self.country and self.country not in (
+            Country.US_CANADA,
+            Country.WORLDWIDE,
+        ):
             country_name = self.country.value[0]
             if "," in country_name:
                 country_name = country_name.split(",")[0]
@@ -216,22 +226,33 @@ class DescriptionFormat(Enum):
 class JobPost(BaseModel):
+    id: str | None = None
     title: str
-    company_name: str
+    company_name: str | None
     job_url: str
+    job_url_direct: str | None = None
     location: Optional[Location]
     description: str | None = None
     company_url: str | None = None
+    company_url_direct: str | None = None
     job_type: list[JobType] | None = None
     compensation: Compensation | None = None
     date_posted: date | None = None
-    benefits: str | None = None
     emails: list[str] | None = None
-    num_urgent_words: int | None = None
     is_remote: bool | None = None
-    # company_industry: str | None = None
+    # indeed specific
+    company_addresses: str | None = None
+    company_industry: str | None = None
+    company_num_employees: str | None = None
+    company_revenue: str | None = None
+    company_description: str | None = None
+    ceo_name: str | None = None
+    ceo_photo_url: str | None = None
+    logo_photo_url: str | None = None
+    banner_photo_url: str | None = None
 class JobResponse(BaseModel):
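The reworked `indeed_domain_value` property packs two values into one enum slot via a `:` separator. A toy reproduction of just that logic (standalone enum, not the library's full `Country`):

```python
# Toy reproduction of the `subdomain:api_code` encoding from the diff above.
from enum import Enum

class Country(Enum):
    UK = ("uk,united kingdom", "uk:gb", "co.uk")
    FRANCE = ("france", "fr")

    @property
    def indeed_domain_value(self):
        subdomain, _, api_country_code = self.value[1].partition(":")
        if subdomain and api_country_code:
            return subdomain, api_country_code.upper()  # explicit API code after ':'
        return self.value[1], self.value[1].upper()     # else reuse the subdomain

print(Country.UK.indeed_domain_value)      # ('uk', 'GB')
print(Country.FRANCE.indeed_domain_value)  # ('fr', 'FR')
```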

src/jobspy/scrapers/__init__.py

@@ -1,10 +1,14 @@
+from __future__ import annotations
+from abc import ABC, abstractmethod
 from ..jobs import (
     Enum,
     BaseModel,
     JobType,
     JobResponse,
     Country,
-    DescriptionFormat
+    DescriptionFormat,
 )
@@ -34,9 +38,10 @@ class ScraperInput(BaseModel):
     hours_old: int | None = None
-class Scraper:
+class Scraper(ABC):
     def __init__(self, site: Site, proxy: list[str] | None = None):
         self.site = site
         self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)
+    @abstractmethod
     def scrape(self, scraper_input: ScraperInput) -> JobResponse: ...
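With `Scraper` now an ABC, every concrete scraper must implement `scrape`. A minimal subclass sketch (the `ExampleScraper` name is hypothetical):

```python
# Hedged sketch of the ABC contract introduced in the diff above.
from abc import ABC, abstractmethod

class Scraper(ABC):
    def __init__(self, site, proxy=None):
        self.site = site
        self.proxy = {"http": proxy, "https": proxy} if proxy else None

    @abstractmethod
    def scrape(self, scraper_input): ...

class ExampleScraper(Scraper):
    def scrape(self, scraper_input):
        return []  # a real scraper returns a JobResponse

ExampleScraper(site="example").scrape(None)  # fine
# Scraper(site="example")  # TypeError: can't instantiate abstract class
```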

src/jobspy/scrapers/glassdoor/__init__.py

@@ -4,19 +4,23 @@ jobspy.scrapers.glassdoor
 This module contains routines to scrape Glassdoor.
 """
+from __future__ import annotations
+import re
 import json
 import requests
-from typing import Optional
+from typing import Optional, Tuple
 from datetime import datetime, timedelta
 from concurrent.futures import ThreadPoolExecutor, as_completed
-from ..utils import count_urgent_words, extract_emails_from_text
 from .. import Scraper, ScraperInput, Site
+from ..utils import extract_emails_from_text
 from ..exceptions import GlassdoorException
 from ..utils import (
     create_session,
     markdown_converter,
-    logger
+    logger,
 )
 from ...jobs import (
     JobPost,
@@ -25,7 +29,7 @@ from ...jobs import (
     Location,
     JobResponse,
     JobType,
-    DescriptionFormat
+    DescriptionFormat,
 )
@@ -42,6 +46,7 @@ class GlassdoorScraper(Scraper):
         self.session = None
         self.scraper_input = None
         self.jobs_per_page = 30
+        self.max_pages = 30
         self.seen_urls = set()
     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
@@ -52,39 +57,37 @@ class GlassdoorScraper(Scraper):
         """
         self.scraper_input = scraper_input
         self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)
-        self.base_url = self.scraper_input.country.get_url()
+        self.base_url = self.scraper_input.country.get_glassdoor_url()
+        self.session = create_session(self.proxy, is_tls=True, has_retry=True)
+        token = self._get_csrf_token()
+        self.headers["gd-csrf-token"] = token if token else self.fallback_token
         location_id, location_type = self._get_location(
             scraper_input.location, scraper_input.is_remote
         )
         if location_type is None:
+            logger.error("Glassdoor: location not parsed")
             return JobResponse(jobs=[])
         all_jobs: list[JobPost] = []
         cursor = None
-        max_pages = 30
-        self.session = create_session(self.proxy, is_tls=False, has_retry=True)
-        self.session.get(self.base_url)
-        try:
-            for page in range(
-                1 + (scraper_input.offset // self.jobs_per_page),
-                min(
-                    (scraper_input.results_wanted // self.jobs_per_page) + 2,
-                    max_pages + 1,
-                ),
-            ):
-                try:
-                    jobs, cursor = self._fetch_jobs_page(
-                        scraper_input, location_id, location_type, page, cursor
-                    )
-                    all_jobs.extend(jobs)
-                    if len(all_jobs) >= scraper_input.results_wanted:
-                        all_jobs = all_jobs[: scraper_input.results_wanted]
-                        break
-                except Exception as e:
-                    raise GlassdoorException(str(e))
-        except Exception as e:
-            raise GlassdoorException(str(e))
+        range_start = 1 + (scraper_input.offset // self.jobs_per_page)
+        tot_pages = (scraper_input.results_wanted // self.jobs_per_page) + 2
+        range_end = min(tot_pages, self.max_pages + 1)
+        for page in range(range_start, range_end):
+            logger.info(f"Glassdoor search page: {page}")
+            try:
+                jobs, cursor = self._fetch_jobs_page(
+                    scraper_input, location_id, location_type, page, cursor
+                )
+                all_jobs.extend(jobs)
+                if not jobs or len(all_jobs) >= scraper_input.results_wanted:
+                    all_jobs = all_jobs[: scraper_input.results_wanted]
+                    break
+            except Exception as e:
+                logger.error(f"Glassdoor: {str(e)}")
+                break
         return JobResponse(jobs=all_jobs)
     def _fetch_jobs_page(
@@ -94,63 +97,86 @@ class GlassdoorScraper(Scraper):
         location_type: str,
         page_num: int,
         cursor: str | None,
-    ) -> (list[JobPost], str | None):
+    ) -> Tuple[list[JobPost], str | None]:
         """
         Scrapes a page of Glassdoor for jobs with scraper_input criteria
         """
+        jobs = []
         self.scraper_input = scraper_input
         try:
-            payload = self._add_payload(
-                location_id, location_type, page_num, cursor
-            )
+            payload = self._add_payload(location_id, location_type, page_num, cursor)
             response = self.session.post(
-                f"{self.base_url}/graph", headers=self.headers, timeout=10, data=payload
+                f"{self.base_url}/graph",
+                headers=self.headers,
+                timeout_seconds=15,
+                data=payload,
             )
             if response.status_code != 200:
-                raise GlassdoorException(
-                    f"bad response status code: {response.status_code}"
-                )
+                exc_msg = f"bad response status code: {response.status_code}"
+                raise GlassdoorException(exc_msg)
             res_json = response.json()[0]
             if "errors" in res_json:
                 raise ValueError("Error encountered in API response")
-        except Exception as e:
-            raise GlassdoorException(str(e))
+        except (
+            requests.exceptions.ReadTimeout,
+            GlassdoorException,
+            ValueError,
+            Exception,
+        ) as e:
+            logger.error(f"Glassdoor: {str(e)}")
+            return jobs, None
         jobs_data = res_json["data"]["jobListings"]["jobListings"]
-        jobs = []
         with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
-            future_to_job_data = {executor.submit(self._process_job, job): job for job in jobs_data}
+            future_to_job_data = {
+                executor.submit(self._process_job, job): job for job in jobs_data
+            }
            for future in as_completed(future_to_job_data):
                 try:
                     job_post = future.result()
                     if job_post:
                         jobs.append(job_post)
                 except Exception as exc:
-                    raise GlassdoorException(f'Glassdoor generated an exception: {exc}')
+                    raise GlassdoorException(f"Glassdoor generated an exception: {exc}")
         return jobs, self.get_cursor_for_page(
             res_json["data"]["jobListings"]["paginationCursors"], page_num + 1
         )
+    def _get_csrf_token(self):
+        """
+        Fetches csrf token needed for API by visiting a generic page
+        """
+        res = self.session.get(
+            f"{self.base_url}/Job/computer-science-jobs.htm", headers=self.headers
+        )
+        pattern = r'"token":\s*"([^"]+)"'
+        matches = re.findall(pattern, res.text)
+        token = None
+        if matches:
+            token = matches[0]
+        return token
     def _process_job(self, job_data):
         """
         Processes a single job and fetches its description.
         """
         job_id = job_data["jobview"]["job"]["listingId"]
-        job_url = f'{self.base_url}job-listing/j?jl={job_id}'
+        job_url = f"{self.base_url}job-listing/j?jl={job_id}"
         if job_url in self.seen_urls:
             return None
         self.seen_urls.add(job_url)
         job = job_data["jobview"]
         title = job["job"]["jobTitleText"]
         company_name = job["header"]["employerNameFromSearch"]
-        company_id = job_data['jobview']['header']['employer']['id']
+        company_id = job_data["jobview"]["header"]["employer"]["id"]
         location_name = job["header"].get("locationName", "")
         location_type = job["header"].get("locationType", "")
         age_in_days = job["header"].get("ageInDays")
         is_remote, location = False, None
-        date_posted = (datetime.now() - timedelta(days=age_in_days)).date() if age_in_days is not None else None
+        date_diff = (datetime.now() - timedelta(days=age_in_days)).date()
+        date_posted = date_diff if age_in_days is not None else None
         if location_type == "S":
             is_remote = True
@@ -162,9 +188,11 @@ class GlassdoorScraper(Scraper):
             description = self._fetch_job_description(job_id)
         except:
             description = None
+        company_url = f"{self.base_url}Overview/W-EI_IE{company_id}.htm"
         return JobPost(
+            id=str(job_id),
             title=title,
-            company_url=f"{self.base_url}Overview/W-EI_IE{company_id}.htm" if company_id else None,
+            company_url=company_url if company_id else None,
             company_name=company_name,
             date_posted=date_posted,
             job_url=job_url,
@@ -173,7 +201,6 @@ class GlassdoorScraper(Scraper):
             is_remote=is_remote,
             description=description,
             emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
         )
     def _fetch_job_description(self, job_id):
@@ -187,7 +214,7 @@ class GlassdoorScraper(Scraper):
             "variables": {
                 "jl": job_id,
                 "queryString": "q",
-                "pageTypeEnum": "SERP"
+                "pageTypeEnum": "SERP",
             },
             "query": """
             query JobDetailQuery($jl: Long!, $queryString: String, $pageTypeEnum: PageTypeEnum) {
@@ -202,28 +229,33 @@ class GlassdoorScraper(Scraper):
                 __typename
                 }
             }
-            """
+            """,
             }
         ]
         res = requests.post(url, json=body, headers=self.headers)
         if res.status_code != 200:
             return None
         data = res.json()[0]
-        desc = data['data']['jobview']['job']['description']
-        return markdown_converter(desc) if self.scraper_input.description_format == DescriptionFormat.MARKDOWN else desc
+        desc = data["data"]["jobview"]["job"]["description"]
+        if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
+            desc = markdown_converter(desc)
+        return desc
     def _get_location(self, location: str, is_remote: bool) -> (int, str):
         if not location or is_remote:
             return "11047", "STATE"  # remote options
         url = f"{self.base_url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
         session = create_session(self.proxy, has_retry=True)
-        res = session.get(url)
+        res = self.session.get(url, headers=self.headers)
         if res.status_code != 200:
             if res.status_code == 429:
-                logger.error(f'429 Response - Blocked by Glassdoor for too many requests')
+                err = f"429 Response - Blocked by Glassdoor for too many requests"
+                logger.error(err)
                 return None, None
             else:
-                logger.error(f'Glassdoor response status code {res.status_code}')
+                err = f"Glassdoor response status code {res.status_code}"
+                err += f" - {res.text}"
+                logger.error(f"Glassdoor response status code {res.status_code}")
                 return None, None
         items = res.json()
@@ -234,7 +266,7 @@ class GlassdoorScraper(Scraper):
             location_type = "CITY"
         elif location_type == "S":
             location_type = "STATE"
-        elif location_type == 'N':
+        elif location_type == "N":
             location_type = "COUNTRY"
         return int(items[0]["locationId"]), location_type
@@ -245,7 +277,9 @@ class GlassdoorScraper(Scraper):
         page_num: int,
         cursor: str | None = None,
     ) -> str:
-        fromage = max(self.scraper_input.hours_old // 24, 1) if self.scraper_input.hours_old else None
+        fromage = None
+        if self.scraper_input.hours_old:
+            fromage = max(self.scraper_input.hours_old // 24, 1)
         filter_params = []
         if self.scraper_input.easy_apply:
             filter_params.append({"filterKey": "applicationType", "values": "1"})
@@ -264,9 +298,76 @@ class GlassdoorScraper(Scraper):
                 "pageNumber": page_num,
                 "pageCursor": cursor,
                 "fromage": fromage,
-                "sort": "date"
+                "sort": "date",
             },
-            "query": """
+            "query": self.query_template,
+        }
+        if self.scraper_input.job_type:
+            payload["variables"]["filterParams"].append(
+                {"filterKey": "jobType", "values": self.scraper_input.job_type.value[0]}
+            )
+        return json.dumps([payload])
+    @staticmethod
+    def parse_compensation(data: dict) -> Optional[Compensation]:
+        pay_period = data.get("payPeriod")
+        adjusted_pay = data.get("payPeriodAdjustedPay")
+        currency = data.get("payCurrency", "USD")
+        if not pay_period or not adjusted_pay:
+            return None
+        interval = None
+        if pay_period == "ANNUAL":
+            interval = CompensationInterval.YEARLY
+        elif pay_period:
+            interval = CompensationInterval.get_interval(pay_period)
+        min_amount = int(adjusted_pay.get("p10") // 1)
+        max_amount = int(adjusted_pay.get("p90") // 1)
+        return Compensation(
+            interval=interval,
+            min_amount=min_amount,
+            max_amount=max_amount,
+            currency=currency,
+        )
+    @staticmethod
+    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
+        for job_type in JobType:
+            if job_type_str in job_type.value:
+                return [job_type]
+    @staticmethod
+    def parse_location(location_name: str) -> Location | None:
+        if not location_name or location_name == "Remote":
+            return
+        city, _, state = location_name.partition(", ")
+        return Location(city=city, state=state)
+    @staticmethod
+    def get_cursor_for_page(pagination_cursors, page_num):
+        for cursor_data in pagination_cursors:
+            if cursor_data["pageNumber"] == page_num:
+                return cursor_data["cursor"]
+    fallback_token = "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok"
+    headers = {
+        "authority": "www.glassdoor.com",
+        "accept": "*/*",
+        "accept-language": "en-US,en;q=0.9",
+        "apollographql-client-name": "job-search-next",
+        "apollographql-client-version": "4.65.5",
+        "content-type": "application/json",
+        "origin": "https://www.glassdoor.com",
+        "referer": "https://www.glassdoor.com/",
+        "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+        "sec-ch-ua-mobile": "?0",
+        "sec-ch-ua-platform": '"macOS"',
+        "sec-fetch-dest": "empty",
+        "sec-fetch-mode": "cors",
+        "sec-fetch-site": "same-origin",
+        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
+    }
+    query_template = """
         query JobSearchResultsQuery(
             $excludeJobListingIds: [Long!],
             $keyword: String,
@@ -431,70 +532,4 @@ class GlassdoorScraper(Scraper):
             }
             __typename
         }
         """
-        }
-        if self.scraper_input.job_type:
-            payload["variables"]["filterParams"].append(
-                {"filterKey": "jobType", "values": self.scraper_input.job_type.value[0]}
-            )
-        return json.dumps([payload])
-    @staticmethod
-    def parse_compensation(data: dict) -> Optional[Compensation]:
-        pay_period = data.get("payPeriod")
-        adjusted_pay = data.get("payPeriodAdjustedPay")
-        currency = data.get("payCurrency", "USD")
-        if not pay_period or not adjusted_pay:
-            return None
-        interval = None
-        if pay_period == "ANNUAL":
-            interval = CompensationInterval.YEARLY
-        elif pay_period:
-            interval = CompensationInterval.get_interval(pay_period)
-        min_amount = int(adjusted_pay.get("p10") // 1)
-        max_amount = int(adjusted_pay.get("p90") // 1)
-        return Compensation(
-            interval=interval,
-            min_amount=min_amount,
-            max_amount=max_amount,
-            currency=currency,
-        )
-    @staticmethod
-    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
-        for job_type in JobType:
-            if job_type_str in job_type.value:
-                return [job_type]
-    @staticmethod
-    def parse_location(location_name: str) -> Location | None:
-        if not location_name or location_name == "Remote":
-            return
-        city, _, state = location_name.partition(", ")
-        return Location(city=city, state=state)
-    @staticmethod
-    def get_cursor_for_page(pagination_cursors, page_num):
-        for cursor_data in pagination_cursors:
-            if cursor_data["pageNumber"] == page_num:
-                return cursor_data["cursor"]
-    headers = {
-        "authority": "www.glassdoor.com",
-        "accept": "*/*",
-        "accept-language": "en-US,en;q=0.9",
-        "apollographql-client-name": "job-search-next",
-        "apollographql-client-version": "4.65.5",
-        "content-type": "application/json",
-        "gd-csrf-token": "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok",
-        "origin": "https://www.glassdoor.com",
-        "referer": "https://www.glassdoor.com/",
-        "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
-        "sec-ch-ua-mobile": "?0",
-        "sec-ch-ua-platform": '"macOS"',
-        "sec-fetch-dest": "empty",
-        "sec-fetch-mode": "cors",
-        "sec-fetch-site": "same-origin",
-        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
-    }
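Glassdoor pagination above is cursor-based: each response carries `paginationCursors`, and `get_cursor_for_page` scans them for the cursor that unlocks the next page. The lookup in isolation:

```python
# Standalone illustration of the cursor lookup used by get_cursor_for_page above.
pagination_cursors = [
    {"pageNumber": 2, "cursor": "AB12"},
    {"pageNumber": 3, "cursor": "CD34"},
]

def get_cursor_for_page(cursors, page_num):
    for cursor_data in cursors:
        if cursor_data["pageNumber"] == page_num:
            return cursor_data["cursor"]  # None if the page has no cursor yet

assert get_cursor_for_page(pagination_cursors, 3) == "CD34"
```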

src/jobspy/scrapers/indeed/__init__.py

@@ -4,25 +4,22 @@ jobspy.scrapers.indeed
This module contains routines to scrape Indeed. This module contains routines to scrape Indeed.
""" """
import re
import math
import json
import requests
from typing import Any
from datetime import datetime
from bs4 import BeautifulSoup from __future__ import annotations
from bs4.element import Tag
import math
from typing import Tuple
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor, Future from concurrent.futures import ThreadPoolExecutor, Future
from ..exceptions import IndeedException import requests
from .. import Scraper, ScraperInput, Site
from ..utils import ( from ..utils import (
count_urgent_words,
extract_emails_from_text, extract_emails_from_text,
create_session,
get_enum_from_job_type, get_enum_from_job_type,
markdown_converter, markdown_converter,
logger logger,
) )
from ...jobs import ( from ...jobs import (
JobPost, JobPost,
@@ -31,20 +28,21 @@ from ...jobs import (
Location, Location,
JobResponse, JobResponse,
JobType, JobType,
DescriptionFormat DescriptionFormat,
) )
from .. import Scraper, ScraperInput, Site
class IndeedScraper(Scraper): class IndeedScraper(Scraper):
def __init__(self, proxy: str | None = None): def __init__(self, proxy: str | None = None):
""" """
Initializes IndeedScraper with the Indeed job search url Initializes IndeedScraper with the Indeed API url
""" """
self.scraper_input = None self.scraper_input = None
self.jobs_per_page = 25 self.jobs_per_page = 100
self.num_workers = 10 self.num_workers = 10
self.seen_urls = set() self.seen_urls = set()
self.headers = None
self.api_country_code = None
self.base_url = None self.base_url = None
self.api_url = "https://apis.indeed.com/graphql" self.api_url = "https://apis.indeed.com/graphql"
site = Site(Site.INDEED) site = Site(Site.INDEED)
@@ -57,285 +55,283 @@ class IndeedScraper(Scraper):
:return: job_response :return: job_response
""" """
self.scraper_input = scraper_input self.scraper_input = scraper_input
job_list = self._scrape_page() domain, self.api_country_code = self.scraper_input.country.indeed_domain_value
pages_processed = 1 self.base_url = f"https://{domain}.indeed.com"
self.headers = self.api_headers.copy()
self.headers["indeed-co"] = self.scraper_input.country.indeed_domain_value
job_list = []
page = 1
while len(self.seen_urls) < scraper_input.results_wanted: cursor = None
pages_to_process = math.ceil((scraper_input.results_wanted - len(self.seen_urls)) / self.jobs_per_page) offset_pages = math.ceil(self.scraper_input.offset / 100)
new_jobs = False for _ in range(offset_pages):
logger.info(f"Indeed skipping search page: {page}")
with ThreadPoolExecutor(max_workers=10) as executor: __, cursor = self._scrape_page(cursor)
futures: list[Future] = [ if not __:
executor.submit(self._scrape_page, page + pages_processed) logger.info(f"Indeed found no jobs on page: {page}")
for page in range(pages_to_process)
]
for future in futures:
jobs = future.result()
if jobs:
job_list += jobs
new_jobs = True
if len(self.seen_urls) >= scraper_input.results_wanted:
break
pages_processed += pages_to_process
if not new_jobs:
break break
if len(self.seen_urls) > scraper_input.results_wanted: while len(self.seen_urls) < scraper_input.results_wanted:
job_list = job_list[:scraper_input.results_wanted] logger.info(f"Indeed search page: {page}")
jobs, cursor = self._scrape_page(cursor)
if not jobs:
logger.info(f"Indeed found no jobs on page: {page}")
break
job_list += jobs
page += 1
return JobResponse(jobs=job_list[: scraper_input.results_wanted])
return JobResponse(jobs=job_list) def _scrape_page(self, cursor: str | None) -> Tuple[list[JobPost], str | None]:
def _scrape_page(self, page: int=0) -> list[JobPost]:
""" """
Scrapes a page of Indeed for jobs with scraper_input criteria Scrapes a page of Indeed for jobs with scraper_input criteria
:param page: :param cursor:
:return: jobs found on page, total number of jobs found for search :return: jobs found on page, next page cursor
""" """
job_list = [] jobs = []
domain = self.scraper_input.country.indeed_domain_value new_cursor = None
self.base_url = f"https://{domain}.indeed.com" filters = self._build_filters()
search_term = self.scraper_input.search_term.replace('"', '\\"') if self.scraper_input.search_term else ""
try: query = self.job_search_query.format(
session = create_session(self.proxy) what=(
response = session.get( f'what: "{search_term}"'
f"{self.base_url}/m/jobs", if search_term
headers=self.headers, else ""
params=self._add_params(page), ),
location=(
f'location: {{where: "{self.scraper_input.location}", radius: {self.scraper_input.distance}, radiusUnit: MILES}}'
if self.scraper_input.location
else ""
),
dateOnIndeed=self.scraper_input.hours_old,
cursor=f'cursor: "{cursor}"' if cursor else "",
filters=filters,
)
payload = {
"query": query,
}
api_headers = self.api_headers.copy()
api_headers["indeed-co"] = self.api_country_code
response = requests.post(
self.api_url,
headers=api_headers,
json=payload,
proxies=self.proxy,
timeout=10,
)
if response.status_code != 200:
logger.info(
f"Indeed responded with status code: {response.status_code} (submit GitHub issue if this appears to be a bug)"
) )
if response.status_code not in range(200, 400): return jobs, new_cursor
if response.status_code == 429: data = response.json()
logger.error(f'429 Response - Blocked by Indeed for too many requests') jobs = data["data"]["jobSearch"]["results"]
else: new_cursor = data["data"]["jobSearch"]["pageInfo"]["nextCursor"]
logger.error(f'Indeed response status code {response.status_code}')
return job_list
except Exception as e:
if "Proxy responded with" in str(e):
logger.error(f'Indeed: Bad proxy')
else:
logger.error(f'Indeed: {str(e)}')
return job_list
soup = BeautifulSoup(response.content, "html.parser")
if "did not match any jobs" in response.text:
return job_list
jobs = IndeedScraper._parse_jobs(soup)
if not jobs:
return []
if (
not jobs.get("metaData", {})
.get("mosaicProviderJobCardsModel", {})
.get("results")
):
logger.error("Indeed - No jobs found.")
return []
jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
job_keys = [job['jobkey'] for job in jobs]
jobs_detailed = self._get_job_details(job_keys)
with ThreadPoolExecutor(max_workers=self.num_workers) as executor: with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
job_results: list[Future] = [ job_results: list[Future] = [
executor.submit(self._process_job, job, job_detailed['job']) for job, job_detailed in zip(jobs, jobs_detailed) executor.submit(self._process_job, job["job"]) for job in jobs
] ]
job_list = [result.result() for result in job_results if result.result()] job_list = [result.result() for result in job_results if result.result()]
return job_list, new_cursor
return job_list def _build_filters(self):
"""
Builds the filters dict for job type/is_remote. If hours_old is provided, composite filter for job_type/is_remote is not possible.
IndeedApply: filters: { keyword: { field: "indeedApplyScope", keys: ["DESKTOP"] } }
"""
filters_str = ""
if self.scraper_input.hours_old:
filters_str = """
filters: {{
date: {{
field: "dateOnIndeed",
start: "{start}h"
}}
}}
""".format(
start=self.scraper_input.hours_old
)
elif self.scraper_input.easy_apply:
filters_str = """
filters: {
keyword: {
field: "indeedApplyScope",
keys: ["DESKTOP"]
}
}
"""
elif self.scraper_input.job_type or self.scraper_input.is_remote:
job_type_key_mapping = {
JobType.FULL_TIME: "CF3CP",
JobType.PART_TIME: "75GKK",
JobType.CONTRACT: "NJXCK",
JobType.INTERNSHIP: "VDTG7",
}
def _process_job(self, job: dict, job_detailed: dict) -> JobPost | None: keys = []
job_url = f'{self.base_url}/m/jobs/viewjob?jk={job["jobkey"]}' if self.scraper_input.job_type:
job_url_client = f'{self.base_url}/viewjob?jk={job["jobkey"]}' key = job_type_key_mapping[self.scraper_input.job_type]
keys.append(key)
if self.scraper_input.is_remote:
keys.append("DSQF7")
if keys:
keys_str = '", "'.join(keys) # Prepare your keys string
filters_str = f"""
filters: {{
composite: {{
filters: [{{
keyword: {{
field: "attributes",
keys: ["{keys_str}"]
}}
}}]
}}
}}
"""
return filters_str
def _process_job(self, job: dict) -> JobPost | None:
"""
Parses the job dict into JobPost model
:param job: dict to parse
:return: JobPost if it's a new job
"""
job_url = f'{self.base_url}/viewjob?jk={job["key"]}'
if job_url in self.seen_urls: if job_url in self.seen_urls:
return None return
self.seen_urls.add(job_url) self.seen_urls.add(job_url)
description = job_detailed['description']['html'] description = job["description"]["html"]
description = markdown_converter(description) if self.scraper_input.description_format == DescriptionFormat.MARKDOWN else description if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
job_type = self._get_job_type(job) description = markdown_converter(description)
timestamp_seconds = job["pubDate"] / 1000
date_posted = datetime.fromtimestamp(timestamp_seconds) job_type = self._get_job_type(job["attributes"])
date_posted = date_posted.strftime("%Y-%m-%d") timestamp_seconds = job["datePublished"] / 1000
date_posted = datetime.fromtimestamp(timestamp_seconds).strftime("%Y-%m-%d")
employer = job["employer"].get("dossier") if job["employer"] else None
employer_details = employer.get("employerDetails", {}) if employer else {}
rel_url = job["employer"]["relativeCompanyPageUrl"] if job["employer"] else None
return JobPost( return JobPost(
title=job["normTitle"], id=str(job["key"]),
title=job["title"],
description=description, description=description,
company_name=job["company"], company_name=job["employer"].get("name") if job.get("employer") else None,
company_url=f"{self.base_url}{job_detailed['employer']['relativeCompanyPageUrl']}" if job_detailed[ company_url=(f"{self.base_url}{rel_url}" if job["employer"] else None),
'employer'] else None, company_url_direct=(
employer["links"]["corporateWebsite"] if employer else None
),
location=Location( location=Location(
city=job.get("jobLocationCity"), city=job.get("location", {}).get("city"),
state=job.get("jobLocationState"), state=job.get("location", {}).get("admin1Code"),
country=self.scraper_input.country, country=job.get("location", {}).get("countryCode"),
), ),
job_type=job_type, job_type=job_type,
compensation=self._get_compensation(job, job_detailed), compensation=self._get_compensation(job),
date_posted=date_posted, date_posted=date_posted,
job_url=job_url_client, job_url=job_url,
job_url_direct=(
job["recruit"].get("viewJobUrl") if job.get("recruit") else None
),
emails=extract_emails_from_text(description) if description else None, emails=extract_emails_from_text(description) if description else None,
num_urgent_words=count_urgent_words(description) if description else None, is_remote=self._is_job_remote(job, description),
is_remote=self._is_job_remote(job, job_detailed, description) company_addresses=(
employer_details["addresses"][0]
if employer_details.get("addresses")
else None
),
company_industry=(
employer_details["industry"]
.replace("Iv1", "")
.replace("_", " ")
.title()
if employer_details.get("industry")
else None
),
company_num_employees=employer_details.get("employeesLocalizedLabel"),
company_revenue=employer_details.get("revenueLocalizedLabel"),
company_description=employer_details.get("briefDescription"),
ceo_name=employer_details.get("ceoName"),
ceo_photo_url=employer_details.get("ceoPhotoUrl"),
logo_photo_url=(
employer["images"].get("squareLogoUrl")
if employer and employer.get("images")
else None
),
banner_photo_url=(
employer["images"].get("headerImageUrl")
if employer and employer.get("images")
else None
),
) )
def _get_job_details(self, job_keys: list[str]) -> dict:
"""
Queries the GraphQL endpoint for detailed job information for the given job keys.
"""
job_keys_gql = '[' + ', '.join(f'"{key}"' for key in job_keys) + ']'
payload = dict(self.api_payload)
payload["query"] = self.api_payload["query"].format(job_keys_gql=job_keys_gql)
response = requests.post(self.api_url, headers=self.api_headers, json=payload, proxies=self.proxy)
if response.status_code == 200:
return response.json()['data']['jobData']['results']
else:
return {}
def _add_params(self, page: int) -> dict[str, str | Any]:
fromage = max(self.scraper_input.hours_old // 24, 1) if self.scraper_input.hours_old else None
params = {
"q": self.scraper_input.search_term,
"l": self.scraper_input.location if self.scraper_input.location else self.scraper_input.country.value[0].split(',')[-1],
"filter": 0,
"start": self.scraper_input.offset + page * 10,
"sort": "date",
"fromage": fromage,
}
if self.scraper_input.distance:
params["radius"] = self.scraper_input.distance
sc_values = []
if self.scraper_input.is_remote:
sc_values.append("attr(DSQF7)")
if self.scraper_input.job_type:
sc_values.append("jt({})".format(self.scraper_input.job_type.value[0]))
if sc_values:
params["sc"] = "0kf:" + "".join(sc_values) + ";"
if self.scraper_input.easy_apply:
params['iafilter'] = 1
return params
@staticmethod @staticmethod
def _get_job_type(job: dict) -> list[JobType] | None: def _get_job_type(attributes: list) -> list[JobType]:
""" """
Parses the job to get list of job types Parses the attributes to get list of job types
:param job: :param attributes:
:return: :return: list of JobType
""" """
job_types: list[JobType] = [] job_types: list[JobType] = []
for taxonomy in job["taxonomyAttributes"]: for attribute in attributes:
if taxonomy["label"] == "job-types": job_type_str = attribute["label"].replace("-", "").replace(" ", "").lower()
for i in range(len(taxonomy["attributes"])): job_type = get_enum_from_job_type(job_type_str)
label = taxonomy["attributes"][i].get("label") if job_type:
if label: job_types.append(job_type)
job_type_str = label.replace("-", "").replace(" ", "").lower()
job_type = get_enum_from_job_type(job_type_str)
if job_type:
job_types.append(job_type)
return job_types return job_types
     @staticmethod
-    def _get_compensation(job: dict, job_detailed: dict) -> Compensation:
+    def _get_compensation(job: dict) -> Compensation | None:
         """
-        Parses the job to get
-        :param job:
+        Parses the job to get compensation
         :param job:
-        :param job_detailed:
         :return: compensation object
         """
-        comp = job_detailed['compensation']['baseSalary']
-        if comp:
-            interval = IndeedScraper._get_correct_interval(comp['unitOfWork'])
-            if interval:
-                return Compensation(
-                    interval=interval,
-                    min_amount=round(comp['range'].get('min'), 2) if comp['range'].get('min') is not None else None,
-                    max_amount=round(comp['range'].get('max'), 2) if comp['range'].get('max') is not None else None,
-                    currency=job_detailed['compensation']['currencyCode']
-                )
-        extracted_salary = job.get("extractedSalary")
-        compensation = None
-        if extracted_salary:
-            salary_snippet = job.get("salarySnippet")
-            currency = salary_snippet.get("currency") if salary_snippet else None
-            interval = (extracted_salary.get("type"),)
-            if isinstance(interval, tuple):
-                interval = interval[0]
-            interval = interval.upper()
-            if interval in CompensationInterval.__members__:
-                compensation = Compensation(
-                    interval=CompensationInterval[interval],
-                    min_amount=int(extracted_salary.get("min")),
-                    max_amount=int(extracted_salary.get("max")),
-                    currency=currency,
-                )
-        return compensation
-
-    @staticmethod
-    def _parse_jobs(soup: BeautifulSoup) -> dict:
-        """
-        Parses the jobs from the soup object
-        :param soup:
-        :return: jobs
-        """
-
-        def find_mosaic_script() -> Tag | None:
-            script_tags = soup.find_all("script")
-            for tag in script_tags:
-                if (
-                    tag.string
-                    and "mosaic.providerData" in tag.string
-                    and "mosaic-provider-jobcards" in tag.string
-                ):
-                    return tag
-            return None
-
-        script_tag = find_mosaic_script()
-        if script_tag:
-            script_str = script_tag.string
-            pattern = r'window.mosaic.providerData\["mosaic-provider-jobcards"\]\s*=\s*({.*?});'
-            p = re.compile(pattern, re.DOTALL)
-            m = p.search(script_str)
-            if m:
-                jobs = json.loads(m.group(1).strip())
-                return jobs
-            else:
-                logger.warning(f'Indeed: Could not find mosaic provider job cards data')
-                return {}
-        else:
-            logger.warning(f"Indeed: Could not parse any jobs on the page")
-            return {}
+        comp = job["compensation"]["baseSalary"]
+        if not comp:
+            return None
+        interval = IndeedScraper._get_compensation_interval(comp["unitOfWork"])
+        if not interval:
+            return None
+        min_range = comp["range"].get("min")
+        max_range = comp["range"].get("max")
+        return Compensation(
+            interval=interval,
+            min_amount=round(min_range, 2) if min_range is not None else None,
+            max_amount=round(max_range, 2) if max_range is not None else None,
+            currency=job["compensation"]["currencyCode"],
+        )
     @staticmethod
-    def _is_job_remote(job: dict, job_detailed: dict, description: str) -> bool:
-        remote_keywords = ['remote', 'work from home', 'wfh']
+    def _is_job_remote(job: dict, description: str) -> bool:
+        """
+        Searches the description, location, and attributes to check if job is remote
+        """
+        remote_keywords = ["remote", "work from home", "wfh"]
         is_remote_in_attributes = any(
-            any(keyword in attr['label'].lower() for keyword in remote_keywords)
-            for attr in job_detailed['attributes']
+            any(keyword in attr["label"].lower() for keyword in remote_keywords)
+            for attr in job["attributes"]
         )
-        is_remote_in_description = any(
-            keyword in description.lower() for keyword in remote_keywords
-        )
+        is_remote_in_description = any(keyword in description.lower() for keyword in remote_keywords)
         is_remote_in_location = any(
-            keyword in job_detailed['location']['formatted']['long'].lower()
+            keyword in job["location"]["formatted"]["long"].lower()
             for keyword in remote_keywords
         )
-        is_remote_in_taxonomy = any(
-            taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0
-            for taxonomy in job.get("taxonomyAttributes", [])
-        )
-        return is_remote_in_attributes or is_remote_in_description or is_remote_in_location or is_remote_in_taxonomy
+        return (
+            is_remote_in_attributes or is_remote_in_description or is_remote_in_location
+        )
     @staticmethod
-    def _get_correct_interval(interval: str) -> CompensationInterval:
+    def _get_compensation_interval(interval: str) -> CompensationInterval:
         interval_mapping = {
             "DAY": "DAILY",
             "YEAR": "YEARLY",
             "HOUR": "HOURLY",
             "WEEK": "WEEKLY",
-            "MONTH": "MONTHLY"
+            "MONTH": "MONTHLY",
         }
         mapped_interval = interval_mapping.get(interval.upper(), None)
         if mapped_interval and mapped_interval in CompensationInterval.__members__:
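A quick sketch of the new attribute-based job-type lookup above, with a hypothetical payload (the `key`/`label` shape follows the `attributes` selection in the GraphQL query later in this file; the exact label strings Indeed returns are an assumption):

```python
# Labels are normalized by stripping hyphens/spaces and lowercasing
# before the enum lookup, so "Full-time" -> "fulltime".
attributes = [{"key": "CF3CP", "label": "Full-time"}]  # hypothetical attribute
IndeedScraper._get_job_type(attributes)  # -> [JobType.FULL_TIME]
```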
@@ -343,43 +339,44 @@ class IndeedScraper(Scraper):
         else:
             raise ValueError(f"Unsupported interval: {interval}")

-    headers = {
-        'Host': 'www.indeed.com',
-        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
-        'sec-fetch-site': 'same-origin',
-        'sec-fetch-dest': 'document',
-        'accept-language': 'en-US,en;q=0.9',
-        'sec-fetch-mode': 'navigate',
-        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 192.0',
-        'referer': 'https://www.indeed.com/m/jobs?q=software%20intern&l=Dallas%2C%20TX&from=serpso&rq=1&rsIdx=3',
-    }
-
     api_headers = {
-        'Host': 'apis.indeed.com',
-        'content-type': 'application/json',
-        'indeed-api-key': '161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8',
-        'accept': 'application/json',
-        'indeed-locale': 'en-US',
-        'accept-language': 'en-US,en;q=0.9',
-        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1',
-        'indeed-app-info': 'appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone',
-        'indeed-co': 'US',
+        "Host": "apis.indeed.com",
+        "content-type": "application/json",
+        "indeed-api-key": "161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8",
+        "accept": "application/json",
+        "indeed-locale": "en-US",
+        "accept-language": "en-US,en;q=0.9",
+        "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1",
+        "indeed-app-info": "appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone",
     }
-    api_payload = {
-        "query": """
+    job_search_query = """
         query GetJobData {{
-            jobData(input: {{
-                jobKeys: {job_keys_gql}
-            }}) {{
+            jobSearch(
+                {what}
+                {location}
+                includeSponsoredResults: NONE
+                limit: 100
+                sort: DATE
+                {cursor}
+                {filters}
+            ) {{
+                pageInfo {{
+                    nextCursor
+                }}
                 results {{
+                    trackingKey
                     job {{
                         key
                         title
+                        datePublished
+                        dateOnIndeed
                         description {{
                             html
                         }}
                         location {{
                             countryName
                             countryCode
+                            admin1Code
                             city
                             postalCode
                             streetAddress
@@ -401,10 +398,30 @@ class IndeedScraper(Scraper):
                             currencyCode
                         }}
                         attributes {{
+                            key
                             label
                         }}
                         employer {{
                             relativeCompanyPageUrl
+                            name
+                            dossier {{
+                                employerDetails {{
+                                    addresses
+                                    industry
+                                    employeesLocalizedLabel
+                                    revenueLocalizedLabel
+                                    briefDescription
+                                    ceoName
+                                    ceoPhotoUrl
+                                }}
+                                images {{
+                                    headerImageUrl
+                                    squareLogoUrl
+                                }}
+                                links {{
+                                    corporateWebsite
+                                }}
+                            }}
                         }}
                         recruit {{
                             viewJobUrl
@@ -416,4 +433,3 @@ class IndeedScraper(Scraper):
             }}
         }}
         """
-    }
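The doubled braces in `job_search_query` are literal GraphQL braces escaped for `str.format()`; only the `{what}`, `{location}`, `{cursor}`, and `{filters}` placeholders are substituted. A minimal sketch of how a request body might be assembled (the filter syntax shown is illustrative, not taken from this diff):

```python
# Hypothetical placeholder values for a first-page search.
query = job_search_query.format(
    what='what: "software engineer"',
    location='location: {city: "Dallas, TX", radius: 50}',  # assumed syntax
    cursor="",   # empty on page one; pass pageInfo.nextCursor afterwards
    filters="",  # e.g. remote or job-type filters when requested
)
```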

View File

@@ -4,13 +4,16 @@ jobspy.scrapers.linkedin
 This module contains routines to scrape LinkedIn.
 """

+from __future__ import annotations
+
 import time
 import random
+import regex as re
+import urllib.parse
 from typing import Optional
 from datetime import datetime

-import requests
-from requests.exceptions import ProxyError
 from threading import Lock
 from bs4.element import Tag
 from bs4 import BeautifulSoup
@@ -26,30 +29,31 @@ from ...jobs import (
     JobType,
     Country,
     Compensation,
-    DescriptionFormat
+    DescriptionFormat,
 )
 from ..utils import (
     logger,
-    count_urgent_words,
     extract_emails_from_text,
     get_enum_from_job_type,
     currency_parser,
-    markdown_converter
+    markdown_converter,
 )


 class LinkedInScraper(Scraper):
     base_url = "https://www.linkedin.com"
     delay = 3
+    band_delay = 4
+    jobs_per_page = 25

     def __init__(self, proxy: Optional[str] = None):
         """
         Initializes LinkedInScraper with the LinkedIn job search url
         """
+        super().__init__(Site(Site.LINKEDIN), proxy=proxy)
         self.scraper_input = None
-        site = Site(Site.LINKEDIN)
         self.country = "worldwide"
-        super().__init__(site, proxy=proxy)
+        self.job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')

     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
@@ -63,28 +67,35 @@ class LinkedInScraper(Scraper):
         url_lock = Lock()
         page = scraper_input.offset // 25 + 25 if scraper_input.offset else 0
         seconds_old = (
-            scraper_input.hours_old * 3600
-            if scraper_input.hours_old
-            else None
-        )
-        continue_search = lambda: len(job_list) < scraper_input.results_wanted and page < 1000
+            scraper_input.hours_old * 3600 if scraper_input.hours_old else None
+        )
+        continue_search = (
+            lambda: len(job_list) < scraper_input.results_wanted and page < 1000
+        )

         while continue_search():
+            logger.info(f"LinkedIn search page: {page // 25 + 1}")
             session = create_session(is_tls=False, has_retry=True, delay=5)
             params = {
                 "keywords": scraper_input.search_term,
                 "location": scraper_input.location,
                 "distance": scraper_input.distance,
                 "f_WT": 2 if scraper_input.is_remote else None,
-                "f_JT": self.job_type_code(scraper_input.job_type)
-                if scraper_input.job_type
-                else None,
+                "f_JT": (
+                    self.job_type_code(scraper_input.job_type)
+                    if scraper_input.job_type
+                    else None
+                ),
                 "pageNum": 0,
                 "start": page + scraper_input.offset,
                 "f_AL": "true" if scraper_input.easy_apply else None,
-                "f_C": ','.join(map(str, scraper_input.linkedin_company_ids)) if scraper_input.linkedin_company_ids else None,
-                "f_TPR": f"r{seconds_old}",
+                "f_C": (
+                    ",".join(map(str, scraper_input.linkedin_company_ids))
+                    if scraper_input.linkedin_company_ids
+                    else None
+                ),
             }
+            if seconds_old is not None:
+                params["f_TPR"] = f"r{seconds_old}"

             params = {k: v for k, v in params.items() if v is not None}
             try:
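`f_TPR` is now attached only when a time filter was requested; its value is a second count prefixed with `r`:

```python
# e.g. hours_old=24 -> seconds_old = 24 * 3600 -> params["f_TPR"] = "r86400"
```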
@@ -98,16 +109,20 @@ class LinkedInScraper(Scraper):
                 )
                 if response.status_code not in range(200, 400):
                     if response.status_code == 429:
-                        logger.error(f'429 Response - Blocked by LinkedIn for too many requests')
+                        err = (
+                            f"429 Response - Blocked by LinkedIn for too many requests"
+                        )
                     else:
-                        logger.error(f'LinkedIn response status code {response.status_code}')
-                    return JobResponse(job_list=job_list)
+                        err = f"LinkedIn response status code {response.status_code}"
+                        err += f" - {response.text}"
+                    logger.error(err)
+                    return JobResponse(jobs=job_list)
             except Exception as e:
                 if "Proxy responded with" in str(e):
-                    logger.error(f'LinkedIn: Bad proxy')
+                    logger.error(f"LinkedIn: Bad proxy")
                 else:
-                    logger.error(f'LinkedIn: {str(e)}')
-                return JobResponse(job_list=job_list)
+                    logger.error(f"LinkedIn: {str(e)}")
+                return JobResponse(jobs=job_list)

             soup = BeautifulSoup(response.text, "html.parser")
             job_cards = soup.find_all("div", class_="base-search-card")
@@ -127,23 +142,26 @@ class LinkedInScraper(Scraper):
                     continue
                 seen_urls.add(job_url)
                 try:
-                    job_post = self._process_job(job_card, job_url, scraper_input.linkedin_fetch_description)
+                    fetch_desc = scraper_input.linkedin_fetch_description
+                    job_post = self._process_job(job_card, job_url, fetch_desc)
                     if job_post:
                         job_list.append(job_post)
                     if not continue_search():
                         break
                 except Exception as e:
                     raise LinkedInException(str(e))

             if continue_search():
-                time.sleep(random.uniform(self.delay, self.delay + 2))
-                page += 25
+                time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
+                page += self.jobs_per_page

         job_list = job_list[: scraper_input.results_wanted]
         return JobResponse(jobs=job_list)

-    def _process_job(self, job_card: Tag, job_url: str, full_descr: bool) -> Optional[JobPost]:
-        salary_tag = job_card.find('span', class_='job-search-card__salary-info')
+    def _process_job(
+        self, job_card: Tag, job_url: str, full_descr: bool
+    ) -> Optional[JobPost]:
+        salary_tag = job_card.find("span", class_="job-search-card__salary-info")

         compensation = None
         if salary_tag:
@@ -179,19 +197,19 @@ class LinkedInScraper(Scraper):
             if metadata_card
             else None
         )
-        date_posted = description = job_type = None
+        date_posted = None
         if datetime_tag and "datetime" in datetime_tag.attrs:
             datetime_str = datetime_tag["datetime"]
             try:
                 date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
             except:
                 date_posted = None
-        benefits_tag = job_card.find("span", class_="result-benefits__text")
-        benefits = " ".join(benefits_tag.get_text().split()) if benefits_tag else None
+        job_details = {}
         if full_descr:
-            description, job_type = self._get_job_description(job_url)
+            job_details = self._get_job_details(job_url)

         return JobPost(
+            id=self._get_id(job_url),
             title=title,
             company_name=company,
             company_url=company_url,
@@ -199,29 +217,39 @@ class LinkedInScraper(Scraper):
             date_posted=date_posted,
             job_url=job_url,
             compensation=compensation,
-            benefits=benefits,
-            job_type=job_type,
-            description=description,
-            emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
+            job_type=job_details.get("job_type"),
+            description=job_details.get("description"),
+            job_url_direct=job_details.get("job_url_direct"),
+            emails=extract_emails_from_text(job_details.get("description")),
+            logo_photo_url=job_details.get("logo_photo_url"),
         )

-    def _get_job_description(
-        self, job_page_url: str
-    ) -> tuple[None, None] | tuple[str | None, tuple[str | None, JobType | None]]:
+    def _get_id(self, url: str):
         """
-        Retrieves job description by going to the job page url
+        Extracts the job id from the job url
+        :param url:
+        :return: str
+        """
+        if not url:
+            return None
+        return url.split("/")[-1]
+
+    def _get_job_details(self, job_page_url: str) -> dict:
+        """
+        Retrieves job description and other job details by going to the job page url
         :param job_page_url:
-        :return: description or None
+        :return: dict
         """
         try:
             session = create_session(is_tls=False, has_retry=True)
-            response = session.get(job_page_url, headers=self.headers, timeout=5, proxies=self.proxy)
+            response = session.get(
+                job_page_url, headers=self.headers, timeout=5, proxies=self.proxy
+            )
             response.raise_for_status()
         except:
-            return None, None
+            return {}
         if response.url == "https://www.linkedin.com/signup":
-            return None, None
+            return {}

         soup = BeautifulSoup(response.text, "html.parser")
         div_content = soup.find(
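The new `id` field is just the trailing path segment of the job url:

```python
url = "https://www.linkedin.com/jobs/view/3905240222"  # hypothetical job url
url.split("/")[-1]  # -> "3905240222"
```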
@@ -229,15 +257,24 @@ class LinkedInScraper(Scraper):
         )
         description = None
         if div_content is not None:

             def remove_attributes(tag):
                 for attr in list(tag.attrs):
                     del tag[attr]
                 return tag

             div_content = remove_attributes(div_content)
             description = div_content.prettify(formatter="html")
             if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
                 description = markdown_converter(description)
-        return description, self._parse_job_type(soup)
+
+        return {
+            "description": description,
+            "job_type": self._parse_job_type(soup),
+            "job_url_direct": self._parse_job_url_direct(soup),
+            "logo_photo_url": soup.find("img", {"class": "artdeco-entity-image"}).get(
+                "data-delayed-url"
+            ),
+        }

     def _get_location(self, metadata_card: Optional[Tag]) -> Location:
         """
@@ -261,11 +298,8 @@ class LinkedInScraper(Scraper):
             )
         elif len(parts) == 3:
             city, state, country = parts
-            location = Location(
-                city=city,
-                state=state,
-                country=Country.from_string(country)
-            )
+            country = Country.from_string(country)
+            location = Location(city=city, state=state, country=country)

         return location

     @staticmethod
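The three-part branch handles location strings like `"Austin, Texas, United States"`; a sketch with an illustrative value:

```python
parts = "Austin, Texas, United States".split(", ")
city, state, country = parts
# -> Location(city="Austin", state="Texas", country=Country.from_string("United States"))
```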
@@ -293,6 +327,23 @@ class LinkedInScraper(Scraper):
         return [get_enum_from_job_type(employment_type)] if employment_type else []

+    def _parse_job_url_direct(self, soup: BeautifulSoup) -> str | None:
+        """
+        Gets the job url direct from job page
+        :param soup:
+        :return: str
+        """
+        job_url_direct = None
+        job_url_direct_content = soup.find("code", id="applyUrl")
+        if job_url_direct_content:
+            job_url_direct_match = self.job_url_direct_regex.search(
+                job_url_direct_content.decode_contents().strip()
+            )
+            if job_url_direct_match:
+                job_url_direct = urllib.parse.unquote(job_url_direct_match.group())
+
+        return job_url_direct
+
     @staticmethod
     def job_type_code(job_type_enum: JobType) -> str:
         return {
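The direct-apply url lives inside a `<code id="applyUrl">` element, percent-encoded after a `?url=` marker; the lookbehind regex compiled in `__init__` pulls it out. A self-contained sketch with a made-up snippet:

```python
import re
import urllib.parse

# Hypothetical contents of the page's <code id="applyUrl"> element.
content = '"https://www.linkedin.com/external?url=https%3A%2F%2Fjobs.example.com%2F123"'
match = re.search(r'(?<=\?url=)[^"]+', content)
if match:
    print(urllib.parse.unquote(match.group()))  # https://jobs.example.com/123
```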

View File

@@ -1,49 +1,48 @@
+from __future__ import annotations
+
 import re
 import logging
-import numpy as np
-import html2text
-import tls_client
 import requests
+import tls_client
+import numpy as np
+from markdownify import markdownify as md
 from requests.adapters import HTTPAdapter, Retry

 from ..jobs import JobType

-text_maker = html2text.HTML2Text()
 logger = logging.getLogger("JobSpy")
 logger.propagate = False
 if not logger.handlers:
-    logger.setLevel(logging.ERROR)
+    logger.setLevel(logging.INFO)
     console_handler = logging.StreamHandler()
-    console_handler.setLevel(logging.ERROR)
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    formatter = logging.Formatter(format)
     console_handler.setFormatter(formatter)
     logger.addHandler(console_handler)


-def count_urgent_words(description: str) -> int:
+def set_logger_level(verbose: int = 2):
     """
-    Count the number of urgent words or phrases in a job description.
+    Adjusts the logger's level. This function allows the logging level to be changed at runtime.
+
+    Parameters:
+    - verbose: int {0, 1, 2} (default=2, all logs)
     """
-    urgent_patterns = re.compile(
-        r"\burgen(t|cy)|\bimmediate(ly)?\b|start asap|\bhiring (now|immediate(ly)?)\b",
-        re.IGNORECASE,
-    )
-    matches = re.findall(urgent_patterns, description)
-    count = len(matches)
-    return count
+    if verbose is None:
+        return
+    level_name = {2: "INFO", 1: "WARNING", 0: "ERROR"}.get(verbose, "INFO")
+    level = getattr(logging, level_name.upper(), None)
+    if level is not None:
+        logger.setLevel(level)
+    else:
+        raise ValueError(f"Invalid log level: {level_name}")


 def markdown_converter(description_html: str):
     if description_html is None:
-        return ""
-    text_maker.ignore_links = False
-    try:
-        markdown = text_maker.handle(description_html)
-        return markdown.strip()
-    except AssertionError as e:
-        return ""
+        return None
+    markdown = md(description_html)
+    return markdown.strip()


 def extract_emails_from_text(text: str) -> list[str] | None:
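A usage sketch for the new runtime verbosity control (the import path is inferred from the module paths shown in this diff):

```python
from jobspy.scrapers.utils import set_logger_level

set_logger_level(0)  # errors only
set_logger_level(1)  # warnings and errors
set_logger_level(2)  # info and above (the default)
```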
@@ -53,7 +52,12 @@ def extract_emails_from_text(text: str) -> list[str] | None:
     return email_regex.findall(text)


-def create_session(proxy: dict | None = None, is_tls: bool = True, has_retry: bool = False, delay: int = 1) -> requests.Session:
+def create_session(
+    proxy: dict | None = None,
+    is_tls: bool = True,
+    has_retry: bool = False,
+    delay: int = 1,
+) -> requests.Session:
     """
     Creates a requests session with optional tls, proxy, and retry settings.
     :return: A session object
@@ -67,15 +71,17 @@ def create_session(proxy: dict | None = None, is_tls: bool = True, has_retry: bo
     if proxy:
         session.proxies.update(proxy)
     if has_retry:
-        retries = Retry(total=3,
-                        connect=3,
-                        status=3,
-                        status_forcelist=[500, 502, 503, 504, 429],
-                        backoff_factor=delay)
+        retries = Retry(
+            total=3,
+            connect=3,
+            status=3,
+            status_forcelist=[500, 502, 503, 504, 429],
+            backoff_factor=delay,
+        )
         adapter = HTTPAdapter(max_retries=retries)
-        session.mount('http://', adapter)
-        session.mount('https://', adapter)
+        session.mount("http://", adapter)
+        session.mount("https://", adapter)

     return session
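Usage as seen in the scrapers above; with `urllib3`'s `Retry`, `backoff_factor=delay` produces progressively longer sleeps between the up-to-three retries on 429/5xx responses:

```python
session = create_session(is_tls=False, has_retry=True, delay=5)
resp = session.get("https://example.com")  # retried automatically on 429/5xx
```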
@@ -93,17 +99,15 @@ def get_enum_from_job_type(job_type_str: str) -> JobType | None:
 def currency_parser(cur_str):
     # Remove any non-numerical characters
     # except for ',' '.' or '-' (e.g. EUR)
-    cur_str = re.sub("[^-0-9.,]", '', cur_str)
+    cur_str = re.sub("[^-0-9.,]", "", cur_str)
     # Remove any 000s separators (either , or .)
-    cur_str = re.sub("[.,]", '', cur_str[:-3]) + cur_str[-3:]
+    cur_str = re.sub("[.,]", "", cur_str[:-3]) + cur_str[-3:]

-    if '.' in list(cur_str[-3:]):
+    if "." in list(cur_str[-3:]):
         num = float(cur_str)
-    elif ',' in list(cur_str[-3:]):
-        num = float(cur_str.replace(',', '.'))
+    elif "," in list(cur_str[-3:]):
+        num = float(cur_str.replace(",", "."))
     else:
         num = float(cur_str)

     return np.round(num, 2)
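The parser treats a separator inside the last three characters as the decimal mark, so both US and European formats resolve:

```python
currency_parser("$1,234.56")   # -> 1234.56
currency_parser("€38.000,00")  # -> 38000.0 (trailing comma read as decimal mark)
```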

View File

@@ -4,6 +4,9 @@ jobspy.scrapers.ziprecruiter
 This module contains routines to scrape ZipRecruiter.
 """

+from __future__ import annotations
+
 import math
 import time
 from datetime import datetime
@@ -14,10 +17,9 @@ from concurrent.futures import ThreadPoolExecutor
 from .. import Scraper, ScraperInput, Site
 from ..utils import (
     logger,
-    count_urgent_words,
     extract_emails_from_text,
     create_session,
-    markdown_converter
+    markdown_converter,
 )
 from ...jobs import (
     JobPost,
@@ -26,7 +28,7 @@ from ...jobs import (
     JobResponse,
     JobType,
     Country,
-    DescriptionFormat
+    DescriptionFormat,
 )
@@ -63,7 +65,7 @@ class ZipRecruiterScraper(Scraper):
                 break
             if page > 1:
                 time.sleep(self.delay)
+            logger.info(f"ZipRecruiter search page: {page}")
             jobs_on_page, continue_token = self._find_jobs_in_page(
                 scraper_input, continue_token
             )
@@ -89,25 +91,24 @@ class ZipRecruiterScraper(Scraper):
         if continue_token:
             params["continue_from"] = continue_token
         try:
-            res= self.session.get(
-                f"{self.api_url}/jobs-app/jobs",
-                headers=self.headers,
-                params=params
+            res = self.session.get(
+                f"{self.api_url}/jobs-app/jobs", headers=self.headers, params=params
             )
             if res.status_code not in range(200, 400):
                 if res.status_code == 429:
-                    logger.error(f'429 Response - Blocked by ZipRecruiter for too many requests')
+                    err = "429 Response - Blocked by ZipRecruiter for too many requests"
                 else:
-                    logger.error(f'ZipRecruiter response status code {res.status_code}')
+                    err = f"ZipRecruiter response status code {res.status_code}"
+                    err += f" with response: {res.text}"  # ZipRecruiter likely not available in EU
+                logger.error(err)
                 return jobs_list, ""
         except Exception as e:
             if "Proxy responded with" in str(e):
-                logger.error(f'Indeed: Bad proxy')
+                logger.error(f"Indeed: Bad proxy")
             else:
-                logger.error(f'Indeed: {str(e)}')
+                logger.error(f"Indeed: {str(e)}")
             return jobs_list, ""

         res_data = res.json()
         jobs_list = res_data.get("jobs", [])
         next_continue_token = res_data.get("continue", None)
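Pagination here runs on an opaque continue token rather than page numbers; a sketch of the flow (variable names are illustrative):

```python
jobs, token = scraper._find_jobs_in_page(scraper_input, None)   # first page
more, token = scraper._find_jobs_in_page(scraper_input, token)  # next page
```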
@@ -128,7 +129,11 @@ class ZipRecruiterScraper(Scraper):
         self.seen_urls.add(job_url)

         description = job.get("job_description", "").strip()
-        description = markdown_converter(description) if self.scraper_input.description_format == DescriptionFormat.MARKDOWN else description
+        description = (
+            markdown_converter(description)
+            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN
+            else description
+        )
         company = job.get("hiring_company", {}).get("name")
         country_value = "usa" if job.get("job_country") == "US" else "canada"
         country_enum = Country.from_string(country_value)
@@ -139,34 +144,34 @@ class ZipRecruiterScraper(Scraper):
         job_type = self._get_job_type_enum(
             job.get("employment_type", "").replace("_", "").lower()
         )
-        date_posted = datetime.fromisoformat(job['posted_time'].rstrip("Z")).date()
+        date_posted = datetime.fromisoformat(job["posted_time"].rstrip("Z")).date()
+        comp_interval = job.get("compensation_interval")
+        comp_interval = "yearly" if comp_interval == "annual" else comp_interval
+        comp_min = int(job["compensation_min"]) if "compensation_min" in job else None
+        comp_max = int(job["compensation_max"]) if "compensation_max" in job else None
+        comp_currency = job.get("compensation_currency")

         return JobPost(
+            id=str(job['listing_key']),
             title=title,
             company_name=company,
             location=location,
             job_type=job_type,
             compensation=Compensation(
-                interval="yearly"
-                if job.get("compensation_interval") == "annual"
-                else job.get("compensation_interval"),
-                min_amount=int(job["compensation_min"])
-                if "compensation_min" in job
-                else None,
-                max_amount=int(job["compensation_max"])
-                if "compensation_max" in job
-                else None,
-                currency=job.get("compensation_currency"),
+                interval=comp_interval,
+                min_amount=comp_min,
+                max_amount=comp_max,
+                currency=comp_currency,
             ),
             date_posted=date_posted,
             job_url=job_url,
             description=description,
             emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
         )

     def _get_cookies(self):
-        data="event_type=session&logged_in=false&number_of_retry=1&property=model%3AiPhone&property=os%3AiOS&property=locale%3Aen_us&property=app_build_number%3A4734&property=app_version%3A91.0&property=manufacturer%3AApple&property=timestamp%3A2024-01-12T12%3A04%3A42-06%3A00&property=screen_height%3A852&property=os_version%3A16.6.1&property=source%3Ainstall&property=screen_width%3A393&property=device_model%3AiPhone%2014%20Pro&property=brand%3AApple"
-        self.session.post(f"{self.api_url}/jobs-app/event", data=data, headers=self.headers)
+        data = "event_type=session&logged_in=false&number_of_retry=1&property=model%3AiPhone&property=os%3AiOS&property=locale%3Aen_us&property=app_build_number%3A4734&property=app_version%3A91.0&property=manufacturer%3AApple&property=timestamp%3A2024-01-12T12%3A04%3A42-06%3A00&property=screen_height%3A852&property=os_version%3A16.6.1&property=source%3Ainstall&property=screen_width%3A393&property=device_model%3AiPhone%2014%20Pro&property=brand%3AApple"
+        url = f"{self.api_url}/jobs-app/event"
+        self.session.post(url, data=data, headers=self.headers)

     @staticmethod
     def _get_job_type_enum(job_type_str: str) -> list[JobType] | None:
@@ -182,16 +187,13 @@ class ZipRecruiterScraper(Scraper):
"location": scraper_input.location, "location": scraper_input.location,
} }
if scraper_input.hours_old: if scraper_input.hours_old:
fromage = max(scraper_input.hours_old // 24, 1) if scraper_input.hours_old else None params["days"] = max(scraper_input.hours_old // 24, 1)
params['days'] = fromage job_type_map = {JobType.FULL_TIME: "full_time", JobType.PART_TIME: "part_time"}
job_type_map = {
JobType.FULL_TIME: 'full_time',
JobType.PART_TIME: 'part_time'
}
if scraper_input.job_type: if scraper_input.job_type:
params['employment_type'] = job_type_map[scraper_input.job_type] if scraper_input.job_type in job_type_map else scraper_input.job_type.value[0] job_type = scraper_input.job_type
params["employment_type"] = job_type_map.get(job_type, job_type.value[0])
if scraper_input.easy_apply: if scraper_input.easy_apply:
params['zipapply'] = 1 params["zipapply"] = 1
if scraper_input.is_remote: if scraper_input.is_remote:
params["remote"] = 1 params["remote"] = 1
if scraper_input.distance: if scraper_input.distance: