Mirror of https://github.com/Bunsly/JobSpy.git, synced 2026-03-05 03:54:31 -08:00

Compare commits

57 commits
SHA1:
7cb0c518fc
df70d4bc2e
3006063875
1be009b8bc
81ed9b3ddf
11a9e9a56a
c6ade14784
13c74a0fed
333e9e6760
04032a0f91
496896d0b5
87ba1ad1bf
4e7ac9a583
e44d13e1cf
d52e366ef7
395ebf0017
63fddd9b7f
58956868ae
4fce836222
5ba25e7a7c
f7cb3e9206
3ad3f121f7
ff3c782912
338d854b96
811d4c40b4
dba92d22c2
10a3592a0f
b7905cc756
6867d58829
f6248c8386
f395597fdd
6372e41bd9
6c869decb8
9f4083380d
9207ab56f6
757a94853e
6bc191d5c7
0cc34287f7
923979093b
286f0e4487
f7b29d43a2
6f1490458c
6bb7d81ba8
0e046432d1
209e0e65b6
8570c0651e
8678b0bbe4
60d4d911c9
2a0cba8c7e
de70189fa2
b55c0eb86d
88c95c4ad5
d8d33d602f
6330c14879
48631ea271
edffe18e65
0988230a24
.github/workflows/publish-to-pypi.yml (vendored, 41 changed lines)
@@ -1,33 +1,50 @@
 name: Publish Python 🐍 distributions 📦 to PyPI
-on: push
+on:
+  pull_request:
+    types:
+      - closed
+
+permissions:
+  contents: write
 
 jobs:
   build-n-publish:
     name: Build and publish Python 🐍 distributions 📦 to PyPI
     runs-on: ubuntu-latest
+
+    if: github.event.pull_request.merged == true && github.event.pull_request.base.ref == 'main'
+
     steps:
       - uses: actions/checkout@v3
+
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
           python-version: "3.10"
+
+      - name: Install dependencies
+        run: pip install toml
+
+      - name: Increment version
+        run: python increment_version.py
+
+      - name: Commit version increment
+        run: |
+          git config --global user.name 'github-actions'
+          git config --global user.email 'github-actions@github.com'
+          git add pyproject.toml
+          git commit -m 'Increment version'
+
+      - name: Push changes
+        run: git push
+
       - name: Install poetry
-        run: >-
-          python3 -m
-          pip install
-          poetry
-          --user
+        run: pip install poetry --user
 
       - name: Build distribution 📦
-        run: >-
-          python3 -m
-          poetry
-          build
+        run: poetry build
 
       - name: Publish distribution 📦 to PyPI
-        if: startsWith(github.ref, 'refs/tags')
         uses: pypa/gh-action-pypi-publish@release/v1
         with:
-          password: ${{ secrets.PYPI_API_TOKEN }}
+          password: ${{ secrets.PYPI_API_TOKEN }}
README.md (146 changed lines)
@@ -1,17 +1,12 @@
 <img src="https://github.com/cullenwatson/JobSpy/assets/78247585/ae185b7e-e444-4712-8bb9-fa97f53e896b" width="400">
 
-**JobSpy** is a simple, yet comprehensive, job scraping library.
-
-**Not technical?** Try out the web scraping tool on our site at [usejobspy.com](https://usejobspy.com).
-
-*Looking to build a data-focused software product?* **[Book a call](https://bunsly.com/)** *to
-work with us.*
+**JobSpy** is a job scraping library with the goal of aggregating all the jobs from popular job boards with one tool.
 
 ## Features
 
-- Scrapes job postings from **LinkedIn**, **Indeed**, **Glassdoor**, & **ZipRecruiter** simultaneously
-- Aggregates the job postings in a Pandas DataFrame
-- Proxies support
+- Scrapes job postings from **LinkedIn**, **Indeed**, **Glassdoor**, **Google**, **ZipRecruiter**, & **Bayt** concurrently
+- Aggregates the job postings in a dataframe
+- Proxies support to bypass blocking
 
 ![jobspy](https://github.com/cullenwatson/JobSpy/assets/78247585/ec7ef355-05f6-4fd3-8161-a817e31c5c57)

@@ -30,16 +25,16 @@ import csv
 from jobspy import scrape_jobs
 
 jobs = scrape_jobs(
-    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
+    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor", "google", "bayt"],
     search_term="software engineer",
-    location="Dallas, TX",
+    google_search_term="software engineer jobs near San Francisco, CA since yesterday",
+    location="San Francisco, CA",
     results_wanted=20,
-    hours_old=72, # (only Linkedin/Indeed is hour specific, others round up to days old)
-    country_indeed='USA',  # only needed for indeed / glassdoor
+    hours_old=72,
+    country_indeed='USA',
 
-    # linkedin_fetch_description=True # get full description and direct job url for linkedin (slower)
+    # linkedin_fetch_description=True # gets more info such as description, direct job url (slower)
     # proxies=["208.195.175.46:65095", "208.195.175.45:65095", "localhost"],
 )
 print(f"Found {len(jobs)} jobs")
 print(jobs.head())

@@ -63,10 +58,13 @@ zip_recruiter Software Developer TEKsystems Phoenix
 ```plaintext
 Optional
 ├── site_name (list|str):
-|  linkedin, zip_recruiter, indeed, glassdoor
-|  (default is all four)
+|  linkedin, zip_recruiter, indeed, glassdoor, google, bayt
+|  (default is all)
 │
 ├── search_term (str)
+|
+├── google_search_term (str)
+|  search term for google jobs. This is the only param for filtering google jobs.
 │
 ├── location (str)
 │

@@ -78,15 +76,15 @@ Optional
 │
 ├── proxies (list):
 |  in format ['user:pass@host:port', 'localhost']
-|  each job board will round robin through the proxies
-│
+|  each job board scraper will round robin through the proxies
+|
 ├── is_remote (bool)
 │
 ├── results_wanted (int):
 |  number of job results to retrieve for each site specified in 'site_name'
 │
 ├── easy_apply (bool):
-|  filters for jobs that are hosted on the job board site
+|  filters for jobs that are hosted on the job board site (LinkedIn easy apply filter no longer works)
 │
 ├── description_format (str):
 |  markdown, html (Format type of the job descriptions. Default is markdown.)

@@ -110,6 +108,12 @@ Optional
 |
 ├── country_indeed (str):
 |  filters the country on Indeed & Glassdoor (see below for correct spelling)
+|
+├── enforce_annual_salary (bool):
+|  converts wages to annual salary
+|
+├── ca_cert (str)
+|  path to CA Certificate file for proxies
 ```
 
 ```

@@ -125,44 +129,6 @@ Optional
 |  - easy_apply
 ```
 
-
-### JobPost Schema
-
-```plaintext
-JobPost
-├── title (str)
-├── company (str)
-├── company_url (str)
-├── job_url (str)
-├── location (object)
-│   ├── country (str)
-│   ├── city (str)
-│   ├── state (str)
-├── description (str)
-├── job_type (str): fulltime, parttime, internship, contract
-├── job_function (str)
-├── compensation (object)
-│   ├── interval (str): yearly, monthly, weekly, daily, hourly
-│   ├── min_amount (int)
-│   ├── max_amount (int)
-│   └── currency (enum)
-├── date_posted (date)
-├── emails (str)
-└── is_remote (bool)
-
-Indeed specific
-├── company_country (str)
-└── company_addresses (str)
-└── company_industry (str)
-└── company_employees_label (str)
-└── company_revenue_label (str)
-└── company_description (str)
-└── ceo_name (str)
-└── ceo_photo_url (str)
-└── logo_photo_url (str)
-└── banner_photo_url (str)
-```
 
 ## Supported Countries for Job Searching
 
 ### **LinkedIn**

@@ -199,6 +165,11 @@ You can specify the following countries when searching on Indeed (use the exact
 | United Arab Emirates | UK* | USA* | Uruguay |
 | Venezuela | Vietnam* | | |
 
+### **Bayt**
+
+Bayt only uses the search_term parameter currently and searches internationally
+
+
 ## Notes
 * Indeed is the best scraper currently with no rate limiting.
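Since Bayt ignores every filter except search_term, a minimal call looks like the following sketch against the scrape_jobs API shown in the usage section above:

```python
from jobspy import scrape_jobs

# Bayt searches internationally and only honors search_term,
# so location/country filters are left out here.
bayt_jobs = scrape_jobs(
    site_name=["bayt"],
    search_term="software engineer",
    results_wanted=10,
)
print(bayt_jobs[["title", "company", "location", "job_url"]].head())
```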
@@ -208,10 +179,24 @@ You can specify the following countries when searching on Indeed (use the exact
 ## Frequently Asked Questions
 
 ---
 **Q: Why is Indeed giving unrelated roles?**
 **A:** Indeed searches the description too.
 
-**Q: Encountering issues with your queries?**
-**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems
-persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
+- use - to remove words
+- "" for exact match
+
+Example of a good Indeed query
+
+```py
+search_term='"engineering intern" software summer (java OR python OR c++) 2025 -tax -marketing'
+```
+
+This searches the description/title and must include software, summer, 2025, one of the languages, engineering intern exactly, no tax, no marketing.
+
+---
+
+**Q: No results when using "google"?**
+**A:** You have to use super specific syntax. Search for google jobs on your browser and then whatever pops up in the google jobs search box after applying some filters is what you need to copy & paste into the google_search_term.
+
+---
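Concretely, that advice translates to something like this sketch, where the google_search_term value is whatever your own Google Jobs search box shows after filtering:

```python
from jobspy import scrape_jobs

# Copy the exact phrase from Google's jobs search box into google_search_term;
# per the FAQ above, it is the only filter the Google scraper applies.
google_jobs = scrape_jobs(
    site_name=["google"],
    google_search_term="software engineer jobs near San Francisco, CA since yesterday",
    results_wanted=20,
)
```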
@@ -222,3 +207,42 @@ persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
 - Try using the proxies param to change your IP address.
 
 ---
+
+### JobPost Schema
+
+```plaintext
+JobPost
+├── title
+├── company
+├── company_url
+├── job_url
+├── location
+│   ├── country
+│   ├── city
+│   ├── state
+├── description
+├── job_type: fulltime, parttime, internship, contract
+├── job_function
+│   ├── interval: yearly, monthly, weekly, daily, hourly
+│   ├── min_amount
+│   ├── max_amount
+│   ├── currency
+│   └── salary_source: direct_data, description (parsed from posting)
+├── date_posted
+├── emails
+└── is_remote
+
+Linkedin specific
+└── job_level
+
+Linkedin & Indeed specific
+└── company_industry
+
+Indeed specific
+├── company_country
+├── company_addresses
+├── company_employees_label
+├── company_revenue_label
+├── company_description
+└── company_logo
+```
increment_version.py (new file, 21 lines)
@@ -0,0 +1,21 @@
import toml

def increment_version(version):
    major, minor, patch = map(int, version.split('.'))
    patch += 1
    return f"{major}.{minor}.{patch}"

# Load pyproject.toml
with open('pyproject.toml', 'r') as file:
    pyproject = toml.load(file)

# Increment the version
current_version = pyproject['tool']['poetry']['version']
new_version = increment_version(current_version)
pyproject['tool']['poetry']['version'] = new_version

# Save the updated pyproject.toml
with open('pyproject.toml', 'w') as file:
    toml.dump(pyproject, file)

print(f"Version updated from {current_version} to {new_version}")
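The bump is patch-only; with the increment_version function above, an illustrative check:

```python
assert increment_version("1.1.75") == "1.1.76"  # major/minor never change
```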
poetry.lock (generated, 2183 changed lines)
File diff suppressed because it is too large
pyproject.toml

@@ -1,36 +1,35 @@
+[build-system]
+requires = [ "poetry-core",]
+build-backend = "poetry.core.masonry.api"
+
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.57"
+version = "1.1.76"
 description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
-authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
+authors = [ "Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>",]
 homepage = "https://github.com/Bunsly/JobSpy"
 readme = "README.md"
 keywords = [ "jobs-scraper", "linkedin", "indeed", "glassdoor", "ziprecruiter",]
+[[tool.poetry.packages]]
+include = "jobspy"
+from = "src"
 
-packages = [
-    { include = "jobspy", from = "src" }
-]
+[tool.black]
+line-length = 88
 
 [tool.poetry.dependencies]
 python = "^3.10"
 requests = "^2.31.0"
 beautifulsoup4 = "^4.12.2"
 pandas = "^2.1.0"
-NUMPY = "1.24.2"
+NUMPY = "1.26.3"
 pydantic = "^2.3.0"
 tls-client = "^1.0.1"
-markdownify = "^0.11.6"
+markdownify = "^0.13.1"
 regex = "^2024.4.28"
 
 
 [tool.poetry.group.dev.dependencies]
 pytest = "^7.4.1"
 jupyter = "^1.0.0"
 black = "*"
 pre-commit = "*"
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
-
-[tool.black]
-line-length = 88
src/jobspy/__init__.py

@@ -5,23 +5,27 @@ from typing import Tuple
 from concurrent.futures import ThreadPoolExecutor, as_completed
 
 from .jobs import JobType, Location
-from .scrapers.utils import logger, set_logger_level, extract_salary
+from .scrapers.utils import set_logger_level, extract_salary, create_logger
 from .scrapers.indeed import IndeedScraper
 from .scrapers.ziprecruiter import ZipRecruiterScraper
 from .scrapers.glassdoor import GlassdoorScraper
+from .scrapers.google import GoogleJobsScraper
 from .scrapers.linkedin import LinkedInScraper
-from .scrapers import ScraperInput, Site, JobResponse, Country
+from .scrapers.bayt import BaytScraper
+from .scrapers import SalarySource, ScraperInput, Site, JobResponse, Country
 from .scrapers.exceptions import (
     LinkedInException,
     IndeedException,
     ZipRecruiterException,
     GlassdoorException,
+    GoogleJobsException,
 )
 
 
 def scrape_jobs(
     site_name: str | list[str] | Site | list[Site] | None = None,
     search_term: str | None = None,
+    google_search_term: str | None = None,
     location: str | None = None,
     distance: int | None = 50,
     is_remote: bool = False,

@@ -31,12 +35,14 @@ def scrape_jobs(
     country_indeed: str = "usa",
     hyperlinks: bool = False,
     proxies: list[str] | str | None = None,
+    ca_cert: str | None = None,
     description_format: str = "markdown",
     linkedin_fetch_description: bool | None = False,
     linkedin_company_ids: list[int] | None = None,
     offset: int | None = 0,
     hours_old: int = None,
-    verbose: int = 2,
+    enforce_annual_salary: bool = False,
+    verbose: int = 0,
     **kwargs,
 ) -> pd.DataFrame:
     """

@@ -48,6 +54,8 @@ def scrape_jobs(
         Site.INDEED: IndeedScraper,
         Site.ZIP_RECRUITER: ZipRecruiterScraper,
         Site.GLASSDOOR: GlassdoorScraper,
+        Site.GOOGLE: GoogleJobsScraper,
+        Site.BAYT: BaytScraper,
     }
     set_logger_level(verbose)

@@ -81,6 +89,7 @@ def scrape_jobs(
         site_type=get_site_type(),
         country=country_enum,
         search_term=search_term,
+        google_search_term=google_search_term,
         location=location,
         distance=distance,
         is_remote=is_remote,

@@ -96,11 +105,11 @@ def scrape_jobs(
 
     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
         scraper_class = SCRAPER_MAPPING[site]
-        scraper = scraper_class(proxies=proxies)
+        scraper = scraper_class(proxies=proxies, ca_cert=ca_cert)
         scraped_data: JobResponse = scraper.scrape(scraper_input)
         cap_name = site.value.capitalize()
         site_name = "ZipRecruiter" if cap_name == "Zip_recruiter" else cap_name
-        logger.info(f"{site_name} finished scraping")
+        create_logger(site_name).info(f"finished scraping")
         return site.value, scraped_data
 
     site_to_jobs_dict = {}

@@ -165,7 +174,8 @@ def scrape_jobs(
                 job_data["min_amount"] = compensation_obj.get("min_amount")
                 job_data["max_amount"] = compensation_obj.get("max_amount")
                 job_data["currency"] = compensation_obj.get("currency", "USD")
-                if (
+                job_data["salary_source"] = SalarySource.DIRECT_DATA.value
+                if enforce_annual_salary and (
                     job_data["interval"]
                     and job_data["interval"] != "yearly"
                     and job_data["min_amount"]

@@ -180,8 +190,17 @@ def scrape_jobs(
                     job_data["min_amount"],
                     job_data["max_amount"],
                     job_data["currency"],
-                ) = extract_salary(job_data["description"])
+                ) = extract_salary(
+                    job_data["description"],
+                    enforce_annual_salary=enforce_annual_salary,
+                )
+                job_data["salary_source"] = SalarySource.DESCRIPTION.value
+
+            job_data["salary_source"] = (
+                job_data["salary_source"]
+                if "min_amount" in job_data and job_data["min_amount"]
+                else None
+            )
             job_df = pd.DataFrame([job_data])
             jobs_dfs.append(job_df)
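The two hunks above amount to the following precedence for salary_source, shown here as a condensed sketch; the names are taken from the diff, but the real code mutates the row dict in place rather than calling a helper like this:

```python
from jobspy.scrapers import SalarySource          # import paths as in the diff above
from jobspy.scrapers.utils import extract_salary

def resolve_salary_source(job_data: dict, enforce_annual_salary: bool = False):
    # Board-provided compensation wins and is tagged direct_data.
    if job_data.get("min_amount"):
        return SalarySource.DIRECT_DATA.value
    # Otherwise try to parse a salary out of the description text;
    # extract_salary returns (interval, min_amount, max_amount, currency).
    interval, min_amount, max_amount, currency = extract_salary(
        job_data.get("description") or "",
        enforce_annual_salary=enforce_annual_salary,
    )
    # No parsed minimum means no salary source at all.
    return SalarySource.DESCRIPTION.value if min_amount else None
```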
@@ -201,27 +220,27 @@ def scrape_jobs(
         "title",
         "company",
         "location",
-        "job_type",
         "date_posted",
+        "job_type",
+        "salary_source",
         "interval",
         "min_amount",
         "max_amount",
         "currency",
         "is_remote",
+        "job_level",
+        "job_function",
+        "listing_type",
         "emails",
         "description",
+        "company_industry",
         "company_url",
+        "company_logo",
         "company_url_direct",
         "company_addresses",
-        "company_industry",
         "company_num_employees",
         "company_revenue",
         "company_description",
-        "logo_photo_url",
-        "banner_photo_url",
-        "ceo_name",
-        "ceo_photo_url",
     ]
 
     # Step 3: Ensure all desired columns are present, adding missing ones as empty

@@ -233,6 +252,8 @@ def scrape_jobs(
         jobs_df = jobs_df[desired_order]
 
         # Step 4: Sort the DataFrame as required
-        return jobs_df.sort_values(by=["site", "date_posted"], ascending=[True, False])
+        return jobs_df.sort_values(
+            by=["site", "date_posted"], ascending=[True, False]
+        ).reset_index(drop=True)
     else:
         return pd.DataFrame()

src/jobspy/jobs.py

@@ -92,7 +92,8 @@ class Country(Enum):
     JAPAN = ("japan", "jp")
     KUWAIT = ("kuwait", "kw")
     LUXEMBOURG = ("luxembourg", "lu")
-    MALAYSIA = ("malaysia", "malaysia")
+    MALAYSIA = ("malaysia", "malaysia:my", "com")
+    MALTA = ("malta", "malta:mt", "mt")
     MEXICO = ("mexico", "mx", "com.mx")
     MOROCCO = ("morocco", "ma")
     NETHERLANDS = ("netherlands", "nl", "nl")

@@ -117,7 +118,7 @@ class Country(Enum):
     SWITZERLAND = ("switzerland", "ch", "de:ch")
     TAIWAN = ("taiwan", "tw")
     THAILAND = ("thailand", "th")
-    TURKEY = ("turkey", "tr")
+    TURKEY = ("türkiye,turkey", "tr")
     UKRAINE = ("ukraine", "ua")
     UNITEDARABEMIRATES = ("united arab emirates", "ae")
     UK = ("uk,united kingdom", "uk:gb", "co.uk")

@@ -242,16 +243,20 @@ class JobPost(BaseModel):
     date_posted: date | None = None
     emails: list[str] | None = None
     is_remote: bool | None = None
+    listing_type: str | None = None
+
+    # linkedin specific
+    job_level: str | None = None
+
+    # linkedin and indeed specific
+    company_industry: str | None = None
 
     # indeed specific
     company_addresses: str | None = None
-    company_industry: str | None = None
     company_num_employees: str | None = None
     company_revenue: str | None = None
     company_description: str | None = None
-    ceo_name: str | None = None
-    ceo_photo_url: str | None = None
-    logo_photo_url: str | None = None
+    company_logo: str | None = None
     banner_photo_url: str | None = None
 
     # linkedin only atm

src/jobspy/scrapers/__init__.py

@@ -17,11 +17,19 @@ class Site(Enum):
     INDEED = "indeed"
     ZIP_RECRUITER = "zip_recruiter"
     GLASSDOOR = "glassdoor"
+    GOOGLE = "google"
+    BAYT = "bayt"
+
+
+class SalarySource(Enum):
+    DIRECT_DATA = "direct_data"
+    DESCRIPTION = "description"
 
 
 class ScraperInput(BaseModel):
     site_type: list[Site]
     search_term: str | None = None
+    google_search_term: str | None = None
 
     location: str | None = None
     country: Country | None = Country.USA

@@ -39,9 +47,12 @@ class ScraperInput(BaseModel):
 
 
 class Scraper(ABC):
-    def __init__(self, site: Site, proxies: list[str] | None = None):
-        self.proxies = proxies
+    def __init__(
+        self, site: Site, proxies: list[str] | None = None, ca_cert: str | None = None
+    ):
         self.site = site
+        self.proxies = proxies
+        self.ca_cert = ca_cert
 
     @abstractmethod
     def scrape(self, scraper_input: ScraperInput) -> JobResponse: ...
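Every scraper now receives ca_cert through its constructor, so a custom CA bundle can be supplied once at the top level and threaded down to each session. A sketch; the proxy string and .pem path are placeholders:

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed"],
    search_term="data engineer",
    proxies=["user:pass@host:port"],   # placeholder proxy
    ca_cert="/path/to/ca-bundle.pem",  # placeholder CA certificate path
)
```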
src/jobspy/scrapers/bayt/__init__.py (new file, 145 lines)
@@ -0,0 +1,145 @@
"""
jobspy.scrapers.bayt
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape Bayt.
"""

from __future__ import annotations

import random
import time

from bs4 import BeautifulSoup

from .. import Scraper, ScraperInput, Site
from ..utils import create_logger, create_session
from ...jobs import JobPost, JobResponse, Location, Country

log = create_logger("Bayt")


class BaytScraper(Scraper):
    base_url = "https://www.bayt.com"
    delay = 2
    band_delay = 3

    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        super().__init__(Site.BAYT, proxies=proxies, ca_cert=ca_cert)
        self.scraper_input = None
        self.session = None
        self.country = "worldwide"

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        self.scraper_input = scraper_input
        self.session = create_session(
            proxies=self.proxies, ca_cert=self.ca_cert, is_tls=False, has_retry=True
        )
        job_list: list[JobPost] = []
        page = 1
        results_wanted = (
            scraper_input.results_wanted if scraper_input.results_wanted else 10
        )

        while len(job_list) < results_wanted:
            log.info(f"Fetching Bayt jobs page {page}")
            job_elements = self._fetch_jobs(self.scraper_input.search_term, page)
            if not job_elements:
                break

            if job_elements:
                log.debug(
                    "First job element snippet:\n" + job_elements[0].prettify()[:500]
                )

            initial_count = len(job_list)
            for job in job_elements:
                try:
                    job_post = self._extract_job_info(job)
                    if job_post:
                        job_list.append(job_post)
                        if len(job_list) >= results_wanted:
                            break
                    else:
                        log.debug(
                            "Extraction returned None. Job snippet:\n"
                            + job.prettify()[:500]
                        )
                except Exception as e:
                    log.error(f"Bayt: Error extracting job info: {str(e)}")
                    continue

            if len(job_list) == initial_count:
                log.info(f"No new jobs found on page {page}. Ending pagination.")
                break

            page += 1
            time.sleep(random.uniform(self.delay, self.delay + self.band_delay))

        job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list)

    def _fetch_jobs(self, query: str, page: int) -> list | None:
        """
        Grabs the job results for the given query and page number.
        """
        try:
            url = f"{self.base_url}/en/international/jobs/{query}-jobs/?page={page}"
            response = self.session.get(url)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")
            job_listings = soup.find_all("li", attrs={"data-js-job": ""})
            log.debug(f"Found {len(job_listings)} job listing elements")
            return job_listings
        except Exception as e:
            log.error(f"Bayt: Error fetching jobs - {str(e)}")
            return None

    def _extract_job_info(self, job: BeautifulSoup) -> JobPost | None:
        """
        Extracts the job information from a single job listing.
        """
        # Find the h2 element holding the title and link (no class filtering)
        job_general_information = job.find("h2")
        if not job_general_information:
            return

        job_title = job_general_information.get_text(strip=True)
        job_url = self._extract_job_url(job_general_information)
        if not job_url:
            return

        # Extract company name using the original approach:
        company_tag = job.find("div", class_="t-nowrap p10l")
        company_name = (
            company_tag.find("span").get_text(strip=True)
            if company_tag and company_tag.find("span")
            else None
        )

        # Extract location using the original approach:
        location_tag = job.find("div", class_="t-mute t-small")
        location = location_tag.get_text(strip=True) if location_tag else None

        job_id = f"bayt-{abs(hash(job_url))}"
        location_obj = Location(
            city=location,
            country=Country.from_string(self.country),
        )
        return JobPost(
            id=job_id,
            title=job_title,
            company_name=company_name,
            location=location_obj,
            job_url=job_url,
        )

    def _extract_job_url(self, job_general_information: BeautifulSoup) -> str | None:
        """
        Pulls the job URL from the 'a' within the h2 element.
        """
        a_tag = job_general_information.find("a")
        if a_tag and a_tag.has_attr("href"):
            return self.base_url + a_tag["href"].strip()
src/jobspy/scrapers/exceptions.py

@@ -24,3 +24,13 @@ class ZipRecruiterException(Exception):
 class GlassdoorException(Exception):
     def __init__(self, message=None):
         super().__init__(message or "An error occurred with Glassdoor")
+
+
+class GoogleJobsException(Exception):
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with Google Jobs")
+
+
+class BaytException(Exception):
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with Bayt")
src/jobspy/scrapers/glassdoor/__init__.py

@@ -14,13 +14,13 @@ from typing import Optional, Tuple
 from datetime import datetime, timedelta
 from concurrent.futures import ThreadPoolExecutor, as_completed
 
+from .constants import fallback_token, query_template, headers
 from .. import Scraper, ScraperInput, Site
-from ..utils import extract_emails_from_text
+from ..utils import extract_emails_from_text, create_logger
 from ..exceptions import GlassdoorException
 from ..utils import (
     create_session,
     markdown_converter,
-    logger,
 )
 from ...jobs import (
     JobPost,

@@ -32,14 +32,18 @@ from ...jobs import (
     DescriptionFormat,
 )
 
+log = create_logger("Glassdoor")
+
 
 class GlassdoorScraper(Scraper):
-    def __init__(self, proxies: list[str] | str | None = None):
+    def __init__(
+        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
+    ):
         """
         Initializes GlassdoorScraper with the Glassdoor job search url
         """
         site = Site(Site.GLASSDOOR)
-        super().__init__(site, proxies=proxies)
+        super().__init__(site, proxies=proxies, ca_cert=ca_cert)
 
         self.base_url = None
         self.country = None

@@ -59,15 +63,18 @@ class GlassdoorScraper(Scraper):
         self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)
         self.base_url = self.scraper_input.country.get_glassdoor_url()
 
-        self.session = create_session(proxies=self.proxies, is_tls=True, has_retry=True)
+        self.session = create_session(
+            proxies=self.proxies, ca_cert=self.ca_cert, has_retry=True
+        )
         token = self._get_csrf_token()
-        self.headers["gd-csrf-token"] = token if token else self.fallback_token
+        headers["gd-csrf-token"] = token if token else fallback_token
+        self.session.headers.update(headers)
 
         location_id, location_type = self._get_location(
             scraper_input.location, scraper_input.is_remote
         )
         if location_type is None:
-            logger.error("Glassdoor: location not parsed")
+            log.error("Glassdoor: location not parsed")
             return JobResponse(jobs=[])
         job_list: list[JobPost] = []
         cursor = None

@@ -76,7 +83,7 @@ class GlassdoorScraper(Scraper):
         tot_pages = (scraper_input.results_wanted // self.jobs_per_page) + 2
         range_end = min(tot_pages, self.max_pages + 1)
         for page in range(range_start, range_end):
-            logger.info(f"Glassdoor search page: {page}")
+            log.info(f"search page: {page} / {range_end - 1}")
             try:
                 jobs, cursor = self._fetch_jobs_page(
                     scraper_input, location_id, location_type, page, cursor

@@ -86,7 +93,7 @@ class GlassdoorScraper(Scraper):
                 job_list = job_list[: scraper_input.results_wanted]
                 break
             except Exception as e:
-                logger.error(f"Glassdoor: {str(e)}")
+                log.error(f"Glassdoor: {str(e)}")
                 break
         return JobResponse(jobs=job_list)

@@ -107,7 +114,6 @@ class GlassdoorScraper(Scraper):
         payload = self._add_payload(location_id, location_type, page_num, cursor)
         response = self.session.post(
             f"{self.base_url}/graph",
-            headers=self.headers,
             timeout_seconds=15,
             data=payload,
         )

@@ -123,7 +129,7 @@ class GlassdoorScraper(Scraper):
             ValueError,
             Exception,
         ) as e:
-            logger.error(f"Glassdoor: {str(e)}")
+            log.error(f"Glassdoor: {str(e)}")
             return jobs, None
 
         jobs_data = res_json["data"]["jobListings"]["jobListings"]

@@ -148,9 +154,7 @@ class GlassdoorScraper(Scraper):
         """
         Fetches csrf token needed for API by visiting a generic page
         """
-        res = self.session.get(
-            f"{self.base_url}/Job/computer-science-jobs.htm", headers=self.headers
-        )
+        res = self.session.get(f"{self.base_url}/Job/computer-science-jobs.htm")
         pattern = r'"token":\s*"([^"]+)"'
         matches = re.findall(pattern, res.text)
         token = None

@@ -189,8 +193,17 @@ class GlassdoorScraper(Scraper):
         except:
             description = None
         company_url = f"{self.base_url}Overview/W-EI_IE{company_id}.htm"
+        company_logo = (
+            job_data["jobview"].get("overview", {}).get("squareLogoUrl", None)
+        )
+        listing_type = (
+            job_data["jobview"]
+            .get("header", {})
+            .get("adOrderSponsorshipLevel", "")
+            .lower()
+        )
         return JobPost(
-            id=str(job_id),
+            id=f"gd-{job_id}",
             title=title,
             company_url=company_url if company_id else None,
             company_name=company_name,

@@ -201,6 +214,8 @@ class GlassdoorScraper(Scraper):
             is_remote=is_remote,
             description=description,
             emails=extract_emails_from_text(description) if description else None,
+            company_logo=company_logo,
+            listing_type=listing_type,
         )
 
     def _fetch_job_description(self, job_id):

@@ -232,7 +247,7 @@ class GlassdoorScraper(Scraper):
             """,
             }
         ]
-        res = requests.post(url, json=body, headers=self.headers)
+        res = requests.post(url, json=body, headers=headers)
         if res.status_code != 200:
             return None
         data = res.json()[0]

@@ -245,16 +260,16 @@ class GlassdoorScraper(Scraper):
         if not location or is_remote:
             return "11047", "STATE"  # remote options
         url = f"{self.base_url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
-        res = self.session.get(url, headers=self.headers)
+        res = self.session.get(url)
         if res.status_code != 200:
             if res.status_code == 429:
                 err = f"429 Response - Blocked by Glassdoor for too many requests"
-                logger.error(err)
+                log.error(err)
                 return None, None
             else:
                 err = f"Glassdoor response status code {res.status_code}"
                 err += f" - {res.text}"
-                logger.error(f"Glassdoor response status code {res.status_code}")
+                log.error(f"Glassdoor response status code {res.status_code}")
                 return None, None
         items = res.json()

@@ -299,7 +314,7 @@ class GlassdoorScraper(Scraper):
                 "fromage": fromage,
                 "sort": "date",
             },
-            "query": self.query_template,
+            "query": query_template,
         }
         if self.scraper_input.job_type:
             payload["variables"]["filterParams"].append(

@@ -347,188 +362,3 @@ class GlassdoorScraper(Scraper):
        for cursor_data in pagination_cursors:
            if cursor_data["pageNumber"] == page_num:
                return cursor_data["cursor"]

(removed module-level constants, now in constants.py)

fallback_token = "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok"
headers = {
    "authority": "www.glassdoor.com",
    "accept": "*/*",
    "accept-language": "en-US,en;q=0.9",
    "apollographql-client-name": "job-search-next",
    "apollographql-client-version": "4.65.5",
    "content-type": "application/json",
    "origin": "https://www.glassdoor.com",
    "referer": "https://www.glassdoor.com/",
    "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"macOS"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
}
query_template = """
query JobSearchResultsQuery(
    $excludeJobListingIds: [Long!],
    $keyword: String,
    $locationId: Int,
    $locationType: LocationTypeEnum,
    $numJobsToShow: Int!,
    $pageCursor: String,
    $pageNumber: Int,
    $filterParams: [FilterParams],
    $originalPageUrl: String,
    $seoFriendlyUrlInput: String,
    $parameterUrlInput: String,
    $seoUrl: Boolean
) {
    jobListings(
        contextHolder: {
            searchParams: {
                excludeJobListingIds: $excludeJobListingIds,
                keyword: $keyword,
                locationId: $locationId,
                locationType: $locationType,
                numPerPage: $numJobsToShow,
                pageCursor: $pageCursor,
                pageNumber: $pageNumber,
                filterParams: $filterParams,
                originalPageUrl: $originalPageUrl,
                seoFriendlyUrlInput: $seoFriendlyUrlInput,
                parameterUrlInput: $parameterUrlInput,
                seoUrl: $seoUrl,
                searchType: SR
            }
        }
    ) {
        companyFilterOptions {
            id
            shortName
            __typename
        }
        filterOptions
        indeedCtk
        jobListings {
            ...JobView
            __typename
        }
        jobListingSeoLinks {
            linkItems {
                position
                url
                __typename
            }
            __typename
        }
        jobSearchTrackingKey
        jobsPageSeoData {
            pageMetaDescription
            pageTitle
            __typename
        }
        paginationCursors {
            cursor
            pageNumber
            __typename
        }
        indexablePageForSeo
        searchResultsMetadata {
            searchCriteria {
                implicitLocation {
                    id
                    localizedDisplayName
                    type
                    __typename
                }
                keyword
                location {
                    id
                    shortName
                    localizedShortName
                    localizedDisplayName
                    type
                    __typename
                }
                __typename
            }
            helpCenterDomain
            helpCenterLocale
            jobSerpJobOutlook {
                occupation
                paragraph
                __typename
            }
            showMachineReadableJobs
            __typename
        }
        totalJobsCount
        __typename
    }
}

fragment JobView on JobListingSearchResult {
    jobview {
        header {
            adOrderId
            advertiserType
            adOrderSponsorshipLevel
            ageInDays
            divisionEmployerName
            easyApply
            employer {
                id
                name
                shortName
                __typename
            }
            employerNameFromSearch
            goc
            gocConfidence
            gocId
            jobCountryId
            jobLink
            jobResultTrackingKey
            jobTitleText
            locationName
            locationType
            locId
            needsCommission
            payCurrency
            payPeriod
            payPeriodAdjustedPay {
                p10
                p50
                p90
                __typename
            }
            rating
            salarySource
            savedJobId
            sponsored
            __typename
        }
        job {
            description
            importConfigId
            jobTitleId
            jobTitleText
            listingId
            __typename
        }
        jobListingAdminDetails {
            cpcVal
            importConfigId
            jobListingId
            jobSourceId
            userEligibleForAdminJobDetails
            __typename
        }
        overview {
            shortName
            squareLogoUrl
            __typename
        }
        __typename
    }
    __typename
}
"""
src/jobspy/scrapers/glassdoor/constants.py (new file, 184 lines)
@@ -0,0 +1,184 @@
headers = {
    "authority": "www.glassdoor.com",
    "accept": "*/*",
    "accept-language": "en-US,en;q=0.9",
    "apollographql-client-name": "job-search-next",
    "apollographql-client-version": "4.65.5",
    "content-type": "application/json",
    "origin": "https://www.glassdoor.com",
    "referer": "https://www.glassdoor.com/",
    "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"macOS"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
}
query_template = """
query JobSearchResultsQuery(
    $excludeJobListingIds: [Long!],
    $keyword: String,
    $locationId: Int,
    $locationType: LocationTypeEnum,
    $numJobsToShow: Int!,
    $pageCursor: String,
    $pageNumber: Int,
    $filterParams: [FilterParams],
    $originalPageUrl: String,
    $seoFriendlyUrlInput: String,
    $parameterUrlInput: String,
    $seoUrl: Boolean
) {
    jobListings(
        contextHolder: {
            searchParams: {
                excludeJobListingIds: $excludeJobListingIds,
                keyword: $keyword,
                locationId: $locationId,
                locationType: $locationType,
                numPerPage: $numJobsToShow,
                pageCursor: $pageCursor,
                pageNumber: $pageNumber,
                filterParams: $filterParams,
                originalPageUrl: $originalPageUrl,
                seoFriendlyUrlInput: $seoFriendlyUrlInput,
                parameterUrlInput: $parameterUrlInput,
                seoUrl: $seoUrl,
                searchType: SR
            }
        }
    ) {
        companyFilterOptions {
            id
            shortName
            __typename
        }
        filterOptions
        indeedCtk
        jobListings {
            ...JobView
            __typename
        }
        jobListingSeoLinks {
            linkItems {
                position
                url
                __typename
            }
            __typename
        }
        jobSearchTrackingKey
        jobsPageSeoData {
            pageMetaDescription
            pageTitle
            __typename
        }
        paginationCursors {
            cursor
            pageNumber
            __typename
        }
        indexablePageForSeo
        searchResultsMetadata {
            searchCriteria {
                implicitLocation {
                    id
                    localizedDisplayName
                    type
                    __typename
                }
                keyword
                location {
                    id
                    shortName
                    localizedShortName
                    localizedDisplayName
                    type
                    __typename
                }
                __typename
            }
            helpCenterDomain
            helpCenterLocale
            jobSerpJobOutlook {
                occupation
                paragraph
                __typename
            }
            showMachineReadableJobs
            __typename
        }
        totalJobsCount
        __typename
    }
}

fragment JobView on JobListingSearchResult {
    jobview {
        header {
            adOrderId
            advertiserType
            adOrderSponsorshipLevel
            ageInDays
            divisionEmployerName
            easyApply
            employer {
                id
                name
                shortName
                __typename
            }
            employerNameFromSearch
            goc
            gocConfidence
            gocId
            jobCountryId
            jobLink
            jobResultTrackingKey
            jobTitleText
            locationName
            locationType
            locId
            needsCommission
            payCurrency
            payPeriod
            payPeriodAdjustedPay {
                p10
                p50
                p90
                __typename
            }
            rating
            salarySource
            savedJobId
            sponsored
            __typename
        }
        job {
            description
            importConfigId
            jobTitleId
            jobTitleText
            listingId
            __typename
        }
        jobListingAdminDetails {
            cpcVal
            importConfigId
            jobListingId
            jobSourceId
            userEligibleForAdminJobDetails
            __typename
        }
        overview {
            shortName
            squareLogoUrl
            __typename
        }
        __typename
    }
    __typename
}
"""
fallback_token = "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok"
src/jobspy/scrapers/google/__init__.py (new file, 247 lines)
@@ -0,0 +1,247 @@
"""
jobspy.scrapers.google
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape Google.
"""

from __future__ import annotations

import math
import re
import json
from typing import Tuple
from datetime import datetime, timedelta

from .constants import headers_jobs, headers_initial, async_param
from .. import Scraper, ScraperInput, Site
from ..utils import extract_emails_from_text, create_logger, extract_job_type
from ..utils import (
    create_session,
)
from ...jobs import (
    JobPost,
    JobResponse,
    Location,
    JobType,
)

log = create_logger("Google")


class GoogleJobsScraper(Scraper):
    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes Google Scraper with the Google jobs search url
        """
        site = Site(Site.GOOGLE)
        super().__init__(site, proxies=proxies, ca_cert=ca_cert)

        self.country = None
        self.session = None
        self.scraper_input = None
        self.jobs_per_page = 10
        self.seen_urls = set()
        self.url = "https://www.google.com/search"
        self.jobs_url = "https://www.google.com/async/callback:550"

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes Google for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        self.scraper_input = scraper_input
        self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)

        self.session = create_session(
            proxies=self.proxies, ca_cert=self.ca_cert, is_tls=False, has_retry=True
        )
        forward_cursor, job_list = self._get_initial_cursor_and_jobs()
        if forward_cursor is None:
            log.warning(
                "initial cursor not found, try changing your query or there was at most 10 results"
            )
            return JobResponse(jobs=job_list)

        page = 1

        while (
            len(self.seen_urls) < scraper_input.results_wanted + scraper_input.offset
            and forward_cursor
        ):
            log.info(
                f"search page: {page} / {math.ceil(scraper_input.results_wanted / self.jobs_per_page)}"
            )
            try:
                jobs, forward_cursor = self._get_jobs_next_page(forward_cursor)
            except Exception as e:
                log.error(f"failed to get jobs on page: {page}, {e}")
                break
            if not jobs:
                log.info(f"found no jobs on page: {page}")
                break
            job_list += jobs
            page += 1
        return JobResponse(
            jobs=job_list[
                scraper_input.offset : scraper_input.offset
                + scraper_input.results_wanted
            ]
        )

    def _get_initial_cursor_and_jobs(self) -> Tuple[str, list[JobPost]]:
        """Gets initial cursor and jobs to paginate through job listings"""
        query = f"{self.scraper_input.search_term} jobs"

        def get_time_range(hours_old):
            if hours_old <= 24:
                return "since yesterday"
            elif hours_old <= 72:
                return "in the last 3 days"
            elif hours_old <= 168:
                return "in the last week"
            else:
                return "in the last month"

        job_type_mapping = {
            JobType.FULL_TIME: "Full time",
            JobType.PART_TIME: "Part time",
            JobType.INTERNSHIP: "Internship",
            JobType.CONTRACT: "Contract",
        }

        if self.scraper_input.job_type in job_type_mapping:
            query += f" {job_type_mapping[self.scraper_input.job_type]}"

        if self.scraper_input.location:
            query += f" near {self.scraper_input.location}"

        if self.scraper_input.hours_old:
            time_filter = get_time_range(self.scraper_input.hours_old)
            query += f" {time_filter}"

        if self.scraper_input.is_remote:
            query += " remote"

        if self.scraper_input.google_search_term:
            query = self.scraper_input.google_search_term

        params = {"q": query, "udm": "8"}
        response = self.session.get(self.url, headers=headers_initial, params=params)

        pattern_fc = r'<div jsname="Yust4d"[^>]+data-async-fc="([^"]+)"'
        match_fc = re.search(pattern_fc, response.text)
        data_async_fc = match_fc.group(1) if match_fc else None
        jobs_raw = self._find_job_info_initial_page(response.text)
        jobs = []
        for job_raw in jobs_raw:
            job_post = self._parse_job(job_raw)
            if job_post:
                jobs.append(job_post)
        return data_async_fc, jobs

    def _get_jobs_next_page(self, forward_cursor: str) -> Tuple[list[JobPost], str]:
        params = {"fc": [forward_cursor], "fcv": ["3"], "async": [async_param]}
        response = self.session.get(self.jobs_url, headers=headers_jobs, params=params)
        return self._parse_jobs(response.text)

    def _parse_jobs(self, job_data: str) -> Tuple[list[JobPost], str]:
        """
        Parses jobs on a page with next page cursor
        """
        start_idx = job_data.find("[[[")
        end_idx = job_data.rindex("]]]") + 3
        s = job_data[start_idx:end_idx]
        parsed = json.loads(s)[0]

        pattern_fc = r'data-async-fc="([^"]+)"'
        match_fc = re.search(pattern_fc, job_data)
        data_async_fc = match_fc.group(1) if match_fc else None
        jobs_on_page = []
        for array in parsed:
            _, job_data = array
            if not job_data.startswith("[[["):
                continue
            job_d = json.loads(job_data)

            job_info = self._find_job_info(job_d)
            job_post = self._parse_job(job_info)
            if job_post:
                jobs_on_page.append(job_post)
        return jobs_on_page, data_async_fc

    def _parse_job(self, job_info: list):
        job_url = job_info[3][0][0] if job_info[3] and job_info[3][0] else None
        if job_url in self.seen_urls:
            return
        self.seen_urls.add(job_url)

        title = job_info[0]
        company_name = job_info[1]
        location = city = job_info[2]
        state = country = date_posted = None
        if location and "," in location:
            city, state, *country = [*map(lambda x: x.strip(), location.split(","))]

        days_ago_str = job_info[12]
        if type(days_ago_str) == str:
            match = re.search(r"\d+", days_ago_str)
            days_ago = int(match.group()) if match else None
            date_posted = (datetime.now() - timedelta(days=days_ago)).date()

        description = job_info[19]

        job_post = JobPost(
            id=f"go-{job_info[28]}",
            title=title,
            company_name=company_name,
            location=Location(
                city=city, state=state, country=country[0] if country else None
            ),
            job_url=job_url,
            date_posted=date_posted,
            is_remote="remote" in description.lower() or "wfh" in description.lower(),
            description=description,
            emails=extract_emails_from_text(description),
            job_type=extract_job_type(description),
        )
        return job_post

    @staticmethod
    def _find_job_info(jobs_data: list | dict) -> list | None:
        """Iterates through the JSON data to find the job listings"""
        if isinstance(jobs_data, dict):
            for key, value in jobs_data.items():
                if key == "520084652" and isinstance(value, list):
                    return value
                else:
                    result = GoogleJobsScraper._find_job_info(value)
                    if result:
                        return result
        elif isinstance(jobs_data, list):
            for item in jobs_data:
                result = GoogleJobsScraper._find_job_info(item)
                if result:
                    return result
        return None

    @staticmethod
    def _find_job_info_initial_page(html_text: str):
        pattern = f'520084652":(' + r"\[.*?\]\s*])\s*}\s*]\s*]\s*]\s*]\s*]"
        results = []
        matches = re.finditer(pattern, html_text)

        import json

        for match in matches:
            try:
                parsed_data = json.loads(match.group(1))
                results.append(parsed_data)

            except json.JSONDecodeError as e:
                log.error(f"Failed to parse match: {str(e)}")
                results.append({"raw_match": match.group(0), "error": str(e)})
        return results
src/jobspy/scrapers/google/constants.py (new file, 52 lines)
@@ -0,0 +1,52 @@
headers_initial = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "priority": "u=0, i",
    "referer": "https://www.google.com/",
    "sec-ch-prefers-color-scheme": "dark",
    "sec-ch-ua": '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
    "sec-ch-ua-arch": '"arm"',
    "sec-ch-ua-bitness": '"64"',
    "sec-ch-ua-form-factors": '"Desktop"',
    "sec-ch-ua-full-version": '"130.0.6723.58"',
    "sec-ch-ua-full-version-list": '"Chromium";v="130.0.6723.58", "Google Chrome";v="130.0.6723.58", "Not?A_Brand";v="99.0.0.0"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-model": '""',
    "sec-ch-ua-platform": '"macOS"',
    "sec-ch-ua-platform-version": '"15.0.1"',
    "sec-ch-ua-wow64": "?0",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
    "x-browser-channel": "stable",
    "x-browser-copyright": "Copyright 2024 Google LLC. All rights reserved.",
    "x-browser-year": "2024",
}

headers_jobs = {
    "accept": "*/*",
    "accept-language": "en-US,en;q=0.9",
    "priority": "u=1, i",
    "referer": "https://www.google.com/",
    "sec-ch-prefers-color-scheme": "dark",
    "sec-ch-ua": '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
    "sec-ch-ua-arch": '"arm"',
    "sec-ch-ua-bitness": '"64"',
    "sec-ch-ua-form-factors": '"Desktop"',
    "sec-ch-ua-full-version": '"130.0.6723.58"',
    "sec-ch-ua-full-version-list": '"Chromium";v="130.0.6723.58", "Google Chrome";v="130.0.6723.58", "Not?A_Brand";v="99.0.0.0"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-model": '""',
    "sec-ch-ua-platform": '"macOS"',
    "sec-ch-ua-platform-version": '"15.0.1"',
    "sec-ch-ua-wow64": "?0",
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
}

async_param = "_basejs:/xjs/_/js/k=xjs.s.en_US.JwveA-JiKmg.2018.O/am=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAACAAAoICAAAAAAAKMAfAAAAIAQAAAAAAAAAAAAACCAAAEJDAAACAAAAAGABAIAAARBAAABAAAAAgAgQAABAASKAfv8JAAABAAAAAAwAQAQACQAAAAAAcAEAQABoCAAAABAAAIABAACAAAAEAAAAFAAAAAAAAAAAAAAAAAAAAAAAAACAQADoBwAAAAAAAAAAAAAQBAAAAATQAAoACOAHAAAAAAAAAQAAAIIAAAA_ZAACAAAAAAAAcB8APB4wHFJ4AAAAAAAAAAAAAAAACECCYA5If0EACAAAAAAAAAAAAAAAAAAAUgRNXG4AMAE/dg=0/br=1/rs=ACT90oGxMeaFMCopIHq5tuQM-6_3M_VMjQ,_basecss:/xjs/_/ss/k=xjs.s.IwsGu62EDtU.L.B1.O/am=QOoQIAQAAAQAREADEBAAAAAAAAAAAAAAAAAAAAAgAQAAIAAAgAQAAAIAIAIAoEwCAADIC8AfsgEAawwAPkAAjgoAGAAAAAAAAEADAAAAAAIgAECHAAAAAAAAAAABAQAggAARQAAAQCEAAAAAIAAAABgAAAAAIAQIACCAAfB-AAFIQABoCEA_CgEAAIABAACEgHAEwwAEFQAM4CgAAAAAAAAAAAAACABCAAAAQEAAABAgAMCPAAA4AoE2BAEAggSAAIoAQAAAAAgAAAAACCAQAAAxEwA_ZAACAAAAAAAAAAkAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAQAEAAAAAAAAAAAAAAAAAAAAAQA/br=1/rs=ACT90oGZc36t3uUQkj0srnIvvbHjO2hgyg,_basecomb:/xjs/_/js/k=xjs.s.en_US.JwveA-JiKmg.2018.O/ck=xjs.s.IwsGu62EDtU.L.B1.O/am=QOoQIAQAAAQAREADEBAAAAAAAAAAAAAAAAAAAAAgAQAAIAAAgAQAAAKAIAoIqEwCAADIK8AfsgEAawwAPkAAjgoAGAAACCAAAEJDAAACAAIgAGCHAIAAARBAAABBAQAggAgRQABAQSOAfv8JIAABABgAAAwAYAQICSCAAfB-cAFIQABoCEA_ChEAAIABAACEgHAEwwAEFQAM4CgAAAAAAAAAAAAACABCAACAQEDoBxAgAMCPAAA4AoE2BAEAggTQAIoASOAHAAgAAAAACSAQAIIxEwA_ZAACAAAAAAAAcB8APB4wHFJ4AAAAAAAAAAAAAAAACECCYA5If0EACAAAAAAAAAAAAAAAAAAAUgRNXG4AMAE/d=1/ed=1/dg=0/br=1/ujg=1/rs=ACT90oFNLTjPzD_OAqhhtXwe2pg1T3WpBg,_fmt:prog,_id:fc_5FwaZ86OKsfdwN4P4La3yA4_2"
@@ -10,15 +10,15 @@ from __future__ import annotations
|
||||
import math
|
||||
from typing import Tuple
|
||||
from datetime import datetime
|
||||
from concurrent.futures import ThreadPoolExecutor, Future
|
||||
|
||||
from .constants import job_search_query, api_headers
|
||||
from .. import Scraper, ScraperInput, Site
|
||||
from ..utils import (
|
||||
extract_emails_from_text,
|
||||
get_enum_from_job_type,
|
||||
markdown_converter,
|
||||
logger,
|
||||
create_session,
|
||||
create_logger,
|
||||
)
|
||||
from ...jobs import (
|
||||
JobPost,
|
||||
@@ -30,15 +30,21 @@ from ...jobs import (
|
||||
DescriptionFormat,
|
||||
)
|
||||
|
||||
log = create_logger("Indeed")
|
||||
|
||||
|
||||
class IndeedScraper(Scraper):
|
||||
def __init__(self, proxies: list[str] | str | None = None):
|
||||
def __init__(
|
||||
self, proxies: list[str] | str | None = None, ca_cert: str | None = None
|
||||
):
|
||||
"""
|
||||
Initializes IndeedScraper with the Indeed API url
|
||||
"""
|
||||
super().__init__(Site.INDEED, proxies=proxies)
|
||||
|
||||
self.session = create_session(proxies=self.proxies, is_tls=False)
|
||||
self.session = create_session(
|
||||
proxies=self.proxies, ca_cert=ca_cert, is_tls=False
|
||||
)
|
||||
self.scraper_input = None
|
||||
self.jobs_per_page = 100
|
||||
self.num_workers = 10
|
||||
@@ -57,29 +63,29 @@ class IndeedScraper(Scraper):
        self.scraper_input = scraper_input
        domain, self.api_country_code = self.scraper_input.country.indeed_domain_value
        self.base_url = f"https://{domain}.indeed.com"
        self.headers = self.api_headers.copy()
        self.headers = api_headers.copy()
        self.headers["indeed-co"] = self.scraper_input.country.indeed_domain_value
        job_list = []
        page = 1

        cursor = None
        offset_pages = math.ceil(self.scraper_input.offset / 100)
        for _ in range(offset_pages):
            logger.info(f"Indeed skipping search page: {page}")
            __, cursor = self._scrape_page(cursor)
            if not __:
                logger.info(f"Indeed found no jobs on page: {page}")
                break

        while len(self.seen_urls) < scraper_input.results_wanted:
            logger.info(f"Indeed search page: {page}")
        while len(self.seen_urls) < scraper_input.results_wanted + scraper_input.offset:
            log.info(
                f"search page: {page} / {math.ceil(scraper_input.results_wanted / self.jobs_per_page)}"
            )
            jobs, cursor = self._scrape_page(cursor)
            if not jobs:
                logger.info(f"Indeed found no jobs on page: {page}")
                log.info(f"found no jobs on page: {page}")
                break
            job_list += jobs
            page += 1
        return JobResponse(jobs=job_list[: scraper_input.results_wanted])
        return JobResponse(
            jobs=job_list[
                scraper_input.offset : scraper_input.offset
                + scraper_input.results_wanted
            ]
        )
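A quick worked example of the new offset arithmetic, following the loop condition and slice above: with offset=10 and results_wanted=15, the scraper keeps paging until 25 URLs have been seen, then returns only the requested window.

offset, results_wanted = 10, 15      # example values
stop_at = results_wanted + offset    # loop runs until 25 results are collected
# returned slice: job_list[offset : offset + results_wanted]  ->  job_list[10:25]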
    def _scrape_page(self, cursor: str | None) -> Tuple[list[JobPost], str | None]:
        """
@@ -95,7 +101,7 @@ class IndeedScraper(Scraper):
            if self.scraper_input.search_term
            else ""
        )
        query = self.job_search_query.format(
        query = job_search_query.format(
            what=(f'what: "{search_term}"' if search_term else ""),
            location=(
                f'location: {{where: "{self.scraper_input.location}", radius: {self.scraper_input.distance}, radiusUnit: MILES}}'
@@ -109,28 +115,30 @@ class IndeedScraper(Scraper):
        payload = {
            "query": query,
        }
        api_headers = self.api_headers.copy()
        api_headers["indeed-co"] = self.api_country_code
        api_headers_temp = api_headers.copy()
        api_headers_temp["indeed-co"] = self.api_country_code
        response = self.session.post(
            self.api_url,
            headers=api_headers,
            headers=api_headers_temp,
            json=payload,
            timeout=10,
            verify=False,
        )
        if response.status_code != 200:
            logger.info(
                f"Indeed responded with status code: {response.status_code} (submit GitHub issue if this appears to be a bug)"
        if not response.ok:
            log.info(
                f"responded with status code: {response.status_code} (submit GitHub issue if this appears to be a bug)"
            )
            return jobs, new_cursor
        data = response.json()
        jobs = data["data"]["jobSearch"]["results"]
        new_cursor = data["data"]["jobSearch"]["pageInfo"]["nextCursor"]

        with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
            job_results: list[Future] = [
                executor.submit(self._process_job, job["job"]) for job in jobs
            ]
        job_list = [result.result() for result in job_results if result.result()]
        job_list = []
        for job in jobs:
            processed_job = self._process_job(job["job"])
            if processed_job:
                job_list.append(processed_job)

        return job_list, new_cursor

    def _build_filters(self):
@@ -176,7 +184,7 @@ class IndeedScraper(Scraper):
            keys.append("DSQF7")

        if keys:
            keys_str = '", "'.join(keys)  # Prepare your keys string
            keys_str = '", "'.join(keys)
            filters_str = f"""
            filters: {{
                composite: {{
@@ -212,7 +220,7 @@ class IndeedScraper(Scraper):
        employer_details = employer.get("employerDetails", {}) if employer else {}
        rel_url = job["employer"]["relativeCompanyPageUrl"] if job["employer"] else None
        return JobPost(
            id=str(job["key"]),
            id=f'in-{job["key"]}',
            title=job["title"],
            description=description,
            company_name=job["employer"].get("name") if job.get("employer") else None,
@@ -226,7 +234,7 @@ class IndeedScraper(Scraper):
                country=job.get("location", {}).get("countryCode"),
            ),
            job_type=job_type,
            compensation=self._get_compensation(job),
            compensation=self._get_compensation(job["compensation"]),
            date_posted=date_posted,
            job_url=job_url,
            job_url_direct=(
@@ -244,24 +252,18 @@ class IndeedScraper(Scraper):
                .replace("Iv1", "")
                .replace("_", " ")
                .title()
                .strip()
                if employer_details.get("industry")
                else None
            ),
            company_num_employees=employer_details.get("employeesLocalizedLabel"),
            company_revenue=employer_details.get("revenueLocalizedLabel"),
            company_description=employer_details.get("briefDescription"),
            ceo_name=employer_details.get("ceoName"),
            ceo_photo_url=employer_details.get("ceoPhotoUrl"),
            logo_photo_url=(
            company_logo=(
                employer["images"].get("squareLogoUrl")
                if employer and employer.get("images")
                else None
            ),
            banner_photo_url=(
                employer["images"].get("headerImageUrl")
                if employer and employer.get("images")
                else None
            ),
        )

    @staticmethod
@@ -280,14 +282,19 @@ class IndeedScraper(Scraper):
        return job_types

    @staticmethod
    def _get_compensation(job: dict) -> Compensation | None:
    def _get_compensation(compensation: dict) -> Compensation | None:
        """
        Parses the job to get compensation
        :param job:
        :param compensation:
        :return: compensation object
        """
        comp = job["compensation"]["baseSalary"]
        if not compensation["baseSalary"] and not compensation["estimated"]:
            return None
        comp = (
            compensation["baseSalary"]
            if compensation["baseSalary"]
            else compensation["estimated"]["baseSalary"]
        )
        if not comp:
            return None
        interval = IndeedScraper._get_compensation_interval(comp["unitOfWork"])
@@ -299,7 +306,11 @@ class IndeedScraper(Scraper):
            interval=interval,
            min_amount=int(min_range) if min_range is not None else None,
            max_amount=int(max_range) if max_range is not None else None,
            currency=job["compensation"]["currencyCode"],
            currency=(
                compensation["estimated"]["currencyCode"]
                if compensation["estimated"]
                else compensation["currencyCode"]
            ),
        )
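For illustration, a hedged sketch of the two compensation payload shapes the rewritten _get_compensation handles, with field names taken from the GraphQL query below and made-up values:

# posted salary present; the "estimated" block is ignored
{"baseSalary": {"unitOfWork": "YEAR", "range": {"min": 90000, "max": 120000}},
 "estimated": None,
 "currencyCode": "USD"}

# no posted salary; falls back to Indeed's estimate, including its currencyCode
{"baseSalary": None,
 "currencyCode": None,
 "estimated": {"currencyCode": "USD",
               "baseSalary": {"unitOfWork": "YEAR", "range": {"min": 80000, "max": 100000}}}}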
    @staticmethod
@@ -337,98 +348,3 @@ class IndeedScraper(Scraper):
            return CompensationInterval[mapped_interval]
        else:
            raise ValueError(f"Unsupported interval: {interval}")

api_headers = {
    "Host": "apis.indeed.com",
    "content-type": "application/json",
    "indeed-api-key": "161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8",
    "accept": "application/json",
    "indeed-locale": "en-US",
    "accept-language": "en-US,en;q=0.9",
    "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1",
    "indeed-app-info": "appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone",
}
job_search_query = """
query GetJobData {{
    jobSearch(
        {what}
        {location}
        includeSponsoredResults: NONE
        limit: 100
        sort: DATE
        {cursor}
        {filters}
    ) {{
        pageInfo {{
            nextCursor
        }}
        results {{
            trackingKey
            job {{
                key
                title
                datePublished
                dateOnIndeed
                description {{
                    html
                }}
                location {{
                    countryName
                    countryCode
                    admin1Code
                    city
                    postalCode
                    streetAddress
                    formatted {{
                        short
                        long
                    }}
                }}
                compensation {{
                    baseSalary {{
                        unitOfWork
                        range {{
                            ... on Range {{
                                min
                                max
                            }}
                        }}
                    }}
                    currencyCode
                }}
                attributes {{
                    key
                    label
                }}
                employer {{
                    relativeCompanyPageUrl
                    name
                    dossier {{
                        employerDetails {{
                            addresses
                            industry
                            employeesLocalizedLabel
                            revenueLocalizedLabel
                            briefDescription
                            ceoName
                            ceoPhotoUrl
                        }}
                        images {{
                            headerImageUrl
                            squareLogoUrl
                        }}
                        links {{
                            corporateWebsite
                        }}
                    }}
                }}
                recruit {{
                    viewJobUrl
                    detailedSalary
                    workSchedule
                }}
            }}
        }}
    }}
}}
"""
109 src/jobspy/scrapers/indeed/constants.py Normal file
@@ -0,0 +1,109 @@
job_search_query = """
query GetJobData {{
    jobSearch(
        {what}
        {location}
        limit: 100
        {cursor}
        sort: RELEVANCE
        {filters}
    ) {{
        pageInfo {{
            nextCursor
        }}
        results {{
            trackingKey
            job {{
                source {{
                    name
                }}
                key
                title
                datePublished
                dateOnIndeed
                description {{
                    html
                }}
                location {{
                    countryName
                    countryCode
                    admin1Code
                    city
                    postalCode
                    streetAddress
                    formatted {{
                        short
                        long
                    }}
                }}
                compensation {{
                    estimated {{
                        currencyCode
                        baseSalary {{
                            unitOfWork
                            range {{
                                ... on Range {{
                                    min
                                    max
                                }}
                            }}
                        }}
                    }}
                    baseSalary {{
                        unitOfWork
                        range {{
                            ... on Range {{
                                min
                                max
                            }}
                        }}
                    }}
                    currencyCode
                }}
                attributes {{
                    key
                    label
                }}
                employer {{
                    relativeCompanyPageUrl
                    name
                    dossier {{
                        employerDetails {{
                            addresses
                            industry
                            employeesLocalizedLabel
                            revenueLocalizedLabel
                            briefDescription
                            ceoName
                            ceoPhotoUrl
                        }}
                        images {{
                            headerImageUrl
                            squareLogoUrl
                        }}
                        links {{
                            corporateWebsite
                        }}
                    }}
                }}
                recruit {{
                    viewJobUrl
                    detailedSalary
                    workSchedule
                }}
            }}
        }}
    }}
}}
"""

api_headers = {
    "Host": "apis.indeed.com",
    "content-type": "application/json",
    "indeed-api-key": "161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8",
    "accept": "application/json",
    "indeed-locale": "en-US",
    "accept-language": "en-US,en;q=0.9",
    "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1",
    "indeed-app-info": "appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone",
}
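Note the doubled braces in this template: they render as literal braces in the final GraphQL document, while the single-brace fields ({what}, {location}, {cursor}, {filters}) are the only str.format() placeholders. A minimal sketch with made-up values:

from jobspy.scrapers.indeed.constants import job_search_query

query = job_search_query.format(
    what='what: "python developer"',
    location='location: {where: "Austin, TX", radius: 50, radiusUnit: MILES}',
    cursor='cursor: "abc123"',  # empty string on the first page
    filters="",                 # or the block built by _build_filters()
)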
@@ -7,6 +7,7 @@ This module contains routines to scrape LinkedIn.

from __future__ import annotations

import math
import time
import random
import regex as re
@@ -17,9 +18,10 @@ from bs4.element import Tag
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urlunparse, unquote

from .constants import headers
from .. import Scraper, ScraperInput, Site
from ..exceptions import LinkedInException
from ..utils import create_session, remove_attributes
from ..utils import create_session, remove_attributes, create_logger
from ...jobs import (
    JobPost,
    Location,
@@ -30,13 +32,14 @@ from ...jobs import (
    DescriptionFormat,
)
from ..utils import (
    logger,
    extract_emails_from_text,
    get_enum_from_job_type,
    currency_parser,
    markdown_converter,
)

log = create_logger("LinkedIn")


class LinkedInScraper(Scraper):
    base_url = "https://www.linkedin.com"
@@ -44,19 +47,22 @@ class LinkedInScraper(Scraper):
    band_delay = 4
    jobs_per_page = 25

    def __init__(self, proxies: list[str] | str | None = None):
    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes LinkedInScraper with the LinkedIn job search url
        """
        super().__init__(Site.LINKEDIN, proxies=proxies)
        super().__init__(Site.LINKEDIN, proxies=proxies, ca_cert=ca_cert)
        self.session = create_session(
            proxies=self.proxies,
            ca_cert=ca_cert,
            is_tls=False,
            has_retry=True,
            delay=5,
            clear_cookies=True,
        )
        self.session.headers.update(self.headers)
        self.session.headers.update(headers)
        self.scraper_input = None
        self.country = "worldwide"
        self.job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')
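The job_url_direct_regex uses a lookbehind to pull the external application URL out of LinkedIn's ?url= redirect parameter, stopping at the closing quote; a small sketch of the intended match (the HTML snippet is made up):

import re

pattern = re.compile(r'(?<=\?url=)[^"]+')
html = '"applyUrl":"https://www.linkedin.com/redirect?url=https%3A%2F%2Fjobs.example.com%2F123"'
match = pattern.search(html)
# match.group(0) -> 'https%3A%2F%2Fjobs.example.com%2F123', later unquote()d to the real URL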
@@ -70,17 +76,19 @@ class LinkedInScraper(Scraper):
        self.scraper_input = scraper_input
        job_list: list[JobPost] = []
        seen_ids = set()
        page = scraper_input.offset // 10 * 10 if scraper_input.offset else 0
        start = scraper_input.offset // 10 * 10 if scraper_input.offset else 0
        request_count = 0
        seconds_old = (
            scraper_input.hours_old * 3600 if scraper_input.hours_old else None
        )
        continue_search = (
            lambda: len(job_list) < scraper_input.results_wanted and page < 1000
            lambda: len(job_list) < scraper_input.results_wanted and start < 1000
        )
        while continue_search():
            request_count += 1
            logger.info(f"LinkedIn search page: {request_count}")
            log.info(
                f"search page: {request_count} / {math.ceil(scraper_input.results_wanted / 10)}"
            )
            params = {
                "keywords": scraper_input.search_term,
                "location": scraper_input.location,
@@ -92,7 +100,7 @@ class LinkedInScraper(Scraper):
                    else None
                ),
                "pageNum": 0,
                "start": page,
                "start": start,
                "f_AL": "true" if scraper_input.easy_apply else None,
                "f_C": (
                    ",".join(map(str, scraper_input.linkedin_company_ids))
@@ -118,13 +126,13 @@ class LinkedInScraper(Scraper):
                else:
                    err = f"LinkedIn response status code {response.status_code}"
                    err += f" - {response.text}"
                    logger.error(err)
                    log.error(err)
                    return JobResponse(jobs=job_list)
            except Exception as e:
                if "Proxy responded with" in str(e):
                    logger.error(f"LinkedIn: Bad proxy")
                    log.error(f"LinkedIn: Bad proxy")
                else:
                    logger.error(f"LinkedIn: {str(e)}")
                    log.error(f"LinkedIn: {str(e)}")
                return JobResponse(jobs=job_list)

            soup = BeautifulSoup(response.text, "html.parser")
@@ -154,7 +162,7 @@ class LinkedInScraper(Scraper):

            if continue_search():
                time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
                page += len(job_list)
                start += len(job_list)

        job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list)
@@ -210,7 +218,7 @@ class LinkedInScraper(Scraper):
        job_details = self._get_job_details(job_id)

        return JobPost(
            id=job_id,
            id=f"li-{job_id}",
            title=title,
            company_name=company,
            company_url=company_url,
@@ -219,10 +227,12 @@ class LinkedInScraper(Scraper):
            job_url=f"{self.base_url}/jobs/view/{job_id}",
            compensation=compensation,
            job_type=job_details.get("job_type"),
            job_level=job_details.get("job_level", "").lower(),
            company_industry=job_details.get("company_industry"),
            description=job_details.get("description"),
            job_url_direct=job_details.get("job_url_direct"),
            emails=extract_emails_from_text(job_details.get("description")),
            logo_photo_url=job_details.get("logo_photo_url"),
            company_logo=job_details.get("company_logo"),
            job_function=job_details.get("job_function"),
        )

@@ -234,7 +244,7 @@ class LinkedInScraper(Scraper):
        """
        try:
            response = self.session.get(
                f"{self.base_url}/jobs-guest/jobs/api/jobPosting/{job_id}", timeout=5
                f"{self.base_url}/jobs/view/{job_id}", timeout=5
            )
            response.raise_for_status()
        except:
@@ -264,13 +274,19 @@ class LinkedInScraper(Scraper):
        )
        if job_function_span:
            job_function = job_function_span.text.strip()

        company_logo = (
            logo_image.get("data-delayed-url")
            if (logo_image := soup.find("img", {"class": "artdeco-entity-image"}))
            else None
        )
        return {
            "description": description,
            "job_level": self._parse_job_level(soup),
            "company_industry": self._parse_company_industry(soup),
            "job_type": self._parse_job_type(soup),
            "job_url_direct": self._parse_job_url_direct(soup),
            "logo_photo_url": soup.find("img", {"class": "artdeco-entity-image"}).get(
                "data-delayed-url"
            ),
            "company_logo": company_logo,
            "job_function": job_function,
        }

@@ -325,6 +341,52 @@ class LinkedInScraper(Scraper):

        return [get_enum_from_job_type(employment_type)] if employment_type else []

    @staticmethod
    def _parse_job_level(soup_job_level: BeautifulSoup) -> str | None:
        """
        Gets the job level from job page
        :param soup_job_level:
        :return: str
        """
        h3_tag = soup_job_level.find(
            "h3",
            class_="description__job-criteria-subheader",
            string=lambda text: "Seniority level" in text,
        )
        job_level = None
        if h3_tag:
            job_level_span = h3_tag.find_next_sibling(
                "span",
                class_="description__job-criteria-text description__job-criteria-text--criteria",
            )
            if job_level_span:
                job_level = job_level_span.get_text(strip=True)

        return job_level

    @staticmethod
    def _parse_company_industry(soup_industry: BeautifulSoup) -> str | None:
        """
        Gets the company industry from job page
        :param soup_industry:
        :return: str
        """
        h3_tag = soup_industry.find(
            "h3",
            class_="description__job-criteria-subheader",
            string=lambda text: "Industries" in text,
        )
        industry = None
        if h3_tag:
            industry_span = h3_tag.find_next_sibling(
                "span",
                class_="description__job-criteria-text description__job-criteria-text--criteria",
            )
            if industry_span:
                industry = industry_span.get_text(strip=True)

        return industry

    def _parse_job_url_direct(self, soup: BeautifulSoup) -> str | None:
        """
        Gets the job url direct from job page
@@ -351,12 +413,3 @@ class LinkedInScraper(Scraper):
            JobType.CONTRACT: "C",
            JobType.TEMPORARY: "T",
        }.get(job_type_enum, "")

headers = {
    "authority": "www.linkedin.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "cache-control": "max-age=0",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
}
8 src/jobspy/scrapers/linkedin/constants.py Normal file
@@ -0,0 +1,8 @@
headers = {
    "authority": "www.linkedin.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "cache-control": "max-age=0",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
}
@@ -1,26 +1,32 @@
from __future__ import annotations

import re
import logging
import re
from itertools import cycle

import numpy as np
import requests
import tls_client
import numpy as np
import urllib3
from markdownify import markdownify as md
from requests.adapters import HTTPAdapter, Retry

from ..jobs import JobType
from ..jobs import CompensationInterval, JobType

logger = logging.getLogger("JobSpy")
logger.propagate = False
if not logger.handlers:
    logger.setLevel(logging.INFO)
    console_handler = logging.StreamHandler()
    format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    formatter = logging.Formatter(format)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def create_logger(name: str):
    logger = logging.getLogger(f"JobSpy:{name}")
    logger.propagate = False
    if not logger.handlers:
        logger.setLevel(logging.INFO)
        console_handler = logging.StreamHandler()
        format = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
        formatter = logging.Formatter(format)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    return logger
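create_logger gives each scraper its own namespaced child logger, so log lines identify the site and can be tuned per scraper; a short usage sketch (the timestamp is illustrative, the format follows the Formatter above):

log = create_logger("Indeed")  # logger named "JobSpy:Indeed"
log.info("search page: 1 / 3")
# 2024-01-01 12:00:00,000 - INFO - JobSpy:Indeed - search page: 1 / 3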
class RotatingProxySession:
@@ -100,6 +106,7 @@ class TLSRotating(RotatingProxySession, tls_client.Session):
def create_session(
    *,
    proxies: dict | str | None = None,
    ca_cert: str | None = None,
    is_tls: bool = True,
    has_retry: bool = False,
    delay: int = 1,
@@ -119,10 +126,13 @@ def create_session(
            clear_cookies=clear_cookies,
        )

    if ca_cert:
        session.verify = ca_cert

    return session


def set_logger_level(verbose: int = 2):
def set_logger_level(verbose: int):
    """
    Adjusts the logger's level. This function allows the logging level to be changed at runtime.

@@ -134,7 +144,9 @@ def set_logger_level(verbose: int = 2):
    level_name = {2: "INFO", 1: "WARNING", 0: "ERROR"}.get(verbose, "INFO")
    level = getattr(logging, level_name.upper(), None)
    if level is not None:
        logger.setLevel(level)
        for logger_name in logging.root.manager.loggerDict:
            if logger_name.startswith("JobSpy:"):
                logging.getLogger(logger_name).setLevel(level)
    else:
        raise ValueError(f"Invalid log level: {level_name}")
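Because set_logger_level now walks logging.root.manager.loggerDict, one call retunes every per-site logger at once; a sketch, assuming the loggers were created via create_logger:

create_logger("Indeed")
create_logger("LinkedIn")
set_logger_level(0)  # 0 -> ERROR: silences INFO/WARNING on JobSpy:Indeed and JobSpy:LinkedIn alike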
@@ -193,10 +205,16 @@ def extract_salary(
    upper_limit=700000,
    hourly_threshold=350,
    monthly_threshold=30000,
    enforce_annual_salary=False,
):
    """
    Extracts salary information from a string and returns the salary interval, min and max salary values, and currency.
    (TODO: Needs test cases as the regex is complicated and may not cover all edge cases)
    """
    if not salary_str:
        return None, None, None, None

    annual_max_salary = None
    min_max_pattern = r"\$(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)\s*[-—–]\s*(?:\$)?(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)"

    def to_int(s):
@@ -220,20 +238,51 @@ def extract_salary(

    # Convert to annual if less than the hourly threshold
    if min_salary < hourly_threshold:
        min_salary = convert_hourly_to_annual(min_salary)
        interval = CompensationInterval.HOURLY.value
        annual_min_salary = convert_hourly_to_annual(min_salary)
        if max_salary < hourly_threshold:
            max_salary = convert_hourly_to_annual(max_salary)
            annual_max_salary = convert_hourly_to_annual(max_salary)

    elif min_salary < monthly_threshold:
        min_salary = convert_monthly_to_annual(min_salary)
        interval = CompensationInterval.MONTHLY.value
        annual_min_salary = convert_monthly_to_annual(min_salary)
        if max_salary < monthly_threshold:
            max_salary = convert_monthly_to_annual(max_salary)
            annual_max_salary = convert_monthly_to_annual(max_salary)

    else:
        interval = CompensationInterval.YEARLY.value
        annual_min_salary = min_salary
        annual_max_salary = max_salary

    # Ensure salary range is within specified limits
    if not annual_max_salary:
        return None, None, None, None
    if (
        lower_limit <= min_salary <= upper_limit
        and lower_limit <= max_salary <= upper_limit
        and min_salary < max_salary
        lower_limit <= annual_min_salary <= upper_limit
        and lower_limit <= annual_max_salary <= upper_limit
        and annual_min_salary < annual_max_salary
    ):
        return "yearly", min_salary, max_salary, "USD"
        if enforce_annual_salary:
            return interval, annual_min_salary, annual_max_salary, "USD"
        else:
            return interval, min_salary, max_salary, "USD"
    return None, None, None, None
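A worked example of the new behavior, assuming the module's hourly annualization is the usual 2080-hour work year: for the string "$30 - $40" both bounds fall under hourly_threshold=350, so the interval becomes hourly and the annualized values (30 * 2080 = 62,400 and 40 * 2080 = 83,200) are used only for the range check, while the raw values are returned unless enforce_annual_salary is set:

extract_salary("$30 - $40")
# -> ("hourly", 30, 40, "USD")        # annualized 62400/83200 pass the limit check
extract_salary("$30 - $40", enforce_annual_salary=True)
# -> ("hourly", 62400, 83200, "USD")  # assuming convert_hourly_to_annual(x) == x * 2080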
def extract_job_type(description: str):
    if not description:
        return []

    keywords = {
        JobType.FULL_TIME: r"full\s?time",
        JobType.PART_TIME: r"part\s?time",
        JobType.INTERNSHIP: r"internship",
        JobType.CONTRACT: r"contract",
    }

    listing_types = []
    for key, pattern in keywords.items():
        if re.search(pattern, description, re.IGNORECASE):
            listing_types.append(key)

    return listing_types if listing_types else None
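A quick sketch of the keyword matching: the \s? in patterns like full\s?time tolerates both "full time" and "fulltime", case-insensitively:

extract_job_type("Hiring full time and contract engineers")
# -> [JobType.FULL_TIME, JobType.CONTRACT]
extract_job_type("Weekend volunteer opportunity")
# -> None  (no keyword matched)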
@@ -11,20 +11,20 @@ import json
import math
import re
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Optional, Tuple, Any

from concurrent.futures import ThreadPoolExecutor

from bs4 import BeautifulSoup

from .constants import headers
from .. import Scraper, ScraperInput, Site
from ..utils import (
    logger,
    extract_emails_from_text,
    create_session,
    markdown_converter,
    remove_attributes,
    create_logger,
)
from ...jobs import (
    JobPost,
@@ -36,19 +36,24 @@ from ...jobs import (
    DescriptionFormat,
)

log = create_logger("ZipRecruiter")


class ZipRecruiterScraper(Scraper):
    base_url = "https://www.ziprecruiter.com"
    api_url = "https://api.ziprecruiter.com"

    def __init__(self, proxies: list[str] | str | None = None):
    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes ZipRecruiterScraper with the ZipRecruiter job search url
        """
        super().__init__(Site.ZIP_RECRUITER, proxies=proxies)

        self.scraper_input = None
        self.session = create_session(proxies=proxies)
        self.session = create_session(proxies=proxies, ca_cert=ca_cert)
        self.session.headers.update(headers)
        self._get_cookies()

        self.delay = 5
@@ -71,7 +76,7 @@ class ZipRecruiterScraper(Scraper):
                break
            if page > 1:
                time.sleep(self.delay)
            logger.info(f"ZipRecruiter search page: {page}")
            log.info(f"search page: {page} / {max_pages}")
            jobs_on_page, continue_token = self._find_jobs_in_page(
                scraper_input, continue_token
            )
@@ -97,22 +102,20 @@ class ZipRecruiterScraper(Scraper):
        if continue_token:
            params["continue_from"] = continue_token
        try:
            res = self.session.get(
                f"{self.api_url}/jobs-app/jobs", headers=self.headers, params=params
            )
            res = self.session.get(f"{self.api_url}/jobs-app/jobs", params=params)
            if res.status_code not in range(200, 400):
                if res.status_code == 429:
                    err = "429 Response - Blocked by ZipRecruiter for too many requests"
                else:
                    err = f"ZipRecruiter response status code {res.status_code}"
                    err += f" with response: {res.text}"  # ZipRecruiter likely not available in EU
                logger.error(err)
                log.error(err)
                return jobs_list, ""
        except Exception as e:
            if "Proxy responded with" in str(e):
                logger.error(f"Indeed: Bad proxy")
                log.error("ZipRecruiter: Bad proxy")
            else:
                logger.error(f"Indeed: {str(e)}")
                log.error(f"ZipRecruiter: {str(e)}")
            return jobs_list, ""
        res_data = res.json()
@@ -135,6 +138,7 @@ class ZipRecruiterScraper(Scraper):
        self.seen_urls.add(job_url)

        description = job.get("job_description", "").strip()
        listing_type = job.get("buyer_type", "")
        description = (
            markdown_converter(description)
            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN
@@ -159,7 +163,7 @@ class ZipRecruiterScraper(Scraper):
        description_full, job_url_direct = self._get_descr(job_url)

        return JobPost(
            id=str(job["listing_key"]),
            id=f'zr-{job["listing_key"]}',
            title=title,
            company_name=company,
            location=location,
@@ -175,10 +179,11 @@ class ZipRecruiterScraper(Scraper):
            description=description_full if description_full else description,
            emails=extract_emails_from_text(description) if description else None,
            job_url_direct=job_url_direct,
            listing_type=listing_type,
        )

    def _get_descr(self, job_url):
        res = self.session.get(job_url, headers=self.headers, allow_redirects=True)
        res = self.session.get(job_url, allow_redirects=True)
        description_full = job_url_direct = None
        if res.ok:
            soup = BeautifulSoup(res.text, "html.parser")
@@ -198,7 +203,7 @@ class ZipRecruiterScraper(Scraper):
            script_tag = soup.find("script", type="application/json")
            if script_tag:
                job_json = json.loads(script_tag.string)
                job_url_val = job_json["model"]["saveJobURL"]
                job_url_val = job_json["model"].get("saveJobURL", "")
                m = re.search(r"job_url=(.+)", job_url_val)
                if m:
                    job_url_direct = m.group(1)
@@ -209,9 +214,30 @@ class ZipRecruiterScraper(Scraper):
        return description_full, job_url_direct
    def _get_cookies(self):
        data = "event_type=session&logged_in=false&number_of_retry=1&property=model%3AiPhone&property=os%3AiOS&property=locale%3Aen_us&property=app_build_number%3A4734&property=app_version%3A91.0&property=manufacturer%3AApple&property=timestamp%3A2024-01-12T12%3A04%3A42-06%3A00&property=screen_height%3A852&property=os_version%3A16.6.1&property=source%3Ainstall&property=screen_width%3A393&property=device_model%3AiPhone%2014%20Pro&property=brand%3AApple"
        """
        Sends a session event to the API with device properties.
        """
        data = [
            ("event_type", "session"),
            ("logged_in", "false"),
            ("number_of_retry", "1"),
            ("property", "model:iPhone"),
            ("property", "os:iOS"),
            ("property", "locale:en_us"),
            ("property", "app_build_number:4734"),
            ("property", "app_version:91.0"),
            ("property", "manufacturer:Apple"),
            ("property", "timestamp:2025-01-12T12:04:42-06:00"),
            ("property", "screen_height:852"),
            ("property", "os_version:16.6.1"),
            ("property", "source:install"),
            ("property", "screen_width:393"),
            ("property", "device_model:iPhone 14 Pro"),
            ("property", "brand:Apple"),
        ]

        url = f"{self.api_url}/jobs-app/event"
        self.session.post(url, data=data, headers=self.headers)
        self.session.post(url, data=data)
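Switching data from a pre-encoded string to a list of tuples lets requests form-encode the repeated property field itself; a dict could not hold the duplicate keys. A small sketch of the difference:

payload = {"property": "brand:Apple"}  # a dict keeps only one "property" entry
payload = [("property", "model:iPhone"), ("property", "os:iOS")]
# requests encodes the list as: property=model%3AiPhone&property=os%3AiOS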
    @staticmethod
    def _get_job_type_enum(job_type_str: str) -> list[JobType] | None:
@@ -239,14 +265,3 @@ class ZipRecruiterScraper(Scraper):
        if scraper_input.distance:
            params["radius"] = scraper_input.distance
        return {k: v for k, v in params.items() if v is not None}

headers = {
    "Host": "api.ziprecruiter.com",
    "accept": "*/*",
    "x-zr-zva-override": "100000000;vid:ZT1huzm_EQlDTVEc",
    "x-pushnotificationid": "0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0",
    "x-deviceid": "D77B3A92-E589-46A4-8A39-6EF6F1D86006",
    "user-agent": "Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)",
    "authorization": "Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==",
    "accept-language": "en-US,en;q=0.9",
}
10 src/jobspy/scrapers/ziprecruiter/constants.py Normal file
@@ -0,0 +1,10 @@
headers = {
    "Host": "api.ziprecruiter.com",
    "accept": "*/*",
    "x-zr-zva-override": "100000000;vid:ZT1huzm_EQlDTVEc",
    "x-pushnotificationid": "0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0",
    "x-deviceid": "D77B3A92-E589-46A4-8A39-6EF6F1D86006",
    "user-agent": "Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)",
    "authorization": "Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==",
    "accept-language": "en-US,en;q=0.9",
}
@@ -1,14 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_all():
    result = scrape_jobs(
        site_name=["linkedin", "indeed", "zip_recruiter", "glassdoor"],
        search_term="software engineer",
        results_wanted=5,
    )

    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
@@ -1,11 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_indeed():
    result = scrape_jobs(
        site_name="glassdoor", search_term="software engineer", country_indeed="USA"
    )
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
@@ -1,11 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_indeed():
    result = scrape_jobs(
        site_name="indeed", search_term="software engineer", country_indeed="usa"
    )
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
@@ -1,12 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_linkedin():
    result = scrape_jobs(
        site_name="linkedin",
        search_term="software engineer",
    )
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
@@ -1,13 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_ziprecruiter():
    result = scrape_jobs(
        site_name="zip_recruiter",
        search_term="software engineer",
    )

    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"