Mirror of https://github.com/Bunsly/JobSpy.git (synced 2026-03-05 03:54:31 -08:00)
## Compare commits

178 commits are included in this comparison. Only the SHA1 column survived the mirror (first listed: 7cb0c518fc, last listed: 5191658562); the author, date, and message columns were not captured.
### .github/workflows/publish-to-pypi.yml (vendored, 61 lines changed)

```diff
@@ -1,33 +1,50 @@
 name: Publish Python 🐍 distributions 📦 to PyPI
-on: push
+on:
+  pull_request:
+    types:
+      - closed
+
+permissions:
+  contents: write
+
 jobs:
   build-n-publish:
     name: Build and publish Python 🐍 distributions 📦 to PyPI
     runs-on: ubuntu-latest
+
+    if: github.event.pull_request.merged == true && github.event.pull_request.base.ref == 'main'
+
     steps:
       - uses: actions/checkout@v3
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
           python-version: "3.10"
-      - name: Install poetry
-        run: >-
-          python3 -m
-          pip install
-          poetry
-          --user
-      - name: Build distribution 📦
-        run: >-
-          python3 -m
-          poetry
-          build
-      - name: Publish distribution 📦 to PyPI
-        if: startsWith(github.ref, 'refs/tags')
-        uses: pypa/gh-action-pypi-publish@release/v1
-        with:
-          password: ${{ secrets.PYPI_API_TOKEN }}
+
+      - name: Install dependencies
+        run: pip install toml
+
+      - name: Increment version
+        run: python increment_version.py
+
+      - name: Commit version increment
+        run: |
+          git config --global user.name 'github-actions'
+          git config --global user.email 'github-actions@github.com'
+          git add pyproject.toml
+          git commit -m 'Increment version'
+
+      - name: Push changes
+        run: git push
+
+      - name: Install poetry
+        run: pip install poetry --user
+
+      - name: Build distribution 📦
+        run: poetry build
+
+      - name: Publish distribution 📦 to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          password: ${{ secrets.PYPI_API_TOKEN }}
```
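The new workflow releases on every merged PR to `main`: bump the patch version, commit and push the bump, then build and publish. A minimal local dry run of the same sequence (minus the PyPI upload) might look like the sketch below; the `increment_version.py` script it calls is the one added later in this diff, while the assumption that `git` and `pip` are on your PATH is ours, not the repo's.

```python
import subprocess

# Mirror the workflow steps locally, minus the actual PyPI upload.
steps = [
    ["pip", "install", "toml", "poetry"],  # workflow: Install dependencies / Install poetry
    ["python", "increment_version.py"],    # workflow: Increment version
    ["git", "add", "pyproject.toml"],      # workflow: Commit version increment
    ["git", "commit", "-m", "Increment version"],
    ["poetry", "build"],                   # workflow: Build distribution
]

for cmd in steps:
    print("$", " ".join(cmd))
    subprocess.run(cmd, check=True)  # stop at the first failing step, like CI would
```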
### .gitignore (vendored, 10 lines changed)

```diff
@@ -1,10 +1,10 @@
-/.idea
-**/.DS_Store
 /venv/
-/ven/
+/.idea
 **/__pycache__/
 **/.pytest_cache/
+/.ipynb_checkpoints/
+**/output/
+**/.DS_Store
 *.pyc
 .env
 dist
-/.ipynb_checkpoints/
```
### .pre-commit-config.yaml (new file, 7 lines)

```yaml
repos:
  - repo: https://github.com/psf/black
    rev: 24.2.0
    hooks:
      - id: black
        language_version: python
        args: [--line-length=88, --quiet]
```
### JobSpy_Demo.ipynb (1304 lines changed)

File diff suppressed because it is too large.
### README.md (327 lines changed)

````diff
@@ -1,55 +1,50 @@
 <img src="https://github.com/cullenwatson/JobSpy/assets/78247585/ae185b7e-e444-4712-8bb9-fa97f53e896b" width="400">
 
-**JobSpy** is a simple, yet comprehensive, job scraping library.
+**JobSpy** is a job scraping library with the goal of aggregating all the jobs from popular job boards with one tool.
 
 ## Features
 
-- Scrapes job postings from **LinkedIn**, **Indeed** & **ZipRecruiter** simultaneously
-- Aggregates the job postings in a Pandas DataFrame
+- Scrapes job postings from **LinkedIn**, **Indeed**, **Glassdoor**, **Google**, **ZipRecruiter**, & **Bayt** concurrently
+- Aggregates the job postings in a dataframe
+- Proxies support to bypass blocking
 
 [Video Guide for JobSpy](https://youtu.be/vF1PflrGqNM) - updated for release v1.1.3
 
 ### Installation
-`pip install python-jobspy`
 
-_Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_
+```
+pip install -U python-jobspy
+```
+
+_Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_
 
 ### Usage
 
 ```python
+import csv
 from jobspy import scrape_jobs
-import pandas as pd
 
-jobs: pd.DataFrame = scrape_jobs(
-    site_name=["indeed", "linkedin", "zip_recruiter"],
+jobs = scrape_jobs(
+    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor", "google", "bayt"],
     search_term="software engineer",
-    location="Dallas, TX",
-    results_wanted=10,
-
-    # country: only needed for indeed
-    country='USA'
+    google_search_term="software engineer jobs near San Francisco, CA since yesterday",
+    location="San Francisco, CA",
+    results_wanted=20,
+    hours_old=72,
+    country_indeed='USA',
+
+    # linkedin_fetch_description=True # gets more info such as description, direct job url (slower)
+    # proxies=["208.195.175.46:65095", "208.195.175.45:65095", "localhost"],
 )
-
-if jobs.empty:
-    print("No jobs found.")
-else:
-    #1 print
-    pd.set_option('display.max_columns', None)
-    pd.set_option('display.max_rows', None)
-    pd.set_option('display.width', None)
-    pd.set_option('display.max_colwidth', 50)  # set to 0 to see full job url / desc
-    print(jobs)
-
-    #2 display in Jupyter Notebook
-    #display(jobs)
-
-    #3 output to .csv
-    #jobs.to_csv('jobs.csv', index=False)
+print(f"Found {len(jobs)} jobs")
+print(jobs.head())
+jobs.to_csv("jobs.csv", quoting=csv.QUOTE_NONNUMERIC, escapechar="\\", index=False)  # to_excel
 ```
 
 ### Output
 
 ```
-SITE       TITLE                             COMPANY_NAME      CITY          STATE  JOB_TYPE  INTERVAL  MIN_AMOUNT  MAX_AMOUNT  JOB_URL                                            DESCRIPTION
+SITE       TITLE                             COMPANY           CITY          STATE  JOB_TYPE  INTERVAL  MIN_AMOUNT  MAX_AMOUNT  JOB_URL                                            DESCRIPTION
 indeed     Software Engineer                 AMERICAN SYSTEMS  Arlington     VA     None      yearly    200000      150000      https://www.indeed.com/viewjob?jk=5e409e577046...  THIS POSITION COMES WITH A 10K SIGNING BONUS!...
 indeed     Senior Software Engineer          TherapyNotes.com  Philadelphia  PA     fulltime  yearly    135000      110000      https://www.indeed.com/viewjob?jk=da39574a40cb...  About Us TherapyNotes is the national leader i...
 linkedin   Software Engineer - Early Career  Lockheed Martin   Sunnyvale     CA     fulltime  yearly    None        None        https://www.linkedin.com/jobs/view/3693012711      Description:By bringing together people that u...
@@ -57,139 +52,197 @@ linkedin Full-Stack Software Engineer Rain New York
 zip_recruiter  Software Engineer - New Grad  ZipRecruiter  Santa Monica  CA  fulltime  yearly  130000  150000  https://www.ziprecruiter.com/jobs/ziprecruiter...  We offer a hybrid work environment. Most US-ba...
 zip_recruiter  Software Developer            TEKsystems    Phoenix       AZ  fulltime  hourly  65      75      https://www.ziprecruiter.com/jobs/teksystems-0...  Top Skills' Details• 6 years of Java developme...
 ```
 
 ### Parameters for `scrape_jobs()`
 
 ```plaintext
-Required
-├── site_type (List[enum]): linkedin, zip_recruiter, indeed
-└── search_term (str)
 Optional
-├── location (int)
-├── distance (int): in miles
-├── job_type (enum): fulltime, parttime, internship, contract
+├── site_name (list|str):
+|    linkedin, zip_recruiter, indeed, glassdoor, google, bayt
+|    (default is all)
+│
+├── search_term (str)
+│
+├── google_search_term (str)
+|    search term for google jobs. This is the only param for filtering google jobs.
+│
+├── location (str)
+│
+├── distance (int):
+|    in miles, default 50
+│
+├── job_type (str):
+|    fulltime, parttime, internship, contract
+│
+├── proxies (list):
+|    in format ['user:pass@host:port', 'localhost']
+|    each job board scraper will round robin through the proxies
+│
 ├── is_remote (bool)
-├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
-├── easy_apply (bool): filters for jobs on LinkedIn that have the 'Easy Apply' option
-├── country (enum): uses the corresponding subdomain on Indeed (e.g. Canada on Indeed is ca.indeed.com
+│
+├── results_wanted (int):
+|    number of job results to retrieve for each site specified in 'site_name'
+│
+├── easy_apply (bool):
+|    filters for jobs that are hosted on the job board site (LinkedIn easy apply filter no longer works)
+│
+├── description_format (str):
+|    markdown, html (Format type of the job descriptions. Default is markdown.)
+│
+├── offset (int):
+|    starts the search from an offset (e.g. 25 will start the search from the 25th result)
+│
+├── hours_old (int):
+|    filters jobs by the number of hours since the job was posted
+|    (ZipRecruiter and Glassdoor round up to next day.)
+│
+├── verbose (int) {0, 1, 2}:
+|    Controls the verbosity of the runtime printouts
+|    (0 prints only errors, 1 is errors+warnings, 2 is all logs. Default is 2.)
+│
+├── linkedin_fetch_description (bool):
+|    fetches full description and direct job url for LinkedIn (Increases requests by O(n))
+│
+├── linkedin_company_ids (list[int]):
+|    searches for linkedin jobs with specific company ids
+│
+├── country_indeed (str):
+|    filters the country on Indeed & Glassdoor (see below for correct spelling)
+│
+├── enforce_annual_salary (bool):
+|    converts wages to annual salary
+│
+├── ca_cert (str)
+|    path to CA Certificate file for proxies
 ```
 
-### JobPost Schema
-```plaintext
-JobPost
-├── title (str)
-├── company_name (str)
-├── job_url (str)
-├── location (object)
-│ ├── country (str)
-│ ├── city (str)
-│ ├── state (str)
-├── description (str)
-├── job_type (enum)
-├── compensation (object)
-│ ├── interval (CompensationInterval): yearly, monthly, weekly, daily, hourly
-│ ├── min_amount (int)
-│ ├── max_amount (int)
-│ └── currency (str)
-└── date_posted (datetime)
+```
+├── Indeed limitations:
+|    Only one from this list can be used in a search:
+|    - hours_old
+|    - job_type & is_remote
+|    - easy_apply
+│
+└── LinkedIn limitations:
+|    Only one from this list can be used in a search:
+|    - hours_old
+|    - easy_apply
 ```
 
 ## Supported Countries for Job Searching
 
 
 ### **LinkedIn**
 
-LinkedIn searches globally. Use the `location` parameter
+LinkedIn searches globally & uses only the `location` parameter.
 
 ### **ZipRecruiter**
 
-ZipRecruiter searches for jobs in US/Canada. Use the `location` parameter
+ZipRecruiter searches for jobs in **US/Canada** & uses only the `location` parameter.
 
+### **Indeed / Glassdoor**
+
+Indeed & Glassdoor supports most countries, but the `country_indeed` parameter is required. Additionally, use the `location`
+parameter to narrow down the location, e.g. city & state if necessary.
+
+You can specify the following countries when searching on Indeed (use the exact name, * indicates support for Glassdoor):
+
+|                      |              |            |                |
+|----------------------|--------------|------------|----------------|
+| Argentina            | Australia*   | Austria*   | Bahrain        |
+| Belgium*             | Brazil*      | Canada*    | Chile          |
+| China                | Colombia     | Costa Rica | Czech Republic |
+| Denmark              | Ecuador      | Egypt      | Finland        |
+| France*              | Germany*     | Greece     | Hong Kong*     |
+| Hungary              | India*       | Indonesia  | Ireland*       |
+| Israel               | Italy*       | Japan      | Kuwait         |
+| Luxembourg           | Malaysia     | Mexico*    | Morocco        |
+| Netherlands*         | New Zealand* | Nigeria    | Norway         |
+| Oman                 | Pakistan     | Panama     | Peru           |
+| Philippines          | Poland       | Portugal   | Qatar          |
+| Romania              | Saudi Arabia | Singapore* | South Africa   |
+| South Korea          | Spain*       | Sweden     | Switzerland*   |
+| Taiwan               | Thailand     | Turkey     | Ukraine        |
+| United Arab Emirates | UK*          | USA*       | Uruguay        |
+| Venezuela            | Vietnam*     |            |                |
+
+### **Bayt**
+
+Bayt only uses the search_term parameter currently and searches internationally
+
-### **Indeed**
-For Indeed, you `location` along with `country` param
-
-You can specify the following countries when searching on Indeed (use the exact name):
-
-- Argentina
-- Australia
-- Austria
-- Bahrain
-- Belgium
-- Brazil
-- Canada
-- Chile
-- China
-- Colombia
-- Costa Rica
-- Czech Republic
-- Denmark
-- Ecuador
-- Egypt
-- Finland
-- France
-- Germany
-- Greece
-- Hong Kong
-- Hungary
-- India
-- Indonesia
-- Ireland
-- Israel
-- Italy
-- Japan
-- Kuwait
-- Luxembourg
-- Malaysia
-- Mexico
-- Morocco
-- Netherlands
-- New Zealand
-- Nigeria
-- Norway
-- Oman
-- Pakistan
-- Panama
-- Peru
-- Philippines
-- Poland
-- Portugal
-- Qatar
-- Romania
-- Saudi Arabia
-- Singapore
-- South Africa
-- South Korea
-- Spain
-- Sweden
-- Switzerland
-- Taiwan
-- Thailand
-- Turkey
-- Ukraine
-- United Arab Emirates
-- UK
-- USA
-- Uruguay
-- Venezuela
-- Vietnam
+## Notes
+* Indeed is the best scraper currently with no rate limiting.
+* All the job board endpoints are capped at around 1000 jobs on a given search.
+* LinkedIn is the most restrictive and usually rate limits around the 10th page with one ip. Proxies are a must basically.
 
 ## Frequently Asked Questions
 
 ---
 
+**Q: Why is Indeed giving unrelated roles?**
+**A:** Indeed searches the description too.
+
-**Q: Encountering issues with your queries?**
-**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems persist, [submit an issue](#).
+- use - to remove words
+- "" for exact match
 
+Example of a good Indeed query
+
+```py
+search_term='"engineering intern" software summer (java OR python OR c++) 2025 -tax -marketing'
+```
+
+This searches the description/title and must include software, summer, 2025, one of the languages, engineering intern exactly, no tax, no marketing.
+
+---
+
+**Q: No results when using "google"?**
+**A:** You have to use super specific syntax. Search for google jobs on your browser and then whatever pops up in the google jobs search box after applying some filters is what you need to copy & paste into the google_search_term.
+
 ---
 
 **Q: Received a response code 429?**
-**A:** This indicates that you have been blocked by the job board site for sending too many requests. Currently, **ZipRecruiter** is particularly aggressive with blocking. We recommend:
+**A:** This indicates that you have been blocked by the job board site for sending too many requests. All of the job board sites are aggressive with blocking. We recommend:
 
-- Waiting a few seconds between requests.
-- Trying a VPN to change your IP address.
+- Wait some time between scrapes (site-dependent).
+- Try using the proxies param to change your IP address.
 
-**Note:** Proxy support is in development and coming soon!
-
 ---
 
+### JobPost Schema
+
+```plaintext
+JobPost
+├── title
+├── company
+├── company_url
+├── job_url
+├── location
+│ ├── country
+│ ├── city
+│ ├── state
+├── description
+├── job_type: fulltime, parttime, internship, contract
+├── job_function
+│ ├── interval: yearly, monthly, weekly, daily, hourly
+│ ├── min_amount
+│ ├── max_amount
+│ ├── currency
+│ └── salary_source: direct_data, description (parsed from posting)
+├── date_posted
+├── emails
+└── is_remote
+
+Linkedin specific
+└── job_level
+
+Linkedin & Indeed specific
+└── company_industry
+
+Indeed specific
+├── company_country
+├── company_addresses
+├── company_employees_label
+├── company_revenue_label
+├── company_description
+└── company_logo
+```
````
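For a quick sanity check of the parameters introduced in this README revision, a minimal call that exercises the proxy rotation and the `hours_old` filter could look like the sketch below. The proxy addresses are placeholders, not working endpoints, and per the Indeed limitations above, `hours_old` is deliberately not combined with `job_type` or `easy_apply`.

```python
from jobspy import scrape_jobs

# Minimal sketch: recent postings from two boards, routed through
# round-robin proxies. The proxy strings below are placeholders.
jobs = scrape_jobs(
    site_name=["indeed", "linkedin"],
    search_term="data engineer",
    location="Austin, TX",
    results_wanted=10,
    hours_old=24,  # only postings from the last day
    country_indeed="USA",
    proxies=["user:pass@10.0.0.1:8080", "localhost"],  # placeholder proxies
)
print(jobs[["site", "title", "company", "min_amount", "max_amount"]].head())
```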
### increment_version.py (new file, 21 lines)

```python
import toml


def increment_version(version):
    major, minor, patch = map(int, version.split('.'))
    patch += 1
    return f"{major}.{minor}.{patch}"


# Load pyproject.toml
with open('pyproject.toml', 'r') as file:
    pyproject = toml.load(file)

# Increment the version
current_version = pyproject['tool']['poetry']['version']
new_version = increment_version(current_version)
pyproject['tool']['poetry']['version'] = new_version

# Save the updated pyproject.toml
with open('pyproject.toml', 'w') as file:
    toml.dump(pyproject, file)

print(f"Version updated from {current_version} to {new_version}")
```
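Note that the bump is patch-only and purely textual: major and minor are parsed but never changed. Since the script performs its file I/O at import time, a safe way to sanity-check just the bump logic is to copy the function out, as in this sketch:

```python
def increment_version(version: str) -> str:
    # Same logic as the script: split into major.minor.patch, bump patch only.
    major, minor, patch = map(int, version.split("."))
    patch += 1
    return f"{major}.{minor}.{patch}"

assert increment_version("1.1.75") == "1.1.76"  # matches the 1.1.76 seen in pyproject.toml
assert increment_version("2.0.9") == "2.0.10"   # patch can roll past 9; no carrying
```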
### poetry.lock (generated, 2665 lines changed)

File diff suppressed because it is too large.
### pyproject.toml

```diff
@@ -1,27 +1,35 @@
+[build-system]
+requires = [ "poetry-core",]
+build-backend = "poetry.core.masonry.api"
+
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.0"
+version = "1.1.76"
-description = "Job scraper for LinkedIn, Indeed & ZipRecruiter"
+description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
-authors = ["Zachary Hampton <zachary@zacharysproducts.com>", "Cullen Watson <cullen@cullen.ai>"]
+authors = [ "Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>",]
+homepage = "https://github.com/Bunsly/JobSpy"
 readme = "README.md"
+keywords = [ "jobs-scraper", "linkedin", "indeed", "glassdoor", "ziprecruiter",]
+[[tool.poetry.packages]]
+include = "jobspy"
+from = "src"
 
-packages = [
-    { include = "jobspy", from = "src" }
-]
+[tool.black]
+line-length = 88
 
 [tool.poetry.dependencies]
 python = "^3.10"
 requests = "^2.31.0"
-tls-client = "^0.2.1"
 beautifulsoup4 = "^4.12.2"
 pandas = "^2.1.0"
+NUMPY = "1.26.3"
 pydantic = "^2.3.0"
+tls-client = "^1.0.1"
+markdownify = "^0.13.1"
+regex = "^2024.4.28"
 
 [tool.poetry.group.dev.dependencies]
 pytest = "^7.4.1"
 jupyter = "^1.0.0"
+black = "*"
+pre-commit = "*"
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
```
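One subtle change here: the packages declaration moved from an inline array (`packages = [...]`) to TOML's array-of-tables form (`[[tool.poetry.packages]]`). Both spellings decode to the same structure, which can be verified with the `toml` module (the same library the new workflow installs); the bare `packages` key below is a simplified stand-in for the full `tool.poetry.packages` path.

```python
import toml

inline = 'packages = [ { include = "jobspy", from = "src" } ]'
array_of_tables = """
[[packages]]
include = "jobspy"
from = "src"
"""

# Both TOML spellings decode to the identical Python structure.
assert toml.loads(inline) == toml.loads(array_of_tables)
print(toml.loads(array_of_tables)["packages"])  # [{'include': 'jobspy', 'from': 'src'}]
```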
### src/jobspy/__init__.py

```diff
@@ -1,121 +1,259 @@
+from __future__ import annotations
+
 import pandas as pd
-from typing import List, Tuple
+from typing import Tuple
+from concurrent.futures import ThreadPoolExecutor, as_completed
 
 from .jobs import JobType, Location
+from .scrapers.utils import set_logger_level, extract_salary, create_logger
 from .scrapers.indeed import IndeedScraper
 from .scrapers.ziprecruiter import ZipRecruiterScraper
+from .scrapers.glassdoor import GlassdoorScraper
+from .scrapers.google import GoogleJobsScraper
 from .scrapers.linkedin import LinkedInScraper
-from .scrapers import ScraperInput, Site, JobResponse, Country
-
-SCRAPER_MAPPING = {
-    Site.LINKEDIN: LinkedInScraper,
-    Site.INDEED: IndeedScraper,
-    Site.ZIP_RECRUITER: ZipRecruiterScraper,
-}
-
-
-def _map_str_to_site(site_name: str) -> Site:
-    return Site[site_name.upper()]
+from .scrapers.bayt import BaytScraper
+from .scrapers import SalarySource, ScraperInput, Site, JobResponse, Country
+from .scrapers.exceptions import (
+    LinkedInException,
+    IndeedException,
+    ZipRecruiterException,
+    GlassdoorException,
+    GoogleJobsException,
+)
 
 
 def scrape_jobs(
-    site_name: str | Site | List[Site],
-    search_term: str,
-    location: str = "",
-    distance: int = None,
+    site_name: str | list[str] | Site | list[Site] | None = None,
+    search_term: str | None = None,
+    google_search_term: str | None = None,
+    location: str | None = None,
+    distance: int | None = 50,
     is_remote: bool = False,
-    job_type: JobType = None,
-    easy_apply: bool = False,  # linkedin
+    job_type: str | None = None,
+    easy_apply: bool | None = None,
     results_wanted: int = 15,
-    country: str = "usa",
+    country_indeed: str = "usa",
+    hyperlinks: bool = False,
+    proxies: list[str] | str | None = None,
+    ca_cert: str | None = None,
+    description_format: str = "markdown",
+    linkedin_fetch_description: bool | None = False,
+    linkedin_company_ids: list[int] | None = None,
+    offset: int | None = 0,
+    hours_old: int = None,
+    enforce_annual_salary: bool = False,
+    verbose: int = 0,
+    **kwargs,
 ) -> pd.DataFrame:
     """
-    Asynchronously scrapes job data from multiple job sites.
-    :return: results_wanted: pandas dataframe containing job data
+    Simultaneously scrapes job data from multiple job sites.
+    :return: pandas dataframe containing job data
     """
+    SCRAPER_MAPPING = {
+        Site.LINKEDIN: LinkedInScraper,
+        Site.INDEED: IndeedScraper,
+        Site.ZIP_RECRUITER: ZipRecruiterScraper,
+        Site.GLASSDOOR: GlassdoorScraper,
+        Site.GOOGLE: GoogleJobsScraper,
+        Site.BAYT: BaytScraper,
+    }
+    set_logger_level(verbose)
 
-    if type(site_name) == str:
-        site_name = _map_str_to_site(site_name)
+    def map_str_to_site(site_name: str) -> Site:
+        return Site[site_name.upper()]
 
-    country_enum = Country.from_string(country)
+    def get_enum_from_value(value_str):
+        for job_type in JobType:
+            if value_str in job_type.value:
+                return job_type
+        raise Exception(f"Invalid job type: {value_str}")
+
+    job_type = get_enum_from_value(job_type) if job_type else None
+
+    def get_site_type():
+        site_types = list(Site)
+        if isinstance(site_name, str):
+            site_types = [map_str_to_site(site_name)]
+        elif isinstance(site_name, Site):
+            site_types = [site_name]
+        elif isinstance(site_name, list):
+            site_types = [
+                map_str_to_site(site) if isinstance(site, str) else site
+                for site in site_name
+            ]
+        return site_types
+
+    country_enum = Country.from_string(country_indeed)
 
-    site_type = [site_name] if type(site_name) == Site else site_name
     scraper_input = ScraperInput(
-        site_type=site_type,
+        site_type=get_site_type(),
         country=country_enum,
         search_term=search_term,
+        google_search_term=google_search_term,
         location=location,
         distance=distance,
         is_remote=is_remote,
         job_type=job_type,
         easy_apply=easy_apply,
+        description_format=description_format,
+        linkedin_fetch_description=linkedin_fetch_description,
         results_wanted=results_wanted,
+        linkedin_company_ids=linkedin_company_ids,
+        offset=offset,
+        hours_old=hours_old,
     )
 
     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
         scraper_class = SCRAPER_MAPPING[site]
-        scraper = scraper_class()
+        scraper = scraper_class(proxies=proxies, ca_cert=ca_cert)
         scraped_data: JobResponse = scraper.scrape(scraper_input)
+        cap_name = site.value.capitalize()
+        site_name = "ZipRecruiter" if cap_name == "Zip_recruiter" else cap_name
+        create_logger(site_name).info(f"finished scraping")
         return site.value, scraped_data
 
-    results = {}
-    for site in scraper_input.site_type:
-        site_value, scraped_data = scrape_site(site)
-        results[site_value] = scraped_data
-
-    dfs = []
+    site_to_jobs_dict = {}
+
+    def worker(site):
+        site_val, scraped_info = scrape_site(site)
+        return site_val, scraped_info
+
+    with ThreadPoolExecutor() as executor:
+        future_to_site = {
+            executor.submit(worker, site): site for site in scraper_input.site_type
+        }
+        for future in as_completed(future_to_site):
+            site_value, scraped_data = future.result()
+            site_to_jobs_dict[site_value] = scraped_data
+
+    def convert_to_annual(job_data: dict):
+        if job_data["interval"] == "hourly":
+            job_data["min_amount"] *= 2080
+            job_data["max_amount"] *= 2080
+        if job_data["interval"] == "monthly":
+            job_data["min_amount"] *= 12
+            job_data["max_amount"] *= 12
+        if job_data["interval"] == "weekly":
+            job_data["min_amount"] *= 52
+            job_data["max_amount"] *= 52
+        if job_data["interval"] == "daily":
+            job_data["min_amount"] *= 260
+            job_data["max_amount"] *= 260
+        job_data["interval"] = "yearly"
+
+    jobs_dfs: list[pd.DataFrame] = []
 
-    for site, job_response in results.items():
+    for site, job_response in site_to_jobs_dict.items():
         for job in job_response.jobs:
-            data = job.dict()
-            data["site"] = site
-            data["company"] = data["company_name"]
-            if data["job_type"]:
-                # Take the first value from the job type tuple
-                data["job_type"] = data["job_type"].value[0]
-            else:
-                data["job_type"] = None
-
-            data["location"] = Location(**data["location"]).display_location()
+            job_data = job.dict()
+            job_url = job_data["job_url"]
+            job_data["job_url_hyper"] = f'<a href="{job_url}">{job_url}</a>'
+            job_data["site"] = site
+            job_data["company"] = job_data["company_name"]
+            job_data["job_type"] = (
+                ", ".join(job_type.value[0] for job_type in job_data["job_type"])
+                if job_data["job_type"]
+                else None
+            )
+            job_data["emails"] = (
+                ", ".join(job_data["emails"]) if job_data["emails"] else None
+            )
+            if job_data["location"]:
+                job_data["location"] = Location(
+                    **job_data["location"]
+                ).display_location()
 
-            compensation_obj = data.get("compensation")
+            compensation_obj = job_data.get("compensation")
             if compensation_obj and isinstance(compensation_obj, dict):
-                data["interval"] = (
+                job_data["interval"] = (
                     compensation_obj.get("interval").value
                     if compensation_obj.get("interval")
                     else None
                 )
-                data["min_amount"] = compensation_obj.get("min_amount")
-                data["max_amount"] = compensation_obj.get("max_amount")
-                data["currency"] = compensation_obj.get("currency", "USD")
+                job_data["min_amount"] = compensation_obj.get("min_amount")
+                job_data["max_amount"] = compensation_obj.get("max_amount")
+                job_data["currency"] = compensation_obj.get("currency", "USD")
+                job_data["salary_source"] = SalarySource.DIRECT_DATA.value
+                if enforce_annual_salary and (
+                    job_data["interval"]
+                    and job_data["interval"] != "yearly"
+                    and job_data["min_amount"]
+                    and job_data["max_amount"]
+                ):
+                    convert_to_annual(job_data)
             else:
-                data["interval"] = None
-                data["min_amount"] = None
-                data["max_amount"] = None
-                data["currency"] = None
+                if country_enum == Country.USA:
+                    (
+                        job_data["interval"],
+                        job_data["min_amount"],
+                        job_data["max_amount"],
+                        job_data["currency"],
+                    ) = extract_salary(
+                        job_data["description"],
+                        enforce_annual_salary=enforce_annual_salary,
+                    )
+                    job_data["salary_source"] = SalarySource.DESCRIPTION.value
 
-            job_df = pd.DataFrame([data])
-            dfs.append(job_df)
+            job_data["salary_source"] = (
+                job_data["salary_source"]
+                if "min_amount" in job_data and job_data["min_amount"]
+                else None
+            )
+            job_df = pd.DataFrame([job_data])
+            jobs_dfs.append(job_df)
 
-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
+    if jobs_dfs:
+        # Step 1: Filter out all-NA columns from each DataFrame before concatenation
+        filtered_dfs = [df.dropna(axis=1, how="all") for df in jobs_dfs]
+
+        # Step 2: Concatenate the filtered DataFrames
+        jobs_df = pd.concat(filtered_dfs, ignore_index=True)
+
+        # Desired column order
         desired_order = [
+            "id",
             "site",
+            "job_url_hyper" if hyperlinks else "job_url",
+            "job_url_direct",
             "title",
             "company",
             "location",
+            "date_posted",
             "job_type",
+            "salary_source",
             "interval",
             "min_amount",
             "max_amount",
             "currency",
-            "job_url",
+            "is_remote",
+            "job_level",
+            "job_function",
+            "listing_type",
+            "emails",
             "description",
+            "company_industry",
+            "company_url",
+            "company_logo",
+            "company_url_direct",
+            "company_addresses",
+            "company_num_employees",
+            "company_revenue",
+            "company_description",
         ]
-        df = df[desired_order]
-    else:
-        df = pd.DataFrame()
-
-    return df
+
+        # Step 3: Ensure all desired columns are present, adding missing ones as empty
+        for column in desired_order:
+            if column not in jobs_df.columns:
+                jobs_df[column] = None  # Add missing columns as empty
+
+        # Reorder the DataFrame according to the desired order
+        jobs_df = jobs_df[desired_order]
+
+        # Step 4: Sort the DataFrame as required
+        return jobs_df.sort_values(
+            by=["site", "date_posted"], ascending=[True, False]
+        ).reset_index(drop=True)
+    else:
+        return pd.DataFrame()
```
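The `enforce_annual_salary` path above normalizes every pay interval to a yearly figure with fixed multipliers: 2080 for hourly (40 hours × 52 weeks), 260 for daily (5 workdays × 52 weeks), 52 for weekly, and 12 for monthly. A standalone sketch of the same arithmetic, rewritten as a lookup table rather than the chained `if`s:

```python
def convert_to_annual(job_data: dict) -> dict:
    # Multipliers match scrape_jobs: 40 h/week * 52 weeks = 2080 hours/year,
    # 5 days/week * 52 weeks = 260 workdays/year.
    multipliers = {"hourly": 2080, "daily": 260, "weekly": 52, "monthly": 12}
    factor = multipliers.get(job_data["interval"])
    if factor:
        job_data["min_amount"] *= factor
        job_data["max_amount"] *= factor
    job_data["interval"] = "yearly"
    return job_data

# $65-$75/hour becomes $135,200-$156,000/year
print(convert_to_annual({"interval": "hourly", "min_amount": 65, "max_amount": 75}))
```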
### src/jobspy/jobs.py

```diff
@@ -1,8 +1,9 @@
-from typing import Union, Optional
+from __future__ import annotations
+
+from typing import Optional
 from datetime import date
 from enum import Enum
-from pydantic import BaseModel, validator
+from pydantic import BaseModel
 
 
 class JobType(Enum):
@@ -37,10 +38,16 @@ class JobType(Enum):
         "повназайнятість",
         "toànthờigian",
     )
-    PART_TIME = ("parttime", "teilzeit")
+    PART_TIME = ("parttime", "teilzeit", "částečnýúvazek", "deltid")
     CONTRACT = ("contract", "contractor")
     TEMPORARY = ("temporary",)
-    INTERNSHIP = ("internship", "prácticas", "ojt(onthejobtraining)", "praktikum")
+    INTERNSHIP = (
+        "internship",
+        "prácticas",
+        "ojt(onthejobtraining)",
+        "praktikum",
+        "praktik",
+    )
 
     PER_DIEM = ("perdiem",)
     NIGHTS = ("nights",)
@@ -50,40 +57,47 @@ class JobType(Enum):
 
 
 class Country(Enum):
-    ARGENTINA = ("argentina", "ar")
-    AUSTRALIA = ("australia", "au")
-    AUSTRIA = ("austria", "at")
+    """
+    Gets the subdomain for Indeed and Glassdoor.
+    The second item in the tuple is the subdomain (and API country code if there's a ':' separator) for Indeed
+    The third item in the tuple is the subdomain (and tld if there's a ':' separator) for Glassdoor
+    """
+
+    ARGENTINA = ("argentina", "ar", "com.ar")
+    AUSTRALIA = ("australia", "au", "com.au")
+    AUSTRIA = ("austria", "at", "at")
     BAHRAIN = ("bahrain", "bh")
-    BELGIUM = ("belgium", "be")
-    BRAZIL = ("brazil", "br")
-    CANADA = ("canada", "ca")
+    BELGIUM = ("belgium", "be", "fr:be")
+    BRAZIL = ("brazil", "br", "com.br")
+    CANADA = ("canada", "ca", "ca")
     CHILE = ("chile", "cl")
     CHINA = ("china", "cn")
     COLOMBIA = ("colombia", "co")
     COSTARICA = ("costa rica", "cr")
-    CZECHREPUBLIC = ("czech republic", "cz")
+    CZECHREPUBLIC = ("czech republic,czechia", "cz")
     DENMARK = ("denmark", "dk")
     ECUADOR = ("ecuador", "ec")
     EGYPT = ("egypt", "eg")
     FINLAND = ("finland", "fi")
-    FRANCE = ("france", "fr")
-    GERMANY = ("germany", "de")
+    FRANCE = ("france", "fr", "fr")
+    GERMANY = ("germany", "de", "de")
     GREECE = ("greece", "gr")
-    HONGKONG = ("hong kong", "hk")
+    HONGKONG = ("hong kong", "hk", "com.hk")
     HUNGARY = ("hungary", "hu")
-    INDIA = ("india", "in")
+    INDIA = ("india", "in", "co.in")
     INDONESIA = ("indonesia", "id")
-    IRELAND = ("ireland", "ie")
+    IRELAND = ("ireland", "ie", "ie")
     ISRAEL = ("israel", "il")
-    ITALY = ("italy", "it")
+    ITALY = ("italy", "it", "it")
     JAPAN = ("japan", "jp")
     KUWAIT = ("kuwait", "kw")
     LUXEMBOURG = ("luxembourg", "lu")
-    MALAYSIA = ("malaysia", "malaysia")
-    MEXICO = ("mexico", "mx")
+    MALAYSIA = ("malaysia", "malaysia:my", "com")
+    MALTA = ("malta", "malta:mt", "mt")
+    MEXICO = ("mexico", "mx", "com.mx")
     MOROCCO = ("morocco", "ma")
-    NETHERLANDS = ("netherlands", "nl")
-    NEWZEALAND = ("new zealand", "nz")
+    NETHERLANDS = ("netherlands", "nl", "nl")
+    NEWZEALAND = ("new zealand", "nz", "co.nz")
     NIGERIA = ("nigeria", "ng")
     NORWAY = ("norway", "no")
     OMAN = ("oman", "om")
@@ -96,54 +110,66 @@ class Country(Enum):
     QATAR = ("qatar", "qa")
     ROMANIA = ("romania", "ro")
     SAUDIARABIA = ("saudi arabia", "sa")
-    SINGAPORE = ("singapore", "sg")
+    SINGAPORE = ("singapore", "sg", "sg")
     SOUTHAFRICA = ("south africa", "za")
     SOUTHKOREA = ("south korea", "kr")
-    SPAIN = ("spain", "es")
+    SPAIN = ("spain", "es", "es")
     SWEDEN = ("sweden", "se")
-    SWITZERLAND = ("switzerland", "ch")
+    SWITZERLAND = ("switzerland", "ch", "de:ch")
     TAIWAN = ("taiwan", "tw")
     THAILAND = ("thailand", "th")
-    TURKEY = ("turkey", "tr")
+    TURKEY = ("türkiye,turkey", "tr")
     UKRAINE = ("ukraine", "ua")
     UNITEDARABEMIRATES = ("united arab emirates", "ae")
-    UK = ("uk", "uk")
-    USA = ("usa", "www")
+    UK = ("uk,united kingdom", "uk:gb", "co.uk")
+    USA = ("usa,us,united states", "www:us", "com")
     URUGUAY = ("uruguay", "uy")
     VENEZUELA = ("venezuela", "ve")
-    VIETNAM = ("vietnam", "vn")
+    VIETNAM = ("vietnam", "vn", "com")
 
     # internal for ziprecruiter
     US_CANADA = ("usa/ca", "www")
 
-    # internal for linkeind
+    # internal for linkedin
     WORLDWIDE = ("worldwide", "www")
 
-    def __new__(cls, country, domain):
-        obj = object.__new__(cls)
-        obj._value_ = country
-        obj.domain = domain
-        return obj
+    @property
+    def indeed_domain_value(self):
+        subdomain, _, api_country_code = self.value[1].partition(":")
+        if subdomain and api_country_code:
+            return subdomain, api_country_code.upper()
+        return self.value[1], self.value[1].upper()
 
     @property
-    def domain_value(self):
-        return self.domain
+    def glassdoor_domain_value(self):
+        if len(self.value) == 3:
+            subdomain, _, domain = self.value[2].partition(":")
+            if subdomain and domain:
+                return f"{subdomain}.glassdoor.{domain}"
+            else:
+                return f"www.glassdoor.{self.value[2]}"
+        else:
+            raise Exception(f"Glassdoor is not available for {self.name}")
+
+    def get_glassdoor_url(self):
+        return f"https://{self.glassdoor_domain_value}/"
 
     @classmethod
     def from_string(cls, country_str: str):
         """Convert a string to the corresponding Country enum."""
         country_str = country_str.strip().lower()
         for country in cls:
-            if country.value == country_str:
+            country_names = country.value[0].split(",")
+            if country_str in country_names:
                 return country
         valid_countries = [country.value for country in cls]
         raise ValueError(
-            f"Invalid country string: '{country_str}'. Valid countries (only include this param for Indeed) are: {', '.join(valid_countries)}"
+            f"Invalid country string: '{country_str}'. Valid countries are: {', '.join([country[0] for country in valid_countries])}"
         )
 
 
 class Location(BaseModel):
-    country: Country = None
+    country: Country | str | None = None
     city: Optional[str] = None
     state: Optional[str] = None
@@ -153,11 +179,19 @@ class Location(BaseModel):
             location_parts.append(self.city)
         if self.state:
             location_parts.append(self.state)
-        if self.country and self.country not in (Country.US_CANADA, Country.WORLDWIDE):
-            if self.country.value in ("usa", "uk"):
-                location_parts.append(self.country.value.upper())
+        if isinstance(self.country, str):
+            location_parts.append(self.country)
+        elif self.country and self.country not in (
+            Country.US_CANADA,
+            Country.WORLDWIDE,
+        ):
+            country_name = self.country.value[0]
+            if "," in country_name:
+                country_name = country_name.split(",")[0]
+            if country_name in ("usa", "uk"):
+                location_parts.append(country_name.upper())
             else:
-                location_parts.append(self.country.value.title())
+                location_parts.append(country_name.title())
         return ", ".join(location_parts)
@@ -168,43 +202,66 @@ class CompensationInterval(Enum):
     DAILY = "daily"
     HOURLY = "hourly"
 
+    @classmethod
+    def get_interval(cls, pay_period):
+        interval_mapping = {
+            "YEAR": cls.YEARLY,
+            "HOUR": cls.HOURLY,
+        }
+        if pay_period in interval_mapping:
+            return interval_mapping[pay_period].value
+        else:
+            return cls[pay_period].value if pay_period in cls.__members__ else None
+
 
 class Compensation(BaseModel):
-    interval: CompensationInterval
-    min_amount: int = None
-    max_amount: int = None
+    interval: Optional[CompensationInterval] = None
+    min_amount: float | None = None
+    max_amount: float | None = None
     currency: Optional[str] = "USD"
 
 
+class DescriptionFormat(Enum):
+    MARKDOWN = "markdown"
+    HTML = "html"
+
+
 class JobPost(BaseModel):
+    id: str | None = None
     title: str
-    company_name: str
+    company_name: str | None
     job_url: str
+    job_url_direct: str | None = None
     location: Optional[Location]
 
-    description: Optional[str] = None
-    job_type: Optional[JobType] = None
-    compensation: Optional[Compensation] = None
-    date_posted: Optional[date] = None
+    description: str | None = None
+    company_url: str | None = None
+    company_url_direct: str | None = None
+
+    job_type: list[JobType] | None = None
+    compensation: Compensation | None = None
+    date_posted: date | None = None
+    emails: list[str] | None = None
+    is_remote: bool | None = None
+    listing_type: str | None = None
+
+    # linkedin specific
+    job_level: str | None = None
+
+    # linkedin and indeed specific
+    company_industry: str | None = None
+
+    # indeed specific
+    company_addresses: str | None = None
+    company_num_employees: str | None = None
+    company_revenue: str | None = None
+    company_description: str | None = None
+    company_logo: str | None = None
+    banner_photo_url: str | None = None
+
+    # linkedin only atm
+    job_function: str | None = None
 
 
 class JobResponse(BaseModel):
-    success: bool
-    error: str = None
-
-    total_results: Optional[int] = None
-
     jobs: list[JobPost] = []
-
-    returned_results: int = None
-
-    @validator("returned_results", pre=True, always=True)
-    def set_returned_results(cls, v, values):
-        jobs_list = values.get("jobs")
-
-        if v is None:
-            if jobs_list is not None:
-                return len(jobs_list)
-            else:
-                return 0
-        return v
```
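The new `Country` tuples pack two pieces of information into one string with a `:` separator, and the properties above unpack them with `str.partition`. A small sketch of how the encoding resolves for a few entries (the encoded values are taken from the enum above; the helper function names are ours):

```python
def indeed_parts(encoded: str) -> tuple[str, str]:
    # Mirrors Country.indeed_domain_value: "uk:gb" -> ("uk", "GB"),
    # plain "de" -> ("de", "DE").
    subdomain, _, api_country_code = encoded.partition(":")
    if subdomain and api_country_code:
        return subdomain, api_country_code.upper()
    return encoded, encoded.upper()

def glassdoor_domain(encoded: str) -> str:
    # Mirrors Country.glassdoor_domain_value: "fr:be" -> "fr.glassdoor.be",
    # plain "co.uk" -> "www.glassdoor.co.uk".
    subdomain, _, domain = encoded.partition(":")
    if subdomain and domain:
        return f"{subdomain}.glassdoor.{domain}"
    return f"www.glassdoor.{encoded}"

print(indeed_parts("uk:gb"))      # ('uk', 'GB') -> uk.indeed.com, API code GB
print(indeed_parts("www:us"))     # ('www', 'US')
print(glassdoor_domain("fr:be"))  # fr.glassdoor.be (Belgium)
print(glassdoor_domain("co.uk"))  # www.glassdoor.co.uk
```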
### src/jobspy/scrapers/__init__.py

```diff
@@ -1,43 +1,58 @@
-from ..jobs import Enum, BaseModel, JobType, JobResponse, Country
-from typing import List, Optional, Any
+from __future__ import annotations
 
+from abc import ABC, abstractmethod
 
-class StatusException(Exception):
-    def __init__(self, status_code: int):
-        self.status_code = status_code
+from ..jobs import (
+    Enum,
+    BaseModel,
+    JobType,
+    JobResponse,
+    Country,
+    DescriptionFormat,
+)
 
 
 class Site(Enum):
     LINKEDIN = "linkedin"
     INDEED = "indeed"
     ZIP_RECRUITER = "zip_recruiter"
+    GLASSDOOR = "glassdoor"
+    GOOGLE = "google"
+    BAYT = "bayt"
+
+
+class SalarySource(Enum):
+    DIRECT_DATA = "direct_data"
+    DESCRIPTION = "description"
 
 
 class ScraperInput(BaseModel):
-    site_type: List[Site]
-    search_term: str
+    site_type: list[Site]
+    search_term: str | None = None
+    google_search_term: str | None = None
 
-    location: str = None
-    country: Optional[Country] = Country.USA
-    distance: Optional[int] = None
+    location: str | None = None
+    country: Country | None = Country.USA
+    distance: int | None = None
     is_remote: bool = False
-    job_type: Optional[JobType] = None
-    easy_apply: bool = None  # linkedin
+    job_type: JobType | None = None
+    easy_apply: bool | None = None
+    offset: int = 0
+    linkedin_fetch_description: bool = False
+    linkedin_company_ids: list[int] | None = None
+    description_format: DescriptionFormat | None = DescriptionFormat.MARKDOWN
 
     results_wanted: int = 15
+    hours_old: int | None = None
 
 
-class CommonResponse(BaseModel):
-    status: Optional[str]
-    error: Optional[str]
-    linkedin: Optional[Any] = None
-    indeed: Optional[Any] = None
-    zip_recruiter: Optional[Any] = None
-
-
-class Scraper:
-    def __init__(self, site: Site):
+class Scraper(ABC):
+    def __init__(
+        self, site: Site, proxies: list[str] | None = None, ca_cert: str | None = None
+    ):
         self.site = site
+        self.proxies = proxies
+        self.ca_cert = ca_cert
 
-    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
-        ...
+    @abstractmethod
+    def scrape(self, scraper_input: ScraperInput) -> JobResponse: ...
```
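With `Scraper` now an ABC, every board scraper must accept the proxy/cert arguments and implement `scrape`. A hypothetical minimal subclass showing the new contract — the class name, the `Site.INDEED` choice, and the empty response are illustrative only, and the import path assumes the installed `python-jobspy` package layout:

```python
from jobspy.scrapers import Scraper, ScraperInput, Site, JobResponse


class NoOpScraper(Scraper):
    """Hypothetical minimal subclass demonstrating the new ABC contract."""

    def __init__(self, proxies: list[str] | None = None, ca_cert: str | None = None):
        # The base __init__ now stores proxies and ca_cert for the session layer.
        super().__init__(Site.INDEED, proxies=proxies, ca_cert=ca_cert)

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        # @abstractmethod forces this override; return an empty JobResponse.
        return JobResponse(jobs=[])


scraper = NoOpScraper(proxies=["localhost"])
print(scraper.scrape(ScraperInput(site_type=[Site.INDEED])).jobs)  # []
```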
### src/jobspy/scrapers/bayt/__init__.py (new file, 145 lines)

```python
"""
jobspy.scrapers.bayt
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape Bayt.
"""

from __future__ import annotations

import random
import time

from bs4 import BeautifulSoup

from .. import Scraper, ScraperInput, Site
from ..utils import create_logger, create_session
from ...jobs import JobPost, JobResponse, Location, Country

log = create_logger("Bayt")


class BaytScraper(Scraper):
    base_url = "https://www.bayt.com"
    delay = 2
    band_delay = 3

    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        super().__init__(Site.BAYT, proxies=proxies, ca_cert=ca_cert)
        self.scraper_input = None
        self.session = None
        self.country = "worldwide"

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        self.scraper_input = scraper_input
        self.session = create_session(
            proxies=self.proxies, ca_cert=self.ca_cert, is_tls=False, has_retry=True
        )
        job_list: list[JobPost] = []
        page = 1
        results_wanted = (
            scraper_input.results_wanted if scraper_input.results_wanted else 10
        )

        while len(job_list) < results_wanted:
            log.info(f"Fetching Bayt jobs page {page}")
            job_elements = self._fetch_jobs(self.scraper_input.search_term, page)
            if not job_elements:
                break

            if job_elements:
                log.debug(
                    "First job element snippet:\n" + job_elements[0].prettify()[:500]
                )

            initial_count = len(job_list)
            for job in job_elements:
                try:
                    job_post = self._extract_job_info(job)
                    if job_post:
                        job_list.append(job_post)
                        if len(job_list) >= results_wanted:
                            break
                    else:
                        log.debug(
                            "Extraction returned None. Job snippet:\n"
                            + job.prettify()[:500]
                        )
                except Exception as e:
                    log.error(f"Bayt: Error extracting job info: {str(e)}")
                    continue

            if len(job_list) == initial_count:
                log.info(f"No new jobs found on page {page}. Ending pagination.")
                break

            page += 1
            time.sleep(random.uniform(self.delay, self.delay + self.band_delay))

        job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list)

    def _fetch_jobs(self, query: str, page: int) -> list | None:
        """
        Grabs the job results for the given query and page number.
        """
        try:
            url = f"{self.base_url}/en/international/jobs/{query}-jobs/?page={page}"
            response = self.session.get(url)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")
            job_listings = soup.find_all("li", attrs={"data-js-job": ""})
            log.debug(f"Found {len(job_listings)} job listing elements")
            return job_listings
        except Exception as e:
            log.error(f"Bayt: Error fetching jobs - {str(e)}")
            return None

    def _extract_job_info(self, job: BeautifulSoup) -> JobPost | None:
        """
        Extracts the job information from a single job listing.
        """
        # Find the h2 element holding the title and link (no class filtering)
        job_general_information = job.find("h2")
        if not job_general_information:
            return

        job_title = job_general_information.get_text(strip=True)
        job_url = self._extract_job_url(job_general_information)
        if not job_url:
            return

        # Extract company name using the original approach:
        company_tag = job.find("div", class_="t-nowrap p10l")
        company_name = (
            company_tag.find("span").get_text(strip=True)
            if company_tag and company_tag.find("span")
            else None
        )

        # Extract location using the original approach:
        location_tag = job.find("div", class_="t-mute t-small")
        location = location_tag.get_text(strip=True) if location_tag else None

        job_id = f"bayt-{abs(hash(job_url))}"
        location_obj = Location(
            city=location,
            country=Country.from_string(self.country),
        )
        return JobPost(
            id=job_id,
            title=job_title,
            company_name=company_name,
            location=location_obj,
            job_url=job_url,
        )

    def _extract_job_url(self, job_general_information: BeautifulSoup) -> str | None:
        """
        Pulls the job URL from the 'a' within the h2 element.
        """
        a_tag = job_general_information.find("a")
        if a_tag and a_tag.has_attr("href"):
            return self.base_url + a_tag["href"].strip()
```
|
||||||
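A minimal usage sketch for the scraper above (the site_type field name is assumed from the ScraperInput model defined earlier in the package):

    scraper = BaytScraper()
    response = scraper.scrape(
        ScraperInput(site_type=[Site.BAYT], search_term="python", results_wanted=5)  # site_type assumed
    )
    for job in response.jobs:
        print(job.title, "-", job.job_url)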
src/jobspy/scrapers/exceptions.py (new file, 36 lines)
@@ -0,0 +1,36 @@

"""
jobspy.scrapers.exceptions
~~~~~~~~~~~~~~~~~~~

This module contains the set of Scrapers' exceptions.
"""


class LinkedInException(Exception):
    def __init__(self, message=None):
        super().__init__(message or "An error occurred with LinkedIn")


class IndeedException(Exception):
    def __init__(self, message=None):
        super().__init__(message or "An error occurred with Indeed")


class ZipRecruiterException(Exception):
    def __init__(self, message=None):
        super().__init__(message or "An error occurred with ZipRecruiter")


class GlassdoorException(Exception):
    def __init__(self, message=None):
        super().__init__(message or "An error occurred with Glassdoor")


class GoogleJobsException(Exception):
    def __init__(self, message=None):
        super().__init__(message or "An error occurred with Google Jobs")


class BaytException(Exception):
    def __init__(self, message=None):
        super().__init__(message or "An error occurred with Bayt")
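Illustrative only: each exception carries a per-site default message, so calling code can raise and report them without composing one:

    try:
        raise BaytException()
    except BaytException as e:
        print(e)  # -> An error occurred with Bayt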
src/jobspy/scrapers/glassdoor/__init__.py (new file, 364 lines)
@@ -0,0 +1,364 @@

"""
jobspy.scrapers.glassdoor
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape Glassdoor.
"""

from __future__ import annotations

import re
import json
import requests
from typing import Optional, Tuple
from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor, as_completed

from .constants import fallback_token, query_template, headers
from .. import Scraper, ScraperInput, Site
from ..utils import extract_emails_from_text, create_logger
from ..exceptions import GlassdoorException
from ..utils import (
    create_session,
    markdown_converter,
)
from ...jobs import (
    JobPost,
    Compensation,
    CompensationInterval,
    Location,
    JobResponse,
    JobType,
    DescriptionFormat,
)

log = create_logger("Glassdoor")


class GlassdoorScraper(Scraper):
    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes GlassdoorScraper with the Glassdoor job search url
        """
        site = Site(Site.GLASSDOOR)
        super().__init__(site, proxies=proxies, ca_cert=ca_cert)

        self.base_url = None
        self.country = None
        self.session = None
        self.scraper_input = None
        self.jobs_per_page = 30
        self.max_pages = 30
        self.seen_urls = set()

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes Glassdoor for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        self.scraper_input = scraper_input
        self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)
        self.base_url = self.scraper_input.country.get_glassdoor_url()

        self.session = create_session(
            proxies=self.proxies, ca_cert=self.ca_cert, has_retry=True
        )
        token = self._get_csrf_token()
        headers["gd-csrf-token"] = token if token else fallback_token
        self.session.headers.update(headers)

        location_id, location_type = self._get_location(
            scraper_input.location, scraper_input.is_remote
        )
        if location_type is None:
            log.error("Glassdoor: location not parsed")
            return JobResponse(jobs=[])
        job_list: list[JobPost] = []
        cursor = None

        range_start = 1 + (scraper_input.offset // self.jobs_per_page)
        tot_pages = (scraper_input.results_wanted // self.jobs_per_page) + 2
        range_end = min(tot_pages, self.max_pages + 1)
        for page in range(range_start, range_end):
            log.info(f"search page: {page} / {range_end - 1}")
            try:
                jobs, cursor = self._fetch_jobs_page(
                    scraper_input, location_id, location_type, page, cursor
                )
                job_list.extend(jobs)
                if not jobs or len(job_list) >= scraper_input.results_wanted:
                    job_list = job_list[: scraper_input.results_wanted]
                    break
            except Exception as e:
                log.error(f"Glassdoor: {str(e)}")
                break
        return JobResponse(jobs=job_list)

    def _fetch_jobs_page(
        self,
        scraper_input: ScraperInput,
        location_id: int,
        location_type: str,
        page_num: int,
        cursor: str | None,
    ) -> Tuple[list[JobPost], str | None]:
        """
        Scrapes a page of Glassdoor for jobs with scraper_input criteria
        """
        jobs = []
        self.scraper_input = scraper_input
        try:
            payload = self._add_payload(location_id, location_type, page_num, cursor)
            response = self.session.post(
                f"{self.base_url}/graph",
                timeout_seconds=15,
                data=payload,
            )
            if response.status_code != 200:
                exc_msg = f"bad response status code: {response.status_code}"
                raise GlassdoorException(exc_msg)
            res_json = response.json()[0]
            if "errors" in res_json:
                raise ValueError("Error encountered in API response")
        except (
            requests.exceptions.ReadTimeout,
            GlassdoorException,
            ValueError,
            Exception,
        ) as e:
            log.error(f"Glassdoor: {str(e)}")
            return jobs, None

        jobs_data = res_json["data"]["jobListings"]["jobListings"]

        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
            future_to_job_data = {
                executor.submit(self._process_job, job): job for job in jobs_data
            }
            for future in as_completed(future_to_job_data):
                try:
                    job_post = future.result()
                    if job_post:
                        jobs.append(job_post)
                except Exception as exc:
                    raise GlassdoorException(f"Glassdoor generated an exception: {exc}")

        return jobs, self.get_cursor_for_page(
            res_json["data"]["jobListings"]["paginationCursors"], page_num + 1
        )

    def _get_csrf_token(self):
        """
        Fetches csrf token needed for API by visiting a generic page
        """
        res = self.session.get(f"{self.base_url}/Job/computer-science-jobs.htm")
        pattern = r'"token":\s*"([^"]+)"'
        matches = re.findall(pattern, res.text)
        token = None
        if matches:
            token = matches[0]
        return token

    def _process_job(self, job_data):
        """
        Processes a single job and fetches its description.
        """
        job_id = job_data["jobview"]["job"]["listingId"]
        job_url = f"{self.base_url}job-listing/j?jl={job_id}"
        if job_url in self.seen_urls:
            return None
        self.seen_urls.add(job_url)
        job = job_data["jobview"]
        title = job["job"]["jobTitleText"]
        company_name = job["header"]["employerNameFromSearch"]
        company_id = job_data["jobview"]["header"]["employer"]["id"]
        location_name = job["header"].get("locationName", "")
        location_type = job["header"].get("locationType", "")
        age_in_days = job["header"].get("ageInDays")
        is_remote, location = False, None
        date_diff = (datetime.now() - timedelta(days=age_in_days)).date()
        date_posted = date_diff if age_in_days is not None else None

        if location_type == "S":
            is_remote = True
        else:
            location = self.parse_location(location_name)

        compensation = self.parse_compensation(job["header"])
        try:
            description = self._fetch_job_description(job_id)
        except:
            description = None
        company_url = f"{self.base_url}Overview/W-EI_IE{company_id}.htm"
        company_logo = (
            job_data["jobview"].get("overview", {}).get("squareLogoUrl", None)
        )
        listing_type = (
            job_data["jobview"]
            .get("header", {})
            .get("adOrderSponsorshipLevel", "")
            .lower()
        )
        return JobPost(
            id=f"gd-{job_id}",
            title=title,
            company_url=company_url if company_id else None,
            company_name=company_name,
            date_posted=date_posted,
            job_url=job_url,
            location=location,
            compensation=compensation,
            is_remote=is_remote,
            description=description,
            emails=extract_emails_from_text(description) if description else None,
            company_logo=company_logo,
            listing_type=listing_type,
        )

    def _fetch_job_description(self, job_id):
        """
        Fetches the job description for a single job ID.
        """
        url = f"{self.base_url}/graph"
        body = [
            {
                "operationName": "JobDetailQuery",
                "variables": {
                    "jl": job_id,
                    "queryString": "q",
                    "pageTypeEnum": "SERP",
                },
                "query": """
                query JobDetailQuery($jl: Long!, $queryString: String, $pageTypeEnum: PageTypeEnum) {
                    jobview: jobView(
                        listingId: $jl
                        contextHolder: {queryString: $queryString, pageTypeEnum: $pageTypeEnum}
                    ) {
                        job {
                            description
                            __typename
                        }
                        __typename
                    }
                }
                """,
            }
        ]
        res = requests.post(url, json=body, headers=headers)
        if res.status_code != 200:
            return None
        data = res.json()[0]
        desc = data["data"]["jobview"]["job"]["description"]
        if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
            desc = markdown_converter(desc)
        return desc

    def _get_location(self, location: str, is_remote: bool) -> (int, str):
        if not location or is_remote:
            return "11047", "STATE"  # remote options
        url = f"{self.base_url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
        res = self.session.get(url)
        if res.status_code != 200:
            if res.status_code == 429:
                err = f"429 Response - Blocked by Glassdoor for too many requests"
                log.error(err)
                return None, None
            else:
                err = f"Glassdoor response status code {res.status_code}"
                err += f" - {res.text}"
                log.error(f"Glassdoor response status code {res.status_code}")
                return None, None
        items = res.json()

        if not items:
            raise ValueError(f"Location '{location}' not found on Glassdoor")
        location_type = items[0]["locationType"]
        if location_type == "C":
            location_type = "CITY"
        elif location_type == "S":
            location_type = "STATE"
        elif location_type == "N":
            location_type = "COUNTRY"
        return int(items[0]["locationId"]), location_type

    def _add_payload(
        self,
        location_id: int,
        location_type: str,
        page_num: int,
        cursor: str | None = None,
    ) -> str:
        fromage = None
        if self.scraper_input.hours_old:
            fromage = max(self.scraper_input.hours_old // 24, 1)
        filter_params = []
        if self.scraper_input.easy_apply:
            filter_params.append({"filterKey": "applicationType", "values": "1"})
        if fromage:
            filter_params.append({"filterKey": "fromAge", "values": str(fromage)})
        payload = {
            "operationName": "JobSearchResultsQuery",
            "variables": {
                "excludeJobListingIds": [],
                "filterParams": filter_params,
                "keyword": self.scraper_input.search_term,
                "numJobsToShow": 30,
                "locationType": location_type,
                "locationId": int(location_id),
                "parameterUrlInput": f"IL.0,12_I{location_type}{location_id}",
                "pageNumber": page_num,
                "pageCursor": cursor,
                "fromage": fromage,
                "sort": "date",
            },
            "query": query_template,
        }
        if self.scraper_input.job_type:
            payload["variables"]["filterParams"].append(
                {"filterKey": "jobType", "values": self.scraper_input.job_type.value[0]}
            )
        return json.dumps([payload])

    @staticmethod
    def parse_compensation(data: dict) -> Optional[Compensation]:
        pay_period = data.get("payPeriod")
        adjusted_pay = data.get("payPeriodAdjustedPay")
        currency = data.get("payCurrency", "USD")
        if not pay_period or not adjusted_pay:
            return None

        interval = None
        if pay_period == "ANNUAL":
            interval = CompensationInterval.YEARLY
        elif pay_period:
            interval = CompensationInterval.get_interval(pay_period)
        min_amount = int(adjusted_pay.get("p10") // 1)
        max_amount = int(adjusted_pay.get("p90") // 1)
        return Compensation(
            interval=interval,
            min_amount=min_amount,
            max_amount=max_amount,
            currency=currency,
        )

    @staticmethod
    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
        for job_type in JobType:
            if job_type_str in job_type.value:
                return [job_type]

    @staticmethod
    def parse_location(location_name: str) -> Location | None:
        if not location_name or location_name == "Remote":
            return
        city, _, state = location_name.partition(", ")
        return Location(city=city, state=state)

    @staticmethod
    def get_cursor_for_page(pagination_cursors, page_num):
        for cursor_data in pagination_cursors:
            if cursor_data["pageNumber"] == page_num:
                return cursor_data["cursor"]
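A small worked example (made-up data) of the cursor lookup that drives the pagination above: get_cursor_for_page scans the paginationCursors array returned by the graph endpoint for the entry matching the requested page number.

    cursors = [
        {"pageNumber": 2, "cursor": "abc"},
        {"pageNumber": 3, "cursor": "def"},
    ]
    print(GlassdoorScraper.get_cursor_for_page(cursors, 2))  # -> abc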
src/jobspy/scrapers/glassdoor/constants.py (new file, 184 lines)
@@ -0,0 +1,184 @@

headers = {
    "authority": "www.glassdoor.com",
    "accept": "*/*",
    "accept-language": "en-US,en;q=0.9",
    "apollographql-client-name": "job-search-next",
    "apollographql-client-version": "4.65.5",
    "content-type": "application/json",
    "origin": "https://www.glassdoor.com",
    "referer": "https://www.glassdoor.com/",
    "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"macOS"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
}
query_template = """
query JobSearchResultsQuery(
    $excludeJobListingIds: [Long!],
    $keyword: String,
    $locationId: Int,
    $locationType: LocationTypeEnum,
    $numJobsToShow: Int!,
    $pageCursor: String,
    $pageNumber: Int,
    $filterParams: [FilterParams],
    $originalPageUrl: String,
    $seoFriendlyUrlInput: String,
    $parameterUrlInput: String,
    $seoUrl: Boolean
) {
    jobListings(
        contextHolder: {
            searchParams: {
                excludeJobListingIds: $excludeJobListingIds,
                keyword: $keyword,
                locationId: $locationId,
                locationType: $locationType,
                numPerPage: $numJobsToShow,
                pageCursor: $pageCursor,
                pageNumber: $pageNumber,
                filterParams: $filterParams,
                originalPageUrl: $originalPageUrl,
                seoFriendlyUrlInput: $seoFriendlyUrlInput,
                parameterUrlInput: $parameterUrlInput,
                seoUrl: $seoUrl,
                searchType: SR
            }
        }
    ) {
        companyFilterOptions {
            id
            shortName
            __typename
        }
        filterOptions
        indeedCtk
        jobListings {
            ...JobView
            __typename
        }
        jobListingSeoLinks {
            linkItems {
                position
                url
                __typename
            }
            __typename
        }
        jobSearchTrackingKey
        jobsPageSeoData {
            pageMetaDescription
            pageTitle
            __typename
        }
        paginationCursors {
            cursor
            pageNumber
            __typename
        }
        indexablePageForSeo
        searchResultsMetadata {
            searchCriteria {
                implicitLocation {
                    id
                    localizedDisplayName
                    type
                    __typename
                }
                keyword
                location {
                    id
                    shortName
                    localizedShortName
                    localizedDisplayName
                    type
                    __typename
                }
                __typename
            }
            helpCenterDomain
            helpCenterLocale
            jobSerpJobOutlook {
                occupation
                paragraph
                __typename
            }
            showMachineReadableJobs
            __typename
        }
        totalJobsCount
        __typename
    }
}

fragment JobView on JobListingSearchResult {
    jobview {
        header {
            adOrderId
            advertiserType
            adOrderSponsorshipLevel
            ageInDays
            divisionEmployerName
            easyApply
            employer {
                id
                name
                shortName
                __typename
            }
            employerNameFromSearch
            goc
            gocConfidence
            gocId
            jobCountryId
            jobLink
            jobResultTrackingKey
            jobTitleText
            locationName
            locationType
            locId
            needsCommission
            payCurrency
            payPeriod
            payPeriodAdjustedPay {
                p10
                p50
                p90
                __typename
            }
            rating
            salarySource
            savedJobId
            sponsored
            __typename
        }
        job {
            description
            importConfigId
            jobTitleId
            jobTitleText
            listingId
            __typename
        }
        jobListingAdminDetails {
            cpcVal
            importConfigId
            jobListingId
            jobSourceId
            userEligibleForAdminJobDetails
            __typename
        }
        overview {
            shortName
            squareLogoUrl
            __typename
        }
        __typename
    }
    __typename
}
"""
fallback_token = "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok"
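A sketch (assumed values) of how these constants combine into the request body that _add_payload builds in glassdoor/__init__.py: query_template is sent as the GraphQL query string, and the CSRF token (or fallback_token) is merged into the shared headers.

    import json

    payload = json.dumps([{
        "operationName": "JobSearchResultsQuery",
        "variables": {"keyword": "python", "numJobsToShow": 30, "pageNumber": 1},  # illustrative subset
        "query": query_template,
    }])
    request_headers = {**headers, "gd-csrf-token": fallback_token}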
src/jobspy/scrapers/google/__init__.py (new file, 247 lines)
@@ -0,0 +1,247 @@

"""
jobspy.scrapers.google
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape Google.
"""

from __future__ import annotations

import math
import re
import json
from typing import Tuple
from datetime import datetime, timedelta

from .constants import headers_jobs, headers_initial, async_param
from .. import Scraper, ScraperInput, Site
from ..utils import extract_emails_from_text, create_logger, extract_job_type
from ..utils import (
    create_session,
)
from ...jobs import (
    JobPost,
    JobResponse,
    Location,
    JobType,
)

log = create_logger("Google")


class GoogleJobsScraper(Scraper):
    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes Google Scraper with the Google jobs search url
        """
        site = Site(Site.GOOGLE)
        super().__init__(site, proxies=proxies, ca_cert=ca_cert)

        self.country = None
        self.session = None
        self.scraper_input = None
        self.jobs_per_page = 10
        self.seen_urls = set()
        self.url = "https://www.google.com/search"
        self.jobs_url = "https://www.google.com/async/callback:550"

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes Google for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        self.scraper_input = scraper_input
        self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)

        self.session = create_session(
            proxies=self.proxies, ca_cert=self.ca_cert, is_tls=False, has_retry=True
        )
        forward_cursor, job_list = self._get_initial_cursor_and_jobs()
        if forward_cursor is None:
            log.warning(
                "initial cursor not found; try changing the query, or there were at most 10 results"
            )
            return JobResponse(jobs=job_list)

        page = 1

        while (
            len(self.seen_urls) < scraper_input.results_wanted + scraper_input.offset
            and forward_cursor
        ):
            log.info(
                f"search page: {page} / {math.ceil(scraper_input.results_wanted / self.jobs_per_page)}"
            )
            try:
                jobs, forward_cursor = self._get_jobs_next_page(forward_cursor)
            except Exception as e:
                log.error(f"failed to get jobs on page: {page}, {e}")
                break
            if not jobs:
                log.info(f"found no jobs on page: {page}")
                break
            job_list += jobs
            page += 1
        return JobResponse(
            jobs=job_list[
                scraper_input.offset : scraper_input.offset
                + scraper_input.results_wanted
            ]
        )

    def _get_initial_cursor_and_jobs(self) -> Tuple[str, list[JobPost]]:
        """Gets initial cursor and jobs to paginate through job listings"""
        query = f"{self.scraper_input.search_term} jobs"

        def get_time_range(hours_old):
            if hours_old <= 24:
                return "since yesterday"
            elif hours_old <= 72:
                return "in the last 3 days"
            elif hours_old <= 168:
                return "in the last week"
            else:
                return "in the last month"

        job_type_mapping = {
            JobType.FULL_TIME: "Full time",
            JobType.PART_TIME: "Part time",
            JobType.INTERNSHIP: "Internship",
            JobType.CONTRACT: "Contract",
        }

        if self.scraper_input.job_type in job_type_mapping:
            query += f" {job_type_mapping[self.scraper_input.job_type]}"

        if self.scraper_input.location:
            query += f" near {self.scraper_input.location}"

        if self.scraper_input.hours_old:
            time_filter = get_time_range(self.scraper_input.hours_old)
            query += f" {time_filter}"

        if self.scraper_input.is_remote:
            query += " remote"

        if self.scraper_input.google_search_term:
            query = self.scraper_input.google_search_term

        params = {"q": query, "udm": "8"}
        response = self.session.get(self.url, headers=headers_initial, params=params)

        pattern_fc = r'<div jsname="Yust4d"[^>]+data-async-fc="([^"]+)"'
        match_fc = re.search(pattern_fc, response.text)
        data_async_fc = match_fc.group(1) if match_fc else None
        jobs_raw = self._find_job_info_initial_page(response.text)
        jobs = []
        for job_raw in jobs_raw:
            job_post = self._parse_job(job_raw)
            if job_post:
                jobs.append(job_post)
        return data_async_fc, jobs

    def _get_jobs_next_page(self, forward_cursor: str) -> Tuple[list[JobPost], str]:
        params = {"fc": [forward_cursor], "fcv": ["3"], "async": [async_param]}
        response = self.session.get(self.jobs_url, headers=headers_jobs, params=params)
        return self._parse_jobs(response.text)

    def _parse_jobs(self, job_data: str) -> Tuple[list[JobPost], str]:
        """
        Parses jobs on a page with next page cursor
        """
        start_idx = job_data.find("[[[")
        end_idx = job_data.rindex("]]]") + 3
        s = job_data[start_idx:end_idx]
        parsed = json.loads(s)[0]

        pattern_fc = r'data-async-fc="([^"]+)"'
        match_fc = re.search(pattern_fc, job_data)
        data_async_fc = match_fc.group(1) if match_fc else None
        jobs_on_page = []
        for array in parsed:
            _, job_data = array
            if not job_data.startswith("[[["):
                continue
            job_d = json.loads(job_data)

            job_info = self._find_job_info(job_d)
            job_post = self._parse_job(job_info)
            if job_post:
                jobs_on_page.append(job_post)
        return jobs_on_page, data_async_fc

    def _parse_job(self, job_info: list):
        job_url = job_info[3][0][0] if job_info[3] and job_info[3][0] else None
        if job_url in self.seen_urls:
            return
        self.seen_urls.add(job_url)

        title = job_info[0]
        company_name = job_info[1]
        location = city = job_info[2]
        state = country = date_posted = None
        if location and "," in location:
            city, state, *country = [*map(lambda x: x.strip(), location.split(","))]

        days_ago_str = job_info[12]
        if type(days_ago_str) == str:
            match = re.search(r"\d+", days_ago_str)
            days_ago = int(match.group()) if match else None
            date_posted = (datetime.now() - timedelta(days=days_ago)).date()

        description = job_info[19]

        job_post = JobPost(
            id=f"go-{job_info[28]}",
            title=title,
            company_name=company_name,
            location=Location(
                city=city, state=state, country=country[0] if country else None
            ),
            job_url=job_url,
            date_posted=date_posted,
            is_remote="remote" in description.lower() or "wfh" in description.lower(),
            description=description,
            emails=extract_emails_from_text(description),
            job_type=extract_job_type(description),
        )
        return job_post

    @staticmethod
    def _find_job_info(jobs_data: list | dict) -> list | None:
        """Iterates through the JSON data to find the job listings"""
        if isinstance(jobs_data, dict):
            for key, value in jobs_data.items():
                if key == "520084652" and isinstance(value, list):
                    return value
                else:
                    result = GoogleJobsScraper._find_job_info(value)
                    if result:
                        return result
        elif isinstance(jobs_data, list):
            for item in jobs_data:
                result = GoogleJobsScraper._find_job_info(item)
                if result:
                    return result
        return None

    @staticmethod
    def _find_job_info_initial_page(html_text: str):
        pattern = f'520084652":(' + r"\[.*?\]\s*])\s*}\s*]\s*]\s*]\s*]\s*]"
        results = []
        matches = re.finditer(pattern, html_text)

        import json

        for match in matches:
            try:
                parsed_data = json.loads(match.group(1))
                results.append(parsed_data)

            except json.JSONDecodeError as e:
                log.error(f"Failed to parse match: {str(e)}")
                results.append({"raw_match": match.group(0), "error": str(e)})
        return results
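A toy demonstration (made-up data) of the recursive key search in _find_job_info: it walks arbitrarily nested lists and dicts until it finds the "520084652" key that Google's payload uses for job fields.

    sample = [{"x": 1}, [{"520084652": ["job-fields-here"]}]]
    print(GoogleJobsScraper._find_job_info(sample))  # -> ['job-fields-here']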
src/jobspy/scrapers/google/constants.py (new file, 52 lines)
@@ -0,0 +1,52 @@

headers_initial = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "priority": "u=0, i",
    "referer": "https://www.google.com/",
    "sec-ch-prefers-color-scheme": "dark",
    "sec-ch-ua": '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
    "sec-ch-ua-arch": '"arm"',
    "sec-ch-ua-bitness": '"64"',
    "sec-ch-ua-form-factors": '"Desktop"',
    "sec-ch-ua-full-version": '"130.0.6723.58"',
    "sec-ch-ua-full-version-list": '"Chromium";v="130.0.6723.58", "Google Chrome";v="130.0.6723.58", "Not?A_Brand";v="99.0.0.0"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-model": '""',
    "sec-ch-ua-platform": '"macOS"',
    "sec-ch-ua-platform-version": '"15.0.1"',
    "sec-ch-ua-wow64": "?0",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
    "x-browser-channel": "stable",
    "x-browser-copyright": "Copyright 2024 Google LLC. All rights reserved.",
    "x-browser-year": "2024",
}

headers_jobs = {
    "accept": "*/*",
    "accept-language": "en-US,en;q=0.9",
    "priority": "u=1, i",
    "referer": "https://www.google.com/",
    "sec-ch-prefers-color-scheme": "dark",
    "sec-ch-ua": '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
    "sec-ch-ua-arch": '"arm"',
    "sec-ch-ua-bitness": '"64"',
    "sec-ch-ua-form-factors": '"Desktop"',
    "sec-ch-ua-full-version": '"130.0.6723.58"',
    "sec-ch-ua-full-version-list": '"Chromium";v="130.0.6723.58", "Google Chrome";v="130.0.6723.58", "Not?A_Brand";v="99.0.0.0"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-model": '""',
    "sec-ch-ua-platform": '"macOS"',
    "sec-ch-ua-platform-version": '"15.0.1"',
    "sec-ch-ua-wow64": "?0",
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
}

async_param = "_basejs:/xjs/_/js/k=xjs.s.en_US.JwveA-JiKmg.2018.O/am=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAACAAAoICAAAAAAAKMAfAAAAIAQAAAAAAAAAAAAACCAAAEJDAAACAAAAAGABAIAAARBAAABAAAAAgAgQAABAASKAfv8JAAABAAAAAAwAQAQACQAAAAAAcAEAQABoCAAAABAAAIABAACAAAAEAAAAFAAAAAAAAAAAAAAAAAAAAAAAAACAQADoBwAAAAAAAAAAAAAQBAAAAATQAAoACOAHAAAAAAAAAQAAAIIAAAA_ZAACAAAAAAAAcB8APB4wHFJ4AAAAAAAAAAAAAAAACECCYA5If0EACAAAAAAAAAAAAAAAAAAAUgRNXG4AMAE/dg=0/br=1/rs=ACT90oGxMeaFMCopIHq5tuQM-6_3M_VMjQ,_basecss:/xjs/_/ss/k=xjs.s.IwsGu62EDtU.L.B1.O/am=QOoQIAQAAAQAREADEBAAAAAAAAAAAAAAAAAAAAAgAQAAIAAAgAQAAAIAIAIAoEwCAADIC8AfsgEAawwAPkAAjgoAGAAAAAAAAEADAAAAAAIgAECHAAAAAAAAAAABAQAggAARQAAAQCEAAAAAIAAAABgAAAAAIAQIACCAAfB-AAFIQABoCEA_CgEAAIABAACEgHAEwwAEFQAM4CgAAAAAAAAAAAAACABCAAAAQEAAABAgAMCPAAA4AoE2BAEAggSAAIoAQAAAAAgAAAAACCAQAAAxEwA_ZAACAAAAAAAAAAkAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAQAEAAAAAAAAAAAAAAAAAAAAAQA/br=1/rs=ACT90oGZc36t3uUQkj0srnIvvbHjO2hgyg,_basecomb:/xjs/_/js/k=xjs.s.en_US.JwveA-JiKmg.2018.O/ck=xjs.s.IwsGu62EDtU.L.B1.O/am=QOoQIAQAAAQAREADEBAAAAAAAAAAAAAAAAAAAAAgAQAAIAAAgAQAAAKAIAoIqEwCAADIK8AfsgEAawwAPkAAjgoAGAAACCAAAEJDAAACAAIgAGCHAIAAARBAAABBAQAggAgRQABAQSOAfv8JIAABABgAAAwAYAQICSCAAfB-cAFIQABoCEA_ChEAAIABAACEgHAEwwAEFQAM4CgAAAAAAAAAAAAACABCAACAQEDoBxAgAMCPAAA4AoE2BAEAggTQAIoASOAHAAgAAAAACSAQAIIxEwA_ZAACAAAAAAAAcB8APB4wHFJ4AAAAAAAAAAAAAAAACECCYA5If0EACAAAAAAAAAAAAAAAAAAAUgRNXG4AMAE/d=1/ed=1/dg=0/br=1/ujg=1/rs=ACT90oFNLTjPzD_OAqhhtXwe2pg1T3WpBg,_fmt:prog,_id:fc_5FwaZ86OKsfdwN4P4La3yA4_2"
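A sketch (assumed query string) of the initial request these headers are built for, as issued by GoogleJobsScraper: udm=8 selects the jobs vertical on google.com/search, and the response HTML carries the first data-async-fc cursor.

    import requests

    resp = requests.get(
        "https://www.google.com/search",
        headers=headers_initial,
        params={"q": "software engineer jobs", "udm": "8"},
    )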
@@ -1,17 +1,25 @@
|
|||||||
import re
|
"""
|
||||||
|
jobspy.scrapers.indeed
|
||||||
|
~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
This module contains routines to scrape Indeed.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
import math
|
import math
|
||||||
import io
|
from typing import Tuple
|
||||||
import json
|
|
||||||
import traceback
|
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from typing import Optional
|
|
||||||
|
|
||||||
import tls_client
|
|
||||||
import urllib.parse
|
|
||||||
from bs4 import BeautifulSoup
|
|
||||||
from bs4.element import Tag
|
|
||||||
from concurrent.futures import ThreadPoolExecutor, Future
|
|
||||||
|
|
||||||
|
from .constants import job_search_query, api_headers
|
||||||
|
from .. import Scraper, ScraperInput, Site
|
||||||
|
from ..utils import (
|
||||||
|
extract_emails_from_text,
|
||||||
|
get_enum_from_job_type,
|
||||||
|
markdown_converter,
|
||||||
|
create_session,
|
||||||
|
create_logger,
|
||||||
|
)
|
||||||
from ...jobs import (
|
from ...jobs import (
|
||||||
JobPost,
|
JobPost,
|
||||||
Compensation,
|
Compensation,
|
||||||
@@ -19,143 +27,32 @@ from ...jobs import (
|
|||||||
Location,
|
Location,
|
||||||
JobResponse,
|
JobResponse,
|
||||||
JobType,
|
JobType,
|
||||||
|
DescriptionFormat,
|
||||||
)
|
)
|
||||||
from .. import Scraper, ScraperInput, Site, Country, StatusException
|
|
||||||
|
|
||||||
|
log = create_logger("Indeed")
|
||||||
class ParsingException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class IndeedScraper(Scraper):
|
class IndeedScraper(Scraper):
|
||||||
def __init__(self):
|
def __init__(
|
||||||
|
self, proxies: list[str] | str | None = None, ca_cert: str | None = None
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
Initializes IndeedScraper with the Indeed job search url
|
Initializes IndeedScraper with the Indeed API url
|
||||||
"""
|
"""
|
||||||
site = Site(Site.INDEED)
|
super().__init__(Site.INDEED, proxies=proxies)
|
||||||
super().__init__(site)
|
|
||||||
|
|
||||||
self.jobs_per_page = 15
|
self.session = create_session(
|
||||||
|
proxies=self.proxies, ca_cert=ca_cert, is_tls=False
|
||||||
|
)
|
||||||
|
self.scraper_input = None
|
||||||
|
self.jobs_per_page = 100
|
||||||
|
self.num_workers = 10
|
||||||
self.seen_urls = set()
|
self.seen_urls = set()
|
||||||
|
self.headers = None
|
||||||
def scrape_page(
|
self.api_country_code = None
|
||||||
self, scraper_input: ScraperInput, page: int, session: tls_client.Session
|
self.base_url = None
|
||||||
) -> tuple[list[JobPost], int]:
|
self.api_url = "https://apis.indeed.com/graphql"
|
||||||
"""
|
|
||||||
Scrapes a page of Indeed for jobs with scraper_input criteria
|
|
||||||
:param scraper_input:
|
|
||||||
:param page:
|
|
||||||
:param session:
|
|
||||||
:return: jobs found on page, total number of jobs found for search
|
|
||||||
"""
|
|
||||||
self.country = scraper_input.country
|
|
||||||
domain = self.country.domain_value
|
|
||||||
self.url = f"https://{domain}.indeed.com"
|
|
||||||
|
|
||||||
job_list = []
|
|
||||||
|
|
||||||
params = {
|
|
||||||
"q": scraper_input.search_term,
|
|
||||||
"l": scraper_input.location,
|
|
||||||
"filter": 0,
|
|
||||||
"start": 0 + page * 10,
|
|
||||||
}
|
|
||||||
if scraper_input.distance:
|
|
||||||
params["radius"] = scraper_input.distance
|
|
||||||
|
|
||||||
sc_values = []
|
|
||||||
if scraper_input.is_remote:
|
|
||||||
sc_values.append("attr(DSQF7)")
|
|
||||||
if scraper_input.job_type:
|
|
||||||
sc_values.append("jt({})".format(scraper_input.job_type.value))
|
|
||||||
|
|
||||||
if sc_values:
|
|
||||||
params["sc"] = "0kf:" + "".join(sc_values) + ";"
|
|
||||||
response = session.get(self.url + "/jobs", params=params, allow_redirects=True)
|
|
||||||
# print(response.status_code)
|
|
||||||
|
|
||||||
if response.status_code not in range(200, 400):
|
|
||||||
raise StatusException(response.status_code)
|
|
||||||
|
|
||||||
soup = BeautifulSoup(response.content, "html.parser")
|
|
||||||
with open("text2.html", "w", encoding="utf-8") as f:
|
|
||||||
f.write(str(soup))
|
|
||||||
if "did not match any jobs" in str(soup):
|
|
||||||
raise ParsingException("Search did not match any jobs")
|
|
||||||
|
|
||||||
jobs = IndeedScraper.parse_jobs(
|
|
||||||
soup
|
|
||||||
) #: can raise exception, handled by main scrape function
|
|
||||||
total_num_jobs = IndeedScraper.total_jobs(soup)
|
|
||||||
|
|
||||||
if (
|
|
||||||
not jobs.get("metaData", {})
|
|
||||||
.get("mosaicProviderJobCardsModel", {})
|
|
||||||
.get("results")
|
|
||||||
):
|
|
||||||
raise Exception("No jobs found.")
|
|
||||||
|
|
||||||
def process_job(job) -> Optional[JobPost]:
|
|
||||||
job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}'
|
|
||||||
job_url_client = f'{self.url}/viewjob?jk={job["jobkey"]}'
|
|
||||||
if job_url in self.seen_urls:
|
|
||||||
return None
|
|
||||||
|
|
||||||
extracted_salary = job.get("extractedSalary")
|
|
||||||
compensation = None
|
|
||||||
if extracted_salary:
|
|
||||||
salary_snippet = job.get("salarySnippet")
|
|
||||||
currency = salary_snippet.get("currency") if salary_snippet else None
|
|
||||||
interval = (extracted_salary.get("type"),)
|
|
||||||
if isinstance(interval, tuple):
|
|
||||||
interval = interval[0]
|
|
||||||
|
|
||||||
interval = interval.upper()
|
|
||||||
if interval in CompensationInterval.__members__:
|
|
||||||
compensation = Compensation(
|
|
||||||
interval=CompensationInterval[interval],
|
|
||||||
min_amount=int(extracted_salary.get("max")),
|
|
||||||
max_amount=int(extracted_salary.get("min")),
|
|
||||||
currency=currency,
|
|
||||||
)
|
|
||||||
|
|
||||||
job_type = IndeedScraper.get_job_type(job)
|
|
||||||
timestamp_seconds = job["pubDate"] / 1000
|
|
||||||
date_posted = datetime.fromtimestamp(timestamp_seconds)
|
|
||||||
date_posted = date_posted.strftime("%Y-%m-%d")
|
|
||||||
|
|
||||||
description = self.get_description(job_url, session)
|
|
||||||
with io.StringIO(job["snippet"]) as f:
|
|
||||||
soup = BeautifulSoup(f, "html.parser")
|
|
||||||
li_elements = soup.find_all("li")
|
|
||||||
if description is None and li_elements:
|
|
||||||
description = " ".join(li.text for li in li_elements)
|
|
||||||
|
|
||||||
job_post = JobPost(
|
|
||||||
title=job["normTitle"],
|
|
||||||
description=description,
|
|
||||||
company_name=job["company"],
|
|
||||||
location=Location(
|
|
||||||
city=job.get("jobLocationCity"),
|
|
||||||
state=job.get("jobLocationState"),
|
|
||||||
country=self.country,
|
|
||||||
),
|
|
||||||
job_type=job_type,
|
|
||||||
compensation=compensation,
|
|
||||||
date_posted=date_posted,
|
|
||||||
job_url=job_url_client,
|
|
||||||
)
|
|
||||||
return job_post
|
|
||||||
|
|
||||||
with ThreadPoolExecutor(max_workers=1) as executor:
|
|
||||||
job_results: list[Future] = [
|
|
||||||
executor.submit(process_job, job)
|
|
||||||
for job in jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
|
|
||||||
]
|
|
||||||
|
|
||||||
job_list = [result.result() for result in job_results if result.result()]
|
|
||||||
|
|
||||||
return job_list, total_num_jobs
|
|
||||||
|
|
||||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
||||||
"""
|
"""
|
||||||
@@ -163,167 +60,291 @@ class IndeedScraper(Scraper):
|
|||||||
:param scraper_input:
|
:param scraper_input:
|
||||||
:return: job_response
|
:return: job_response
|
||||||
"""
|
"""
|
||||||
session = tls_client.Session(
|
self.scraper_input = scraper_input
|
||||||
client_identifier="chrome112", random_tls_extension_order=True
|
domain, self.api_country_code = self.scraper_input.country.indeed_domain_value
|
||||||
|
self.base_url = f"https://{domain}.indeed.com"
|
||||||
|
self.headers = api_headers.copy()
|
||||||
|
self.headers["indeed-co"] = self.scraper_input.country.indeed_domain_value
|
||||||
|
job_list = []
|
||||||
|
page = 1
|
||||||
|
|
||||||
|
cursor = None
|
||||||
|
|
||||||
|
while len(self.seen_urls) < scraper_input.results_wanted + scraper_input.offset:
|
||||||
|
log.info(
|
||||||
|
f"search page: {page} / {math.ceil(scraper_input.results_wanted / self.jobs_per_page)}"
|
||||||
|
)
|
||||||
|
jobs, cursor = self._scrape_page(cursor)
|
||||||
|
if not jobs:
|
||||||
|
log.info(f"found no jobs on page: {page}")
|
||||||
|
break
|
||||||
|
job_list += jobs
|
||||||
|
page += 1
|
||||||
|
return JobResponse(
|
||||||
|
jobs=job_list[
|
||||||
|
scraper_input.offset : scraper_input.offset
|
||||||
|
+ scraper_input.results_wanted
|
||||||
|
]
|
||||||
)
|
)
|
||||||
|
|
||||||
pages_to_process = (
|
def _scrape_page(self, cursor: str | None) -> Tuple[list[JobPost], str | None]:
|
||||||
math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
#: get first page to initialize session
|
|
||||||
job_list, total_results = self.scrape_page(scraper_input, 0, session)
|
|
||||||
|
|
||||||
with ThreadPoolExecutor(max_workers=1) as executor:
|
|
||||||
futures: list[Future] = [
|
|
||||||
executor.submit(self.scrape_page, scraper_input, page, session)
|
|
||||||
for page in range(1, pages_to_process + 1)
|
|
||||||
]
|
|
||||||
|
|
||||||
for future in futures:
|
|
||||||
jobs, _ = future.result()
|
|
||||||
|
|
||||||
job_list += jobs
|
|
||||||
except StatusException as e:
|
|
||||||
return JobResponse(
|
|
||||||
success=False,
|
|
||||||
error=f"Indeed returned status code {e.status_code}",
|
|
||||||
)
|
|
||||||
|
|
||||||
except ParsingException as e:
|
|
||||||
return JobResponse(
|
|
||||||
success=False,
|
|
||||||
error=f"Indeed failed to parse response: {e}",
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
print(f"LinkedIn failed to scrape: {e}\n{traceback.format_exc()}")
|
|
||||||
return JobResponse(
|
|
||||||
success=False,
|
|
||||||
error=f"Indeed failed to scrape: {e}",
|
|
||||||
)
|
|
||||||
|
|
||||||
if len(job_list) > scraper_input.results_wanted:
|
|
||||||
job_list = job_list[: scraper_input.results_wanted]
|
|
||||||
|
|
||||||
job_response = JobResponse(
|
|
||||||
success=True,
|
|
||||||
jobs=job_list,
|
|
||||||
total_results=total_results,
|
|
||||||
)
|
|
||||||
return job_response
|
|
||||||
|
|
||||||
def get_description(self, job_page_url: str, session: tls_client.Session) -> str:
|
|
||||||
"""
|
"""
|
||||||
Retrieves job description by going to the job page url
|
Scrapes a page of Indeed for jobs with scraper_input criteria
|
||||||
:param job_page_url:
|
:param cursor:
|
||||||
:param session:
|
:return: jobs found on page, next page cursor
|
||||||
:return: description
|
|
||||||
"""
|
"""
|
||||||
parsed_url = urllib.parse.urlparse(job_page_url)
|
jobs = []
|
||||||
params = urllib.parse.parse_qs(parsed_url.query)
|
new_cursor = None
|
||||||
jk_value = params.get("jk", [None])[0]
|
filters = self._build_filters()
|
||||||
formatted_url = f"{self.url}/viewjob?jk={jk_value}&spa=1"
|
search_term = (
|
||||||
|
self.scraper_input.search_term.replace('"', '\\"')
|
||||||
try:
|
if self.scraper_input.search_term
|
||||||
response = session.get(
|
else ""
|
||||||
formatted_url, allow_redirects=True, timeout_seconds=5
|
)
|
||||||
|
query = job_search_query.format(
|
||||||
|
what=(f'what: "{search_term}"' if search_term else ""),
|
||||||
|
location=(
|
||||||
|
f'location: {{where: "{self.scraper_input.location}", radius: {self.scraper_input.distance}, radiusUnit: MILES}}'
|
||||||
|
if self.scraper_input.location
|
||||||
|
else ""
|
||||||
|
),
|
||||||
|
dateOnIndeed=self.scraper_input.hours_old,
|
||||||
|
cursor=f'cursor: "{cursor}"' if cursor else "",
|
||||||
|
filters=filters,
|
||||||
|
)
|
||||||
|
payload = {
|
||||||
|
"query": query,
|
||||||
|
}
|
||||||
|
api_headers_temp = api_headers.copy()
|
||||||
|
api_headers_temp["indeed-co"] = self.api_country_code
|
||||||
|
response = self.session.post(
|
||||||
|
self.api_url,
|
||||||
|
headers=api_headers_temp,
|
||||||
|
json=payload,
|
||||||
|
timeout=10,
|
||||||
|
verify=False,
|
||||||
|
)
|
||||||
|
if not response.ok:
|
||||||
|
log.info(
|
||||||
|
f"responded with status code: {response.status_code} (submit GitHub issue if this appears to be a bug)"
|
||||||
)
|
)
|
||||||
except requests.exceptions.Timeout:
|
return jobs, new_cursor
|
||||||
print("The request timed out.")
|
data = response.json()
|
||||||
return None
|
jobs = data["data"]["jobSearch"]["results"]
|
||||||
|
new_cursor = data["data"]["jobSearch"]["pageInfo"]["nextCursor"]
|
||||||
|
|
||||||
if response.status_code not in range(200, 400):
|
job_list = []
|
||||||
print("status code not in range")
|
for job in jobs:
|
||||||
return None
|
processed_job = self._process_job(job["job"])
|
||||||
|
if processed_job:
|
||||||
|
job_list.append(processed_job)
|
||||||
|
|
||||||
raw_description = response.json()["body"]["jobInfoWrapperModel"][
|
return job_list, new_cursor
|
||||||
"jobInfoModel"
|
|
||||||
]["sanitizedJobDescription"]
|
def _build_filters(self):
|
||||||
with io.StringIO(raw_description) as f:
|
"""
|
||||||
soup = BeautifulSoup(f, "html.parser")
|
Builds the filters dict for job type/is_remote. If hours_old is provided, composite filter for job_type/is_remote is not possible.
|
||||||
text_content = " ".join(soup.get_text().split()).strip()
|
IndeedApply: filters: { keyword: { field: "indeedApplyScope", keys: ["DESKTOP"] } }
|
||||||
return text_content
|
"""
|
||||||
|
filters_str = ""
|
||||||
|
if self.scraper_input.hours_old:
|
||||||
|
filters_str = """
|
||||||
|
filters: {{
|
||||||
|
date: {{
|
||||||
|
field: "dateOnIndeed",
|
||||||
|
start: "{start}h"
|
||||||
|
}}
|
||||||
|
}}
|
||||||
|
""".format(
|
||||||
|
start=self.scraper_input.hours_old
|
||||||
|
)
|
||||||
|
elif self.scraper_input.easy_apply:
|
||||||
|
filters_str = """
|
||||||
|
filters: {
|
||||||
|
keyword: {
|
||||||
|
field: "indeedApplyScope",
|
||||||
|
keys: ["DESKTOP"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
elif self.scraper_input.job_type or self.scraper_input.is_remote:
|
||||||
|
job_type_key_mapping = {
|
||||||
|
JobType.FULL_TIME: "CF3CP",
|
||||||
|
JobType.PART_TIME: "75GKK",
|
||||||
|
JobType.CONTRACT: "NJXCK",
|
||||||
|
JobType.INTERNSHIP: "VDTG7",
|
||||||
|
}
|
||||||
|
|
||||||
|
keys = []
|
||||||
|
if self.scraper_input.job_type:
|
||||||
|
key = job_type_key_mapping[self.scraper_input.job_type]
|
||||||
|
keys.append(key)
|
||||||
|
|
||||||
|
if self.scraper_input.is_remote:
|
||||||
|
keys.append("DSQF7")
|
||||||
|
|
||||||
|
if keys:
|
||||||
|
keys_str = '", "'.join(keys)
|
||||||
|
filters_str = f"""
|
||||||
|
filters: {{
|
||||||
|
composite: {{
|
||||||
|
filters: [{{
|
||||||
|
keyword: {{
|
||||||
|
field: "attributes",
|
||||||
|
keys: ["{keys_str}"]
|
||||||
|
}}
|
||||||
|
}}]
|
||||||
|
}}
|
||||||
|
}}
|
||||||
|
"""
|
||||||
|
return filters_str
|
||||||
|
|
||||||
|
def _process_job(self, job: dict) -> JobPost | None:
|
||||||
|
"""
|
||||||
|
Parses the job dict into JobPost model
|
||||||
|
:param job: dict to parse
|
||||||
|
:return: JobPost if it's a new job
|
||||||
|
"""
|
||||||
|
job_url = f'{self.base_url}/viewjob?jk={job["key"]}'
|
||||||
|
if job_url in self.seen_urls:
|
||||||
|
return
|
||||||
|
self.seen_urls.add(job_url)
|
||||||
|
description = job["description"]["html"]
|
||||||
|
if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
|
||||||
|
description = markdown_converter(description)
|
||||||
|
|
||||||
|
job_type = self._get_job_type(job["attributes"])
|
||||||
|
timestamp_seconds = job["datePublished"] / 1000
|
||||||
|
date_posted = datetime.fromtimestamp(timestamp_seconds).strftime("%Y-%m-%d")
|
||||||
|
employer = job["employer"].get("dossier") if job["employer"] else None
|
||||||
|
employer_details = employer.get("employerDetails", {}) if employer else {}
|
||||||
|
rel_url = job["employer"]["relativeCompanyPageUrl"] if job["employer"] else None
|
||||||
|
return JobPost(
|
||||||
|
id=f'in-{job["key"]}',
|
||||||
|
title=job["title"],
|
||||||
|
description=description,
|
||||||
|
company_name=job["employer"].get("name") if job.get("employer") else None,
|
||||||
|
company_url=(f"{self.base_url}{rel_url}" if job["employer"] else None),
|
||||||
|
company_url_direct=(
|
||||||
|
employer["links"]["corporateWebsite"] if employer else None
|
||||||
|
),
|
||||||
|
location=Location(
|
||||||
|
city=job.get("location", {}).get("city"),
|
||||||
|
state=job.get("location", {}).get("admin1Code"),
|
||||||
|
country=job.get("location", {}).get("countryCode"),
|
||||||
|
),
|
||||||
|
job_type=job_type,
|
||||||
|
compensation=self._get_compensation(job["compensation"]),
|
||||||
|
date_posted=date_posted,
|
||||||
|
job_url=job_url,
|
||||||
|
job_url_direct=(
|
||||||
|
job["recruit"].get("viewJobUrl") if job.get("recruit") else None
|
||||||
|
),
|
||||||
|
emails=extract_emails_from_text(description) if description else None,
|
||||||
|
is_remote=self._is_job_remote(job, description),
|
||||||
|
company_addresses=(
|
||||||
|
employer_details["addresses"][0]
|
||||||
|
if employer_details.get("addresses")
|
||||||
|
else None
|
||||||
|
),
|
||||||
|
company_industry=(
|
||||||
|
employer_details["industry"]
|
||||||
|
.replace("Iv1", "")
|
||||||
|
.replace("_", " ")
|
||||||
|
.title()
|
||||||
|
.strip()
|
||||||
|
if employer_details.get("industry")
|
||||||
|
else None
|
||||||
|
),
|
||||||
|
company_num_employees=employer_details.get("employeesLocalizedLabel"),
|
||||||
|
company_revenue=employer_details.get("revenueLocalizedLabel"),
|
||||||
|
company_description=employer_details.get("briefDescription"),
|
||||||
|
company_logo=(
|
||||||
|
employer["images"].get("squareLogoUrl")
|
||||||
|
if employer and employer.get("images")
|
||||||
|
else None
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_job_type(job: dict) -> Optional[JobType]:
|
def _get_job_type(attributes: list) -> list[JobType]:
|
||||||
"""
|
"""
|
||||||
Parses the job to get JobTypeIndeed
|
Parses the attributes to get list of job types
|
||||||
|
:param attributes:
|
||||||
|
:return: list of JobType
|
||||||
|
"""
|
||||||
|
job_types: list[JobType] = []
|
||||||
|
for attribute in attributes:
|
||||||
|
job_type_str = attribute["label"].replace("-", "").replace(" ", "").lower()
|
||||||
|
job_type = get_enum_from_job_type(job_type_str)
|
||||||
|
if job_type:
|
||||||
|
job_types.append(job_type)
|
||||||
|
return job_types
|
||||||
    @staticmethod
    def _get_compensation(compensation: dict) -> Compensation | None:
        """
        Parses the job to get compensation
        :param compensation:
        :return: compensation object
        """
        if not compensation["baseSalary"] and not compensation["estimated"]:
            return None
        comp = (
            compensation["baseSalary"]
            if compensation["baseSalary"]
            else compensation["estimated"]["baseSalary"]
        )
        if not comp:
            return None
        interval = IndeedScraper._get_compensation_interval(comp["unitOfWork"])
        if not interval:
            return None
        min_range = comp["range"].get("min")
        max_range = comp["range"].get("max")
        return Compensation(
            interval=interval,
            min_amount=int(min_range) if min_range is not None else None,
            max_amount=int(max_range) if max_range is not None else None,
            currency=(
                compensation["estimated"]["currencyCode"]
                if compensation["estimated"]
                else compensation["currencyCode"]
            ),
        )
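For reference, a sketch of the payload shape `_get_compensation` expects, based on the field names in the GraphQL query below (the values here are invented):

```python
compensation = {
    "baseSalary": {"unitOfWork": "YEAR", "range": {"min": 90000, "max": 120000}},
    "estimated": None,
    "currencyCode": "USD",
}
# comp resolves to compensation["baseSalary"]; the interval maps to YEARLY,
# so the resulting Compensation would be 90000-120000 USD per year.
```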
    @staticmethod
    def _is_job_remote(job: dict, description: str) -> bool:
        """
        Searches the description, location, and attributes to check if job is remote
        """
        remote_keywords = ["remote", "work from home", "wfh"]
        is_remote_in_attributes = any(
            any(keyword in attr["label"].lower() for keyword in remote_keywords)
            for attr in job["attributes"]
        )
        is_remote_in_description = any(
            keyword in description.lower() for keyword in remote_keywords
        )
        is_remote_in_location = any(
            keyword in job["location"]["formatted"]["long"].lower()
            for keyword in remote_keywords
        )
        return (
            is_remote_in_attributes or is_remote_in_description or is_remote_in_location
        )

    @staticmethod
    def _get_compensation_interval(interval: str) -> CompensationInterval:
        interval_mapping = {
            "DAY": "DAILY",
            "YEAR": "YEARLY",
            "HOUR": "HOURLY",
            "WEEK": "WEEKLY",
            "MONTH": "MONTHLY",
        }
        mapped_interval = interval_mapping.get(interval.upper(), None)
        if mapped_interval and mapped_interval in CompensationInterval.__members__:
            return CompensationInterval[mapped_interval]
        else:
            raise ValueError(f"Unsupported interval: {interval}")

    # Note: the old soup-based helpers (parse_jobs / find_mosaic_script / total_jobs),
    # which scraped window.mosaic.providerData out of <script> tags, were removed in
    # this revision in favor of the GraphQL API query defined in constants.py below.
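Usage of the interval mapping is straightforward; anything outside the five known units raises:

```python
IndeedScraper._get_compensation_interval("YEAR")  # CompensationInterval.YEARLY
IndeedScraper._get_compensation_interval("hour")  # CompensationInterval.HOURLY (case-insensitive)
IndeedScraper._get_compensation_interval("FORTNIGHT")  # raises ValueError
```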
src/jobspy/scrapers/indeed/constants.py (new file, 109 lines)
@@ -0,0 +1,109 @@
job_search_query = """
query GetJobData {{
  jobSearch(
    {what}
    {location}
    limit: 100
    {cursor}
    sort: RELEVANCE
    {filters}
  ) {{
    pageInfo {{
      nextCursor
    }}
    results {{
      trackingKey
      job {{
        source {{
          name
        }}
        key
        title
        datePublished
        dateOnIndeed
        description {{
          html
        }}
        location {{
          countryName
          countryCode
          admin1Code
          city
          postalCode
          streetAddress
          formatted {{
            short
            long
          }}
        }}
        compensation {{
          estimated {{
            currencyCode
            baseSalary {{
              unitOfWork
              range {{
                ... on Range {{
                  min
                  max
                }}
              }}
            }}
          }}
          baseSalary {{
            unitOfWork
            range {{
              ... on Range {{
                min
                max
              }}
            }}
          }}
          currencyCode
        }}
        attributes {{
          key
          label
        }}
        employer {{
          relativeCompanyPageUrl
          name
          dossier {{
            employerDetails {{
              addresses
              industry
              employeesLocalizedLabel
              revenueLocalizedLabel
              briefDescription
              ceoName
              ceoPhotoUrl
            }}
            images {{
              headerImageUrl
              squareLogoUrl
            }}
            links {{
              corporateWebsite
            }}
          }}
        }}
        recruit {{
          viewJobUrl
          detailedSalary
          workSchedule
        }}
      }}
    }}
  }}
}}
"""
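The doubled braces in `job_search_query` are literal GraphQL braces; the single-brace fields (`{what}`, `{location}`, `{cursor}`, `{filters}`) are filled in with `str.format`. A hedged sketch of how the scraper might assemble it (the exact argument strings here are illustrative, not the scraper's actual builders):

```python
query = job_search_query.format(
    what='what: "software engineer"',  # illustrative GraphQL argument
    location='location: {where: "San Francisco, CA"}',  # illustrative
    cursor='cursor: "abc123"',  # omit / pass "" on the first page
    filters="",  # e.g. date or attribute filters, when used
)
```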
api_headers = {
    "Host": "apis.indeed.com",
    "content-type": "application/json",
    "indeed-api-key": "161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8",
    "accept": "application/json",
    "indeed-locale": "en-US",
    "accept-language": "en-US,en;q=0.9",
    "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1",
    "indeed-app-info": "appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone",
}
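A sketch of how these headers would be used to call the API. The exact endpoint path (`/graphql`) is an assumption here, inferred from the `Host` header; only the host itself is given by the constants above:

```python
import requests

payload = {"query": job_search_query.format(what="", location="", cursor="", filters="")}
# Endpoint path is assumed; apis.indeed.com is the host named in api_headers
response = requests.post(
    "https://apis.indeed.com/graphql", headers=api_headers, json=payload
)
jobs = response.json()["data"]["jobSearch"]["results"]
```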
@@ -1,29 +1,71 @@
"""
jobspy.scrapers.linkedin
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape LinkedIn.
"""

from __future__ import annotations

import math
import time
import random
import regex as re
from typing import Optional
from datetime import datetime

from bs4.element import Tag
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urlunparse, unquote

from .constants import headers
from .. import Scraper, ScraperInput, Site
from ..exceptions import LinkedInException
from ..utils import create_session, remove_attributes, create_logger
from ...jobs import (
    JobPost,
    Location,
    JobResponse,
    JobType,
    Country,
    Compensation,
    DescriptionFormat,
)
from ..utils import (
    extract_emails_from_text,
    get_enum_from_job_type,
    currency_parser,
    markdown_converter,
)

log = create_logger("LinkedIn")


class LinkedInScraper(Scraper):
    base_url = "https://www.linkedin.com"
    delay = 3
    band_delay = 4
    jobs_per_page = 25

    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes LinkedInScraper with the LinkedIn job search url
        """
        super().__init__(Site.LINKEDIN, proxies=proxies, ca_cert=ca_cert)
        self.session = create_session(
            proxies=self.proxies,
            ca_cert=ca_cert,
            is_tls=False,
            has_retry=True,
            delay=5,
            clear_cookies=True,
        )
        self.session.headers.update(headers)
        self.scraper_input = None
        self.country = "worldwide"
        self.job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
@@ -31,189 +73,230 @@ class LinkedInScraper(Scraper):
        :param scraper_input:
        :return: job_response
        """
        self.scraper_input = scraper_input
        job_list: list[JobPost] = []
        seen_ids = set()
        start = scraper_input.offset // 10 * 10 if scraper_input.offset else 0
        request_count = 0
        seconds_old = (
            scraper_input.hours_old * 3600 if scraper_input.hours_old else None
        )
        continue_search = (
            lambda: len(job_list) < scraper_input.results_wanted and start < 1000
        )
        while continue_search():
            request_count += 1
            log.info(
                f"search page: {request_count} / {math.ceil(scraper_input.results_wanted / 10)}"
            )
            params = {
                "keywords": scraper_input.search_term,
                "location": scraper_input.location,
                "distance": scraper_input.distance,
                "f_WT": 2 if scraper_input.is_remote else None,
                "f_JT": (
                    self.job_type_code(scraper_input.job_type)
                    if scraper_input.job_type
                    else None
                ),
                "pageNum": 0,
                "start": start,
                "f_AL": "true" if scraper_input.easy_apply else None,
                "f_C": (
                    ",".join(map(str, scraper_input.linkedin_company_ids))
                    if scraper_input.linkedin_company_ids
                    else None
                ),
            }
            if seconds_old is not None:
                params["f_TPR"] = f"r{seconds_old}"

            params = {k: v for k, v in params.items() if v is not None}
            try:
                response = self.session.get(
                    f"{self.base_url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
                    params=params,
                    timeout=10,
                )
                if response.status_code not in range(200, 400):
                    if response.status_code == 429:
                        err = (
                            f"429 Response - Blocked by LinkedIn for too many requests"
                        )
                    else:
                        err = f"LinkedIn response status code {response.status_code}"
                        err += f" - {response.text}"
                    log.error(err)
                    return JobResponse(jobs=job_list)
            except Exception as e:
                if "Proxy responded with" in str(e):
                    log.error(f"LinkedIn: Bad proxy")
                else:
                    log.error(f"LinkedIn: {str(e)}")
                return JobResponse(jobs=job_list)

            soup = BeautifulSoup(response.text, "html.parser")
            job_cards = soup.find_all("div", class_="base-search-card")
            if len(job_cards) == 0:
                return JobResponse(jobs=job_list)

            for job_card in job_cards:
                href_tag = job_card.find("a", class_="base-card__full-link")
                if href_tag and "href" in href_tag.attrs:
                    href = href_tag.attrs["href"].split("?")[0]
                    job_id = href.split("-")[-1]

                    if job_id in seen_ids:
                        continue
                    seen_ids.add(job_id)

                    try:
                        fetch_desc = scraper_input.linkedin_fetch_description
                        job_post = self._process_job(job_card, job_id, fetch_desc)
                        if job_post:
                            job_list.append(job_post)
                        if not continue_search():
                            break
                    except Exception as e:
                        raise LinkedInException(str(e))

            if continue_search():
                time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
                start += len(job_list)

        job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list)
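As a concrete example of the time filter above: `hours_old=24` becomes `f_TPR=r86400` in the query string, LinkedIn's "posted within the last N seconds" parameter:

```python
hours_old = 24
seconds_old = hours_old * 3600
print(f"r{seconds_old}")  # "r86400"
```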
    def _process_job(
        self, job_card: Tag, job_id: str, full_descr: bool
    ) -> Optional[JobPost]:
        salary_tag = job_card.find("span", class_="job-search-card__salary-info")

        compensation = None
        if salary_tag:
            salary_text = salary_tag.get_text(separator=" ").strip()
            salary_values = [currency_parser(value) for value in salary_text.split("-")]
            salary_min = salary_values[0]
            salary_max = salary_values[1]
            currency = salary_text[0] if salary_text[0] != "$" else "USD"

            compensation = Compensation(
                min_amount=int(salary_min),
                max_amount=int(salary_max),
                currency=currency,
            )

        title_tag = job_card.find("span", class_="sr-only")
        title = title_tag.get_text(strip=True) if title_tag else "N/A"

        company_tag = job_card.find("h4", class_="base-search-card__subtitle")
        company_a_tag = company_tag.find("a") if company_tag else None
        company_url = (
            urlunparse(urlparse(company_a_tag.get("href"))._replace(query=""))
            if company_a_tag and company_a_tag.has_attr("href")
            else ""
        )
        company = company_a_tag.get_text(strip=True) if company_a_tag else "N/A"

        metadata_card = job_card.find("div", class_="base-search-card__metadata")
        location = self._get_location(metadata_card)

        datetime_tag = (
            metadata_card.find("time", class_="job-search-card__listdate")
            if metadata_card
            else None
        )
        date_posted = None
        if datetime_tag and "datetime" in datetime_tag.attrs:
            datetime_str = datetime_tag["datetime"]
            try:
                date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
            except:
                date_posted = None
        job_details = {}
        if full_descr:
            job_details = self._get_job_details(job_id)

        return JobPost(
            id=f"li-{job_id}",
            title=title,
            company_name=company,
            company_url=company_url,
            location=location,
            date_posted=date_posted,
            job_url=f"{self.base_url}/jobs/view/{job_id}",
            compensation=compensation,
            job_type=job_details.get("job_type"),
            job_level=job_details.get("job_level", "").lower(),
            company_industry=job_details.get("company_industry"),
            description=job_details.get("description"),
            job_url_direct=job_details.get("job_url_direct"),
            emails=extract_emails_from_text(job_details.get("description")),
            company_logo=job_details.get("company_logo"),
            job_function=job_details.get("job_function"),
        )
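The salary handling above splits LinkedIn's display string on "-" and runs each half through `currency_parser` (defined in `utils.py` below). For instance, with an invented salary string:

```python
salary_text = "$60,000.00 - $80,000.00"
salary_values = [currency_parser(value) for value in salary_text.split("-")]
print(salary_values)  # [60000.0, 80000.0]
```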
    def _get_job_details(self, job_id: str) -> dict:
        """
        Retrieves job description and other job details by going to the job page url
        :param job_id:
        :return: dict
        """
        try:
            response = self.session.get(
                f"{self.base_url}/jobs/view/{job_id}", timeout=5
            )
            response.raise_for_status()
        except:
            return {}
        if "linkedin.com/signup" in response.url:
            return {}

        soup = BeautifulSoup(response.text, "html.parser")
        div_content = soup.find(
            "div", class_=lambda x: x and "show-more-less-html__markup" in x
        )
        description = None
        if div_content is not None:
            div_content = remove_attributes(div_content)
            description = div_content.prettify(formatter="html")
            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
                description = markdown_converter(description)

        h3_tag = soup.find(
            "h3", text=lambda text: text and "Job function" in text.strip()
        )

        job_function = None
        if h3_tag:
            job_function_span = h3_tag.find_next(
                "span", class_="description__job-criteria-text"
            )
            if job_function_span:
                job_function = job_function_span.text.strip()

        company_logo = (
            logo_image.get("data-delayed-url")
            if (logo_image := soup.find("img", {"class": "artdeco-entity-image"}))
            else None
        )
        return {
            "description": description,
            "job_level": self._parse_job_level(soup),
            "company_industry": self._parse_company_industry(soup),
            "job_type": self._parse_job_type(soup),
            "job_url_direct": self._parse_job_url_direct(soup),
            "company_logo": company_logo,
            "job_function": job_function,
        }

    def _get_location(self, metadata_card: Optional[Tag]) -> Location:
        """
        Extracts the location data from the job metadata card.
        :param metadata_card
        :return: location
        """
        location = Location(country=Country.from_string(self.country))
        if metadata_card is not None:
            location_tag = metadata_card.find(
                "span", class_="job-search-card__location"
            )
@@ -225,7 +308,108 @@ class LinkedInScraper(Scraper):
                location = Location(
                    city=city,
                    state=state,
                    country=Country.from_string(self.country),
                )
            elif len(parts) == 3:
                city, state, country = parts
                country = Country.from_string(country)
                location = Location(city=city, state=state, country=country)
        return location
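A sketch of the split the elided context performs before the branches above (the location string here is invented): LinkedIn renders locations like "Denver, CO, United States", which fall into the new 3-part branch.

```python
metadata_location = "Denver, CO, United States"  # invented example string
parts = [part.strip() for part in metadata_location.split(",")]
if len(parts) == 3:
    city, state, country_str = parts
    # country_str would be passed through Country.from_string, as above
```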
    @staticmethod
    def _parse_job_type(soup_job_type: BeautifulSoup) -> list[JobType] | None:
        """
        Gets the job type from job page
        :param soup_job_type:
        :return: JobType
        """
        h3_tag = soup_job_type.find(
            "h3",
            class_="description__job-criteria-subheader",
            string=lambda text: "Employment type" in text,
        )
        employment_type = None
        if h3_tag:
            employment_type_span = h3_tag.find_next_sibling(
                "span",
                class_="description__job-criteria-text description__job-criteria-text--criteria",
            )
            if employment_type_span:
                employment_type = employment_type_span.get_text(strip=True)
                employment_type = employment_type.lower()
                employment_type = employment_type.replace("-", "")

        return [get_enum_from_job_type(employment_type)] if employment_type else []

    @staticmethod
    def _parse_job_level(soup_job_level: BeautifulSoup) -> str | None:
        """
        Gets the job level from job page
        :param soup_job_level:
        :return: str
        """
        h3_tag = soup_job_level.find(
            "h3",
            class_="description__job-criteria-subheader",
            string=lambda text: "Seniority level" in text,
        )
        job_level = None
        if h3_tag:
            job_level_span = h3_tag.find_next_sibling(
                "span",
                class_="description__job-criteria-text description__job-criteria-text--criteria",
            )
            if job_level_span:
                job_level = job_level_span.get_text(strip=True)

        return job_level

    @staticmethod
    def _parse_company_industry(soup_industry: BeautifulSoup) -> str | None:
        """
        Gets the company industry from job page
        :param soup_industry:
        :return: str
        """
        h3_tag = soup_industry.find(
            "h3",
            class_="description__job-criteria-subheader",
            string=lambda text: "Industries" in text,
        )
        industry = None
        if h3_tag:
            industry_span = h3_tag.find_next_sibling(
                "span",
                class_="description__job-criteria-text description__job-criteria-text--criteria",
            )
            if industry_span:
                industry = industry_span.get_text(strip=True)

        return industry

    def _parse_job_url_direct(self, soup: BeautifulSoup) -> str | None:
        """
        Gets the job url direct from job page
        :param soup:
        :return: str
        """
        job_url_direct = None
        job_url_direct_content = soup.find("code", id="applyUrl")
        if job_url_direct_content:
            job_url_direct_match = self.job_url_direct_regex.search(
                job_url_direct_content.decode_contents().strip()
            )
            if job_url_direct_match:
                job_url_direct = unquote(job_url_direct_match.group())

        return job_url_direct

    @staticmethod
    def job_type_code(job_type_enum: JobType) -> str:
        return {
            JobType.FULL_TIME: "F",
            JobType.PART_TIME: "P",
            JobType.INTERNSHIP: "I",
            JobType.CONTRACT: "C",
            JobType.TEMPORARY: "T",
        }.get(job_type_enum, "")
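The mapping feeds LinkedIn's `f_JT` query parameter; enum members without a code fall back to an empty string:

```python
LinkedInScraper.job_type_code(JobType.FULL_TIME)  # "F"
LinkedInScraper.job_type_code(JobType.TEMPORARY)  # "T"
```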
src/jobspy/scrapers/linkedin/constants.py (new file, 8 lines)
@@ -0,0 +1,8 @@
headers = {
    "authority": "www.linkedin.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "cache-control": "max-age=0",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
}
src/jobspy/scrapers/utils.py (new file, 288 lines)
@@ -0,0 +1,288 @@
from __future__ import annotations

import logging
import re
from itertools import cycle

import numpy as np
import requests
import tls_client
import urllib3
from markdownify import markdownify as md
from requests.adapters import HTTPAdapter, Retry

from ..jobs import CompensationInterval, JobType

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def create_logger(name: str):
    logger = logging.getLogger(f"JobSpy:{name}")
    logger.propagate = False
    if not logger.handlers:
        logger.setLevel(logging.INFO)
        console_handler = logging.StreamHandler()
        format = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
        formatter = logging.Formatter(format)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    return logger


class RotatingProxySession:
    def __init__(self, proxies=None):
        if isinstance(proxies, str):
            self.proxy_cycle = cycle([self.format_proxy(proxies)])
        elif isinstance(proxies, list):
            self.proxy_cycle = (
                cycle([self.format_proxy(proxy) for proxy in proxies])
                if proxies
                else None
            )
        else:
            self.proxy_cycle = None

    @staticmethod
    def format_proxy(proxy):
        """Utility method to format a proxy string into a dictionary."""
        if proxy.startswith("http://") or proxy.startswith("https://"):
            return {"http": proxy, "https": proxy}
        return {"http": f"http://{proxy}", "https": f"http://{proxy}"}


class RequestsRotating(RotatingProxySession, requests.Session):

    def __init__(self, proxies=None, has_retry=False, delay=1, clear_cookies=False):
        RotatingProxySession.__init__(self, proxies=proxies)
        requests.Session.__init__(self)
        self.clear_cookies = clear_cookies
        self.allow_redirects = True
        self.setup_session(has_retry, delay)

    def setup_session(self, has_retry, delay):
        if has_retry:
            retries = Retry(
                total=3,
                connect=3,
                status=3,
                status_forcelist=[500, 502, 503, 504, 429],
                backoff_factor=delay,
            )
            adapter = HTTPAdapter(max_retries=retries)
            self.mount("http://", adapter)
            self.mount("https://", adapter)

    def request(self, method, url, **kwargs):
        if self.clear_cookies:
            self.cookies.clear()

        if self.proxy_cycle:
            next_proxy = next(self.proxy_cycle)
            if next_proxy["http"] != "http://localhost":
                self.proxies = next_proxy
            else:
                self.proxies = {}
        return requests.Session.request(self, method, url, **kwargs)


class TLSRotating(RotatingProxySession, tls_client.Session):

    def __init__(self, proxies=None):
        RotatingProxySession.__init__(self, proxies=proxies)
        tls_client.Session.__init__(self, random_tls_extension_order=True)

    def execute_request(self, *args, **kwargs):
        if self.proxy_cycle:
            next_proxy = next(self.proxy_cycle)
            if next_proxy["http"] != "http://localhost":
                self.proxies = next_proxy
            else:
                self.proxies = {}
        response = tls_client.Session.execute_request(self, *args, **kwargs)
        response.ok = response.status_code in range(200, 400)
        return response


def create_session(
    *,
    proxies: dict | str | None = None,
    ca_cert: str | None = None,
    is_tls: bool = True,
    has_retry: bool = False,
    delay: int = 1,
    clear_cookies: bool = False,
) -> requests.Session:
    """
    Creates a requests session with optional tls, proxy, and retry settings.
    :return: A session object
    """
    if is_tls:
        session = TLSRotating(proxies=proxies)
    else:
        session = RequestsRotating(
            proxies=proxies,
            has_retry=has_retry,
            delay=delay,
            clear_cookies=clear_cookies,
        )

    if ca_cert:
        session.verify = ca_cert

    return session
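A minimal usage sketch of `create_session` (the proxy address is invented):

```python
session = create_session(
    proxies="user:pass@10.0.0.1:8080",  # hypothetical proxy; rotated per request
    is_tls=False,  # plain requests.Session with retry/backoff instead of tls_client
    has_retry=True,
    delay=2,
)
resp = session.get("https://example.com")
```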
def set_logger_level(verbose: int):
    """
    Adjusts the logger's level. This function allows the logging level to be changed at runtime.

    Parameters:
    - verbose: int {0, 1, 2} (default=2, all logs)
    """
    if verbose is None:
        return
    level_name = {2: "INFO", 1: "WARNING", 0: "ERROR"}.get(verbose, "INFO")
    level = getattr(logging, level_name.upper(), None)
    if level is not None:
        for logger_name in logging.root.manager.loggerDict:
            if logger_name.startswith("JobSpy:"):
                logging.getLogger(logger_name).setLevel(level)
    else:
        raise ValueError(f"Invalid log level: {level_name}")


def markdown_converter(description_html: str):
    if description_html is None:
        return None
    markdown = md(description_html)
    return markdown.strip()


def extract_emails_from_text(text: str) -> list[str] | None:
    if not text:
        return None
    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
    return email_regex.findall(text)


def get_enum_from_job_type(job_type_str: str) -> JobType | None:
    """
    Given a string, returns the corresponding JobType enum member if a match is found.
    """
    res = None
    for job_type in JobType:
        if job_type_str in job_type.value:
            res = job_type
    return res


def currency_parser(cur_str):
    # Remove any non-numerical characters
    # except for ',' '.' or '-' (e.g. EUR)
    cur_str = re.sub("[^-0-9.,]", "", cur_str)
    # Remove any 000s separators (either , or .)
    cur_str = re.sub("[.,]", "", cur_str[:-3]) + cur_str[-3:]

    if "." in list(cur_str[-3:]):
        num = float(cur_str)
    elif "," in list(cur_str[-3:]):
        num = float(cur_str.replace(",", "."))
    else:
        num = float(cur_str)

    return np.round(num, 2)
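`currency_parser` treats the last three characters as the potential decimal part when deciding between `.` and `,` decimals, which lets it handle both US and European formats:

```python
currency_parser("$1,234.56")   # 1234.56
currency_parser("1.234,56 €")  # 1234.56 (comma decimal, dot thousands separator)
currency_parser("55000")       # 55000.0
```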
def remove_attributes(tag):
    for attr in list(tag.attrs):
        del tag[attr]
    return tag


def extract_salary(
    salary_str,
    lower_limit=1000,
    upper_limit=700000,
    hourly_threshold=350,
    monthly_threshold=30000,
    enforce_annual_salary=False,
):
    """
    Extracts salary information from a string and returns the salary interval, min and max salary values, and currency.
    (TODO: Needs test cases as the regex is complicated and may not cover all edge cases)
    """
    if not salary_str:
        return None, None, None, None

    annual_max_salary = None
    min_max_pattern = r"\$(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)\s*[-—–]\s*(?:\$)?(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)"

    def to_int(s):
        return int(float(s.replace(",", "")))

    def convert_hourly_to_annual(hourly_wage):
        return hourly_wage * 2080

    def convert_monthly_to_annual(monthly_wage):
        return monthly_wage * 12

    match = re.search(min_max_pattern, salary_str)

    if match:
        min_salary = to_int(match.group(1))
        max_salary = to_int(match.group(3))
        # Handle 'k' suffix for min and max salaries independently
        if "k" in match.group(2).lower() or "k" in match.group(4).lower():
            min_salary *= 1000
            max_salary *= 1000

        # Convert to annual if less than the hourly threshold
        if min_salary < hourly_threshold:
            interval = CompensationInterval.HOURLY.value
            annual_min_salary = convert_hourly_to_annual(min_salary)
            if max_salary < hourly_threshold:
                annual_max_salary = convert_hourly_to_annual(max_salary)

        elif min_salary < monthly_threshold:
            interval = CompensationInterval.MONTHLY.value
            annual_min_salary = convert_monthly_to_annual(min_salary)
            if max_salary < monthly_threshold:
                annual_max_salary = convert_monthly_to_annual(max_salary)

        else:
            interval = CompensationInterval.YEARLY.value
            annual_min_salary = min_salary
            annual_max_salary = max_salary

        # Ensure salary range is within specified limits
        if not annual_max_salary:
            return None, None, None, None
        if (
            lower_limit <= annual_min_salary <= upper_limit
            and lower_limit <= annual_max_salary <= upper_limit
            and annual_min_salary < annual_max_salary
        ):
            if enforce_annual_salary:
                return interval, annual_min_salary, annual_max_salary, "USD"
            else:
                return interval, min_salary, max_salary, "USD"
    return None, None, None, None
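A worked example of `extract_salary`: "$25 - $30" is below the hourly threshold (350), so both bounds are annualized at 2080 hours per year for the range check, but the raw hourly values are returned unless `enforce_annual_salary` is set:

```python
extract_salary("$25 - $30 an hour")
# -> (CompensationInterval.HOURLY.value, 25, 30, "USD")
#    (25*2080=52000 and 30*2080=62400 both pass the 1000..700000 limits)

extract_salary("$25 - $30 an hour", enforce_annual_salary=True)
# -> (CompensationInterval.HOURLY.value, 52000, 62400, "USD")
```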
def extract_job_type(description: str):
    if not description:
        return []

    keywords = {
        JobType.FULL_TIME: r"full\s?time",
        JobType.PART_TIME: r"part\s?time",
        JobType.INTERNSHIP: r"internship",
        JobType.CONTRACT: r"contract",
    }

    listing_types = []
    for key, pattern in keywords.items():
        if re.search(pattern, description, re.IGNORECASE):
            listing_types.append(key)

    return listing_types if listing_types else None
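`extract_job_type` is a simple case-insensitive keyword scan over the description text:

```python
extract_job_type("We offer full time and part time schedules, plus a summer internship")
# -> [JobType.FULL_TIME, JobType.PART_TIME, JobType.INTERNSHIP]
```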
@@ -1,425 +1,267 @@
"""
jobspy.scrapers.ziprecruiter
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape ZipRecruiter.
"""

from __future__ import annotations

import json
import math
import re
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Optional, Tuple, Any

from bs4 import BeautifulSoup

from .constants import headers
from .. import Scraper, ScraperInput, Site
from ..utils import (
    extract_emails_from_text,
    create_session,
    markdown_converter,
    remove_attributes,
    create_logger,
)
from ...jobs import (
    JobPost,
    Compensation,
    Location,
    JobResponse,
    JobType,
    Country,
    DescriptionFormat,
)

log = create_logger("ZipRecruiter")


class ZipRecruiterScraper(Scraper):
    base_url = "https://www.ziprecruiter.com"
    api_url = "https://api.ziprecruiter.com"

    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes ZipRecruiterScraper with the ZipRecruiter job search url
        """
        super().__init__(Site.ZIP_RECRUITER, proxies=proxies)

        self.scraper_input = None
        self.session = create_session(proxies=proxies, ca_cert=ca_cert)
        self.session.headers.update(headers)
        self._get_cookies()

        self.delay = 5
        self.jobs_per_page = 20
        self.seen_urls = set()

    # Note: the old scrape_page / process_job_html path, which drove a tls_client
    # session against the HTML search results at /jobs-search, was removed in this
    # revision in favor of ZipRecruiter's JSON mobile API.
    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes ZipRecruiter for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        self.scraper_input = scraper_input
        job_list: list[JobPost] = []
        continue_token = None

        max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)
        for page in range(1, max_pages + 1):
            if len(job_list) >= scraper_input.results_wanted:
                break
            if page > 1:
                time.sleep(self.delay)
            log.info(f"search page: {page} / {max_pages}")
            jobs_on_page, continue_token = self._find_jobs_in_page(
                scraper_input, continue_token
            )
            if jobs_on_page:
                job_list.extend(jobs_on_page)
            else:
                break
            if not continue_token:
                break
        return JobResponse(jobs=job_list[: scraper_input.results_wanted])
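ZipRecruiter's mobile API pages with an opaque `continue` token rather than page numbers; each response's token is fed back as `continue_from` on the next request. A minimal sketch of that loop shape, independent of the scraper class:

```python
def paged(fetch):
    """fetch(token) -> (items, next_token); yields items until the token runs out."""
    token = None
    while True:
        items, token = fetch(token)
        yield from items
        if not token:
            break
```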
    def _find_jobs_in_page(
        self, scraper_input: ScraperInput, continue_token: str | None = None
    ) -> Tuple[list[JobPost], Optional[str]]:
        """
        Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
        :param scraper_input:
        :param continue_token:
        :return: jobs found on page
        """
        jobs_list = []
        params = self._add_params(scraper_input)
        if continue_token:
            params["continue_from"] = continue_token
        try:
            res = self.session.get(f"{self.api_url}/jobs-app/jobs", params=params)
            if res.status_code not in range(200, 400):
                if res.status_code == 429:
                    err = "429 Response - Blocked by ZipRecruiter for too many requests"
                else:
                    err = f"ZipRecruiter response status code {res.status_code}"
                    err += f" with response: {res.text}"  # ZipRecruiter likely not available in EU
                log.error(err)
                return jobs_list, ""
        except Exception as e:
            if "Proxy responded with" in str(e):
                log.error(f"ZipRecruiter: Bad proxy")
            else:
                log.error(f"ZipRecruiter: {str(e)}")
            return jobs_list, ""

        res_data = res.json()
        jobs_list = res_data.get("jobs", [])
        next_continue_token = res_data.get("continue", None)
        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
            job_results = [executor.submit(self._process_job, job) for job in jobs_list]

        job_list = list(filter(None, (result.result() for result in job_results)))
        return job_list, next_continue_token

    def _process_job(self, job: dict) -> JobPost | None:
        """
        Processes an individual job dict from the response
        """
        title = job.get("name")
        job_url = f"{self.base_url}/jobs//j?lvk={job['listing_key']}"
        if job_url in self.seen_urls:
            return
        self.seen_urls.add(job_url)

        description = job.get("job_description", "").strip()
        listing_type = job.get("buyer_type", "")
        description = (
            markdown_converter(description)
            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN
            else description
        )
        company = job.get("hiring_company", {}).get("name")
        country_value = "usa" if job.get("job_country") == "US" else "canada"
        country_enum = Country.from_string(country_value)

        location = Location(
            city=job.get("job_city"), state=job.get("job_state"), country=country_enum
        )
        job_type = self._get_job_type_enum(
            job.get("employment_type", "").replace("_", "").lower()
        )
        date_posted = datetime.fromisoformat(job["posted_time"].rstrip("Z")).date()
        comp_interval = job.get("compensation_interval")
        comp_interval = "yearly" if comp_interval == "annual" else comp_interval
        comp_min = int(job["compensation_min"]) if "compensation_min" in job else None
        comp_max = int(job["compensation_max"]) if "compensation_max" in job else None
        comp_currency = job.get("compensation_currency")
        description_full, job_url_direct = self._get_descr(job_url)

        return JobPost(
            id=f'zr-{job["listing_key"]}',
            title=title,
            company_name=company,
            location=location,
            job_type=job_type,
            compensation=Compensation(
                interval=comp_interval,
                min_amount=comp_min,
                max_amount=comp_max,
                currency=comp_currency,
            ),
            date_posted=date_posted,
            job_url=job_url,
            description=description_full if description_full else description,
            emails=extract_emails_from_text(description) if description else None,
            job_url_direct=job_url_direct,
            listing_type=listing_type,
        )
|
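A worked example of the field normalization above, using an assumed sample payload (field values are illustrative, not from the API):

from datetime import datetime

sample = {"employment_type": "FULL_TIME", "posted_time": "2024-03-01T12:00:00Z"}
# "FULL_TIME" -> "fulltime", the lowercase alias _get_job_type_enum matches on
job_type_token = sample["employment_type"].replace("_", "").lower()
# rstrip("Z") keeps fromisoformat happy on Python versions that reject the Z suffix
date_posted = datetime.fromisoformat(sample["posted_time"].rstrip("Z")).date()
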
    def _get_descr(self, job_url):
        res = self.session.get(job_url, allow_redirects=True)
        description_full = job_url_direct = None
        if res.ok:
            soup = BeautifulSoup(res.text, "html.parser")
            job_descr_div = soup.find("div", class_="job_description")
            company_descr_section = soup.find("section", class_="company_description")
            job_description_clean = (
                remove_attributes(job_descr_div).prettify(formatter="html")
                if job_descr_div
                else ""
            )
            company_description_clean = (
                remove_attributes(company_descr_section).prettify(formatter="html")
                if company_descr_section
                else ""
            )
            description_full = job_description_clean + company_description_clean
            script_tag = soup.find("script", type="application/json")
            if script_tag:
                job_json = json.loads(script_tag.string)
                job_url_val = job_json["model"].get("saveJobURL", "")
                m = re.search(r"job_url=(.+)", job_url_val)
                if m:
                    job_url_direct = m.group(1)

            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
                description_full = markdown_converter(description_full)

        return description_full, job_url_direct

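To illustrate the `job_url=` extraction above with an assumed saveJobURL value (shape inferred, not taken from a real response):

import re

save_job_url = "/jobs-app/save?posted_time=2024-03-01T12:00:00Z&job_url=https://employer.example/apply"
m = re.search(r"job_url=(.+)", save_job_url)
job_url_direct = m.group(1) if m else None  # -> "https://employer.example/apply"
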
    def _get_cookies(self):
        """
        Sends a session event to the API with device properties.
        """
        data = [
            ("event_type", "session"),
            ("logged_in", "false"),
            ("number_of_retry", "1"),
            ("property", "model:iPhone"),
            ("property", "os:iOS"),
            ("property", "locale:en_us"),
            ("property", "app_build_number:4734"),
            ("property", "app_version:91.0"),
            ("property", "manufacturer:Apple"),
            ("property", "timestamp:2025-01-12T12:04:42-06:00"),
            ("property", "screen_height:852"),
            ("property", "os_version:16.6.1"),
            ("property", "source:install"),
            ("property", "screen_width:393"),
            ("property", "device_model:iPhone 14 Pro"),
            ("property", "brand:Apple"),
        ]

        url = f"{self.api_url}/jobs-app/event"
        self.session.post(url, data=data)

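A minimal sketch of the intended effect, assuming `self.session` behaves like a `requests.Session` (the scraper may use a different session class): the POST's Set-Cookie response headers land in the session's cookie jar and ride along on later API calls.

import requests

session = requests.Session()
session.post(
    "https://api.ziprecruiter.com/jobs-app/event",  # endpoint per the method above
    data=[("event_type", "session"), ("logged_in", "false")],
)
print(session.cookies.get_dict())  # cookies the API set; reused automatically
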
    @staticmethod
    def _get_job_type_enum(job_type_str: str) -> list[JobType] | None:
        for job_type in JobType:
            if job_type_str in job_type.value:
                return [job_type]
        return None

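Example lookups, assuming jobspy's `JobType` enum where each member's value is a tuple of lowercase aliases (e.g. "fulltime"):

ZipRecruiterScraper._get_job_type_enum("fulltime")   # -> [JobType.FULL_TIME]
ZipRecruiterScraper._get_job_type_enum("contract")   # -> [JobType.CONTRACT]
ZipRecruiterScraper._get_job_type_enum("unknown123") # -> None
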
Removed in this compare (legacy helpers superseded by `_process_job`/`_get_descr` above and `_add_params` below):

    def get_description(self, job_page_url: str) -> Tuple[Optional[str], Optional[str]]:
        """
        Retrieves job description by going to the job page url
        :param job_page_url:
        :return: description or None, response url
        """
        try:
            response = self.session.get(
                job_page_url,
                headers=ZipRecruiterScraper.headers(),
                allow_redirects=True,
                timeout_seconds=5,
            )
        except requests.exceptions.Timeout:
            print("The request timed out.")
            return None, None

        html_string = response.content
        soup_job = BeautifulSoup(html_string, "html.parser")

        job_description_div = soup_job.find("div", {"class": "job_description"})
        if job_description_div:
            return job_description_div.text.strip(), response.url
        return None, response.url

    @staticmethod
    def get_interval(interval_str: str):
        """
        Maps the interval alias to its appropriate CompensationInterval.
        :param interval_str
        :return: CompensationInterval
        """
        interval_alias = {"annually": CompensationInterval.YEARLY}
        interval_str = interval_str.lower()

        if interval_str in interval_alias:
            return interval_alias[interval_str]

        return CompensationInterval(interval_str)

    @staticmethod
    def get_date_posted(job: BeautifulSoup) -> Optional[datetime.date]:
        """
        Extracts the date a job was posted
        :param job
        :return: date the job was posted or None
        """
        button = job.find(
            "button", {"class": "action_input save_job zrs_btn_secondary_200"}
        )
        if not button:
            return None

        url_time = button.get("data-href", "")
        url_components = urlparse(url_time)
        params = parse_qs(url_components.query)
        posted_time_str = params.get("posted_time", [None])[0]

        if posted_time_str:
            posted_date = datetime.strptime(
                posted_time_str, "%Y-%m-%dT%H:%M:%SZ"
            ).date()
            return posted_date

        return None

    @staticmethod
    def get_compensation(job: BeautifulSoup) -> Optional[Compensation]:
        """
        Parses the compensation tag from the job BeautifulSoup object
        :param job
        :return: Compensation object or None
        """
        pay_element = job.find("li", {"class": "perk_item perk_pay"})
        if pay_element is None:
            return None
        pay = pay_element.find("div", {"class": "value"}).find("span").text.strip()

        def create_compensation_object(pay_string: str) -> Compensation:
            """
            Creates a Compensation object from a pay_string
            :param pay_string
            :return: compensation
            """
            interval = ZipRecruiterScraper.get_interval(pay_string.split()[-1])

            amounts = []
            for amount in pay_string.split("to"):
                amount = amount.replace(",", "").strip("$ ").split(" ")[0]
                if "K" in amount:
                    amount = amount.replace("K", "")
                    amount = int(float(amount)) * 1000
                else:
                    amount = int(float(amount))
                amounts.append(amount)

            compensation = Compensation(
                interval=interval,
                min_amount=min(amounts),
                max_amount=max(amounts),
                currency="USD/CAD",
            )

            return compensation

        return create_compensation_object(pay)

    @staticmethod
    def get_location(job: BeautifulSoup) -> Location:
        """
        Extracts the job location from a BeautifulSoup object
        :param job:
        :return: location
        """
        location_link = job.find("a", {"class": "company_location"})
        if location_link is not None:
            location_string = location_link.text.strip()
            parts = location_string.split(", ")
            if len(parts) == 2:
                city, state = parts
            else:
                city, state = None, None
        else:
            city, state = None, None
        return Location(city=city, state=state, country=Country.US_CANADA)

    @staticmethod
    def headers() -> dict:
        """
        Returns headers needed for requests
        :return: dict - Dictionary containing headers
        """
        return {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
        }

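A worked run of the pay-string parsing in `create_compensation_object` above, on an assumed ZipRecruiter pay label (the input shape is illustrative):

pay_string = "$55K to $65K Yearly"
amounts = []
for amount in pay_string.split("to"):
    amount = amount.replace(",", "").strip("$ ").split(" ")[0]  # "55K", then "65K"
    amounts.append(int(float(amount.replace("K", ""))) * 1000 if "K" in amount else int(float(amount)))
# amounts == [55000, 65000]; get_interval("Yearly") -> CompensationInterval("yearly")
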
    @staticmethod
    def _add_params(scraper_input) -> dict[str, str | Any]:
        params = {
            "search": scraper_input.search_term,
            "location": scraper_input.location,
        }
        if scraper_input.hours_old:
            params["days"] = max(scraper_input.hours_old // 24, 1)
        job_type_map = {JobType.FULL_TIME: "full_time", JobType.PART_TIME: "part_time"}
        if scraper_input.job_type:
            job_type = scraper_input.job_type
            params["employment_type"] = job_type_map.get(job_type, job_type.value[0])
        if scraper_input.easy_apply:
            params["zipapply"] = 1
        if scraper_input.is_remote:
            params["remote"] = 1
        if scraper_input.distance:
            params["radius"] = scraper_input.distance
        return {k: v for k, v in params.items() if v is not None}

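An illustrative call, using a `SimpleNamespace` as a hypothetical stand-in for the real `ScraperInput` (attribute names taken from the method body above):

from types import SimpleNamespace

si = SimpleNamespace(
    search_term="python developer", location="Austin, TX",
    hours_old=72, job_type=None, easy_apply=None, is_remote=True, distance=25,
)
# _add_params(si) -> {"search": "python developer", "location": "Austin, TX",
#                     "days": 3, "remote": 1, "radius": 25}
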
10  src/jobspy/scrapers/ziprecruiter/constants.py  Normal file
@@ -0,0 +1,10 @@
headers = {
    "Host": "api.ziprecruiter.com",
    "accept": "*/*",
    "x-zr-zva-override": "100000000;vid:ZT1huzm_EQlDTVEc",
    "x-pushnotificationid": "0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0",
    "x-deviceid": "D77B3A92-E589-46A4-8A39-6EF6F1D86006",
    "user-agent": "Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)",
    "authorization": "Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==",
    "accept-language": "en-US,en;q=0.9",
}
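A sketch of how these app-mimicking headers would be attached to a request; the endpoint path and import path are inferred from the scraper code and file location in this diff, not stated in the file itself:

import requests

from jobspy.scrapers.ziprecruiter.constants import headers

res = requests.get(
    "https://api.ziprecruiter.com/jobs-app/jobs",  # assumed endpoint
    headers=headers,
    params={"search": "software engineer", "location": "Austin, TX"},
)
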
@@ -1,9 +0,0 @@
from ..jobspy import scrape_jobs


def test_indeed():
    result = scrape_jobs(
        site_name="indeed",
        search_term="software engineer",
    )
    assert result is not None
@@ -1,9 +0,0 @@
from jobspy import scrape_jobs


def test_linkedin():
    result = scrape_jobs(
        site_name="linkedin",
        search_term="software engineer",
    )
    assert result is not None
@@ -1,10 +0,0 @@
from jobspy import scrape_jobs


def test_ziprecruiter():
    result = scrape_jobs(
        site_name="zip_recruiter",
        search_term="software engineer",
    )

    assert result is not None