Mirror of https://github.com/Bunsly/JobSpy.git (synced 2026-03-05 12:04:33 -08:00)
Compare commits: v1.1.39 ... 4c45d63f60 (217 commits)
.github/workflows/job_scraper_dynamic.yml (vendored, new file, 63 lines)
@@ -0,0 +1,63 @@
name: JobSpy Scraper Dynamic Workflow

on:
  workflow_dispatch:
    inputs:
      user_email:
        description: 'Email of user'
        required: true
        default: 'Branden@autoemployme.onmicrosoft.com'

permissions:
  contents: read
  id-token: write

jobs:
  scrape_jobs:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout Repo
        uses: actions/checkout@v3

      - name: Set Up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install Dependencies
        run: |
          pip install --upgrade pip
          pip install -r requirements.txt

      - name: Sanitize Email (Preserve Case)
        id: sanitize
        run: |
          raw_email="${{ github.event.inputs.user_email }}"
          safe_email=$(echo "$raw_email" | sed 's/@/_at_/g; s/\./_/g')
          echo "safe_email=$safe_email" >> $GITHUB_OUTPUT

      - name: Ensure outputs folder exists
        run: mkdir -p outputs

      - name: Run Job Scraper with Config
        run: |
          python job_scraper_dynamic.py "${{ github.event.inputs.user_email }}"

      - name: Upload Output Artifact
        uses: actions/upload-artifact@v4
        with:
          name: jobspy_output_${{ steps.sanitize.outputs.safe_email }}
          path: outputs/jobspy_output_${{ steps.sanitize.outputs.safe_email }}.csv
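The "Sanitize Email" step above is what keys the artifact and CSV names; the same transform appears as `sanitize_email` in `job_scraper_dynamic.py` further down this diff. A minimal Python sketch of what the sed expression does:

```python
def sanitize_email(email: str) -> str:
    # Same mapping as the workflow's sed expression: '@' -> '_at_', '.' -> '_'
    return email.replace("@", "_at_").replace(".", "_")

# The workflow's default input therefore yields the artifact/CSV name suffix:
# Branden_at_autoemployme_onmicrosoft_com
print(sanitize_email("Branden@autoemployme.onmicrosoft.com"))
```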
.github/workflows/jobspy_scraper.yml (vendored, new file, 48 lines)
@@ -0,0 +1,48 @@
name: JobSpy Scraper Workflow

on:
  workflow_dispatch: # Allows manual trigger from GitHub or Power Automate
  # Remove or comment out the schedule to prevent auto-runs
  # schedule:
  #   - cron: '0 */6 * * *' # Runs every 6 hours (DISABLED)

permissions:
  actions: read
  contents: read
  id-token: write

jobs:
  scrape_jobs:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Run JobSpy Scraper
        run: python job_scraper_exact_match.py

      - name: Debug - Check if jobspy_output.csv exists
        run: |
          if [ ! -f jobspy_output.csv ]; then
            echo "❌ ERROR: jobspy_output.csv not found!"
            exit 1
          else
            echo "✅ jobspy_output.csv found, proceeding to upload..."
          fi

      - name: Upload JobSpy Output as Artifact
        uses: actions/upload-artifact@v4 # Explicitly using latest version
        with:
          name: jobspy-results
          path: jobspy_output.csv
.github/workflows/publish-to-pypi.yml (vendored, modified, 12 lines changed)
@@ -1,9 +1,13 @@
-name: Publish Python 🐍 distributions 📦 to PyPI
-on: push
+name: Publish JobSpy to PyPi
+on:
+  push:
+    branches:
+      - main
+  workflow_dispatch:

 jobs:
   build-n-publish:
-    name: Build and publish Python 🐍 distributions 📦 to PyPI
+    name: Build and publish JobSpy to PyPi
     runs-on: ubuntu-latest

     steps:
@@ -27,7 +31,7 @@ jobs:
         build

     - name: Publish distribution 📦 to PyPI
-      if: startsWith(github.ref, 'refs/tags')
+      if: startsWith(github.ref, 'refs/tags') || github.event_name == 'workflow_dispatch'
       uses: pypa/gh-action-pypi-publish@release/v1
       with:
         password: ${{ secrets.PYPI_API_TOKEN }}
.pre-commit-config.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
repos:
  - repo: https://github.com/psf/black
    rev: 24.2.0
    hooks:
      - id: black
        language_version: python
        args: [--line-length=88, --quiet]
README.md (modified, 223 lines changed)
@@ -1,27 +1,19 @@
 <img src="https://github.com/cullenwatson/JobSpy/assets/78247585/ae185b7e-e444-4712-8bb9-fa97f53e896b" width="400">

-**JobSpy** is a simple, yet comprehensive, job scraping library.
-
-**Not technical?** Try out the web scraping tool on our site at [usejobspy.com](https://usejobspy.com).
-
-*Looking to build a data-focused software product?* **[Book a call](https://bunsly.com/)** *to
-work with us.*
+**JobSpy** is a job scraping library with the goal of aggregating all the jobs from popular job boards with one tool.

 ## Features

-- Scrapes job postings from **LinkedIn**, **Indeed**, **Glassdoor**, & **ZipRecruiter** simultaneously
-- Aggregates the job postings in a Pandas DataFrame
-- Proxy support (HTTP/S, SOCKS)
-
-[Video Guide for JobSpy](https://www.youtube.com/watch?v=RuP1HrAZnxs&pp=ygUgam9icyBzY3JhcGVyIGJvdCBsaW5rZWRpbiBpbmRlZWQ%3D) -
-Updated for release v1.1.3
+- Scrapes job postings from **LinkedIn**, **Indeed**, **Glassdoor**, **Google**, **ZipRecruiter**, & **Bayt** concurrently
+- Aggregates the job postings in a dataframe
+- Proxies support to bypass blocking

 ### Installation

 ```
-pip install python-jobspy
+pip install -U python-jobspy
 ```

 _Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_
@@ -29,24 +21,30 @@ _Python version >= [3.10](https://www.python.org/downloads/release/python-3100/)
 ### Usage

 ```python
+import csv
 from jobspy import scrape_jobs

 jobs = scrape_jobs(
-    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
+    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor", "google", "bayt"],
     search_term="software engineer",
-    location="Dallas, TX",
-    results_wanted=10,
-    country_indeed='USA'  # only needed for indeed / glassdoor
+    google_search_term="software engineer jobs near San Francisco, CA since yesterday",
+    location="San Francisco, CA",
+    results_wanted=20,
+    hours_old=72,
+    country_indeed='USA',
+
+    # linkedin_fetch_description=True # gets more info such as description, direct job url (slower)
+    # proxies=["208.195.175.46:65095", "208.195.175.45:65095", "localhost"],
 )
 print(f"Found {len(jobs)} jobs")
 print(jobs.head())
-jobs.to_csv("jobs.csv", index=False) # to_xlsx
+jobs.to_csv("jobs.csv", quoting=csv.QUOTE_NONNUMERIC, escapechar="\\", index=False) # to_excel
 ```

 ### Output

 ```
-SITE      TITLE                             COMPANY_NAME      CITY          STATE  JOB_TYPE  INTERVAL  MIN_AMOUNT  MAX_AMOUNT  JOB_URL                                            DESCRIPTION
+SITE      TITLE                             COMPANY           CITY          STATE  JOB_TYPE  INTERVAL  MIN_AMOUNT  MAX_AMOUNT  JOB_URL                                            DESCRIPTION
 indeed    Software Engineer                 AMERICAN SYSTEMS  Arlington     VA     None      yearly    200000      150000      https://www.indeed.com/viewjob?jk=5e409e577046...  THIS POSITION COMES WITH A 10K SIGNING BONUS!...
 indeed    Senior Software Engineer          TherapyNotes.com  Philadelphia  PA     fulltime  yearly    135000      110000      https://www.indeed.com/viewjob?jk=da39574a40cb...  About Us TherapyNotes is the national leader i...
 linkedin  Software Engineer - Early Career  Lockheed Martin   Sunnyvale     CA     fulltime  yearly    None        None        https://www.linkedin.com/jobs/view/3693012711      Description:By bringing together people that u...
@@ -58,61 +56,84 @@ zip_recruiter  Software Developer  TEKsystems  Phoenix
 ### Parameters for `scrape_jobs()`

 ```plaintext
-Required
-├── site_type (List[enum]): linkedin, zip_recruiter, indeed, glassdoor
-└── search_term (str)
-Optional
-├── location (int)
-├── distance (int): in miles
-├── job_type (enum): fulltime, parttime, internship, contract
-├── proxy (str): in format 'http://user:pass@host:port' or [https, socks]
+├── site_name (list|str):
+|    linkedin, zip_recruiter, indeed, glassdoor, google, bayt
+|    (default is all)
+│
+├── search_term (str)
+│
+├── google_search_term (str)
+|    search term for google jobs. This is the only param for filtering google jobs.
+│
+├── location (str)
+│
+├── distance (int):
+|    in miles, default 50
+│
+├── job_type (str):
+|    fulltime, parttime, internship, contract
+│
+├── proxies (list):
+|    in format ['user:pass@host:port', 'localhost']
+|    each job board scraper will round robin through the proxies
+│
 ├── is_remote (bool)
-├── full_description (bool): fetches full description for Indeed / LinkedIn (much slower)
-├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
-├── easy_apply (bool): filters for jobs that are hosted on the job board site
-├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
-├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
+│
+├── results_wanted (int):
+|    number of job results to retrieve for each site specified in 'site_name'
+│
+├── easy_apply (bool):
+|    filters for jobs that are hosted on the job board site (LinkedIn easy apply filter no longer works)
+│
+├── description_format (str):
+|    markdown, html (Format type of the job descriptions. Default is markdown.)
+│
+├── offset (int):
+|    starts the search from an offset (e.g. 25 will start the search from the 25th result)
+│
+├── hours_old (int):
+|    filters jobs by the number of hours since the job was posted
+|    (ZipRecruiter and Glassdoor round up to next day.)
+│
+├── verbose (int) {0, 1, 2}:
+|    Controls the verbosity of the runtime printouts
+|    (0 prints only errors, 1 is errors+warnings, 2 is all logs. Default is 2.)
+│
+├── linkedin_fetch_description (bool):
+|    fetches full description and direct job url for LinkedIn (Increases requests by O(n))
+│
+├── linkedin_company_ids (list[int]):
+|    searches for linkedin jobs with specific company ids
+│
+├── country_indeed (str):
+|    filters the country on Indeed & Glassdoor (see below for correct spelling)
+│
+├── enforce_annual_salary (bool):
+|    converts wages to annual salary
+│
+├── ca_cert (str)
+|    path to CA Certificate file for proxies
 ```

-### JobPost Schema
-
-```plaintext
-JobPost
-├── title (str)
-├── company (str)
-├── company_url (str)
-├── job_url (str)
-├── location (object)
-│   ├── country (str)
-│   ├── city (str)
-│   ├── state (str)
-├── description (str)
-├── job_type (str): fulltime, parttime, internship, contract
-├── compensation (object)
-│   ├── interval (str): yearly, monthly, weekly, daily, hourly
-│   ├── min_amount (int)
-│   ├── max_amount (int)
-│   └── currency (enum)
-└── date_posted (date)
-└── emails (str)
-└── num_urgent_words (int)
-└── is_remote (bool)
-```
-
-### Exceptions
-
-The following exceptions may be raised when using JobSpy:
-
-* `LinkedInException`
-* `IndeedException`
-* `ZipRecruiterException`
-* `GlassdoorException`
+```plaintext
+├── Indeed limitations:
+|    Only one from this list can be used in a search:
+|    - hours_old
+|    - job_type & is_remote
+|    - easy_apply
+│
+└── LinkedIn limitations:
+|    Only one from this list can be used in a search:
+|    - hours_old
+|    - easy_apply
+```

 ## Supported Countries for Job Searching

 ### **LinkedIn**

-LinkedIn searches globally & uses only the `location` parameter. You can only fetch 1000 jobs max from the LinkedIn endpoint we're using
+LinkedIn searches globally & uses only the `location` parameter.

 ### **ZipRecruiter**

@@ -142,28 +163,86 @@ You can specify the following countries when searching on Indeed (use the exact
 | South Korea          | Spain*   | Sweden  | Switzerland* |
 | Taiwan               | Thailand | Turkey  | Ukraine      |
 | United Arab Emirates | UK*      | USA*    | Uruguay      |
-| Venezuela            | Vietnam  |         |              |
+| Venezuela            | Vietnam* |         |              |

+### **Bayt**
+
+Bayt only uses the search_term parameter currently and searches internationally
+
+Glassdoor can only fetch 900 jobs from the endpoint we're using on a given search.
+
+## Notes
+* Indeed is the best scraper currently with no rate limiting.
+* All the job board endpoints are capped at around 1000 jobs on a given search.
+* LinkedIn is the most restrictive and usually rate limits around the 10th page with one ip. Proxies are a must basically.

 ## Frequently Asked Questions

 ---
 **Q: Why is Indeed giving unrelated roles?**
 **A:** Indeed searches the description too.
-
-**Q: Encountering issues with your queries?**
-**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems
-persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
+- use - to remove words
+- "" for exact match
+
+Example of a good Indeed query
+
+```py
+search_term='"engineering intern" software summer (java OR python OR c++) 2025 -tax -marketing'
+```
+
+This searches the description/title and must include software, summer, 2025, one of the languages, engineering intern exactly, no tax, no marketing.
+
+---
+
+**Q: No results when using "google"?**
+**A:** You have to use super specific syntax. Search for google jobs on your browser and then whatever pops up in the google jobs search box after applying some filters is what you need to copy & paste into the google_search_term.

 ---

 **Q: Received a response code 429?**
 **A:** This indicates that you have been blocked by the job board site for sending too many requests. All of the job board sites are aggressive with blocking. We recommend:

-- Waiting some time between scrapes (site-dependent).
-- Trying a VPN or proxy to change your IP address.
+- Wait some time between scrapes (site-dependent).
+- Try using the proxies param to change your IP address.

 ---

+### JobPost Schema
+
+```plaintext
+JobPost
+├── title
+├── company
+├── company_url
+├── job_url
+├── location
+│   ├── country
+│   ├── city
+│   ├── state
+├── description
+├── job_type: fulltime, parttime, internship, contract
+├── job_function
+│   ├── interval: yearly, monthly, weekly, daily, hourly
+│   ├── min_amount
+│   ├── max_amount
+│   ├── currency
+│   └── salary_source: direct_data, description (parsed from posting)
+├── date_posted
+├── emails
+└── is_remote
+
+Linkedin specific
+└── job_level
+
+Linkedin & Indeed specific
+└── company_industry
+
+Indeed specific
+├── company_country
+├── company_addresses
+├── company_employees_label
+├── company_revenue_label
+├── company_description
+└── company_logo
+```
configs/config.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
  "search_terms": ["IT Support", "Help Desk"],
  "results_wanted": 50,
  "max_days_old": 7,
  "target_state": "NY",
  "user_email": "Branden@autoemployme.onmicrosoft.com"
}

Per-user config (new file; file name not captured in this view)
@@ -0,0 +1 @@
{"search_terms":["Accountant"," Test"],"results_wanted":"50\n","max_days_old":"1\n","target_state":"NY","user_email":"Branden@autoemployme.onmicrosoft.com"}

Per-user config (new file; file name not captured in this view)
@@ -0,0 +1 @@
{"search_terms":["Developer"," Tester"],"results_wanted":"50\n","max_days_old":"2\n","target_state":"FL","user_email":"Danny@autoemployme.onmicrosoft.com"}
(deleted file; name not captured in this view)
@@ -1,30 +0,0 @@
from jobspy import scrape_jobs
import pandas as pd

jobs: pd.DataFrame = scrape_jobs(
    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=25,  # be wary: the higher it is, the more likely you'll get blocked (rotating proxy can help tho)
    country_indeed="USA",
    # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
)

# formatting for pandas
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", 50)  # set to 0 to see full job url / desc

# 1: output to console
print(jobs)

# 2: output to .csv
jobs.to_csv("./jobs.csv", index=False)
print("outputted to jobs.csv")

# 3: output to .xlsx
# jobs.to_xlsx('jobs.xlsx', index=False)

# 4: display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
# display(jobs)
(deleted file: Jupyter notebook; name not captured in this view)
@@ -1,167 +0,0 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00a94b47-f47b-420f-ba7e-714ef219c006",
   "metadata": {},
   "outputs": [],
   "source": [
    "from jobspy import scrape_jobs\n",
    "import pandas as pd\n",
    "from IPython.display import display, HTML"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9f773e6c-d9fc-42cc-b0ef-63b739e78435",
   "metadata": {},
   "outputs": [],
   "source": [
    "pd.set_option('display.max_columns', None)\n",
    "pd.set_option('display.max_rows', None)\n",
    "pd.set_option('display.width', None)\n",
    "pd.set_option('display.max_colwidth', 50)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1253c1f8-9437-492e-9dd3-e7fe51099420",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 1 (no hyperlinks, USA)\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"linkedin\"],\n",
    "    location='san francisco',\n",
    "    search_term=\"engineer\",\n",
    "    results_wanted=5,\n",
    "\n",
    "    # use if you want to use a proxy\n",
    "    # proxy=\"socks5://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
    "    proxy=\"http://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
    "    #proxy=\"https://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
    ")\n",
    "display(jobs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6a581b2d-f7da-4fac-868d-9efe143ee20a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 2 - remote USA & hyperlinks\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"linkedin\", \"zip_recruiter\", \"indeed\"],\n",
    "    # location='san francisco',\n",
    "    search_term=\"software engineer\",\n",
    "    country_indeed=\"USA\",\n",
    "    hyperlinks=True,\n",
    "    is_remote=True,\n",
    "    results_wanted=5,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe8289bc-5b64-4202-9a64-7c117c83fd9a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# use if hyperlinks=True\n",
    "html = jobs.to_html(escape=False)\n",
    "# change max-width: 200px to show more or less of the content\n",
    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
    "display(HTML(truncate_width))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "951c2fe1-52ff-407d-8bb1-068049b36777",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 3 - with hyperlinks, international - linkedin (no zip_recruiter)\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"linkedin\"],\n",
    "    location='berlin',\n",
    "    search_term=\"engineer\",\n",
    "    hyperlinks=True,\n",
    "    results_wanted=5,\n",
    "    easy_apply=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1e37a521-caef-441c-8fc2-2eb5b2e7da62",
   "metadata": {},
   "outputs": [],
   "source": [
    "# use if hyperlinks=True\n",
    "html = jobs.to_html(escape=False)\n",
    "# change max-width: 200px to show more or less of the content\n",
    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
    "display(HTML(truncate_width))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0650e608-0b58-4bf5-ae86-68348035b16a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 4 - international indeed (no zip_recruiter)\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"indeed\"],\n",
    "    search_term=\"engineer\",\n",
    "    country_indeed=\"China\",\n",
    "    hyperlinks=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "40913ac8-3f8a-4d7e-ac47-afb88316432b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# use if hyperlinks=True\n",
    "html = jobs.to_html(escape=False)\n",
    "# change max-width: 200px to show more or less of the content\n",
    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
    "display(HTML(truncate_width))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
(deleted file; name not captured in this view)
@@ -1,77 +0,0 @@
from jobspy import scrape_jobs
import pandas as pd
import os
import time

# pick a new csv filename if jobs.csv already exists
csv_filename = "jobs.csv"
counter = 1
while os.path.exists(csv_filename):
    csv_filename = f"jobs_{counter}.csv"
    counter += 1

# results wanted and offset
results_wanted = 1000
offset = 0

all_jobs = []

# max retries
max_retries = 3

# number of results at each iteration
results_in_each_iteration = 30

while len(all_jobs) < results_wanted:
    retry_count = 0
    while retry_count < max_retries:
        print("Doing from", offset, "to", offset + results_in_each_iteration, "jobs")
        try:
            jobs = scrape_jobs(
                site_name=["indeed"],
                search_term="software engineer",
                # New York, NY
                # Dallas, TX
                # Los Angeles, CA
                location="Los Angeles, CA",
                results_wanted=min(results_in_each_iteration, results_wanted - len(all_jobs)),
                country_indeed="USA",
                offset=offset,
                # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
            )

            # Add the scraped jobs to the list
            all_jobs.extend(jobs.to_dict('records'))

            # Increment the offset for the next page of results
            offset += results_in_each_iteration

            # Add a delay to avoid rate limiting (you can adjust the delay time as needed)
            print(f"Scraped {len(all_jobs)} jobs")
            print("Sleeping secs", 100 * (retry_count + 1))
            time.sleep(100 * (retry_count + 1))  # sleep between requests to avoid rate limiting

            break  # Break out of the retry loop if successful
        except Exception as e:
            print(f"Error: {e}")
            retry_count += 1
            print("Sleeping secs before retry", 100 * (retry_count + 1))
            time.sleep(100 * (retry_count + 1))
            if retry_count >= max_retries:
                print("Max retries reached. Exiting.")
                break

# DataFrame from the collected job data
jobs_df = pd.DataFrame(all_jobs)

# Formatting
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", 50)

print(jobs_df)

jobs_df.to_csv(csv_filename, index=False)
print(f"Outputted to {csv_filename}")
job_scraper.py (new file, 116 lines)
@@ -0,0 +1,116 @@
import csv
import datetime
from jobspy.google import Google
from jobspy.linkedin import LinkedIn
from jobspy.indeed import Indeed
from jobspy.ziprecruiter import ZipRecruiter
from jobspy.model import ScraperInput

# Define job sources
sources = {
    "google": Google,
    "linkedin": LinkedIn,
    "indeed": Indeed,
    "zip_recruiter": ZipRecruiter,
}

# Define search preferences
search_terms = ["Automation Engineer", "CRM Manager", "Implementation Specialist"]
results_wanted = 200  # Fetch more jobs
max_days_old = 2  # Fetch jobs posted in last 48 hours
target_state = "NY"  # Only keep jobs from New York


def scrape_jobs(search_terms, results_wanted, max_days_old, target_state):
    """Scrape jobs from multiple sources and filter by state."""
    all_jobs = []
    today = datetime.date.today()
    print("\n🔎 DEBUG: Fetching jobs for search terms:", search_terms)

    for search_term in search_terms:
        for source_name, source_class in sources.items():
            print(f"\n🚀 Scraping {search_term} from {source_name}...")

            scraper = source_class()
            search_criteria = ScraperInput(
                site_type=[source_name],
                search_term=search_term,
                results_wanted=results_wanted,
            )

            job_response = scraper.scrape(search_criteria)

            for job in job_response.jobs:
                # Normalize location fields
                location_city = job.location.city.strip() if job.location.city else "Unknown"
                location_state = job.location.state.strip().upper() if job.location.state else "Unknown"
                location_country = str(job.location.country) if job.location.country else "Unknown"

                # Debug: Show all jobs being fetched
                print(f"📍 Fetched Job: {job.title} - {location_city}, {location_state}, {location_country}")

                # Ensure the job is recent
                if job.date_posted and (today - job.date_posted).days <= max_days_old:
                    if location_state == target_state or job.is_remote:
                        print(f"✅ MATCH (In NY or Remote): {job.title} - {location_city}, {location_state} (Posted {job.date_posted})")

                        all_jobs.append({
                            "Job ID": job.id,
                            "Job Title (Primary)": job.title,
                            "Company Name": job.company_name if job.company_name else "Unknown",
                            "Industry": job.company_industry if job.company_industry else "Not Provided",
                            "Experience Level": job.job_level if job.job_level else "Not Provided",
                            "Job Type": job.job_type[0].name if job.job_type else "Not Provided",
                            "Is Remote": job.is_remote,
                            "Currency": job.compensation.currency if job.compensation else "",
                            "Salary Min": job.compensation.min_amount if job.compensation else "",
                            "Salary Max": job.compensation.max_amount if job.compensation else "",
                            "Date Posted": job.date_posted.strftime("%Y-%m-%d") if job.date_posted else "Not Provided",
                            "Location City": location_city,
                            "Location State": location_state,
                            "Location Country": location_country,
                            "Job URL": job.job_url,
                            "Job Description": job.description[:500] if job.description else "No description available",
                            "Job Source": source_name
                        })
                    else:
                        print(f"❌ Ignored (Wrong State): {job.title} - {location_city}, {location_state} (Posted {job.date_posted})")
                else:
                    print(f"⏳ Ignored (Too Old): {job.title} - {location_city}, {location_state} (Posted {job.date_posted})")

    print(f"\n✅ {len(all_jobs)} jobs retrieved in NY")
    return all_jobs


def save_jobs_to_csv(jobs, filename="jobspy_output.csv"):
    """Save job data to a CSV file."""
    if not jobs:
        print("⚠️ No jobs found matching criteria.")
        return

    fieldnames = [
        "Job ID", "Job Title (Primary)", "Company Name", "Industry",
        "Experience Level", "Job Type", "Is Remote", "Currency",
        "Salary Min", "Salary Max", "Date Posted", "Location City",
        "Location State", "Location Country", "Job URL", "Job Description",
        "Job Source"
    ]

    with open(filename, mode="w", newline="", encoding="utf-8") as file:
        writer = csv.DictWriter(file, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(jobs)

    print(f"✅ Jobs saved to {filename} ({len(jobs)} entries)")


# Run the scraper with multiple job searches
job_data = scrape_jobs(
    search_terms=search_terms,
    results_wanted=results_wanted,
    max_days_old=max_days_old,
    target_state=target_state
)

# Save results to CSV
save_jobs_to_csv(job_data)
job_scraper_dynamic.py (new file, 135 lines)
@@ -0,0 +1,135 @@
import csv
import datetime
import os
import sys
import json
from jobspy.google import Google
from jobspy.linkedin import LinkedIn
from jobspy.indeed import Indeed
from jobspy.model import ScraperInput

# Define job sources
sources = {
    "google": Google,
    "linkedin": LinkedIn,
    "indeed": Indeed,
}

def sanitize_email(email):
    return email.replace("@", "_at_").replace(".", "_")

def load_config_file(email=None):
    if email:
        safe_email = sanitize_email(email)
        config_path = os.path.join("configs", f"config_{safe_email}.json")
        if os.path.exists(config_path):
            print(f"📂 Loading config for {email} → {config_path}")
            with open(config_path, "r", encoding="utf-8") as f:
                return json.load(f), safe_email
        else:
            raise FileNotFoundError(f"❌ Config for {email} not found at {config_path}")
    else:
        raise ValueError("❌ Email must be passed as argument")

def scrape_jobs(search_terms, results_wanted, max_days_old, target_state):
    # Ensure numeric values are converted
    results_wanted = int(results_wanted)
    max_days_old = int(max_days_old)

    all_jobs = []
    today = datetime.date.today()
    print(f"\n🔍 Scraping jobs for: {search_terms}")

    for term in search_terms:
        for source_name, source_class in sources.items():
            print(f"🚀 Scraping '{term}' from {source_name}...")
            scraper = source_class()
            criteria = ScraperInput(site_type=[source_name], search_term=term, results_wanted=results_wanted)

            try:
                response = scraper.scrape(criteria)
            except Exception as e:
                print(f"❌ Error scraping {source_name}: {e}")
                continue

            for job in response.jobs:
                city = job.location.city.strip() if job.location.city else "Unknown"
                state = job.location.state.strip().upper() if job.location.state else "Unknown"
                country = str(job.location.country) if job.location.country else "Unknown"

                if not any(t.lower() in job.title.lower() for t in search_terms):
                    continue

                if job.date_posted and (today - job.date_posted).days <= max_days_old:
                    if state == target_state or job.is_remote:
                        all_jobs.append({
                            "Job ID": job.id,
                            "Job Title (Primary)": job.title,
                            "Company Name": job.company_name or "Unknown",
                            "Industry": job.company_industry or "Not Provided",
                            "Experience Level": job.job_level or "Not Provided",
                            "Job Type": job.job_type[0].name if job.job_type else "Not Provided",
                            "Is Remote": job.is_remote,
                            "Currency": job.compensation.currency if job.compensation else "",
                            "Salary Min": job.compensation.min_amount if job.compensation else "",
                            "Salary Max": job.compensation.max_amount if job.compensation else "",
                            "Date Posted": job.date_posted.strftime("%Y-%m-%d") if job.date_posted else "Not Provided",
                            "Location City": city,
                            "Location State": state,
                            "Location Country": country,
                            "Job URL": job.job_url,
                            "Job Description": job.description.replace(",", "") if job.description else "No description available",
                            "Job Source": source_name
                        })
    print(f"✅ {len(all_jobs)} jobs matched.")
    return all_jobs

def save_jobs_to_csv(jobs, output_path):
    if not jobs:
        print("⚠️ No jobs found.")
        return

    fieldnames = [
        "Job ID", "Job Title (Primary)", "Company Name", "Industry",
        "Experience Level", "Job Type", "Is Remote", "Currency",
        "Salary Min", "Salary Max", "Date Posted", "Location City",
        "Location State", "Location Country", "Job URL", "Job Description",
        "Job Source"
    ]

    header = "|~|".join(fieldnames)
    rows = [header]

    for job in jobs:
        row = []
        for field in fieldnames:
            value = str(job.get(field, "Not Provided")).replace(",", "").strip()
            row.append(value if value else "Not Provided")
        rows.append("|~|".join(row))

    output = ",".join(rows)
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, "w", encoding="utf-8") as f:
        f.write(output)

    print(f"💾 Saved output to: {output_path}")

# MAIN
if __name__ == "__main__":
    try:
        user_email = sys.argv[1] if len(sys.argv) >= 2 else None
        config, safe_email = load_config_file(user_email)

        job_data = scrape_jobs(
            search_terms=config["search_terms"],
            results_wanted=config["results_wanted"],
            max_days_old=config["max_days_old"],
            target_state=config["target_state"]
        )

        output_file = f"outputs/jobspy_output_{safe_email}.csv"
        save_jobs_to_csv(job_data, output_file)

    except Exception as e:
        print(f"❌ Fatal Error: {e}")
        sys.exit(1)
job_scraper_exact_match.py (new file, 146 lines)
@@ -0,0 +1,146 @@
import csv
import datetime
import os
from jobspy.google import Google
from jobspy.linkedin import LinkedIn
from jobspy.indeed import Indeed
from jobspy.model import ScraperInput

# Define job sources
sources = {
    "google": Google,
    "linkedin": LinkedIn,
    "indeed": Indeed,
}

# Define search preferences
search_terms = ["Automation Engineer", "CRM Manager", "Implementation Specialist", "CRM", "Project Manager", "POS", "Microsoft Power", "IT Support"]
results_wanted = 100  # Fetch more jobs
max_days_old = 2  # Fetch jobs posted in last 48 hours
target_state = "NY"  # Only keep jobs from New York

def scrape_jobs(search_terms, results_wanted, max_days_old, target_state):
    """Scrape jobs from multiple sources and filter by state."""
    all_jobs = []
    today = datetime.date.today()

    print("\n🔎 DEBUG: Fetching jobs for search terms:", search_terms)

    for search_term in search_terms:
        for source_name, source_class in sources.items():
            print(f"\n🚀 Scraping {search_term} from {source_name}...")

            scraper = source_class()
            search_criteria = ScraperInput(
                site_type=[source_name],
                search_term=search_term,
                results_wanted=results_wanted,
            )

            job_response = scraper.scrape(search_criteria)

            for job in job_response.jobs:
                # Normalize location fields
                location_city = job.location.city.strip() if job.location.city else "Unknown"
                location_state = job.location.state.strip().upper() if job.location.state else "Unknown"
                location_country = str(job.location.country) if job.location.country else "Unknown"

                # Debug: Show all jobs being fetched
                print(f"📍 Fetched Job: {job.title} - {location_city}, {location_state}, {location_country}")

                # Exclude jobs that don't explicitly match the search terms
                if not any(term.lower() in job.title.lower() for term in search_terms):
                    print(f"🚫 Excluding: {job.title} (Doesn't match {search_terms})")
                    continue  # Skip this job

                # Ensure the job is recent
                if job.date_posted and (today - job.date_posted).days <= max_days_old:
                    # Only accept jobs if they're in NY or Remote
                    if location_state == target_state or job.is_remote:
                        print(f"✅ MATCH: {job.title} - {location_city}, {location_state} (Posted {job.date_posted})")
                        all_jobs.append({
                            "Job ID": job.id,
                            "Job Title (Primary)": job.title,
                            "Company Name": job.company_name if job.company_name else "Unknown",
                            "Industry": job.company_industry if job.company_industry else "Not Provided",
                            "Experience Level": job.job_level if job.job_level else "Not Provided",
                            "Job Type": job.job_type[0].name if job.job_type else "Not Provided",
                            "Is Remote": job.is_remote,
                            "Currency": job.compensation.currency if job.compensation else "",
                            "Salary Min": job.compensation.min_amount if job.compensation else "",
                            "Salary Max": job.compensation.max_amount if job.compensation else "",
                            "Date Posted": job.date_posted.strftime("%Y-%m-%d") if job.date_posted else "Not Provided",
                            "Location City": location_city,
                            "Location State": location_state,
                            "Location Country": location_country,
                            "Job URL": job.job_url,
                            "Job Description": job.description.replace(",", "") if job.description else "No description available",
                            "Job Source": source_name
                        })
                    else:
                        print(f"❌ Ignored (Wrong State): {job.title} - {location_city}, {location_state} (Posted {job.date_posted})")
                else:
                    print(f"⏳ Ignored (Too Old): {job.title} - {location_city}, {location_state} (Posted {job.date_posted})")

    print(f"\n✅ {len(all_jobs)} jobs retrieved in NY")
    return all_jobs


def save_jobs_to_csv(jobs, filename="jobspy_output.csv"):
    """Save job data to a CSV file with custom formatting:
    - Fields within a record are separated by the custom delimiter |~|
    - Records are separated by a comma
    - All commas in field values are removed
    - Blank fields are replaced with 'Not Provided'
    """
    if not jobs:
        print("⚠️ No jobs found matching criteria.")
        return

    # Remove old CSV file before writing
    if os.path.exists(filename):
        os.remove(filename)

    fieldnames = [
        "Job ID", "Job Title (Primary)", "Company Name", "Industry",
        "Experience Level", "Job Type", "Is Remote", "Currency",
        "Salary Min", "Salary Max", "Date Posted", "Location City",
        "Location State", "Location Country", "Job URL", "Job Description",
        "Job Source"
    ]

    # Build header record using custom field delimiter
    header_record = "|~|".join(fieldnames)
    records = [header_record]

    for job in jobs:
        row = []
        for field in fieldnames:
            value = str(job.get(field, "")).strip()
            if not value:
                value = "Not Provided"
            # Remove all commas from the value
            value = value.replace(",", "")
            row.append(value)
        # Join fields with the custom delimiter
        record = "|~|".join(row)
        records.append(record)

    # Join records with a comma as the record separator
    output = ",".join(records)
    with open(filename, "w", encoding="utf-8") as file:
        file.write(output)

    print(f"✅ Jobs saved to {filename} ({len(jobs)} entries)")


# Run the scraper with multiple job searches
job_data = scrape_jobs(
    search_terms=search_terms,
    results_wanted=results_wanted,
    max_days_old=max_days_old,
    target_state=target_state
)

# Save results to CSV with custom formatting
save_jobs_to_csv(job_data)
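Because the output format documented in `save_jobs_to_csv` above is not standard CSV (fields joined with `|~|`, records joined with commas, commas stripped from every value), a consumer has to split on those delimiters rather than use a CSV reader. A minimal sketch of a reader for this format (`parse_jobspy_output` is illustrative, not part of the repo):

```python
def parse_jobspy_output(path: str) -> list[dict]:
    """Read back the '|~|'-delimited output written by save_jobs_to_csv above."""
    with open(path, encoding="utf-8") as f:
        records = f.read().split(",")  # records are separated by commas (values had commas removed)
    fields = records[0].split("|~|")   # the first record is the header
    return [dict(zip(fields, record.split("|~|"))) for record in records[1:]]

jobs = parse_jobspy_output("jobspy_output.csv")
print(len(jobs), "records")
```

Splitting on the comma is safe only because the writer removes commas from every field value first; that is the trade-off this custom format makes.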
jobspy/__init__.py (new file, 202 lines)
@@ -0,0 +1,202 @@
from __future__ import annotations

from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Tuple

import pandas as pd

from jobspy.bayt import BaytScraper
from jobspy.glassdoor import Glassdoor
from jobspy.google import Google
from jobspy.indeed import Indeed
from jobspy.linkedin import LinkedIn
from jobspy.model import JobType, Location, JobResponse, Country
from jobspy.model import SalarySource, ScraperInput, Site
from jobspy.util import (
    set_logger_level,
    extract_salary,
    create_logger,
    get_enum_from_value,
    map_str_to_site,
    convert_to_annual,
    desired_order,
)
from jobspy.ziprecruiter import ZipRecruiter


def scrape_jobs(
    site_name: str | list[str] | Site | list[Site] | None = None,
    search_term: str | None = None,
    google_search_term: str | None = None,
    location: str | None = None,
    distance: int | None = 50,
    is_remote: bool = False,
    job_type: str | None = None,
    easy_apply: bool | None = None,
    results_wanted: int = 15,
    country_indeed: str = "usa",
    proxies: list[str] | str | None = None,
    ca_cert: str | None = None,
    description_format: str = "markdown",
    linkedin_fetch_description: bool | None = False,
    linkedin_company_ids: list[int] | None = None,
    offset: int | None = 0,
    hours_old: int = None,
    enforce_annual_salary: bool = False,
    verbose: int = 0,
    **kwargs,
) -> pd.DataFrame:
    """
    Scrapes job data from job boards concurrently
    :return: Pandas DataFrame containing job data
    """
    SCRAPER_MAPPING = {
        Site.LINKEDIN: LinkedIn,
        Site.INDEED: Indeed,
        Site.ZIP_RECRUITER: ZipRecruiter,
        Site.GLASSDOOR: Glassdoor,
        Site.GOOGLE: Google,
        Site.BAYT: BaytScraper,
    }
    set_logger_level(verbose)
    job_type = get_enum_from_value(job_type) if job_type else None

    def get_site_type():
        site_types = list(Site)
        if isinstance(site_name, str):
            site_types = [map_str_to_site(site_name)]
        elif isinstance(site_name, Site):
            site_types = [site_name]
        elif isinstance(site_name, list):
            site_types = [
                map_str_to_site(site) if isinstance(site, str) else site
                for site in site_name
            ]
        return site_types

    country_enum = Country.from_string(country_indeed)

    scraper_input = ScraperInput(
        site_type=get_site_type(),
        country=country_enum,
        search_term=search_term,
        google_search_term=google_search_term,
        location=location,
        distance=distance,
        is_remote=is_remote,
        job_type=job_type,
        easy_apply=easy_apply,
        description_format=description_format,
        linkedin_fetch_description=linkedin_fetch_description,
        results_wanted=results_wanted,
        linkedin_company_ids=linkedin_company_ids,
        offset=offset,
        hours_old=hours_old,
    )

    def scrape_site(site: Site) -> Tuple[str, JobResponse]:
        scraper_class = SCRAPER_MAPPING[site]
        scraper = scraper_class(proxies=proxies, ca_cert=ca_cert)
        scraped_data: JobResponse = scraper.scrape(scraper_input)
        cap_name = site.value.capitalize()
        site_name = "ZipRecruiter" if cap_name == "Zip_recruiter" else cap_name
        create_logger(site_name).info(f"finished scraping")
        return site.value, scraped_data

    site_to_jobs_dict = {}

    def worker(site):
        site_val, scraped_info = scrape_site(site)
        return site_val, scraped_info

    with ThreadPoolExecutor() as executor:
        future_to_site = {
            executor.submit(worker, site): site for site in scraper_input.site_type
        }

        for future in as_completed(future_to_site):
            site_value, scraped_data = future.result()
            site_to_jobs_dict[site_value] = scraped_data

    jobs_dfs: list[pd.DataFrame] = []

    for site, job_response in site_to_jobs_dict.items():
        for job in job_response.jobs:
            job_data = job.dict()
            job_url = job_data["job_url"]
            job_data["site"] = site
            job_data["company"] = job_data["company_name"]
            job_data["job_type"] = (
                ", ".join(job_type.value[0] for job_type in job_data["job_type"])
                if job_data["job_type"]
                else None
            )
            job_data["emails"] = (
                ", ".join(job_data["emails"]) if job_data["emails"] else None
            )
            if job_data["location"]:
                job_data["location"] = Location(
                    **job_data["location"]
                ).display_location()

            compensation_obj = job_data.get("compensation")
            if compensation_obj and isinstance(compensation_obj, dict):
                job_data["interval"] = (
                    compensation_obj.get("interval").value
                    if compensation_obj.get("interval")
                    else None
                )
                job_data["min_amount"] = compensation_obj.get("min_amount")
                job_data["max_amount"] = compensation_obj.get("max_amount")
                job_data["currency"] = compensation_obj.get("currency", "USD")
                job_data["salary_source"] = SalarySource.DIRECT_DATA.value
                if enforce_annual_salary and (
                    job_data["interval"]
                    and job_data["interval"] != "yearly"
                    and job_data["min_amount"]
                    and job_data["max_amount"]
                ):
                    convert_to_annual(job_data)
            else:
                if country_enum == Country.USA:
                    (
                        job_data["interval"],
                        job_data["min_amount"],
                        job_data["max_amount"],
                        job_data["currency"],
                    ) = extract_salary(
                        job_data["description"],
                        enforce_annual_salary=enforce_annual_salary,
                    )
                    job_data["salary_source"] = SalarySource.DESCRIPTION.value

            job_data["salary_source"] = (
                job_data["salary_source"]
                if "min_amount" in job_data and job_data["min_amount"]
                else None
            )
            job_df = pd.DataFrame([job_data])
            jobs_dfs.append(job_df)

    if jobs_dfs:
        # Step 1: Filter out all-NA columns from each DataFrame before concatenation
        filtered_dfs = [df.dropna(axis=1, how="all") for df in jobs_dfs]

        # Step 2: Concatenate the filtered DataFrames
        jobs_df = pd.concat(filtered_dfs, ignore_index=True)

        # Step 3: Ensure all desired columns are present, adding missing ones as empty
        for column in desired_order:
            if column not in jobs_df.columns:
                jobs_df[column] = None  # Add missing columns as empty

        # Reorder the DataFrame according to the desired order
        jobs_df = jobs_df[desired_order]

        # Step 4: Sort the DataFrame as required
        return jobs_df.sort_values(
            by=["site", "date_posted"], ascending=[True, False]
        ).reset_index(drop=True)
    else:
        return pd.DataFrame()
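Note how `get_site_type()` above accepts a plain string, a `Site` enum member, a mixed list, or `None` (meaning all sites). A brief sketch of equivalent call forms, assuming the package is installed as shown in the README:

```python
from jobspy import scrape_jobs
from jobspy.model import Site

# Equivalent per get_site_type(): strings and Site enum members are both normalized
df_a = scrape_jobs(site_name=["indeed", "glassdoor"], search_term="qa engineer",
                   country_indeed="USA", results_wanted=5)
df_b = scrape_jobs(site_name=[Site.INDEED, "glassdoor"], search_term="qa engineer",
                   country_indeed="USA", results_wanted=5)
print(df_a[["site", "title", "company"]].head())
```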
145 jobspy/bayt/__init__.py Normal file
@@ -0,0 +1,145 @@
from __future__ import annotations

import random
import time

from bs4 import BeautifulSoup

from jobspy.model import (
    Scraper,
    ScraperInput,
    Site,
    JobPost,
    JobResponse,
    Location,
    Country,
)
from jobspy.util import create_logger, create_session

log = create_logger("Bayt")


class BaytScraper(Scraper):
    base_url = "https://www.bayt.com"
    delay = 2
    band_delay = 3

    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        super().__init__(Site.BAYT, proxies=proxies, ca_cert=ca_cert)
        self.scraper_input = None
        self.session = None
        self.country = "worldwide"

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        self.scraper_input = scraper_input
        self.session = create_session(
            proxies=self.proxies, ca_cert=self.ca_cert, is_tls=False, has_retry=True
        )
        job_list: list[JobPost] = []
        page = 1
        results_wanted = (
            scraper_input.results_wanted if scraper_input.results_wanted else 10
        )

        while len(job_list) < results_wanted:
            log.info(f"Fetching Bayt jobs page {page}")
            job_elements = self._fetch_jobs(self.scraper_input.search_term, page)
            if not job_elements:
                break

            # job_elements is guaranteed non-empty here, so no second check is needed
            log.debug(
                "First job element snippet:\n" + job_elements[0].prettify()[:500]
            )

            initial_count = len(job_list)
            for job in job_elements:
                try:
                    job_post = self._extract_job_info(job)
                    if job_post:
                        job_list.append(job_post)
                        if len(job_list) >= results_wanted:
                            break
                    else:
                        log.debug(
                            "Extraction returned None. Job snippet:\n"
                            + job.prettify()[:500]
                        )
                except Exception as e:
                    log.error(f"Bayt: Error extracting job info: {str(e)}")
                    continue

            if len(job_list) == initial_count:
                log.info(f"No new jobs found on page {page}. Ending pagination.")
                break

            page += 1
            time.sleep(random.uniform(self.delay, self.delay + self.band_delay))

        job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list)

    def _fetch_jobs(self, query: str, page: int) -> list | None:
        """
        Grabs the job results for the given query and page number.
        """
        try:
            url = f"{self.base_url}/en/international/jobs/{query}-jobs/?page={page}"
            response = self.session.get(url)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")
            job_listings = soup.find_all("li", attrs={"data-js-job": ""})
            log.debug(f"Found {len(job_listings)} job listing elements")
            return job_listings
        except Exception as e:
            log.error(f"Bayt: Error fetching jobs - {str(e)}")
            return None

    def _extract_job_info(self, job: BeautifulSoup) -> JobPost | None:
        """
        Extracts the job information from a single job listing.
        """
        # Find the h2 element holding the title and link (no class filtering)
        job_general_information = job.find("h2")
        if not job_general_information:
            return
        job_title = job_general_information.get_text(strip=True)
        job_url = self._extract_job_url(job_general_information)
        if not job_url:
            return

        # Extract company name using the original approach:
        company_tag = job.find("div", class_="t-nowrap p10l")
        company_name = (
            company_tag.find("span").get_text(strip=True)
            if company_tag and company_tag.find("span")
            else None
        )

        # Extract location using the original approach:
        location_tag = job.find("div", class_="t-mute t-small")
        location = location_tag.get_text(strip=True) if location_tag else None

        job_id = f"bayt-{abs(hash(job_url))}"
        location_obj = Location(
            city=location,
            country=Country.from_string(self.country),
        )
        return JobPost(
            id=job_id,
            title=job_title,
            company_name=company_name,
            location=location_obj,
            job_url=job_url,
        )

    def _extract_job_url(self, job_general_information: BeautifulSoup) -> str | None:
        """
        Pulls the job URL from the 'a' tag within the h2 element.
        """
        a_tag = job_general_information.find("a")
        if a_tag and a_tag.has_attr("href"):
            return self.base_url + a_tag["href"].strip()
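Since this is a brand-new scraper, a minimal usage sketch may help reviewers; the ScraperInput field names used to construct the input (site_type in particular) are assumptions drawn from elsewhere in this changeset, not part of this diff:

# Hedged sketch, not part of the diff: exercising the new BaytScraper directly.
from jobspy.bayt import BaytScraper
from jobspy.model import ScraperInput, Site

scraper = BaytScraper()
scraper_input = ScraperInput(
    site_type=[Site.BAYT],   # assumed field name
    search_term="python",
    results_wanted=5,
)
response = scraper.scrape(scraper_input)
for job in response.jobs:
    print(job.title, "-", job.job_url)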
jobspy/exception.py
@@ -1,5 +1,5 @@
 """
-jobspy.scrapers.exceptions
+jobspy.jobboard.exceptions
 ~~~~~~~~~~~~~~~~~~~

 This module contains the set of Scrapers' exceptions.
@@ -24,3 +24,13 @@ class ZipRecruiterException(Exception):
 class GlassdoorException(Exception):
     def __init__(self, message=None):
         super().__init__(message or "An error occurred with Glassdoor")
+
+
+class GoogleJobsException(Exception):
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with Google Jobs")
+
+
+class BaytException(Exception):
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with Bayt")
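The defaulted messages make these exceptions safe to raise bare; a quick illustration (not part of the diff):

# Illustration only: the default message is used when none is supplied.
from jobspy.exception import BaytException

try:
    raise BaytException()
except BaytException as e:
    print(e)  # "An error occurred with Bayt"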
320 jobspy/glassdoor/__init__.py Normal file
@@ -0,0 +1,320 @@
from __future__ import annotations

import re
import json
import requests
from typing import Tuple
from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor, as_completed

from jobspy.glassdoor.constant import fallback_token, query_template, headers
from jobspy.glassdoor.util import (
    get_cursor_for_page,
    parse_compensation,
    parse_location,
)
from jobspy.util import (
    extract_emails_from_text,
    create_logger,
    create_session,
    markdown_converter,
)
from jobspy.exception import GlassdoorException
from jobspy.model import (
    JobPost,
    JobResponse,
    DescriptionFormat,
    Scraper,
    ScraperInput,
    Site,
)

log = create_logger("Glassdoor")


class Glassdoor(Scraper):
    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes GlassdoorScraper with the Glassdoor job search url
        """
        site = Site(Site.GLASSDOOR)
        super().__init__(site, proxies=proxies, ca_cert=ca_cert)

        self.base_url = None
        self.country = None
        self.session = None
        self.scraper_input = None
        self.jobs_per_page = 30
        self.max_pages = 30
        self.seen_urls = set()

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes Glassdoor for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        self.scraper_input = scraper_input
        self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)
        self.base_url = self.scraper_input.country.get_glassdoor_url()

        self.session = create_session(
            proxies=self.proxies, ca_cert=self.ca_cert, has_retry=True
        )
        token = self._get_csrf_token()
        headers["gd-csrf-token"] = token if token else fallback_token
        self.session.headers.update(headers)

        location_id, location_type = self._get_location(
            scraper_input.location, scraper_input.is_remote
        )
        if location_type is None:
            log.error("Glassdoor: location not parsed")
            return JobResponse(jobs=[])
        job_list: list[JobPost] = []
        cursor = None

        range_start = 1 + (scraper_input.offset // self.jobs_per_page)
        tot_pages = (scraper_input.results_wanted // self.jobs_per_page) + 2
        range_end = min(tot_pages, self.max_pages + 1)
        for page in range(range_start, range_end):
            log.info(f"search page: {page} / {range_end - 1}")
            try:
                jobs, cursor = self._fetch_jobs_page(
                    scraper_input, location_id, location_type, page, cursor
                )
                job_list.extend(jobs)
                if not jobs or len(job_list) >= scraper_input.results_wanted:
                    job_list = job_list[: scraper_input.results_wanted]
                    break
            except Exception as e:
                log.error(f"Glassdoor: {str(e)}")
                break
        return JobResponse(jobs=job_list)

    def _fetch_jobs_page(
        self,
        scraper_input: ScraperInput,
        location_id: int,
        location_type: str,
        page_num: int,
        cursor: str | None,
    ) -> Tuple[list[JobPost], str | None]:
        """
        Scrapes a page of Glassdoor for jobs with scraper_input criteria
        """
        jobs = []
        self.scraper_input = scraper_input
        try:
            payload = self._add_payload(location_id, location_type, page_num, cursor)
            response = self.session.post(
                f"{self.base_url}/graph",
                timeout_seconds=15,
                data=payload,
            )
            if response.status_code != 200:
                exc_msg = f"bad response status code: {response.status_code}"
                raise GlassdoorException(exc_msg)
            res_json = response.json()[0]
            if "errors" in res_json:
                raise ValueError("Error encountered in API response")
        except Exception as e:
            # covers requests.exceptions.ReadTimeout, GlassdoorException, ValueError
            log.error(f"Glassdoor: {str(e)}")
            return jobs, None

        jobs_data = res_json["data"]["jobListings"]["jobListings"]

        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
            future_to_job_data = {
                executor.submit(self._process_job, job): job for job in jobs_data
            }
            for future in as_completed(future_to_job_data):
                try:
                    job_post = future.result()
                    if job_post:
                        jobs.append(job_post)
                except Exception as exc:
                    raise GlassdoorException(f"Glassdoor generated an exception: {exc}")

        return jobs, get_cursor_for_page(
            res_json["data"]["jobListings"]["paginationCursors"], page_num + 1
        )

    def _get_csrf_token(self):
        """
        Fetches csrf token needed for API by visiting a generic page
        """
        res = self.session.get(f"{self.base_url}/Job/computer-science-jobs.htm")
        pattern = r'"token":\s*"([^"]+)"'
        matches = re.findall(pattern, res.text)
        token = None
        if matches:
            token = matches[0]
        return token

    def _process_job(self, job_data):
        """
        Processes a single job and fetches its description.
        """
        job_id = job_data["jobview"]["job"]["listingId"]
        job_url = f"{self.base_url}job-listing/j?jl={job_id}"
        if job_url in self.seen_urls:
            return None
        self.seen_urls.add(job_url)
        job = job_data["jobview"]
        title = job["job"]["jobTitleText"]
        company_name = job["header"]["employerNameFromSearch"]
        company_id = job_data["jobview"]["header"]["employer"]["id"]
        location_name = job["header"].get("locationName", "")
        location_type = job["header"].get("locationType", "")
        age_in_days = job["header"].get("ageInDays")
        is_remote, location = False, None
        # Only compute the posting date when the age is known; the original
        # called timedelta(days=None) before checking, which raises TypeError.
        date_posted = (
            (datetime.now() - timedelta(days=age_in_days)).date()
            if age_in_days is not None
            else None
        )

        if location_type == "S":
            is_remote = True
        else:
            location = parse_location(location_name)

        compensation = parse_compensation(job["header"])
        try:
            description = self._fetch_job_description(job_id)
        except Exception:
            description = None
        company_url = f"{self.base_url}Overview/W-EI_IE{company_id}.htm"
        company_logo = (
            job_data["jobview"].get("overview", {}).get("squareLogoUrl", None)
        )
        listing_type = (
            job_data["jobview"]
            .get("header", {})
            .get("adOrderSponsorshipLevel", "")
            .lower()
        )
        return JobPost(
            id=f"gd-{job_id}",
            title=title,
            company_url=company_url if company_id else None,
            company_name=company_name,
            date_posted=date_posted,
            job_url=job_url,
            location=location,
            compensation=compensation,
            is_remote=is_remote,
            description=description,
            emails=extract_emails_from_text(description) if description else None,
            company_logo=company_logo,
            listing_type=listing_type,
        )

    def _fetch_job_description(self, job_id):
        """
        Fetches the job description for a single job ID.
        """
        url = f"{self.base_url}/graph"
        body = [
            {
                "operationName": "JobDetailQuery",
                "variables": {
                    "jl": job_id,
                    "queryString": "q",
                    "pageTypeEnum": "SERP",
                },
                "query": """
                query JobDetailQuery($jl: Long!, $queryString: String, $pageTypeEnum: PageTypeEnum) {
                    jobview: jobView(
                        listingId: $jl
                        contextHolder: {queryString: $queryString, pageTypeEnum: $pageTypeEnum}
                    ) {
                        job {
                            description
                            __typename
                        }
                        __typename
                    }
                }
                """,
            }
        ]
        res = requests.post(url, json=body, headers=headers)
        if res.status_code != 200:
            return None
        data = res.json()[0]
        desc = data["data"]["jobview"]["job"]["description"]
        if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
            desc = markdown_converter(desc)
        return desc

    def _get_location(
        self, location: str, is_remote: bool
    ) -> Tuple[int | None, str | None]:
        if not location or is_remote:
            return "11047", "STATE"  # remote options
        url = f"{self.base_url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
        res = self.session.get(url)
        if res.status_code != 200:
            if res.status_code == 429:
                err = "429 Response - Blocked by Glassdoor for too many requests"
            else:
                err = f"Glassdoor response status code {res.status_code} - {res.text}"
            log.error(err)
            return None, None
        items = res.json()

        if not items:
            raise ValueError(f"Location '{location}' not found on Glassdoor")
        location_type = items[0]["locationType"]
        if location_type == "C":
            location_type = "CITY"
        elif location_type == "S":
            location_type = "STATE"
        elif location_type == "N":
            location_type = "COUNTRY"
        return int(items[0]["locationId"]), location_type

    def _add_payload(
        self,
        location_id: int,
        location_type: str,
        page_num: int,
        cursor: str | None = None,
    ) -> str:
        fromage = None
        if self.scraper_input.hours_old:
            fromage = max(self.scraper_input.hours_old // 24, 1)
        filter_params = []
        if self.scraper_input.easy_apply:
            filter_params.append({"filterKey": "applicationType", "values": "1"})
        if fromage:
            filter_params.append({"filterKey": "fromAge", "values": str(fromage)})
        payload = {
            "operationName": "JobSearchResultsQuery",
            "variables": {
                "excludeJobListingIds": [],
                "filterParams": filter_params,
                "keyword": self.scraper_input.search_term,
                "numJobsToShow": 30,
                "locationType": location_type,
                "locationId": int(location_id),
                "parameterUrlInput": f"IL.0,12_I{location_type}{location_id}",
                "pageNumber": page_num,
                "pageCursor": cursor,
                "fromage": fromage,
                "sort": "date",
            },
            "query": query_template,
        }
        if self.scraper_input.job_type:
            payload["variables"]["filterParams"].append(
                {"filterKey": "jobType", "values": self.scraper_input.job_type.value[0]}
            )
        return json.dumps([payload])
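The paging window computed near the top of scrape() packs a few decisions into three lines; a worked example with illustrative inputs:

# Illustrative only: the paging window from scrape() above.
jobs_per_page, max_pages = 30, 30
offset, results_wanted = 60, 100                   # example inputs
range_start = 1 + (offset // jobs_per_page)        # -> 3
tot_pages = (results_wanted // jobs_per_page) + 2  # -> 5
range_end = min(tot_pages, max_pages + 1)          # -> 5
print(list(range(range_start, range_end)))         # pages [3, 4]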
184 jobspy/glassdoor/constant.py Normal file
@@ -0,0 +1,184 @@
headers = {
    "authority": "www.glassdoor.com",
    "accept": "*/*",
    "accept-language": "en-US,en;q=0.9",
    "apollographql-client-name": "job-search-next",
    "apollographql-client-version": "4.65.5",
    "content-type": "application/json",
    "origin": "https://www.glassdoor.com",
    "referer": "https://www.glassdoor.com/",
    "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"macOS"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
}
query_template = """
query JobSearchResultsQuery(
    $excludeJobListingIds: [Long!],
    $keyword: String,
    $locationId: Int,
    $locationType: LocationTypeEnum,
    $numJobsToShow: Int!,
    $pageCursor: String,
    $pageNumber: Int,
    $filterParams: [FilterParams],
    $originalPageUrl: String,
    $seoFriendlyUrlInput: String,
    $parameterUrlInput: String,
    $seoUrl: Boolean
) {
    jobListings(
        contextHolder: {
            searchParams: {
                excludeJobListingIds: $excludeJobListingIds,
                keyword: $keyword,
                locationId: $locationId,
                locationType: $locationType,
                numPerPage: $numJobsToShow,
                pageCursor: $pageCursor,
                pageNumber: $pageNumber,
                filterParams: $filterParams,
                originalPageUrl: $originalPageUrl,
                seoFriendlyUrlInput: $seoFriendlyUrlInput,
                parameterUrlInput: $parameterUrlInput,
                seoUrl: $seoUrl,
                searchType: SR
            }
        }
    ) {
        companyFilterOptions {
            id
            shortName
            __typename
        }
        filterOptions
        indeedCtk
        jobListings {
            ...JobView
            __typename
        }
        jobListingSeoLinks {
            linkItems {
                position
                url
                __typename
            }
            __typename
        }
        jobSearchTrackingKey
        jobsPageSeoData {
            pageMetaDescription
            pageTitle
            __typename
        }
        paginationCursors {
            cursor
            pageNumber
            __typename
        }
        indexablePageForSeo
        searchResultsMetadata {
            searchCriteria {
                implicitLocation {
                    id
                    localizedDisplayName
                    type
                    __typename
                }
                keyword
                location {
                    id
                    shortName
                    localizedShortName
                    localizedDisplayName
                    type
                    __typename
                }
                __typename
            }
            helpCenterDomain
            helpCenterLocale
            jobSerpJobOutlook {
                occupation
                paragraph
                __typename
            }
            showMachineReadableJobs
            __typename
        }
        totalJobsCount
        __typename
    }
}

fragment JobView on JobListingSearchResult {
    jobview {
        header {
            adOrderId
            advertiserType
            adOrderSponsorshipLevel
            ageInDays
            divisionEmployerName
            easyApply
            employer {
                id
                name
                shortName
                __typename
            }
            employerNameFromSearch
            goc
            gocConfidence
            gocId
            jobCountryId
            jobLink
            jobResultTrackingKey
            jobTitleText
            locationName
            locationType
            locId
            needsCommission
            payCurrency
            payPeriod
            payPeriodAdjustedPay {
                p10
                p50
                p90
                __typename
            }
            rating
            salarySource
            savedJobId
            sponsored
            __typename
        }
        job {
            description
            importConfigId
            jobTitleId
            jobTitleText
            listingId
            __typename
        }
        jobListingAdminDetails {
            cpcVal
            importConfigId
            jobListingId
            jobSourceId
            userEligibleForAdminJobDetails
            __typename
        }
        overview {
            shortName
            squareLogoUrl
            __typename
        }
        __typename
    }
    __typename
}
"""
fallback_token = "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok"
42 jobspy/glassdoor/util.py Normal file
@@ -0,0 +1,42 @@
from jobspy.model import Compensation, CompensationInterval, Location, JobType


def parse_compensation(data: dict) -> Compensation | None:
    pay_period = data.get("payPeriod")
    adjusted_pay = data.get("payPeriodAdjustedPay")
    currency = data.get("payCurrency", "USD")
    if not pay_period or not adjusted_pay:
        return None

    interval = None
    if pay_period == "ANNUAL":
        interval = CompensationInterval.YEARLY
    elif pay_period:
        interval = CompensationInterval.get_interval(pay_period)
    min_amount = int(adjusted_pay.get("p10") // 1)
    max_amount = int(adjusted_pay.get("p90") // 1)
    return Compensation(
        interval=interval,
        min_amount=min_amount,
        max_amount=max_amount,
        currency=currency,
    )


def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
    for job_type in JobType:
        if job_type_str in job_type.value:
            return [job_type]


def parse_location(location_name: str) -> Location | None:
    if not location_name or location_name == "Remote":
        return
    city, _, state = location_name.partition(", ")
    return Location(city=city, state=state)


def get_cursor_for_page(pagination_cursors, page_num):
    for cursor_data in pagination_cursors:
        if cursor_data["pageNumber"] == page_num:
            return cursor_data["cursor"]
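For reference, what these helpers return on typical inputs (the values below are invented for illustration):

# Illustrative values only, showing the helpers above in action.
print(parse_location("Austin, TX"))   # Location with city='Austin', state='TX'
print(parse_location("Remote"))       # None

cursors = [{"pageNumber": 2, "cursor": "abc"}, {"pageNumber": 3, "cursor": "def"}]
print(get_cursor_for_page(cursors, 3))  # "def"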
202 jobspy/google/__init__.py Normal file
@@ -0,0 +1,202 @@
from __future__ import annotations

import math
import re
import json
from typing import Tuple
from datetime import datetime, timedelta

from jobspy.google.constant import headers_jobs, headers_initial, async_param
from jobspy.model import (
    Scraper,
    ScraperInput,
    Site,
    JobPost,
    JobResponse,
    Location,
    JobType,
)
from jobspy.util import extract_emails_from_text, extract_job_type, create_session
from jobspy.google.util import log, find_job_info_initial_page, find_job_info


class Google(Scraper):
    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes Google Scraper with the Google jobs search url
        """
        site = Site(Site.GOOGLE)
        super().__init__(site, proxies=proxies, ca_cert=ca_cert)

        self.country = None
        self.session = None
        self.scraper_input = None
        self.jobs_per_page = 10
        self.seen_urls = set()
        self.url = "https://www.google.com/search"
        self.jobs_url = "https://www.google.com/async/callback:550"

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes Google for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        self.scraper_input = scraper_input
        self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)

        self.session = create_session(
            proxies=self.proxies, ca_cert=self.ca_cert, is_tls=False, has_retry=True
        )
        forward_cursor, job_list = self._get_initial_cursor_and_jobs()
        if forward_cursor is None:
            log.warning(
                "initial cursor not found; try changing your query, or there were at most 10 results"
            )
            return JobResponse(jobs=job_list)

        page = 1

        while (
            len(self.seen_urls) < scraper_input.results_wanted + scraper_input.offset
            and forward_cursor
        ):
            log.info(
                f"search page: {page} / {math.ceil(scraper_input.results_wanted / self.jobs_per_page)}"
            )
            try:
                jobs, forward_cursor = self._get_jobs_next_page(forward_cursor)
            except Exception as e:
                log.error(f"failed to get jobs on page: {page}, {e}")
                break
            if not jobs:
                log.info(f"found no jobs on page: {page}")
                break
            job_list += jobs
            page += 1
        return JobResponse(
            jobs=job_list[
                scraper_input.offset : scraper_input.offset
                + scraper_input.results_wanted
            ]
        )

    def _get_initial_cursor_and_jobs(self) -> Tuple[str, list[JobPost]]:
        """Gets initial cursor and jobs to paginate through job listings"""
        query = f"{self.scraper_input.search_term} jobs"

        def get_time_range(hours_old):
            if hours_old <= 24:
                return "since yesterday"
            elif hours_old <= 72:
                return "in the last 3 days"
            elif hours_old <= 168:
                return "in the last week"
            else:
                return "in the last month"

        job_type_mapping = {
            JobType.FULL_TIME: "Full time",
            JobType.PART_TIME: "Part time",
            JobType.INTERNSHIP: "Internship",
            JobType.CONTRACT: "Contract",
        }

        if self.scraper_input.job_type in job_type_mapping:
            query += f" {job_type_mapping[self.scraper_input.job_type]}"

        if self.scraper_input.location:
            query += f" near {self.scraper_input.location}"

        if self.scraper_input.hours_old:
            time_filter = get_time_range(self.scraper_input.hours_old)
            query += f" {time_filter}"

        if self.scraper_input.is_remote:
            query += " remote"

        if self.scraper_input.google_search_term:
            query = self.scraper_input.google_search_term

        params = {"q": query, "udm": "8"}
        response = self.session.get(self.url, headers=headers_initial, params=params)

        pattern_fc = r'<div jsname="Yust4d"[^>]+data-async-fc="([^"]+)"'
        match_fc = re.search(pattern_fc, response.text)
        data_async_fc = match_fc.group(1) if match_fc else None
        jobs_raw = find_job_info_initial_page(response.text)
        jobs = []
        for job_raw in jobs_raw:
            job_post = self._parse_job(job_raw)
            if job_post:
                jobs.append(job_post)
        return data_async_fc, jobs

    def _get_jobs_next_page(self, forward_cursor: str) -> Tuple[list[JobPost], str]:
        params = {"fc": [forward_cursor], "fcv": ["3"], "async": [async_param]}
        response = self.session.get(self.jobs_url, headers=headers_jobs, params=params)
        return self._parse_jobs(response.text)

    def _parse_jobs(self, job_data: str) -> Tuple[list[JobPost], str]:
        """
        Parses jobs on a page with next page cursor
        """
        start_idx = job_data.find("[[[")
        end_idx = job_data.rindex("]]]") + 3
        s = job_data[start_idx:end_idx]
        parsed = json.loads(s)[0]

        pattern_fc = r'data-async-fc="([^"]+)"'
        match_fc = re.search(pattern_fc, job_data)
        data_async_fc = match_fc.group(1) if match_fc else None
        jobs_on_page = []
        for array in parsed:
            _, job_data = array
            if not job_data.startswith("[[["):
                continue
            job_d = json.loads(job_data)

            job_info = find_job_info(job_d)
            job_post = self._parse_job(job_info)
            if job_post:
                jobs_on_page.append(job_post)
        return jobs_on_page, data_async_fc

    def _parse_job(self, job_info: list):
        job_url = job_info[3][0][0] if job_info[3] and job_info[3][0] else None
        if job_url in self.seen_urls:
            return
        self.seen_urls.add(job_url)

        title = job_info[0]
        company_name = job_info[1]
        location = city = job_info[2]
        state = country = date_posted = None
        if location and "," in location:
            city, state, *country = [*map(lambda x: x.strip(), location.split(","))]

        days_ago_str = job_info[12]
        if isinstance(days_ago_str, str):
            match = re.search(r"\d+", days_ago_str)
            # Only compute a date when digits were found, so timedelta()
            # is never called with days=None.
            if match:
                days_ago = int(match.group())
                date_posted = (datetime.now() - timedelta(days=days_ago)).date()

        description = job_info[19]

        job_post = JobPost(
            id=f"go-{job_info[28]}",
            title=title,
            company_name=company_name,
            location=Location(
                city=city, state=state, country=country[0] if country else None
            ),
            job_url=job_url,
            date_posted=date_posted,
            is_remote="remote" in description.lower() or "wfh" in description.lower(),
            description=description,
            emails=extract_emails_from_text(description),
            job_type=extract_job_type(description),
        )
        return job_post
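As with the Bayt scraper, a hedged usage sketch for reviewers; the ScraperInput constructor fields are assumed from this changeset, not shown in this file:

# Sketch only, not part of the diff: driving the Google scraper directly.
from jobspy.google import Google
from jobspy.model import ScraperInput, Site

scraper = Google()
result = scraper.scrape(
    ScraperInput(
        site_type=[Site.GOOGLE],   # assumed field name
        search_term="data engineer",
        results_wanted=20,
    )
)
print(len(result.jobs))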
52 jobspy/google/constant.py Normal file
@@ -0,0 +1,52 @@
headers_initial = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "priority": "u=0, i",
    "referer": "https://www.google.com/",
    "sec-ch-prefers-color-scheme": "dark",
    "sec-ch-ua": '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
    "sec-ch-ua-arch": '"arm"',
    "sec-ch-ua-bitness": '"64"',
    "sec-ch-ua-form-factors": '"Desktop"',
    "sec-ch-ua-full-version": '"130.0.6723.58"',
    "sec-ch-ua-full-version-list": '"Chromium";v="130.0.6723.58", "Google Chrome";v="130.0.6723.58", "Not?A_Brand";v="99.0.0.0"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-model": '""',
    "sec-ch-ua-platform": '"macOS"',
    "sec-ch-ua-platform-version": '"15.0.1"',
    "sec-ch-ua-wow64": "?0",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
    "x-browser-channel": "stable",
    "x-browser-copyright": "Copyright 2024 Google LLC. All rights reserved.",
    "x-browser-year": "2024",
}

headers_jobs = {
    "accept": "*/*",
    "accept-language": "en-US,en;q=0.9",
    "priority": "u=1, i",
    "referer": "https://www.google.com/",
    "sec-ch-prefers-color-scheme": "dark",
    "sec-ch-ua": '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
    "sec-ch-ua-arch": '"arm"',
    "sec-ch-ua-bitness": '"64"',
    "sec-ch-ua-form-factors": '"Desktop"',
    "sec-ch-ua-full-version": '"130.0.6723.58"',
    "sec-ch-ua-full-version-list": '"Chromium";v="130.0.6723.58", "Google Chrome";v="130.0.6723.58", "Not?A_Brand";v="99.0.0.0"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-model": '""',
    "sec-ch-ua-platform": '"macOS"',
    "sec-ch-ua-platform-version": '"15.0.1"',
    "sec-ch-ua-wow64": "?0",
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
}

async_param = "_basejs:/xjs/_/js/k=xjs.s.en_US.JwveA-JiKmg.2018.O/am=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAACAAAoICAAAAAAAKMAfAAAAIAQAAAAAAAAAAAAACCAAAEJDAAACAAAAAGABAIAAARBAAABAAAAAgAgQAABAASKAfv8JAAABAAAAAAwAQAQACQAAAAAAcAEAQABoCAAAABAAAIABAACAAAAEAAAAFAAAAAAAAAAAAAAAAAAAAAAAAACAQADoBwAAAAAAAAAAAAAQBAAAAATQAAoACOAHAAAAAAAAAQAAAIIAAAA_ZAACAAAAAAAAcB8APB4wHFJ4AAAAAAAAAAAAAAAACECCYA5If0EACAAAAAAAAAAAAAAAAAAAUgRNXG4AMAE/dg=0/br=1/rs=ACT90oGxMeaFMCopIHq5tuQM-6_3M_VMjQ,_basecss:/xjs/_/ss/k=xjs.s.IwsGu62EDtU.L.B1.O/am=QOoQIAQAAAQAREADEBAAAAAAAAAAAAAAAAAAAAAgAQAAIAAAgAQAAAIAIAIAoEwCAADIC8AfsgEAawwAPkAAjgoAGAAAAAAAAEADAAAAAAIgAECHAAAAAAAAAAABAQAggAARQAAAQCEAAAAAIAAAABgAAAAAIAQIACCAAfB-AAFIQABoCEA_CgEAAIABAACEgHAEwwAEFQAM4CgAAAAAAAAAAAAACABCAAAAQEAAABAgAMCPAAA4AoE2BAEAggSAAIoAQAAAAAgAAAAACCAQAAAxEwA_ZAACAAAAAAAAAAkAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAQAEAAAAAAAAAAAAAAAAAAAAAQA/br=1/rs=ACT90oGZc36t3uUQkj0srnIvvbHjO2hgyg,_basecomb:/xjs/_/js/k=xjs.s.en_US.JwveA-JiKmg.2018.O/ck=xjs.s.IwsGu62EDtU.L.B1.O/am=QOoQIAQAAAQAREADEBAAAAAAAAAAAAAAAAAAAAAgAQAAIAAAgAQAAAKAIAoIqEwCAADIK8AfsgEAawwAPkAAjgoAGAAACCAAAEJDAAACAAIgAGCHAIAAARBAAABBAQAggAgRQABAQSOAfv8JIAABABgAAAwAYAQICSCAAfB-cAFIQABoCEA_ChEAAIABAACEgHAEwwAEFQAM4CgAAAAAAAAAAAAACABCAACAQEDoBxAgAMCPAAA4AoE2BAEAggTQAIoASOAHAAgAAAAACSAQAIIxEwA_ZAACAAAAAAAAcB8APB4wHFJ4AAAAAAAAAAAAAAAACECCYA5If0EACAAAAAAAAAAAAAAAAAAAUgRNXG4AMAE/d=1/ed=1/dg=0/br=1/ujg=1/rs=ACT90oFNLTjPzD_OAqhhtXwe2pg1T3WpBg,_fmt:prog,_id:fc_5FwaZ86OKsfdwN4P4La3yA4_2"
41 jobspy/google/util.py Normal file
@@ -0,0 +1,41 @@
import json
import re

from jobspy.util import create_logger

log = create_logger("Google")


def find_job_info(jobs_data: list | dict) -> list | None:
    """Iterates through the JSON data to find the job listings"""
    if isinstance(jobs_data, dict):
        for key, value in jobs_data.items():
            if key == "520084652" and isinstance(value, list):
                return value
            else:
                result = find_job_info(value)
                if result:
                    return result
    elif isinstance(jobs_data, list):
        for item in jobs_data:
            result = find_job_info(item)
            if result:
                return result
    return None


def find_job_info_initial_page(html_text: str):
    # Plain concatenation; the first half was a placeholder-free f-string.
    pattern = '520084652":(' + r"\[.*?\]\s*])\s*}\s*]\s*]\s*]\s*]\s*]"
    results = []
    matches = re.finditer(pattern, html_text)

    for match in matches:
        try:
            parsed_data = json.loads(match.group(1))
            results.append(parsed_data)
        except json.JSONDecodeError as e:
            log.error(f"Failed to parse match: {str(e)}")
            results.append({"raw_match": match.group(0), "error": str(e)})
    return results
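find_job_info() walks arbitrarily nested JSON until it reaches the "520084652" key that Google uses for job payloads; a small self-contained demonstration (the nested structure is invented):

# Illustrative only: recursive search through nested dicts/lists.
nested = {"a": [{"b": {"520084652": [["Engineer", "Acme", "Austin, TX"]]}}]}
print(find_job_info(nested))  # [["Engineer", "Acme", "Austin, TX"]]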
262 jobspy/indeed/__init__.py Normal file
@@ -0,0 +1,262 @@
from __future__ import annotations

import math
from datetime import datetime
from typing import Tuple

from jobspy.indeed.constant import job_search_query, api_headers
from jobspy.indeed.util import is_job_remote, get_compensation, get_job_type
from jobspy.model import (
    Scraper,
    ScraperInput,
    Site,
    JobPost,
    Location,
    JobResponse,
    JobType,
    DescriptionFormat,
)
from jobspy.util import (
    extract_emails_from_text,
    markdown_converter,
    create_session,
    create_logger,
)

log = create_logger("Indeed")


class Indeed(Scraper):
    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes IndeedScraper with the Indeed API url
        """
        super().__init__(Site.INDEED, proxies=proxies)

        self.session = create_session(
            proxies=self.proxies, ca_cert=ca_cert, is_tls=False
        )
        self.scraper_input = None
        self.jobs_per_page = 100
        self.num_workers = 10
        self.seen_urls = set()
        self.headers = None
        self.api_country_code = None
        self.base_url = None
        self.api_url = "https://apis.indeed.com/graphql"

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes Indeed for jobs with scraper_input criteria
        :param scraper_input:
        :return: job_response
        """
        self.scraper_input = scraper_input
        domain, self.api_country_code = self.scraper_input.country.indeed_domain_value
        self.base_url = f"https://{domain}.indeed.com"
        self.headers = api_headers.copy()
        self.headers["indeed-co"] = self.scraper_input.country.indeed_domain_value
        job_list = []
        page = 1

        cursor = None

        while len(self.seen_urls) < scraper_input.results_wanted + scraper_input.offset:
            log.info(
                f"search page: {page} / {math.ceil(scraper_input.results_wanted / self.jobs_per_page)}"
            )
            jobs, cursor = self._scrape_page(cursor)
            if not jobs:
                log.info(f"found no jobs on page: {page}")
                break
            job_list += jobs
            page += 1
        return JobResponse(
            jobs=job_list[
                scraper_input.offset : scraper_input.offset
                + scraper_input.results_wanted
            ]
        )

    def _scrape_page(self, cursor: str | None) -> Tuple[list[JobPost], str | None]:
        """
        Scrapes a page of Indeed for jobs with scraper_input criteria
        :param cursor:
        :return: jobs found on page, next page cursor
        """
        jobs = []
        new_cursor = None
        filters = self._build_filters()
        search_term = (
            self.scraper_input.search_term.replace('"', '\\"')
            if self.scraper_input.search_term
            else ""
        )
        query = job_search_query.format(
            what=(f'what: "{search_term}"' if search_term else ""),
            location=(
                f'location: {{where: "{self.scraper_input.location}", radius: {self.scraper_input.distance}, radiusUnit: MILES}}'
                if self.scraper_input.location
                else ""
            ),
            dateOnIndeed=self.scraper_input.hours_old,
            cursor=f'cursor: "{cursor}"' if cursor else "",
            filters=filters,
        )
        payload = {
            "query": query,
        }
        api_headers_temp = api_headers.copy()
        api_headers_temp["indeed-co"] = self.api_country_code
        response = self.session.post(
            self.api_url,
            headers=api_headers_temp,
            json=payload,
            timeout=10,
            verify=False,
        )
        if not response.ok:
            log.info(
                f"responded with status code: {response.status_code} (submit GitHub issue if this appears to be a bug)"
            )
            return jobs, new_cursor
        data = response.json()
        jobs = data["data"]["jobSearch"]["results"]
        new_cursor = data["data"]["jobSearch"]["pageInfo"]["nextCursor"]

        job_list = []
        for job in jobs:
            processed_job = self._process_job(job["job"])
            if processed_job:
                job_list.append(processed_job)

        return job_list, new_cursor

    def _build_filters(self):
        """
        Builds the filters dict for job type/is_remote. If hours_old is provided,
        a composite filter for job_type/is_remote is not possible.
        IndeedApply: filters: { keyword: { field: "indeedApplyScope", keys: ["DESKTOP"] } }
        """
        filters_str = ""
        if self.scraper_input.hours_old:
            filters_str = """
            filters: {{
                date: {{
                  field: "dateOnIndeed",
                  start: "{start}h"
                }}
            }}
            """.format(
                start=self.scraper_input.hours_old
            )
        elif self.scraper_input.easy_apply:
            filters_str = """
            filters: {
                keyword: {
                  field: "indeedApplyScope",
                  keys: ["DESKTOP"]
                }
            }
            """
        elif self.scraper_input.job_type or self.scraper_input.is_remote:
            job_type_key_mapping = {
                JobType.FULL_TIME: "CF3CP",
                JobType.PART_TIME: "75GKK",
                JobType.CONTRACT: "NJXCK",
                JobType.INTERNSHIP: "VDTG7",
            }

            keys = []
            if self.scraper_input.job_type:
                key = job_type_key_mapping[self.scraper_input.job_type]
                keys.append(key)

            if self.scraper_input.is_remote:
                keys.append("DSQF7")

            if keys:
                keys_str = '", "'.join(keys)
                filters_str = f"""
                filters: {{
                  composite: {{
                    filters: [{{
                      keyword: {{
                        field: "attributes",
                        keys: ["{keys_str}"]
                      }}
                    }}]
                  }}
                }}
                """
        return filters_str

    def _process_job(self, job: dict) -> JobPost | None:
        """
        Parses the job dict into JobPost model
        :param job: dict to parse
        :return: JobPost if it's a new job
        """
        job_url = f'{self.base_url}/viewjob?jk={job["key"]}'
        if job_url in self.seen_urls:
            return
        self.seen_urls.add(job_url)
        description = job["description"]["html"]
        if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
            description = markdown_converter(description)
        description = description.replace(",", "")

        job_type = get_job_type(job["attributes"])
        timestamp_seconds = job["datePublished"] / 1000
        date_posted = datetime.fromtimestamp(timestamp_seconds).strftime("%Y-%m-%d")
        employer = job["employer"].get("dossier") if job["employer"] else None
        employer_details = employer.get("employerDetails", {}) if employer else {}
        rel_url = job["employer"]["relativeCompanyPageUrl"] if job["employer"] else None
        return JobPost(
            id=f'in-{job["key"]}',
            title=job["title"],
            description=description,
            company_name=job["employer"].get("name") if job.get("employer") else None,
            company_url=(f"{self.base_url}{rel_url}" if job["employer"] else None),
            company_url_direct=(
                employer["links"]["corporateWebsite"] if employer else None
            ),
            location=Location(
                city=job.get("location", {}).get("city"),
                state=job.get("location", {}).get("admin1Code"),
                country=job.get("location", {}).get("countryCode"),
            ),
            job_type=job_type,
            compensation=get_compensation(job["compensation"]),
            date_posted=date_posted,
            job_url=job_url,
            job_url_direct=(
                job["recruit"].get("viewJobUrl") if job.get("recruit") else None
            ),
            emails=extract_emails_from_text(description) if description else None,
            is_remote=is_job_remote(job, description),
            company_addresses=(
                employer_details["addresses"][0]
                if employer_details.get("addresses")
                else None
            ),
            company_industry=(
                employer_details["industry"]
                .replace("Iv1", "")
                .replace("_", " ")
                .title()
                .strip()
                if employer_details.get("industry")
                else None
            ),
            company_num_employees=employer_details.get("employeesLocalizedLabel"),
            company_revenue=employer_details.get("revenueLocalizedLabel"),
            company_description=employer_details.get("briefDescription"),
            company_logo=(
                employer["images"].get("squareLogoUrl")
                if employer and employer.get("images")
                else None
            ),
        )
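Note the precedence in _build_filters(): hours_old wins over easy_apply, which wins over job_type/is_remote, so exactly one filter block is emitted. A minimal sketch that exercises just that branch logic (the fake input object stands in for ScraperInput and is not part of the diff):

# Sketch only: demonstrates _build_filters() precedence with a stand-in input.
class _FakeInput:
    hours_old = 24       # set, so the date filter wins
    easy_apply = True    # ignored while hours_old is set
    job_type = None
    is_remote = True     # ignored while hours_old is set

scraper = Indeed.__new__(Indeed)   # skip __init__ for this illustration
scraper.scraper_input = _FakeInput()
print(scraper._build_filters())    # date filter with start: "24h"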
109 jobspy/indeed/constant.py Normal file
@@ -0,0 +1,109 @@
job_search_query = """
query GetJobData {{
  jobSearch(
    {what}
    {location}
    limit: 100
    {cursor}
    sort: RELEVANCE
    {filters}
  ) {{
    pageInfo {{
      nextCursor
    }}
    results {{
      trackingKey
      job {{
        source {{
          name
        }}
        key
        title
        datePublished
        dateOnIndeed
        description {{
          html
        }}
        location {{
          countryName
          countryCode
          admin1Code
          city
          postalCode
          streetAddress
          formatted {{
            short
            long
          }}
        }}
        compensation {{
          estimated {{
            currencyCode
            baseSalary {{
              unitOfWork
              range {{
                ... on Range {{
                  min
                  max
                }}
              }}
            }}
          }}
          baseSalary {{
            unitOfWork
            range {{
              ... on Range {{
                min
                max
              }}
            }}
          }}
          currencyCode
        }}
        attributes {{
          key
          label
        }}
        employer {{
          relativeCompanyPageUrl
          name
          dossier {{
            employerDetails {{
              addresses
              industry
              employeesLocalizedLabel
              revenueLocalizedLabel
              briefDescription
              ceoName
              ceoPhotoUrl
            }}
            images {{
              headerImageUrl
              squareLogoUrl
            }}
            links {{
              corporateWebsite
            }}
          }}
        }}
        recruit {{
          viewJobUrl
          detailedSalary
          workSchedule
        }}
      }}
    }}
  }}
}}
"""

api_headers = {
    "Host": "apis.indeed.com",
    "content-type": "application/json",
    "indeed-api-key": "161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8",
    "accept": "application/json",
    "indeed-locale": "en-US",
    "accept-language": "en-US,en;q=0.9",
    "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1",
    "indeed-app-info": "appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone",
}
80 jobspy/indeed/util.py Normal file
@@ -0,0 +1,80 @@
from jobspy.model import CompensationInterval, JobType, Compensation
from jobspy.util import get_enum_from_job_type


def get_job_type(attributes: list) -> list[JobType]:
    """
    Parses the attributes to get list of job types
    :param attributes:
    :return: list of JobType
    """
    job_types: list[JobType] = []
    for attribute in attributes:
        job_type_str = attribute["label"].replace("-", "").replace(" ", "").lower()
        job_type = get_enum_from_job_type(job_type_str)
        if job_type:
            job_types.append(job_type)
    return job_types


def get_compensation(compensation: dict) -> Compensation | None:
    """
    Parses the job to get compensation
    :param compensation:
    :return: compensation object
    """
    if not compensation["baseSalary"] and not compensation["estimated"]:
        return None
    comp = (
        compensation["baseSalary"]
        if compensation["baseSalary"]
        else compensation["estimated"]["baseSalary"]
    )
    if not comp:
        return None
    interval = get_compensation_interval(comp["unitOfWork"])
    if not interval:
        return None
    min_range = comp["range"].get("min")
    max_range = comp["range"].get("max")
    return Compensation(
        interval=interval,
        min_amount=int(min_range) if min_range is not None else None,
        max_amount=int(max_range) if max_range is not None else None,
        currency=(
            compensation["estimated"]["currencyCode"]
            if compensation["estimated"]
            else compensation["currencyCode"]
        ),
    )


def is_job_remote(job: dict, description: str) -> bool:
    """
    Searches the attributes and location to check if job is remote
    """
    remote_keywords = ["remote", "work from home", "wfh"]
    is_remote_in_attributes = any(
        any(keyword in attr["label"].lower() for keyword in remote_keywords)
        for attr in job["attributes"]
    )
    is_remote_in_location = any(
        keyword in job["location"]["formatted"]["long"].lower()
        for keyword in remote_keywords
    )
    return is_remote_in_attributes or is_remote_in_location


def get_compensation_interval(interval: str) -> CompensationInterval:
    interval_mapping = {
        "DAY": "DAILY",
        "YEAR": "YEARLY",
        "HOUR": "HOURLY",
        "WEEK": "WEEKLY",
        "MONTH": "MONTHLY",
    }
    mapped_interval = interval_mapping.get(interval.upper(), None)
    if mapped_interval and mapped_interval in CompensationInterval.__members__:
        return CompensationInterval[mapped_interval]
    else:
        raise ValueError(f"Unsupported interval: {interval}")
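A quick illustration of get_compensation() on a hand-built payload whose shape follows the GraphQL query in constant.py (the numbers are invented):

# Illustrative payload only; field shape mirrors the jobSearch query above.
payload = {
    "baseSalary": {"unitOfWork": "YEAR", "range": {"min": 90000, "max": 120000}},
    "estimated": None,
    "currencyCode": "USD",
}
comp = get_compensation(payload)
print(comp.interval, comp.min_amount, comp.max_amount)  # YEARLY 90000 120000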
337 jobspy/linkedin/__init__.py Normal file
@@ -0,0 +1,337 @@
from __future__ import annotations

import math
import random
import time
from datetime import datetime
from typing import Optional
from urllib.parse import urlparse, urlunparse, unquote

import regex as re
from bs4 import BeautifulSoup
from bs4.element import Tag

from jobspy.exception import LinkedInException
from jobspy.linkedin.constant import headers
from jobspy.linkedin.util import (
    job_type_code,
    parse_job_type,
    parse_job_level,
    parse_company_industry,
)
from jobspy.model import (
    JobPost,
    Location,
    JobResponse,
    Country,
    Compensation,
    DescriptionFormat,
    Scraper,
    ScraperInput,
    Site,
)
from jobspy.util import (
    extract_emails_from_text,
    currency_parser,
    markdown_converter,
    create_session,
    remove_attributes,
    create_logger,
)

log = create_logger("LinkedIn")


class LinkedIn(Scraper):
    base_url = "https://www.linkedin.com"
    delay = 3
    band_delay = 4
    jobs_per_page = 25

    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes LinkedInScraper with the LinkedIn job search url
        """
        super().__init__(Site.LINKEDIN, proxies=proxies, ca_cert=ca_cert)
        self.session = create_session(
            proxies=self.proxies,
            ca_cert=ca_cert,
            is_tls=False,
            has_retry=True,
            delay=5,
            clear_cookies=True,
        )
        self.session.headers.update(headers)
        self.scraper_input = None
        self.country = "worldwide"
        self.job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes LinkedIn for jobs with scraper_input criteria
        :param scraper_input:
        :return: job_response
        """
        self.scraper_input = scraper_input
        job_list: list[JobPost] = []
        seen_ids = set()
        start = scraper_input.offset // 10 * 10 if scraper_input.offset else 0
        request_count = 0
        seconds_old = (
            scraper_input.hours_old * 3600 if scraper_input.hours_old else None
        )
        continue_search = (
            lambda: len(job_list) < scraper_input.results_wanted and start < 1000
        )
        while continue_search():
            request_count += 1
            log.info(
                f"search page: {request_count} / {math.ceil(scraper_input.results_wanted / 10)}"
            )
            params = {
                "keywords": scraper_input.search_term,
                "location": scraper_input.location,
                "distance": scraper_input.distance,
                "f_WT": 2 if scraper_input.is_remote else None,
                "f_JT": (
                    job_type_code(scraper_input.job_type)
                    if scraper_input.job_type
                    else None
                ),
                "pageNum": 0,
                "start": start,
                "f_AL": "true" if scraper_input.easy_apply else None,
                "f_C": (
                    ",".join(map(str, scraper_input.linkedin_company_ids))
                    if scraper_input.linkedin_company_ids
                    else None
                ),
            }
            if seconds_old is not None:
                params["f_TPR"] = f"r{seconds_old}"

            params = {k: v for k, v in params.items() if v is not None}
            try:
                response = self.session.get(
                    f"{self.base_url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
                    params=params,
                    timeout=10,
                )
                if response.status_code not in range(200, 400):
                    if response.status_code == 429:
                        err = "429 Response - Blocked by LinkedIn for too many requests"
                    else:
                        err = f"LinkedIn response status code {response.status_code}"
                        err += f" - {response.text}"
                    log.error(err)
                    return JobResponse(jobs=job_list)
            except Exception as e:
                if "Proxy responded with" in str(e):
                    log.error("LinkedIn: Bad proxy")
                else:
                    log.error(f"LinkedIn: {str(e)}")
                return JobResponse(jobs=job_list)

            soup = BeautifulSoup(response.text, "html.parser")
            job_cards = soup.find_all("div", class_="base-search-card")
            if len(job_cards) == 0:
                return JobResponse(jobs=job_list)

            for job_card in job_cards:
                href_tag = job_card.find("a", class_="base-card__full-link")
                if href_tag and "href" in href_tag.attrs:
                    href = href_tag.attrs["href"].split("?")[0]
                    job_id = href.split("-")[-1]

                    if job_id in seen_ids:
                        continue
                    seen_ids.add(job_id)

                    try:
                        fetch_desc = scraper_input.linkedin_fetch_description
                        job_post = self._process_job(job_card, job_id, fetch_desc)
                        if job_post:
                            job_list.append(job_post)
                        if not continue_search():
                            break
                    except Exception as e:
                        raise LinkedInException(str(e))

            if continue_search():
                time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
                start += len(job_list)

        job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list)

    def _process_job(
        self, job_card: Tag, job_id: str, full_descr: bool
    ) -> Optional[JobPost]:
        salary_tag = job_card.find("span", class_="job-search-card__salary-info")

        compensation = None
        if salary_tag:
            salary_text = salary_tag.get_text(separator=" ").strip()
            salary_values = [currency_parser(value) for value in salary_text.split("-")]
            salary_min = salary_values[0]
            salary_max = salary_values[1]
            currency = salary_text[0] if salary_text[0] != "$" else "USD"

            compensation = Compensation(
                min_amount=int(salary_min),
                max_amount=int(salary_max),
                currency=currency,
            )

        title_tag = job_card.find("span", class_="sr-only")
        title = title_tag.get_text(strip=True) if title_tag else "N/A"

        company_tag = job_card.find("h4", class_="base-search-card__subtitle")
        company_a_tag = company_tag.find("a") if company_tag else None
        company_url = (
            urlunparse(urlparse(company_a_tag.get("href"))._replace(query=""))
            if company_a_tag and company_a_tag.has_attr("href")
            else ""
        )
        company = company_a_tag.get_text(strip=True) if company_a_tag else "N/A"

        metadata_card = job_card.find("div", class_="base-search-card__metadata")
        location = self._get_location(metadata_card)

        datetime_tag = (
            metadata_card.find("time", class_="job-search-card__listdate")
            if metadata_card
            else None
        )
        date_posted = None
        if datetime_tag and "datetime" in datetime_tag.attrs:
            datetime_str = datetime_tag["datetime"]
            try:
                date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
            except ValueError:
                date_posted = None
        job_details = {}
        if full_descr:
            job_details = self._get_job_details(job_id)
            # Strip commas from the fetched description (mirrors the Indeed
            # scraper); the original referenced an undefined `description` here.
            if job_details.get("description"):
                job_details["description"] = job_details["description"].replace(",", "")

        return JobPost(
            id=f"li-{job_id}",
            title=title,
            company_name=company,
            company_url=company_url,
            location=location,
            date_posted=date_posted,
            job_url=f"{self.base_url}/jobs/view/{job_id}",
            compensation=compensation,
            job_type=job_details.get("job_type"),
            job_level=(job_details.get("job_level") or "").lower(),
            company_industry=job_details.get("company_industry"),
            description=job_details.get("description"),
            job_url_direct=job_details.get("job_url_direct"),
            emails=extract_emails_from_text(job_details.get("description")),
            company_logo=job_details.get("company_logo"),
            job_function=job_details.get("job_function"),
        )

    def _get_job_details(self, job_id: str) -> dict:
        """
        Retrieves job description and other job details by going to the job page url
        :param job_id:
        :return: dict
        """
        try:
            response = self.session.get(
                f"{self.base_url}/jobs/view/{job_id}", timeout=5
            )
            response.raise_for_status()
        except Exception:
            return {}
        if "linkedin.com/signup" in response.url:
            return {}

        soup = BeautifulSoup(response.text, "html.parser")
        div_content = soup.find(
            "div", class_=lambda x: x and "show-more-less-html__markup" in x
        )
        description = None
        if div_content is not None:
            div_content = remove_attributes(div_content)
            description = div_content.prettify(formatter="html")
            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
                description = markdown_converter(description)

        h3_tag = soup.find(
            "h3", text=lambda text: text and "Job function" in text.strip()
        )

        job_function = None
        if h3_tag:
            job_function_span = h3_tag.find_next(
                "span", class_="description__job-criteria-text"
            )
            if job_function_span:
                job_function = job_function_span.text.strip()

        company_logo = (
            logo_image.get("data-delayed-url")
            if (logo_image := soup.find("img", {"class": "artdeco-entity-image"}))
            else None
        )
        return {
            "description": description,
            "job_level": parse_job_level(soup),
            "company_industry": parse_company_industry(soup),
            "job_type": parse_job_type(soup),
            "job_url_direct": self._parse_job_url_direct(soup),
            "company_logo": company_logo,
            "job_function": job_function,
        }

    def _get_location(self, metadata_card: Optional[Tag]) -> Location:
        """
        Extracts the location data from the job metadata card.
        :param metadata_card
        :return: location
        """
        location = Location(country=Country.from_string(self.country))
        if metadata_card is not None:
            location_tag = metadata_card.find(
                "span", class_="job-search-card__location"
            )
            location_string = location_tag.text.strip() if location_tag else "N/A"
            parts = location_string.split(", ")
            if len(parts) == 2:
                city, state = parts
                location = Location(
                    city=city,
                    state=state,
                    country=Country.from_string(self.country),
                )
            elif len(parts) == 3:
                city, state, country = parts
                country = Country.from_string(country)
                location = Location(city=city, state=state, country=country)
        return location

    def _parse_job_url_direct(self, soup: BeautifulSoup) -> str | None:
        """
        Gets the job url direct from job page
        :param soup:
        :return: str
        """
        job_url_direct = None
        job_url_direct_content = soup.find("code", id="applyUrl")
        if job_url_direct_content:
            job_url_direct_match = self.job_url_direct_regex.search(
                job_url_direct_content.decode_contents().strip()
            )
            if job_url_direct_match:
                job_url_direct = unquote(job_url_direct_match.group())

        return job_url_direct

jobspy/linkedin/constant.py (new file, 8 lines)
@@ -0,0 +1,8 @@
headers = {
    "authority": "www.linkedin.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "cache-control": "max-age=0",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
}

jobspy/linkedin/util.py (new file, 85 lines)
@@ -0,0 +1,85 @@
from bs4 import BeautifulSoup

from jobspy.model import JobType
from jobspy.util import get_enum_from_job_type


def job_type_code(job_type_enum: JobType) -> str:
    return {
        JobType.FULL_TIME: "F",
        JobType.PART_TIME: "P",
        JobType.INTERNSHIP: "I",
        JobType.CONTRACT: "C",
        JobType.TEMPORARY: "T",
    }.get(job_type_enum, "")

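Each member maps to LinkedIn's single-letter job-type filter code; anything unmapped falls back to an empty string. A quick sanity check (illustrative):

    from jobspy.model import JobType
    from jobspy.linkedin.util import job_type_code

    print(job_type_code(JobType.FULL_TIME))  # "F"
    print(job_type_code(JobType.TEMPORARY))  # "T"
    print(job_type_code(None))               # "" (no match falls back to the default)
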

def parse_job_type(soup_job_type: BeautifulSoup) -> list[JobType] | None:
    """
    Gets the job type from the job page
    :param soup_job_type:
    :return: list[JobType] | None
    """
    h3_tag = soup_job_type.find(
        "h3",
        class_="description__job-criteria-subheader",
        string=lambda text: "Employment type" in text,
    )
    employment_type = None
    if h3_tag:
        employment_type_span = h3_tag.find_next_sibling(
            "span",
            class_="description__job-criteria-text description__job-criteria-text--criteria",
        )
        if employment_type_span:
            employment_type = employment_type_span.get_text(strip=True)
            employment_type = employment_type.lower()
            employment_type = employment_type.replace("-", "")

    return [get_enum_from_job_type(employment_type)] if employment_type else []


def parse_job_level(soup_job_level: BeautifulSoup) -> str | None:
    """
    Gets the job level from job page
    :param soup_job_level:
    :return: str
    """
    h3_tag = soup_job_level.find(
        "h3",
        class_="description__job-criteria-subheader",
        string=lambda text: "Seniority level" in text,
    )
    job_level = None
    if h3_tag:
        job_level_span = h3_tag.find_next_sibling(
            "span",
            class_="description__job-criteria-text description__job-criteria-text--criteria",
        )
        if job_level_span:
            job_level = job_level_span.get_text(strip=True)

    return job_level


def parse_company_industry(soup_industry: BeautifulSoup) -> str | None:
    """
    Gets the company industry from job page
    :param soup_industry:
    :return: str
    """
    h3_tag = soup_industry.find(
        "h3",
        class_="description__job-criteria-subheader",
        string=lambda text: "Industries" in text,
    )
    industry = None
    if h3_tag:
        industry_span = h3_tag.find_next_sibling(
            "span",
            class_="description__job-criteria-text description__job-criteria-text--criteria",
        )
        if industry_span:
            industry = industry_span.get_text(strip=True)

    return industry

jobspy/model.py
@@ -1,3 +1,6 @@
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
 from typing import Optional
 from datetime import date
 from enum import Enum
@@ -57,7 +60,7 @@ class JobType(Enum):
class Country(Enum):
    """
    Gets the subdomain for Indeed and Glassdoor.
-    The second item in the tuple is the subdomain for Indeed
+    The second item in the tuple is the subdomain (and API country code if there's a ':' separator) for Indeed
    The third item in the tuple is the subdomain (and tld if there's a ':' separator) for Glassdoor
    """

@@ -66,16 +69,20 @@
    AUSTRIA = ("austria", "at", "at")
    BAHRAIN = ("bahrain", "bh")
    BELGIUM = ("belgium", "be", "fr:be")
    BULGARIA = ("bulgaria", "bg")
    BRAZIL = ("brazil", "br", "com.br")
    CANADA = ("canada", "ca", "ca")
    CHILE = ("chile", "cl")
    CHINA = ("china", "cn")
    COLOMBIA = ("colombia", "co")
    COSTARICA = ("costa rica", "cr")
    CROATIA = ("croatia", "hr")
    CYPRUS = ("cyprus", "cy")
    CZECHREPUBLIC = ("czech republic,czechia", "cz")
    DENMARK = ("denmark", "dk")
    ECUADOR = ("ecuador", "ec")
    EGYPT = ("egypt", "eg")
    ESTONIA = ("estonia", "ee")
    FINLAND = ("finland", "fi")
    FRANCE = ("france", "fr", "fr")
    GERMANY = ("germany", "de", "de")
@@ -89,8 +96,11 @@
    ITALY = ("italy", "it", "it")
    JAPAN = ("japan", "jp")
    KUWAIT = ("kuwait", "kw")
+    LATVIA = ("latvia", "lv")
+    LITHUANIA = ("lithuania", "lt")
    LUXEMBOURG = ("luxembourg", "lu")
-    MALAYSIA = ("malaysia", "malaysia")
+    MALAYSIA = ("malaysia", "malaysia:my", "com")
+    MALTA = ("malta", "malta:mt", "mt")
    MEXICO = ("mexico", "mx", "com.mx")
    MOROCCO = ("morocco", "ma")
    NETHERLANDS = ("netherlands", "nl", "nl")
@@ -108,6 +118,8 @@
    ROMANIA = ("romania", "ro")
    SAUDIARABIA = ("saudi arabia", "sa")
    SINGAPORE = ("singapore", "sg", "sg")
    SLOVAKIA = ("slovakia", "sk")
    SLOVENIA = ("slovenia", "sl")
    SOUTHAFRICA = ("south africa", "za")
    SOUTHKOREA = ("south korea", "kr")
    SPAIN = ("spain", "es", "es")
@@ -115,14 +127,14 @@
    SWITZERLAND = ("switzerland", "ch", "de:ch")
    TAIWAN = ("taiwan", "tw")
    THAILAND = ("thailand", "th")
-    TURKEY = ("turkey", "tr")
+    TURKEY = ("türkiye,turkey", "tr")
    UKRAINE = ("ukraine", "ua")
    UNITEDARABEMIRATES = ("united arab emirates", "ae")
-    UK = ("uk,united kingdom", "uk", "co.uk")
-    USA = ("usa,us,united states", "www", "com")
+    UK = ("uk,united kingdom", "uk:gb", "co.uk")
+    USA = ("usa,us,united states", "www:us", "com")
    URUGUAY = ("uruguay", "uy")
    VENEZUELA = ("venezuela", "ve")
-    VIETNAM = ("vietnam", "vn")
+    VIETNAM = ("vietnam", "vn", "com")

    # internal for ziprecruiter
    US_CANADA = ("usa/ca", "www")
@@ -132,7 +144,10 @@

    @property
    def indeed_domain_value(self):
-        return self.value[1]
+        subdomain, _, api_country_code = self.value[1].partition(":")
+        if subdomain and api_country_code:
+            return subdomain, api_country_code.upper()
+        return self.value[1], self.value[1].upper()

    @property
    def glassdoor_domain_value(self):
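With the new "subdomain:code" encoding, the property now returns a (subdomain, API country code) tuple either way, so callers expecting the old plain string need updating. For example, given the USA and AUSTRIA values shown above (illustrative):

    # "www:us" splits into subdomain + explicit API code
    print(Country.USA.indeed_domain_value)      # ("www", "US")
    # "at" has no ":" separator, so the subdomain doubles as the code
    print(Country.AUSTRIA.indeed_domain_value)  # ("at", "AT")
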
@@ -145,7 +160,7 @@
        else:
            raise Exception(f"Glassdoor is not available for {self.name}")

-    def get_url(self):
+    def get_glassdoor_url(self):
        return f"https://{self.glassdoor_domain_value}/"

    @classmethod
@@ -153,7 +168,7 @@
        """Convert a string to the corresponding Country enum."""
        country_str = country_str.strip().lower()
        for country in cls:
-            country_names = country.value[0].split(',')
+            country_names = country.value[0].split(",")
            if country_str in country_names:
                return country
        valid_countries = [country.value for country in cls]
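from_string matches against every comma-separated alias in a country's first tuple slot, so the Türkiye rename above keeps both spellings working (illustrative):

    print(Country.from_string("türkiye"))         # Country.TURKEY
    print(Country.from_string("United Kingdom"))  # Country.UK (input is lower-cased first)
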
@@ -163,7 +178,7 @@


class Location(BaseModel):
-    country: Country | None = None
+    country: Country | str | None = None
    city: Optional[str] = None
    state: Optional[str] = None

@@ -173,7 +188,12 @@
            location_parts.append(self.city)
        if self.state:
            location_parts.append(self.state)
-        if self.country and self.country not in (Country.US_CANADA, Country.WORLDWIDE):
+        if isinstance(self.country, str):
+            location_parts.append(self.country)
+        elif self.country and self.country not in (
+            Country.US_CANADA,
+            Country.WORLDWIDE,
+        ):
            country_name = self.country.value[0]
            if "," in country_name:
                country_name = country_name.split(",")[0]
@@ -193,34 +213,110 @@ class CompensationInterval(Enum):

    @classmethod
    def get_interval(cls, pay_period):
-        return cls[pay_period].value if pay_period in cls.__members__ else None
+        interval_mapping = {
+            "YEAR": cls.YEARLY,
+            "HOUR": cls.HOURLY,
+        }
+        if pay_period in interval_mapping:
+            return interval_mapping[pay_period].value
+        else:
+            return cls[pay_period].value if pay_period in cls.__members__ else None


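The added mapping lets Indeed-style pay periods ("YEAR", "HOUR") resolve without a KeyError, while exact member names still pass through. A sketch, assuming the enum values are the lowercase strings ("yearly", "hourly", ...):

    print(CompensationInterval.get_interval("YEAR"))    # "yearly" via interval_mapping
    print(CompensationInterval.get_interval("HOURLY"))  # "hourly" via __members__
    print(CompensationInterval.get_interval("bogus"))   # None
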
class Compensation(BaseModel):
    interval: Optional[CompensationInterval] = None
-    min_amount: int | None = None
-    max_amount: int | None = None
+    min_amount: float | None = None
+    max_amount: float | None = None
    currency: Optional[str] = "USD"


class DescriptionFormat(Enum):
    MARKDOWN = "markdown"
    HTML = "html"


class JobPost(BaseModel):
    id: str | None = None
    title: str
-    company_name: str
+    company_name: str | None
    job_url: str
    job_url_direct: str | None = None
    location: Optional[Location]

    description: str | None = None
    company_url: str | None = None
    company_url_direct: str | None = None

    job_type: list[JobType] | None = None
    compensation: Compensation | None = None
    date_posted: date | None = None
    benefits: str | None = None
    emails: list[str] | None = None
    num_urgent_words: int | None = None
    is_remote: bool | None = None
    # company_industry: str | None = None
    listing_type: str | None = None

    # linkedin specific
    job_level: str | None = None

    # linkedin and indeed specific
    company_industry: str | None = None

    # indeed specific
    company_addresses: str | None = None
    company_num_employees: str | None = None
    company_revenue: str | None = None
    company_description: str | None = None
    company_logo: str | None = None
    banner_photo_url: str | None = None

    # linkedin only atm
    job_function: str | None = None


class JobResponse(BaseModel):
    jobs: list[JobPost] = []


class Site(Enum):
    LINKEDIN = "linkedin"
    INDEED = "indeed"
    ZIP_RECRUITER = "zip_recruiter"
    GLASSDOOR = "glassdoor"
    GOOGLE = "google"
    BAYT = "bayt"


class SalarySource(Enum):
    DIRECT_DATA = "direct_data"
    DESCRIPTION = "description"


class ScraperInput(BaseModel):
    site_type: list[Site]
    search_term: str | None = None
    google_search_term: str | None = None

    location: str | None = None
    country: Country | None = Country.USA
    distance: int | None = None
    is_remote: bool = False
    job_type: JobType | None = None
    easy_apply: bool | None = None
    offset: int = 0
    linkedin_fetch_description: bool = False
    linkedin_company_ids: list[int] | None = None
    description_format: DescriptionFormat | None = DescriptionFormat.MARKDOWN

    results_wanted: int = 15
    hours_old: int | None = None


class Scraper(ABC):
    def __init__(
        self, site: Site, proxies: list[str] | None = None, ca_cert: str | None = None
    ):
        self.site = site
        self.proxies = proxies
        self.ca_cert = ca_cert

    @abstractmethod
    def scrape(self, scraper_input: ScraperInput) -> JobResponse: ...

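Concrete scrapers plug into this ABC by implementing scrape. A minimal hypothetical subclass, just to show the contract (EchoScraper is not part of the repo):

    class EchoScraper(Scraper):
        # Toy scraper: returns no jobs, but satisfies the interface.
        def __init__(self, proxies=None, ca_cert=None):
            super().__init__(Site.GOOGLE, proxies=proxies, ca_cert=ca_cert)

        def scrape(self, scraper_input: ScraperInput) -> JobResponse:
            return JobResponse(jobs=[])
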

jobspy/util.py (new file, 347 lines)
@@ -0,0 +1,347 @@
from __future__ import annotations

import logging
import re
from itertools import cycle

import numpy as np
import requests
import tls_client
import urllib3
from markdownify import markdownify as md
from requests.adapters import HTTPAdapter, Retry

from jobspy.model import CompensationInterval, JobType, Site

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def create_logger(name: str):
    logger = logging.getLogger(f"JobSpy:{name}")
    logger.propagate = False
    if not logger.handlers:
        logger.setLevel(logging.INFO)
        console_handler = logging.StreamHandler()
        format = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
        formatter = logging.Formatter(format)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    return logger


class RotatingProxySession:
    def __init__(self, proxies=None):
        if isinstance(proxies, str):
            self.proxy_cycle = cycle([self.format_proxy(proxies)])
        elif isinstance(proxies, list):
            self.proxy_cycle = (
                cycle([self.format_proxy(proxy) for proxy in proxies])
                if proxies
                else None
            )
        else:
            self.proxy_cycle = None

    @staticmethod
    def format_proxy(proxy):
        """Utility method to format a proxy string into a dictionary."""
        if proxy.startswith("http://") or proxy.startswith("https://"):
            return {"http": proxy, "https": proxy}
        return {"http": f"http://{proxy}", "https": f"http://{proxy}"}

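format_proxy normalizes bare host:port (or user:pass@host:port) strings into the scheme-keyed dict that requests expects; already-schemed strings pass through for both keys (illustrative):

    print(RotatingProxySession.format_proxy("1.2.3.4:8080"))
    # {'http': 'http://1.2.3.4:8080', 'https': 'http://1.2.3.4:8080'}
    print(RotatingProxySession.format_proxy("https://1.2.3.4:8080"))
    # {'http': 'https://1.2.3.4:8080', 'https': 'https://1.2.3.4:8080'}
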

class RequestsRotating(RotatingProxySession, requests.Session):

    def __init__(self, proxies=None, has_retry=False, delay=1, clear_cookies=False):
        RotatingProxySession.__init__(self, proxies=proxies)
        requests.Session.__init__(self)
        self.clear_cookies = clear_cookies
        self.allow_redirects = True
        self.setup_session(has_retry, delay)

    def setup_session(self, has_retry, delay):
        if has_retry:
            retries = Retry(
                total=3,
                connect=3,
                status=3,
                status_forcelist=[500, 502, 503, 504, 429],
                backoff_factor=delay,
            )
            adapter = HTTPAdapter(max_retries=retries)
            self.mount("http://", adapter)
            self.mount("https://", adapter)

    def request(self, method, url, **kwargs):
        if self.clear_cookies:
            self.cookies.clear()

        if self.proxy_cycle:
            next_proxy = next(self.proxy_cycle)
            if next_proxy["http"] != "http://localhost":
                self.proxies = next_proxy
            else:
                self.proxies = {}
        return requests.Session.request(self, method, url, **kwargs)


class TLSRotating(RotatingProxySession, tls_client.Session):

    def __init__(self, proxies=None):
        RotatingProxySession.__init__(self, proxies=proxies)
        tls_client.Session.__init__(self, random_tls_extension_order=True)

    def execute_request(self, *args, **kwargs):
        if self.proxy_cycle:
            next_proxy = next(self.proxy_cycle)
            if next_proxy["http"] != "http://localhost":
                self.proxies = next_proxy
            else:
                self.proxies = {}
        response = tls_client.Session.execute_request(self, *args, **kwargs)
        response.ok = response.status_code in range(200, 400)
        return response


def create_session(
    *,
    proxies: dict | str | None = None,
    ca_cert: str | None = None,
    is_tls: bool = True,
    has_retry: bool = False,
    delay: int = 1,
    clear_cookies: bool = False,
) -> requests.Session:
    """
    Creates a requests session with optional tls, proxy, and retry settings.
    :return: A session object
    """
    if is_tls:
        session = TLSRotating(proxies=proxies)
    else:
        session = RequestsRotating(
            proxies=proxies,
            has_retry=has_retry,
            delay=delay,
            clear_cookies=clear_cookies,
        )

    if ca_cert:
        session.verify = ca_cert

    return session

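Callers choose between the TLS-fingerprinting session (default) and the plain requests session with urllib3 retries. A usage sketch (proxy value hypothetical):

    session = create_session(
        proxies="user:pass@1.2.3.4:8080",  # rotated on every request
        is_tls=False,                      # plain requests.Session + Retry adapter
        has_retry=True,
        delay=2,
    )
    resp = session.get("https://httpbin.org/get", timeout=10)
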

def set_logger_level(verbose: int):
    """
    Adjusts the logger's level. This function allows the logging level to be changed at runtime.

    Parameters:
    - verbose: int {0, 1, 2} (default=2, all logs)
    """
    if verbose is None:
        return
    level_name = {2: "INFO", 1: "WARNING", 0: "ERROR"}.get(verbose, "INFO")
    level = getattr(logging, level_name.upper(), None)
    if level is not None:
        for logger_name in logging.root.manager.loggerDict:
            if logger_name.startswith("JobSpy:"):
                logging.getLogger(logger_name).setLevel(level)
    else:
        raise ValueError(f"Invalid log level: {level_name}")


def markdown_converter(description_html: str):
    if description_html is None:
        return None
    markdown = md(description_html)
    return markdown.strip()


def extract_emails_from_text(text: str) -> list[str] | None:
    if not text:
        return None
    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
    return email_regex.findall(text)

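The regex grabs every plausible address, and empty input yields None rather than an empty list (illustrative):

    print(extract_emails_from_text("Apply via hr@example.com or jobs@example.org"))
    # ['hr@example.com', 'jobs@example.org']
    print(extract_emails_from_text(""))  # None
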

def get_enum_from_job_type(job_type_str: str) -> JobType | None:
    """
    Given a string, returns the corresponding JobType enum member if a match is found.
    """
    res = None
    for job_type in JobType:
        if job_type_str in job_type.value:
            res = job_type
    return res


def currency_parser(cur_str):
    # Remove any non-numerical characters
    # except for ',' '.' or '-' (e.g. EUR)
    cur_str = re.sub("[^-0-9.,]", "", cur_str)
    # Remove any 000s separators (either , or .)
    cur_str = re.sub("[.,]", "", cur_str[:-3]) + cur_str[-3:]

    if "." in list(cur_str[-3:]):
        num = float(cur_str)
    elif "," in list(cur_str[-3:]):
        num = float(cur_str.replace(",", "."))
    else:
        num = float(cur_str)

    return np.round(num, 2)

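Stripping separators from everything but the last three characters lets the same code accept both US and European formats (illustrative):

    print(currency_parser("$1,200.50"))   # 1200.5
    print(currency_parser("1.200,50 €"))  # 1200.5 (trailing comma treated as the decimal)
    print(currency_parser("75000"))       # 75000.0
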

def remove_attributes(tag):
    for attr in list(tag.attrs):
        del tag[attr]
    return tag


def extract_salary(
    salary_str,
    lower_limit=1000,
    upper_limit=700000,
    hourly_threshold=350,
    monthly_threshold=30000,
    enforce_annual_salary=False,
):
    """
    Extracts salary information from a string and returns the salary interval, min and max salary values, and currency.
    (TODO: Needs test cases as the regex is complicated and may not cover all edge cases)
    """
    if not salary_str:
        return None, None, None, None

    annual_max_salary = None
    min_max_pattern = r"\$(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)\s*[-—–]\s*(?:\$)?(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)"

    def to_int(s):
        return int(float(s.replace(",", "")))

    def convert_hourly_to_annual(hourly_wage):
        return hourly_wage * 2080

    def convert_monthly_to_annual(monthly_wage):
        return monthly_wage * 12

    match = re.search(min_max_pattern, salary_str)

    if match:
        min_salary = to_int(match.group(1))
        max_salary = to_int(match.group(3))
        # Handle 'k' suffix for min and max salaries independently
        if "k" in match.group(2).lower() or "k" in match.group(4).lower():
            min_salary *= 1000
            max_salary *= 1000

        # Convert to annual if less than the hourly threshold
        if min_salary < hourly_threshold:
            interval = CompensationInterval.HOURLY.value
            annual_min_salary = convert_hourly_to_annual(min_salary)
            if max_salary < hourly_threshold:
                annual_max_salary = convert_hourly_to_annual(max_salary)

        elif min_salary < monthly_threshold:
            interval = CompensationInterval.MONTHLY.value
            annual_min_salary = convert_monthly_to_annual(min_salary)
            if max_salary < monthly_threshold:
                annual_max_salary = convert_monthly_to_annual(max_salary)

        else:
            interval = CompensationInterval.YEARLY.value
            annual_min_salary = min_salary
            annual_max_salary = max_salary

        # Ensure salary range is within specified limits
        if not annual_max_salary:
            return None, None, None, None
        if (
            lower_limit <= annual_min_salary <= upper_limit
            and lower_limit <= annual_max_salary <= upper_limit
            and annual_min_salary < annual_max_salary
        ):
            if enforce_annual_salary:
                return interval, annual_min_salary, annual_max_salary, "USD"
            else:
                return interval, min_salary, max_salary, "USD"
    return None, None, None, None

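End to end, a range like "$60k - $80k" is classified as yearly (both numbers clear the hourly and monthly thresholds) and passed through unconverted unless enforce_annual_salary is set. Assuming the CompensationInterval values are the lowercase strings:

    print(extract_salary("$60k - $80k"))
    # ('yearly', 60000, 80000, 'USD')
    print(extract_salary("$25 - $30"))
    # ('hourly', 25, 30, 'USD') -- annualized internally only for the range checks
    print(extract_salary("competitive pay"))
    # (None, None, None, None)
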

def extract_job_type(description: str):
    if not description:
        return []

    keywords = {
        JobType.FULL_TIME: r"full\s?time",
        JobType.PART_TIME: r"part\s?time",
        JobType.INTERNSHIP: r"internship",
        JobType.CONTRACT: r"contract",
    }

    listing_types = []
    for key, pattern in keywords.items():
        if re.search(pattern, description, re.IGNORECASE):
            listing_types.append(key)

    return listing_types if listing_types else None


def map_str_to_site(site_name: str) -> Site:
    return Site[site_name.upper()]


def get_enum_from_value(value_str):
    for job_type in JobType:
        if value_str in job_type.value:
            return job_type
    raise Exception(f"Invalid job type: {value_str}")


def convert_to_annual(job_data: dict):
    if job_data["interval"] == "hourly":
        job_data["min_amount"] *= 2080
        job_data["max_amount"] *= 2080
    if job_data["interval"] == "monthly":
        job_data["min_amount"] *= 12
        job_data["max_amount"] *= 12
    if job_data["interval"] == "weekly":
        job_data["min_amount"] *= 52
        job_data["max_amount"] *= 52
    if job_data["interval"] == "daily":
        job_data["min_amount"] *= 260
        job_data["max_amount"] *= 260
    job_data["interval"] = "yearly"

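convert_to_annual mutates the row in place, scaling by 2080 hours, 12 months, 52 weeks, or 260 working days before relabeling the interval (illustrative):

    row = {"interval": "hourly", "min_amount": 25, "max_amount": 40}
    convert_to_annual(row)
    print(row)  # {'interval': 'yearly', 'min_amount': 52000, 'max_amount': 83200}
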

desired_order = [
    "id",
    "site",
    "job_url",
    "job_url_direct",
    "title",
    "company",
    "location",
    "date_posted",
    "job_type",
    "salary_source",
    "interval",
    "min_amount",
    "max_amount",
    "currency",
    "is_remote",
    "job_level",
    "job_function",
    "listing_type",
    "emails",
    "description",
    "company_industry",
    "company_url",
    "company_logo",
    "company_url_direct",
    "company_addresses",
    "company_num_employees",
    "company_revenue",
    "company_description",
]

jobspy/ziprecruiter/__init__.py (new file, 219 lines)
@@ -0,0 +1,219 @@
from __future__ import annotations

import json
import math
import re
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime

from bs4 import BeautifulSoup

from jobspy.ziprecruiter.constant import headers, get_cookie_data
from jobspy.util import (
    extract_emails_from_text,
    create_session,
    markdown_converter,
    remove_attributes,
    create_logger,
)
from jobspy.model import (
    JobPost,
    Compensation,
    Location,
    JobResponse,
    Country,
    DescriptionFormat,
    Scraper,
    ScraperInput,
    Site,
)
from jobspy.ziprecruiter.util import get_job_type_enum, add_params

log = create_logger("ZipRecruiter")


class ZipRecruiter(Scraper):
    base_url = "https://www.ziprecruiter.com"
    api_url = "https://api.ziprecruiter.com"

    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes ZipRecruiterScraper with the ZipRecruiter job search url
        """
        super().__init__(Site.ZIP_RECRUITER, proxies=proxies)

        self.scraper_input = None
        self.session = create_session(proxies=proxies, ca_cert=ca_cert)
        self.session.headers.update(headers)
        self._get_cookies()

        self.delay = 5
        self.jobs_per_page = 20
        self.seen_urls = set()

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes ZipRecruiter for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        self.scraper_input = scraper_input
        job_list: list[JobPost] = []
        continue_token = None

        max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)
        for page in range(1, max_pages + 1):
            if len(job_list) >= scraper_input.results_wanted:
                break
            if page > 1:
                time.sleep(self.delay)
            log.info(f"search page: {page} / {max_pages}")
            jobs_on_page, continue_token = self._find_jobs_in_page(
                scraper_input, continue_token
            )
            if jobs_on_page:
                job_list.extend(jobs_on_page)
            else:
                break
            if not continue_token:
                break
        return JobResponse(jobs=job_list[: scraper_input.results_wanted])

    def _find_jobs_in_page(
        self, scraper_input: ScraperInput, continue_token: str | None = None
    ) -> tuple[list[JobPost], str | None]:
        """
        Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
        :param scraper_input:
        :param continue_token:
        :return: jobs found on page
        """
        jobs_list = []
        params = add_params(scraper_input)
        if continue_token:
            params["continue_from"] = continue_token
        try:
            res = self.session.get(f"{self.api_url}/jobs-app/jobs", params=params)
            if res.status_code not in range(200, 400):
                if res.status_code == 429:
                    err = "429 Response - Blocked by ZipRecruiter for too many requests"
                else:
                    err = f"ZipRecruiter response status code {res.status_code}"
                    err += f" with response: {res.text}"  # ZipRecruiter likely not available in EU
                log.error(err)
                return jobs_list, ""
        except Exception as e:
            if "Proxy responded with" in str(e):
                log.error("ZipRecruiter: Bad proxy")
            else:
                log.error(f"ZipRecruiter: {str(e)}")
            return jobs_list, ""

        res_data = res.json()
        jobs_list = res_data.get("jobs", [])
        next_continue_token = res_data.get("continue", None)
        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
            job_results = [executor.submit(self._process_job, job) for job in jobs_list]

        job_list = list(filter(None, (result.result() for result in job_results)))
        return job_list, next_continue_token

    def _process_job(self, job: dict) -> JobPost | None:
        """
        Processes an individual job dict from the response
        """
        title = job.get("name")
        job_url = f"{self.base_url}/jobs//j?lvk={job['listing_key']}"
        if job_url in self.seen_urls:
            return
        self.seen_urls.add(job_url)

        description = job.get("job_description", "").strip()
        listing_type = job.get("buyer_type", "")
        description = (
            markdown_converter(description)
            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN
            else description
        )
        company = job.get("hiring_company", {}).get("name")
        country_value = "usa" if job.get("job_country") == "US" else "canada"
        country_enum = Country.from_string(country_value)

        location = Location(
            city=job.get("job_city"), state=job.get("job_state"), country=country_enum
        )
        job_type = get_job_type_enum(
            job.get("employment_type", "").replace("_", "").lower()
        )
        date_posted = datetime.fromisoformat(job["posted_time"].rstrip("Z")).date()
        comp_interval = job.get("compensation_interval")
        comp_interval = "yearly" if comp_interval == "annual" else comp_interval
        comp_min = int(job["compensation_min"]) if "compensation_min" in job else None
        comp_max = int(job["compensation_max"]) if "compensation_max" in job else None
        comp_currency = job.get("compensation_currency")
        description_full, job_url_direct = self._get_descr(job_url)

        return JobPost(
            id=f'zr-{job["listing_key"]}',
            title=title,
            company_name=company,
            location=location,
            job_type=job_type,
            compensation=Compensation(
                interval=comp_interval,
                min_amount=comp_min,
                max_amount=comp_max,
                currency=comp_currency,
            ),
            date_posted=date_posted,
            job_url=job_url,
            description=description_full if description_full else description,
            emails=extract_emails_from_text(description) if description else None,
            job_url_direct=job_url_direct,
            listing_type=listing_type,
        )

    def _get_descr(self, job_url):
        res = self.session.get(job_url, allow_redirects=True)
        description_full = job_url_direct = None
        if res.ok:
            soup = BeautifulSoup(res.text, "html.parser")
            job_descr_div = soup.find("div", class_="job_description")
            company_descr_section = soup.find("section", class_="company_description")
            job_description_clean = (
                remove_attributes(job_descr_div).prettify(formatter="html")
                if job_descr_div
                else ""
            )
            company_description_clean = (
                remove_attributes(company_descr_section).prettify(formatter="html")
                if company_descr_section
                else ""
            )
            description_full = job_description_clean + company_description_clean

            try:
                script_tag = soup.find("script", type="application/json")
                if script_tag:
                    job_json = json.loads(script_tag.string)
                    job_url_val = job_json["model"].get("saveJobURL", "")
                    m = re.search(r"job_url=(.+)", job_url_val)
                    if m:
                        job_url_direct = m.group(1)
            except Exception:
                job_url_direct = None

            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
                description_full = markdown_converter(description_full)

        return description_full, job_url_direct

    def _get_cookies(self):
        """
        Sends a session event to the API with device properties.
        """
        url = f"{self.api_url}/jobs-app/event"
        self.session.post(url, data=get_cookie_data)

jobspy/ziprecruiter/constant.py (new file, 29 lines)
@@ -0,0 +1,29 @@
headers = {
    "Host": "api.ziprecruiter.com",
    "accept": "*/*",
    "x-zr-zva-override": "100000000;vid:ZT1huzm_EQlDTVEc",
    "x-pushnotificationid": "0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0",
    "x-deviceid": "D77B3A92-E589-46A4-8A39-6EF6F1D86006",
    "user-agent": "Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)",
    "authorization": "Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==",
    "accept-language": "en-US,en;q=0.9",
}

get_cookie_data = [
    ("event_type", "session"),
    ("logged_in", "false"),
    ("number_of_retry", "1"),
    ("property", "model:iPhone"),
    ("property", "os:iOS"),
    ("property", "locale:en_us"),
    ("property", "app_build_number:4734"),
    ("property", "app_version:91.0"),
    ("property", "manufacturer:Apple"),
    ("property", "timestamp:2025-01-12T12:04:42-06:00"),
    ("property", "screen_height:852"),
    ("property", "os_version:16.6.1"),
    ("property", "source:install"),
    ("property", "screen_width:393"),
    ("property", "device_model:iPhone 14 Pro"),
    ("property", "brand:Apple"),
]

jobspy/ziprecruiter/util.py (new file, 31 lines)
@@ -0,0 +1,31 @@
from jobspy.model import JobType


def add_params(scraper_input) -> dict[str, str | int]:
    params: dict[str, str | int] = {
        "search": scraper_input.search_term,
        "location": scraper_input.location,
    }
    if scraper_input.hours_old:
        params["days"] = max(scraper_input.hours_old // 24, 1)

    job_type_map = {JobType.FULL_TIME: "full_time", JobType.PART_TIME: "part_time"}
    if scraper_input.job_type:
        job_type = scraper_input.job_type
        params["employment_type"] = job_type_map.get(job_type, job_type.value[0])

    if scraper_input.easy_apply:
        params["zipapply"] = 1
    if scraper_input.is_remote:
        params["remote"] = 1
    if scraper_input.distance:
        params["radius"] = scraper_input.distance

    return {k: v for k, v in params.items() if v is not None}

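add_params only emits keys that are actually set, so a sparse ScraperInput yields a sparse query. A sketch (field values hypothetical):

    from jobspy.model import ScraperInput, Site

    params = add_params(
        ScraperInput(
            site_type=[Site.ZIP_RECRUITER],
            search_term="help desk",
            location="Boston, MA",
            hours_old=72,
            is_remote=True,
        )
    )
    print(params)  # {'search': 'help desk', 'location': 'Boston, MA', 'days': 3, 'remote': 1}
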

def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
    for job_type in JobType:
        if job_type_str in job_type.value:
            return [job_type]
    return None

jobspy_output.csv (new file, 1159 lines)
File diff suppressed because it is too large
@@ -0,0 +1,612 @@
|
||||
Job ID|~|Job Title (Primary)|~|Company Name|~|Industry|~|Experience Level|~|Job Type|~|Is Remote|~|Currency|~|Salary Min|~|Salary Max|~|Date Posted|~|Location City|~|Location State|~|Location Country|~|Job URL|~|Job Description|~|Job Source,in-1204f360ed401e85|~|IT Support Technician – Hospitality|~|Edge Communications|~|Not Provided|~|Not Provided|~|Not Provided|~|True|~|USD|~|70000.0|~|80000.0|~|2025-04-15|~|Honolulu|~|HI|~|US|~|https://www.indeed.com/viewjob?jk=1204f360ed401e85|~|Description:
|
||||
**IT Support Technician – Hospitality**
|
||||
|
||||
**Reports to: IT Services**
|
||||
|
||||
**Location: Honolulu**
|
||||
|
||||
**Company Description**
|
||||
|
||||
|
||||
Edge provides integrated managed voice and data technology systems and services for small/medium businesses and enterprises.
|
||||
|
||||
**Position Description**
|
||||
|
||||
|
||||
As an IT Support Technician you will be part of a team of IT professionals who provide onsite \& remote support for all facets of the IT ecosystem. Our "white\-glove" 24/7 support program specializes in industries where attention to detail and timely response is mission critical. Our hospitality division caters to high\-end large\-scale boutique hotels restaurants and nightclubs whose staff and patrons expect industry\-leading support. This is a fast\-paced interactive hands\-on role where you must "dress to impress’' and give 100% daily.
|
||||
|
||||
|
||||
|
||||
As part of a team that supports multiple properties in several states we are looking for people who are self\-starters and can work remotely as well. You must manage your workload each day and be able to prioritize each task based on each unique situation. Using cutting\-edge industry remote management monitoring and access tools you will be assisted by teams in other regions and may be asked to do the same for them.
|
||||
|
||||
**Primary Responsibilities**
|
||||
|
||||
* Desktop support for hardware and software troubleshooting
|
||||
* Willingness to learn industry\-specific and proprietary management systems
|
||||
* Setup deploy and maintain end\-user equipment
|
||||
* Perform network administration functions user account permissions Active Directory changes
|
||||
* Follow up with clients to ensure resolution is complete and satisfactory
|
||||
* Maintain accurate thorough and timely information in ticketing system
|
||||
* Research and resolve problems through all IT functions
|
||||
* Collaborate with peers to form technical solutions
|
||||
* Completion of day\-to\-day help desk support requests and assigned projects that require interaction with other divisions of our company
|
||||
|
||||
|
||||
Requirements:
|
||||
**Required Skills**
|
||||
|
||||
* Ability to provide on\-site \& remote desktop support to customers.
|
||||
* Ability to use remote support tools like VNC LogMeIn RDP etc.
|
||||
* Strong troubleshooting abilities
|
||||
* Ability to use our remote management platform for workstation configuration status testing
|
||||
* Familiarity supporting (not engineering) TCP/IP cables IP phones workstation connectivity printer connectivity POS devices and Active Directory administration
|
||||
* Ability to be responsible dependable and committed to building a long\-term career at Edge Communications.
|
||||
* Being a goal\-driven team player with solid organizational skills and a keen attention to detail.
|
||||
* Independent self\-starting attitude with the willingness to share knowledge.
|
||||
* Thorough knowledge of all Windows server and desktop operating systems
|
||||
* Understanding of Hotel property management \& Point of Sale applications
|
||||
* Thorough knowledge of PC server hardware and configuration including related peripherals.
|
||||
* Thorough knowledge of Word Excel PowerPoint Outlook Active Directory and Exchange
|
||||
* Strong customer service and problem\-solving skills including the ability to provide diligent prompt and courteous responses to users’ questions or PC issues.
|
||||
* Ability to function effectively in a fast\-paced environment
|
||||
* Willingness to travel occasionally
|
||||
* Ability to multi\-task and maintain good communication is a must
|
||||
|
||||
**Desired Skills \& Experience**
|
||||
|
||||
* Five years related experience or equivalent.
|
||||
* Two years of telecommunications experience
|
||||
* Knowledge of mobile devices in an enterprise including iPads iPhones Android devices
|
||||
* Understanding of PCI compliance and certificates
|
||||
* Familiarity with Ruckus APs and Meraki APs administration
|
||||
* Understanding of IP Networking and troubleshooting
|
||||
* Familiarity with hotel applications such as: PMS\-Opera; POS\-Micros; Revenue Management\-Ideas; Building Management –HotSOS Safelock InnComm and more; Sales – Delphi/SalesForce
|
||||
* A\+ Certification
|
||||
* MCSE / MCDST / A\+ certification(s)
|
||||
* ACSP certification(s)|~|indeed,in-908e40df617013b9|~|IT Support Internship (Summer) — Lalor Family Dental|~|Lalor Family Dental|~|Not Provided|~|Not Provided|~|INTERNSHIP|~|False|~|USD|~|16.0|~|18.0|~|2025-04-15|~|Johnson City|~|NY|~|US|~|https://www.indeed.com/viewjob?jk=908e40df617013b9|~|**Join the growing team at Lalor Family Dental** a second\-generation family\-owned healthcare practice with over 60 years of experience in delivering exceptional patient care. We are seeking motivated tech\-savvy individuals for our **IT Support Internship** designed for those eager to gain real\-world IT experience in a dynamic multi\-location healthcare environment.
|
||||
|
||||
|
||||
This is a **paid summer internship** ideal for students pursuing a career in IT systems administration or healthcare technology. Whether you're exploring the field or looking to build your resume this hands\-on opportunity offers a unique blend of technical training mentorship and meaningful work.
|
||||
|
||||
**Why Intern at Lalor Family Dental?**
|
||||
|
||||
* Work in a **collaborative family\-owned healthcare practice**
|
||||
* Gain **hands\-on experience** supporting real IT systems and end\-users
|
||||
* Shadow seasoned IT professionals in a **fast\-paced healthcare environment**
|
||||
* Participate in IT projects and infrastructure design
|
||||
* Named a **Great Place to Work** and **\#18 in Fortune’s Best Workplaces in Health Care**
|
||||
* Fun company culture with **team events** and a strong focus on **work\-life balance**
|
||||
|
||||
**Key Responsibilities:**
|
||||
|
||||
* Assist with **IT support tickets** and troubleshooting of hardware/software issues
|
||||
* Shadow and support setup of **workstations mobile devices printers and medical equipment**
|
||||
* Learn and participate in **network and server maintenance**
|
||||
* Support system audits updates and performance tracking
|
||||
* Help deploy IT equipment and assist with **asset management across six locations**
|
||||
* Contribute to a **capstone project** aimed at improving IT operations
|
||||
|
||||
**Qualifications:**
|
||||
|
||||
* High school diploma or GED required
|
||||
* Currently pursuing a degree in Information Technology or related field (preferred)
|
||||
* Strong interest in **IT technology and healthcare**
|
||||
* Basic understanding of **computers networking and troubleshooting**
|
||||
* Excellent communication and problem\-solving skills
|
||||
* Ability to work independently and in a collaborative team setting
|
||||
|
||||
**Internship Benefits:**
|
||||
|
||||
* **Mentorship** from experienced IT Systems Support staff
|
||||
* **Real\-world experience** in a healthcare IT environment
|
||||
* Opportunity to develop technical communication and project management skills
|
||||
* Supportive team culture with **regular check\-ins and career development**
|
||||
* Internship completion letter and experience for **resume or school credit**
|
||||
|
||||
**Ready to Launch Your Career in IT?**
|
||||
|
||||
|
||||
Apply today to join Lalor Family Dental’s IT team and gain the hands\-on experience that will set you apart. Here your learning growth and future in tech truly matter.
|
||||
|
||||
**Lalor Family Dental is an equal\-opportunity employer** committed to creating an inclusive and diverse team environment.|~|indeed,in-4238c0f342b06c39|~|Help Desk Associate|~|Initiate Government Solutions|~|Not Provided|~|Not Provided|~|Not Provided|~|True|~|USD|~|44615.0|~|55920.0|~|2025-04-15|~|Washington|~|DC|~|US|~|https://www.indeed.com/viewjob?jk=4238c0f342b06c39|~|Description:
|
||||
|
||||
Founded in 2007 Initiate Government Solutions (IGS) a Woman Owned Small Business. We are a fully remote IT services provider that delivers innovative Enterprise IT and Health Services solutions across the federal sector. Our focus is on data analytics health informatics cloud migration and the modernization of federal information systems.
|
||||
|
||||
|
||||
|
||||
IGS uses ISO 9001:2015 20000\-1:2018 27001:2013 28001:2007 CMMI/SVC3 CMMI/DEV3 best practices and PMBOK® methods to provide clients with a strategy to build solid foundations to grow capabilities and revenue. Our range of IT services and delivery methodologies are tailored to our customers’ unique needs to achieve maximum value.
|
||||
|
||||
|
||||
IGS is currently recruiting for a **Help Desk Associate** to support the Department of Veterans Affairs.
|
||||
|
||||
**This position is pending contract award applicants will be reviewed post\-award.**
|
||||
|
||||
**Assignment of Work and Travel:**
|
||||
|
||||
|
||||
This is a remote access assignment. Candidates will work remotely daily and will remotely access VA systems and therein use approved VA provided communications systems. Travel is not required; however the candidate may be required to attend onsite client meetings as requested.
|
||||
|
||||
**Responsibilities and Duties (Included but not limited to):**
|
||||
|
||||
* Provide help desk support assistance to the established Enterprise Service Desk (ESD) for managed access
|
||||
* Log help\-desk tickets into the appropriate existing workload management tracking system
|
||||
* Respond to email and phone inquiries from the ESD Helpdesk or customer
|
||||
* Provide user training and concierge services associated with access applications by creating workflow process documents and or using MS Word PowerPoint or ad hoc
|
||||
* Assess what types of data are available in the VA and what data is being requested to ensure requestors are only requesting data that they need to perform duties
|
||||
|
||||
|
||||
Requirements:
|
||||
* Bachelor’s degree in computer science Engineering or other technical discipline. (Bachelor’s Degree \- Can be substituted for an Associate’s Degree and two (2\) additional years of relevant experience or four (4\) additional years of relevant experience and High School Diploma/GED. Associate’s degree \- Can be substituted for High School Diploma/GED and two (2\) additional year’s relevant experience.)
|
||||
* 3 years relevant experience including significant experience in an help desk environment preferably with the Dept. of Veterans Affairs
|
||||
* Must have experience in the analysis of IT business and information environment activities and events.
|
||||
* Must have experience in finding trends errors and reviewing data with report writing skills.
|
||||
* Must have reliable internet service that allows for effective telecommuting
|
||||
* Must be able to obtain and maintain a VA Public Trust clearance
|
||||
* Excellent verbal and written communication skills
|
||||
* Must be eligible to work in the United States without sponsorship due to clearance requirement
|
||||
|
||||
**Preferred Qualifications and Core Competencies:**
|
||||
|
||||
* Active VA Public Trust
|
||||
* Experience supporting Department of Veterans Affairs and/or other federal organizations
|
||||
* Prior successful experience working in a remote environment
|
||||
|
||||
**Successful IGS employees embody the following Core Values:**
|
||||
|
||||
* **Integrity Honesty and Ethics:** We conduct our business with the highest level of ethics. Doing things like being accountable for mistakes accepting helpful criticism and following through on commitments to ourselves each other and our customers.
|
||||
* **Empathy Emotional Intelligence**: How we interact with others including peers colleagues stakeholders and customers. We take collective responsibility to create an environment where colleagues and customers feel valued included and respected. We work within a diverse integrated and collaborative team to drive towards accomplishing the larger mission. We conscientiously and meticulously learn about our customers’ and end\-users’ business drivers and challenges to ensure solutions meet not only technical needs but also support their mission.
|
||||
* **Strong Work Ethic (Reliability Dedication Productivity):** We are driven by a strong self\-motivated and results\-driven work ethic. We are reliable accountable proactive and tenacious and will do what it takes to get the job done.
|
||||
* **Life\-Long Learner (Curious Perspective Goal Orientated):** We challenge ourselves to continually learn and improve ourselves. We strive to be an expert in our field continuously honing our craft and finding solutions where others see problems.
|
||||
|
||||
**Compensation:** There are a host of factors that can influence final salary including but not limited to geographic location Federal Government contract labor categories and contract wage rates relevant prior work experience specific skills and competencies education and certifications.
|
||||
|
||||
**Benefits:** Initiate Government Solutions offers competitive compensation and a robust benefits package including comprehensive medical dental and vision care matching 401K and profit sharing paid time off training time for personal development flexible spending accounts employer\-paid life insurance employer\-paid short and long term disability coverage an education assistance program with potential merit increases for obtaining a work\-related certification employee recognition and referral programs spot bonuses and other benefits that help provide financial protection for the employee and their family.
|
||||
|
||||
|
||||
|
||||
Initiate Government Solutions participates in the Electronic Employment Verification Program.|~|indeed,in-c09e1d318a6a0bdc|~|IT Help Desk Technician|~|Ramaz School|~|Not Provided|~|Not Provided|~|FULL_TIME|~|False|~|USD|~|24.0|~|27.0|~|2025-04-15|~|New York|~|NY|~|US|~|https://www.indeed.com/viewjob?jk=c09e1d318a6a0bdc|~|**About The Ramaz School:**
|
||||
|
||||
|
||||
The Ramaz School is a prestigious Jewish day school renowned for its integration of rich Jewish traditions with superior academic achievement. Located in the vibrant heart of New York City Ramaz is dedicated to nurturing individual talents fostering social responsibility and encouraging community service. We are seeking a motivated and tech\-savvy Help Desk Technician to join our IT department. This role is crucial for providing top\-notch technical support to our dynamic community of educators and students.
|
||||
|
||||
|
||||
**Position Summary:**
|
||||
|
||||
|
||||
|
||||
As a Help Desk Technician you will be the go\-to person for faculty staff and students experiencing IT\-related issues. This position plays a key role in ensuring the smooth functioning of our educational technologies and systems. You will be responsible for troubleshooting diagnosing and resolving technical problems thus ensuring minimal disruption to our educational activities. Furthermore you will assist with AV maintenance and provide support during school events guaranteeing all presentations and performances are executed flawlessly.
|
||||
|
||||
|
||||
|
||||
**Operational Hours:**
|
||||
|
||||
|
||||
|
||||
\- Monday to Friday 8 AM \- 5 PM
|
||||
|
||||
|
||||
|
||||
* Occasional evening and weekend support required for school events and critical IT needs.
|
||||
|
||||
|
||||
|
||||
**Key Responsibilities:**
|
||||
|
||||
|
||||
|
||||
* Act as the first point of contact for technical assistance via phone or in\-person.
|
||||
|
||||
|
||||
|
||||
* Troubleshoot and resolve computer software and hardware issues.
|
||||
|
||||
|
||||
|
||||
* Assist with AV system maintenance setup and troubleshooting for school events.
|
||||
|
||||
|
||||
|
||||
* Escalate unresolved issues to higher\-level IT support staff.
|
||||
|
||||
|
||||
|
||||
* Maintain detailed records of IT issues and resolutions.
|
||||
|
||||
|
||||
|
||||
* Stay updated on the latest system information changes and updates.
|
||||
|
||||
|
||||
|
||||
* Assist in the installation of new equipment and software across classrooms and administrative offices.
|
||||
|
||||
|
||||
|
||||
**Qualifications:**
|
||||
|
||||
|
||||
|
||||
* High School diploma or equivalent; a degree or enrollment in a degree program in Information Technology Computer Science or a related field is a plus.
|
||||
|
||||
|
||||
|
||||
* Knowledge of Windows/Mac OS computer systems mobile devices and AV technology.
|
||||
|
||||
|
||||
|
||||
* Ability to diagnose and troubleshoot basic technical problems effectively.
|
||||
|
||||
|
||||
|
||||
* Strong communication skills and a commitment to excellent customer service.
|
||||
|
||||
|
||||
|
||||
* Must be available to workfull\-timehours as specified including occasional evenings and weekends.
|
||||
|
||||
|
||||
|
||||
**Salary Range:**
|
||||
|
||||
|
||||
|
||||
\- $24 \- $27 per hour commensurate with experience and qualifications.
|
||||
|
||||
|
||||
|
||||
**Why Join** **The** **Ramaz School?**
|
||||
|
||||
|
||||
|
||||
* Competitive compensation within the specified salary range.
|
||||
|
||||
|
||||
|
||||
* Work in a leading educational environment that values technology and innovation.
|
||||
|
||||
|
||||
|
||||
* Opportunities for professional growth in educational technology and AV support.
|
||||
|
||||
|
||||
|
||||
* Bepartof a supportive community that promotes learning and development.|~|indeed,go-3O6aUUjO8LS9FWVJAAAAAA==|~|Help Desk / Customer Support Lead|~|Cormac|~|Not Provided|~|Not Provided|~|CONTRACT|~|True|~||~||~||~|2025-04-15|~|Leesburg|~|VA|~|Unknown|~|https://www.monster.com/job-openings/help-desk-customer-support-lead-leesburg-va--a6bfa827-0fe2-4c03-8965-704c6f205929?utm_campaign=google_jobs_apply&utm_source=google_jobs_apply&utm_medium=organic|~|Help Desk/Customer Support Lead
|
||||
|
||||
CORMAC is seeking a Help Desk/Customer Support Lead to support the Department of Health and Human Services (HHS) Office of Head Start (OHS) Aligned Monitoring System 2.0 Digital Services Platform (IT-AMS). IT-AMS is a data management system which supports an innovative comprehensive and integrated approach to recipient oversight allowing OHS to effectively gain understanding of recipient compliance identify and understand the differences in program performance among OHS programs and to ensure the effective use of federal funds. This is a Hybrid (Remote-First) role where the candidate must be local to the Washington Metropolitan area encompassing the District of Columbia Maryland and Virginia.
|
||||
|
||||
Essential Duties & Responsibilities?
|
||||
|
||||
Daily duties will vary according to project needs with job responsibilities including:?
|
||||
• Provide helpdesk support to teams using OHS monitoring systems
|
||||
• Track and analyze rising trending and high-volume Helpdesk issues to coordinate and support intuitive software enhancements and develop training for the use of those options.
|
||||
• Generate and present regular reports on Help Desk performance user satisfaction and ticket resolution metrics to stakeholders.
|
||||
• Participate in release and deployment planning to ensure Help Desk preparedness and seamless user transitions.
|
||||
• Act as the primary liaison between end users and technical teams ensuring accurate communication of user needs and system limitations.
|
||||
• Support change management and user adoption strategies for new features or updates to the system.
|
||||
• Manage a Help Desk team
|
||||
|
||||
Required Skills & Experience?
|
||||
• Bachelor s Degree or higher in Information Management Information Systems Computer Science or equivalent field.
|
||||
• Must have understanding of multi-tiered help desk operations and experience supervising a Help Desk team
|
||||
• Experience analyzing support patterns and sharing the feedback with the development team
|
||||
• Experience collaborating with the project team members to address recurring support issues via new or revised product stories and design work
|
||||
• Experience in technical support in product or project management
|
||||
• Experience with ServiceNow ticketing system for help desk operations incident tracking and change management.
|
||||
• Demonstrable experience with federal security standards (FISMA NIST SP 800-53 etc) as they relate to user access and incident handling
|
||||
• Working knowledge of RESTful API troubleshooting
|
||||
• Basic Database querying proficiency
|
||||
• Proficiency using and interpreting SLA dashboards and support metrics
|
||||
|
||||
Preferred Skills & Experience?
|
||||
• Knowledge of CLASS or other federally mandated reviewer scoring systems
|
||||
• Understanding of FedRAMP-authorized cloud environments (AWS GovCloud Azure Government)
|
||||
• Experience supporting users on data visualization platforms (e.g. Tableau or similar)
|
||||
• Experience in a federal Agile DevSecOps environment with exposure to CI/CD pipelines and cross-system API integration troubleshooting.
|
||||
|
||||
Why CORMAC??
|
||||
|
||||
At CORMAC we leverage the power of data management and analytics to enable our customers to achieve their strategic goals. With over 20 years of experience in health information technology (HIT) human-centered design principles and Agile development methodologies CORMAC delivers complex digital solutions to solve some of the most challenging problems facing public healthcare programs today.?
|
||||
|
||||
As a US Federal Government contractor in the public healthcare sector our work is impactful and cutting-edge while being performed in a supportive collaborative and welcoming environment. We offer flexible work schedules with remote hybrid or fully in-person workplace options to empower our employees to decide the workplace most suitable for them. At CORMAC we have a highly diverse workforce and believe a work environment is a place where creativity collaboration enthusiasm and innovation happen regardless of location.?
|
||||
|
||||
Position Requires Employment Eligibility Verification/E-Verify Participation/EEO
|
||||
|
||||
As an Equal Employment Opportunity employer, CORMAC provides equal employment opportunity to all employees and applicants without regard to an individual's protected status, including race/ethnicity, color, national origin, ancestry, religion, creed, age, gender, gender identity/expression, sexual orientation, marital status, parental status (including pregnancy, childbirth, or related conditions), disability, military service, veteran status, genetic information, or any other protected status.
|
||||
|
||||
About the Company:
|
||||
Cormac|~|google,go-qzGAEQlq1-gsmD_KAAAAAA==|~|Help Desk Technician|~|LMI Consulting LLC|~|Not Provided|~|Not Provided|~|CONTRACT|~|True|~||~||~||~|2025-04-15|~|McLean|~|VA|~|Unknown|~|https://www.whatjobs.com/gfj/1934920528?utm_campaign=google_jobs_apply&utm_source=google_jobs_apply&utm_medium=organic|~|Help Desk Technician Job Locations US-Remote Job ID 2025-12517 # of Openings 2 Category Information Technology Overview
|
||||
|
||||
LMI is seeking a skilled ATIS Help Desk Technician to provide Tier 2 and Tier 3 technical support for the RFMSS (Range Facility Management Support System) and ATMC (Army Training Management Capability) applications within the Army Training Information System (ATIS). This role is ideal for individuals with strong problem-solving skills and a passion for delivering high-quality customer service while supporting mission-critical applications for the U.S. Army.
|
||||
|
||||
At LMI, we're reimagining the path from insight to outcome at The New Speed of Possible. Combining a legacy of over 60 years of federal expertise with our innovation ecosystem, we minimize time to value and accelerate mission success. We energize the brightest minds with emerging technologies to inspire creative solutions and push the boundaries of capability. LMI advances the pace of progress, enabling our customers to thrive while adapting to evolving mission needs.
|
||||
Responsibilities:
• Provide Tier 2 and Tier 3 technical support for RFMSS and ATMC users via phone, email, and ticketing systems.
• Troubleshoot application, network, and system-related issues, escalating unresolved problems as necessary.
• Assist users with login issues, password resets, and account management.
• Document reported issues and resolutions in the ticketing system to support knowledge management.
• Conduct user training sessions and develop instructional materials on RFMSS and ATMC features and best practices.
• Collaborate with developers, system administrators, and cybersecurity teams to resolve recurring issues and improve system functionality.
• Ensure compliance with security protocols, policies, and guidelines related to ATIS, RFMSS, and ATMC operations.
• Participate in system updates, testing, and implementation efforts to minimize service disruptions.
• Travel required once per quarter for a four-day PI Planning event.

Qualifications:
• Associate's or Bachelor's degree in Information Technology, Computer Science, or a related field (or equivalent experience).
• 1-3 years of experience in a help desk or technical support role, preferably in a Tier 2 or Tier 3 capacity.
• Experience supporting RFMSS, ATMC, or similar military training and range management systems is highly desirable.
• Strong troubleshooting skills and the ability to communicate technical concepts to non-technical users.
• Familiarity with ITSM ticketing systems, remote troubleshooting tools, and enterprise support environments.
• Ability to work independently, prioritize tasks, and manage multiple support requests efficiently.
• Security+ or other relevant IT certifications are preferred.
• Knowledge of Army training systems, DoD networks, and cybersecurity best practices is a plus.
|
||||
|
||||
Disclaimer:
|
||||
|
||||
The salary range displayed represents the typical salary range for this position and is not a guarantee of compensation. Individual salaries are determined by various factors including, but not limited to, location, internal equity, business considerations, client contract requirements, and candidate qualifications such as education, experience, skills, and security clearances.
|
||||
|
||||
LMI is an Equal Opportunity Employer. LMI is committed to the fair treatment of all and to our policy of providing applicants and employees with equal employment opportunities. LMI recruits, hires, trains, and promotes people without regard to race, color, religion, sex, sexual orientation, gender identity, national origin, pregnancy, disability, age, protected veteran status, citizenship status, genetic information, or any other characteristic protected by applicable federal, state, or local law. If you are a person with a disability needing assistance with the application process, please contact
|
||||
Colorado Residents: In any materials you submit, you may redact or remove age-identifying information such as age, date of birth, or dates of school attendance or graduation. You will not be penalized for redacting or removing this information.
|
||||
|
||||
Need help finding the right job? We can recommend jobs specifically for you! Click here to get started.|~|google,in-1204f360ed401e85|~|IT Support Technician – Hospitality|~|Edge Communications|~|Not Provided|~|Not Provided|~|Not Provided|~|True|~|USD|~|70000.0|~|80000.0|~|2025-04-15|~|Honolulu|~|HI|~|US|~|https://www.indeed.com/viewjob?jk=1204f360ed401e85|~|Description:
|
||||
**IT Support Technician – Hospitality**
|
||||
|
||||
**Reports to: IT Services**
|
||||
|
||||
**Location: Honolulu**
|
||||
|
||||
**Company Description**
|
||||
|
||||
|
||||
Edge provides integrated managed voice and data technology systems and services for small/medium businesses and enterprises.
|
||||
|
||||
**Position Description**
|
||||
|
||||
|
||||
As an IT Support Technician, you will be part of a team of IT professionals who provide onsite \& remote support for all facets of the IT ecosystem. Our "white\-glove" 24/7 support program specializes in industries where attention to detail and timely response is mission critical. Our hospitality division caters to high\-end, large\-scale, and boutique hotels, restaurants, and nightclubs whose staff and patrons expect industry\-leading support. This is a fast\-paced, interactive, hands\-on role where you must "dress to impress" and give 100% daily.
|
||||
|
||||
|
||||
|
||||
As part of a team that supports multiple properties in several states we are looking for people who are self\-starters and can work remotely as well. You must manage your workload each day and be able to prioritize each task based on each unique situation. Using cutting\-edge industry remote management monitoring and access tools you will be assisted by teams in other regions and may be asked to do the same for them.
|
||||
|
||||
**Primary Responsibilities**
|
||||
|
||||
* Desktop support for hardware and software troubleshooting
|
||||
* Willingness to learn industry\-specific and proprietary management systems
|
||||
* Set up, deploy, and maintain end\-user equipment
|
||||
* Perform network administration functions: user account permissions, Active Directory changes
|
||||
* Follow up with clients to ensure resolution is complete and satisfactory
|
||||
* Maintain accurate, thorough, and timely information in the ticketing system
|
||||
* Research and resolve problems through all IT functions
|
||||
* Collaborate with peers to form technical solutions
|
||||
* Completion of day\-to\-day help desk support requests and assigned projects that require interaction with other divisions of our company
|
||||
|
||||
|
||||
Requirements:
|
||||
**Required Skills**
|
||||
|
||||
* Ability to provide on\-site \& remote desktop support to customers.
|
||||
* Ability to use remote support tools like VNC, LogMeIn, RDP, etc.
|
||||
* Strong troubleshooting abilities
|
||||
* Ability to use our remote management platform for workstation configuration status testing
|
||||
* Familiarity supporting (not engineering) TCP/IP, cables, IP phones, workstation connectivity, printer connectivity, POS devices, and Active Directory administration
|
||||
* Ability to be responsible dependable and committed to building a long\-term career at Edge Communications.
|
||||
* Being a goal\-driven team player with solid organizational skills and a keen attention to detail.
|
||||
* Independent self\-starting attitude with the willingness to share knowledge.
|
||||
* Thorough knowledge of all Windows server and desktop operating systems
|
||||
* Understanding of Hotel property management \& Point of Sale applications
|
||||
* Thorough knowledge of PC server hardware and configuration including related peripherals.
|
||||
* Thorough knowledge of Word, Excel, PowerPoint, Outlook, Active Directory, and Exchange
|
||||
* Strong customer service and problem\-solving skills including the ability to provide diligent prompt and courteous responses to users’ questions or PC issues.
|
||||
* Ability to function effectively in a fast\-paced environment
|
||||
* Willingness to travel occasionally
|
||||
* Ability to multi\-task and maintain good communication is a must
|
||||
|
||||
**Desired Skills \& Experience**
|
||||
|
||||
* Five years related experience or equivalent.
|
||||
* Two years of telecommunications experience
|
||||
* Knowledge of mobile devices in an enterprise including iPads iPhones Android devices
|
||||
* Understanding of PCI compliance and certificates
|
||||
* Familiarity with Ruckus APs and Meraki APs administration
|
||||
* Understanding of IP Networking and troubleshooting
|
||||
* Familiarity with hotel applications such as: PMS \- Opera; POS \- Micros; Revenue Management \- Ideas; Building Management \- HotSOS, Safelock, InnComm, and more; Sales \- Delphi/SalesForce
|
||||
* A\+ Certification
|
||||
* MCSE / MCDST / A\+ certification(s)
|
||||
* ACSP certification(s)|~|indeed,in-b70651ea69f7c429|~|Bi-lingual Help Desk|~|Intone Networks|~|Not Provided|~|Not Provided|~|CONTRACT|~|False|~|USD|~|53115.0|~|73952.0|~|2025-04-15|~|New York|~|NY|~|US|~|https://www.indeed.com/viewjob?jk=b70651ea69f7c429|~|Role: Bi\-lingual Help Desk Location: New York NY (Hybrid)|~|indeed,in-908e40df617013b9|~|IT Support Internship (Summer) — Lalor Family Dental|~|Lalor Family Dental|~|Not Provided|~|Not Provided|~|INTERNSHIP|~|False|~|USD|~|16.0|~|18.0|~|2025-04-15|~|Johnson City|~|NY|~|US|~|https://www.indeed.com/viewjob?jk=908e40df617013b9|~|**Join the growing team at Lalor Family Dental** a second\-generation family\-owned healthcare practice with over 60 years of experience in delivering exceptional patient care. We are seeking motivated tech\-savvy individuals for our **IT Support Internship** designed for those eager to gain real\-world IT experience in a dynamic multi\-location healthcare environment.
|
||||
|
||||
|
||||
This is a **paid summer internship** ideal for students pursuing a career in IT systems administration or healthcare technology. Whether you're exploring the field or looking to build your resume this hands\-on opportunity offers a unique blend of technical training mentorship and meaningful work.
|
||||
|
||||
**Why Intern at Lalor Family Dental?**
|
||||
|
||||
* Work in a **collaborative family\-owned healthcare practice**
|
||||
* Gain **hands\-on experience** supporting real IT systems and end\-users
|
||||
* Shadow seasoned IT professionals in a **fast\-paced healthcare environment**
|
||||
* Participate in IT projects and infrastructure design
|
||||
* Named a **Great Place to Work** and **\#18 in Fortune’s Best Workplaces in Health Care**
|
||||
* Fun company culture with **team events** and a strong focus on **work\-life balance**
|
||||
|
||||
**Key Responsibilities:**
|
||||
|
||||
* Assist with **IT support tickets** and troubleshooting of hardware/software issues
|
||||
* Shadow and support setup of **workstations mobile devices printers and medical equipment**
|
||||
* Learn and participate in **network and server maintenance**
|
||||
* Support system audits updates and performance tracking
|
||||
* Help deploy IT equipment and assist with **asset management across six locations**
|
||||
* Contribute to a **capstone project** aimed at improving IT operations
|
||||
|
||||
**Qualifications:**
|
||||
|
||||
* High school diploma or GED required
|
||||
* Currently pursuing a degree in Information Technology or related field (preferred)
|
||||
* Strong interest in **IT technology and healthcare**
|
||||
* Basic understanding of **computers networking and troubleshooting**
|
||||
* Excellent communication and problem\-solving skills
|
||||
* Ability to work independently and in a collaborative team setting
|
||||
|
||||
**Internship Benefits:**
|
||||
|
||||
* **Mentorship** from experienced IT Systems Support staff
|
||||
* **Real\-world experience** in a healthcare IT environment
|
||||
* Opportunity to develop technical communication and project management skills
|
||||
* Supportive team culture with **regular check\-ins and career development**
|
||||
* Internship completion letter and experience for **resume or school credit**
|
||||
|
||||
**Ready to Launch Your Career in IT?**
|
||||
|
||||
|
||||
Apply today to join Lalor Family Dental’s IT team and gain the hands\-on experience that will set you apart. Here your learning growth and future in tech truly matter.
|
||||
|
||||
**Lalor Family Dental is an equal\-opportunity employer** committed to creating an inclusive and diverse team environment.|~|indeed,in-4238c0f342b06c39|~|Help Desk Associate|~|Initiate Government Solutions|~|Not Provided|~|Not Provided|~|Not Provided|~|True|~|USD|~|44615.0|~|55920.0|~|2025-04-15|~|Washington|~|DC|~|US|~|https://www.indeed.com/viewjob?jk=4238c0f342b06c39|~|Description:
|
||||
|
||||
Founded in 2007, Initiate Government Solutions (IGS) is a Woman-Owned Small Business. We are a fully remote IT services provider that delivers innovative Enterprise IT and Health Services solutions across the federal sector. Our focus is on data analytics, health informatics, cloud migration, and the modernization of federal information systems.
|
||||
|
||||
|
||||
|
||||
IGS uses ISO 9001:2015, 20000\-1:2018, 27001:2013, and 28001:2007, CMMI/SVC3 and CMMI/DEV3 best practices, and PMBOK® methods to provide clients with a strategy to build solid foundations to grow capabilities and revenue. Our range of IT services and delivery methodologies are tailored to our customers’ unique needs to achieve maximum value.
|
||||
|
||||
|
||||
IGS is currently recruiting for a **Help Desk Associate** to support the Department of Veterans Affairs.
|
||||
|
||||
**This position is pending contract award; applicants will be reviewed post\-award.**
|
||||
|
||||
**Assignment of Work and Travel:**
|
||||
|
||||
|
||||
This is a remote access assignment. Candidates will work remotely daily and will remotely access VA systems, using approved VA\-provided communications systems. Travel is not required; however, the candidate may be required to attend onsite client meetings as requested.
|
||||
|
||||
**Responsibilities and Duties (Included but not limited to):**
|
||||
|
||||
* Provide help desk support assistance to the established Enterprise Service Desk (ESD) for managed access
|
||||
* Log help\-desk tickets into the appropriate existing workload management tracking system
|
||||
* Respond to email and phone inquiries from the ESD Helpdesk or customer
|
||||
* Provide user training and concierge services associated with access applications by creating workflow process documents and/or using MS Word, PowerPoint, or ad hoc
|
||||
* Assess what types of data are available in the VA and what data is being requested to ensure requestors are only requesting data that they need to perform duties
|
||||
|
||||
|
||||
Requirements:
|
||||
* Bachelor’s degree in Computer Science, Engineering, or another technical discipline. (A Bachelor’s degree can be substituted with an Associate’s degree and two (2\) additional years of relevant experience, or with a High School Diploma/GED and four (4\) additional years of relevant experience; an Associate’s degree can be substituted with a High School Diploma/GED and two (2\) additional years of relevant experience.)
|
||||
* 3 years of relevant experience, including significant experience in a help desk environment, preferably with the Dept. of Veterans Affairs
|
||||
* Must have experience in the analysis of IT business and information environment activities and events.
|
||||
* Must have experience in finding trends and errors and in reviewing data, with report\-writing skills.
|
||||
* Must have reliable internet service that allows for effective telecommuting
|
||||
* Must be able to obtain and maintain a VA Public Trust clearance
|
||||
* Excellent verbal and written communication skills
|
||||
* Must be eligible to work in the United States without sponsorship due to clearance requirement
|
||||
|
||||
**Preferred Qualifications and Core Competencies:**
|
||||
|
||||
* Active VA Public Trust
|
||||
* Experience supporting Department of Veterans Affairs and/or other federal organizations
|
||||
* Prior successful experience working in a remote environment
|
||||
|
||||
**Successful IGS employees embody the following Core Values:**
|
||||
|
||||
* **Integrity Honesty and Ethics:** We conduct our business with the highest level of ethics. Doing things like being accountable for mistakes accepting helpful criticism and following through on commitments to ourselves each other and our customers.
|
||||
* **Empathy Emotional Intelligence**: How we interact with others including peers colleagues stakeholders and customers. We take collective responsibility to create an environment where colleagues and customers feel valued included and respected. We work within a diverse integrated and collaborative team to drive towards accomplishing the larger mission. We conscientiously and meticulously learn about our customers’ and end\-users’ business drivers and challenges to ensure solutions meet not only technical needs but also support their mission.
|
||||
* **Strong Work Ethic (Reliability Dedication Productivity):** We are driven by a strong self\-motivated and results\-driven work ethic. We are reliable accountable proactive and tenacious and will do what it takes to get the job done.
|
||||
* **Life\-Long Learner (Curious Perspective Goal Orientated):** We challenge ourselves to continually learn and improve ourselves. We strive to be an expert in our field continuously honing our craft and finding solutions where others see problems.
|
||||
|
||||
**Compensation:** There are a host of factors that can influence final salary including but not limited to geographic location Federal Government contract labor categories and contract wage rates relevant prior work experience specific skills and competencies education and certifications.
|
||||
|
||||
**Benefits:** Initiate Government Solutions offers competitive compensation and a robust benefits package including comprehensive medical dental and vision care matching 401K and profit sharing paid time off training time for personal development flexible spending accounts employer\-paid life insurance employer\-paid short and long term disability coverage an education assistance program with potential merit increases for obtaining a work\-related certification employee recognition and referral programs spot bonuses and other benefits that help provide financial protection for the employee and their family.
|
||||
|
||||
|
||||
|
||||
Initiate Government Solutions participates in the Electronic Employment Verification Program.|~|indeed,in-9f7403b0512eed78|~|Senior Technical Analyst Yardi Help Desk - REMOTE|~|Welltower Inc|~|Not Provided|~|Not Provided|~|FULL_TIME|~|True|~|USD|~|73744.0|~|108594.0|~|2025-04-15|~|Unknown|~|TX|~|US|~|https://www.indeed.com/viewjob?jk=9f7403b0512eed78|~|**SUMMARY**
|
||||
|
||||
|
||||
|
||||
The Senior Technical Analyst, Yardi Help Desk is an experienced and dynamic team player who will be on the front line of support for stakeholders using the Yardi Senior product suite. The ideal candidate is detail\-oriented, able to work cross\-functionally, and able to provide advanced technical support to stakeholders: troubleshooting complex issues, leading escalations, and ensuring efficient resolution of technical problems. The Senior Technical Analyst, Help Desk will be required to work within a high\-demand, performance\-driven environment that focuses on implementing scalable solutions that are aligned with the company’s overall business strategy.
|
||||
|
||||
|
||||
|
||||
|
||||
**KEY RESPONSIBILITIES**
|
||||
|
||||
|
||||
* Develops and leverages relationships with internal and external stakeholders to meet strategic business objectives
|
||||
* Provide expert\-level technical support for escalated help desk issues
|
||||
* Troubleshoot complex issues and offer solutions across different modules within the Yardi Senior product suite
|
||||
* Owns and manages high\-priority or escalated incidents to resolution ensuring that issues are tracked communicated effectively to stakeholders and resolved in a timely manner
|
||||
* Responds to inbound support requests related to the Yardi Senior product suite via help desk platform phone email or chat
|
||||
* Troubleshoot and resolve technical issues related to the platform ensuring a high level of customer satisfaction
|
||||
* Document, prioritize, and track all inquiries and issues in the help desk ticketing system (e.g. JIRA, ServiceNow, Zendesk)
|
||||
* Stays up to date on new features and product updates within the Yardi Senior product suite to maintain a high level of technical knowledge and service excellence
|
||||
* Strives to meet or exceed service level agreements (SLAs) for ticket resolution response time and customer satisfaction
|
||||
* Collaborates with internal support teams to resolve challenges
|
||||
* Understands and fosters cross\-program and cross\-functional dependencies to champion execution success and maximize value capture
|
||||
* Develops regular and thorough status communications for senior leadership and stakeholders
|
||||
* Anticipates and mitigates risks dependencies and impediments to facilitate resolutions
|
||||
|
||||
|
||||
|
||||
|
||||
**OTHER DUTIES**
|
||||
|
||||
|
||||
|
||||
Please note this job description is not designed to provide a comprehensive listing of activities duties or responsibilities that are required of this role. Duties responsibilities and activities may change at any time with or without notice.
|
||||
|
||||
|
||||
**TRAVEL**
|
||||
|
||||
|
||||
|
||||
Out\-of\-area and overnight travel should be expected as outlined in specific projects for which this role will engage.
|
||||
|
||||
|
||||
**MINIMUM REQUIREMENTS**
|
||||
|
||||
|
||||
**Skills / Specialized Knowledge:**
|
||||
|
||||
|
||||
* Ability to manage portfolios of work
|
||||
* Solid understanding of project management and agile practices with the ability to teach and coach others
|
||||
* Keen ability to engage and work with different teams
|
||||
* Strong interpersonal conflict management and communications skills
|
||||
* Effective documentation and reporting skills
|
||||
|
||||
|
||||
**Experience:**
|
||||
|
||||
|
||||
* At least 5 years of experience in technical support help desk or IT roles with at least 2 years in a senior capacity
|
||||
* Strong knowledge of the Yardi Senior product suite is highly preferred
|
||||
* Experience with help desk platforms, ticketing systems, and customer relationship management tools (JIRA, ServiceNow, Zendesk)
|
||||
* Proficient troubleshooting skills with a solid understanding of web\-based applications SaaS products and general IT systems
|
||||
* Strong knowledge and expertise with property management software (Yardi) or experience in the senior housing industry is a plus
|
||||
* Project Management and Technical Support experience
|
||||
|
||||
|
||||
**Education:**
|
||||
|
||||
|
||||
* Bachelor’s degree in computer science information technology or related field or equivalent work experience
|
||||
* Relevant certifications (ITIL Help Desk Management) are a plus
|
||||
* Agile, Six Sigma, or PMP certification strongly preferred
|
||||
|
||||
|
||||
Applicants must be able to pass a pre\-employment drug screen.
|
||||
|
||||
|
||||
**WHAT WE OFFER**
|
||||
|
||||
|
||||
* Competitive Base Salary \+ Annual Bonus
|
||||
* Generous Paid Time Off and Holidays
|
||||
* Employer\-matching 401(k) Program \+ Profit Sharing Program
|
||||
* Student Debt Program – we’ll contribute up to $10,000 towards your student loans!
|
||||
* Tuition Assistance Program
|
||||
* Employee Stock Purchase Program – purchase shares at a 15% discount
|
||||
* Comprehensive and progressive Medical/Dental/Vision options
|
||||
* And much more! https://welltower.com/newsroom/careers/
|
||||
|
||||
|
||||
|
||||
|
||||
**ABOUT WELLTOWER**
|
||||
|
||||
|
||||
|
||||
Welltower® Inc. (NYSE: WELL), an S\&P 500 company, is the world's preeminent residential wellness and healthcare infrastructure company. Our portfolio of 1,500\+ Seniors and Wellness Housing communities is positioned at the intersection of housing, healthcare, and hospitality, creating vibrant communities for mature renters and older adults in the United States, United Kingdom, and Canada. We also seek to support physicians in our Outpatient Medical buildings with the critical infrastructure needed to deliver quality care.
|
||||
|
||||
|
||||
|
||||
Our real estate portfolio is unmatched, located in highly attractive micro\-markets with stunning built environments. Yet we are an unusual real estate organization, as we view ourselves as a product company in a real estate wrapper, driven by relationships and an unconventional culture.
|
||||
|
||||
|
||||
|
||||
Through our disciplined approach to capital allocation, powered by our data science platform, and superior operating results, driven by the Welltower Business System, we aspire to deliver long\-term compounding of per\-share growth and returns for our existing investors – our North Star.
|
||||
|
||||
|
||||
|
||||
\#LI\-REMOTE
|
||||
|
||||
|
||||
Equal Opportunity Employer/Protected Veterans/Individuals with Disabilities
|
||||
|
||||
|
||||
The contractor will not discharge or in any other manner discriminate against employees or applicants because they have inquired about, discussed, or disclosed their own pay or the pay of another employee or applicant. However, employees who have access to the compensation information of other employees or applicants as a part of their essential job functions cannot disclose the pay of other employees or applicants to individuals who do not otherwise have access to compensation information, unless the disclosure is (a) in response to a formal complaint or charge, (b) in furtherance of an investigation, proceeding, hearing, or action, including an investigation conducted by the employer, or (c) consistent with the contractor’s legal duty to furnish information. 41 CFR 60\-1\.35(c)|~|indeed
|
||||
|
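The records above come from a delimiter-separated data file: fields within each job record are joined with |~| (site ID, title, company, job type, salary bounds, date, city, state, country, URL, description, and so on), which is also why the viewer below refuses to render it. A minimal parsing sketch; the field order is inferred from the visible records and is an assumption, not a documented schema:

# Minimal sketch for splitting one |~|-delimited record from the jobs file above.
# Field order is inferred from the visible records (an assumption, not a spec).
record = (
    "indeed,in-b70651ea69f7c429|~|Bi-lingual Help Desk|~|Intone Networks"
    "|~|Not Provided|~|Not Provided|~|CONTRACT|~|False|~|USD|~|53115.0|~|73952.0"
    "|~|2025-04-15|~|New York|~|NY|~|US"
)
fields = record.split("|~|")
print(fields[1], "-", fields[2])  # Bi-lingual Help Desk - Intone Networks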
Can't render this file because it contains an unexpected character in line 16 and column 153.

poetry.lock (generated, 2844 lines): file diff suppressed because it is too large.

pyproject.toml
@@ -1,29 +1,33 @@
+[build-system]
+requires = [ "poetry-core",]
+build-backend = "poetry.core.masonry.api"
+
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.39"
-description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
-authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
-homepage = "https://github.com/Bunsly/JobSpy"
+version = "1.1.78"
+description = "Job scraper for LinkedIn, Indeed, Glassdoor, ZipRecruiter & Bayt"
+authors = ["Cullen Watson <cullen@cullenwatson.com>", "Zachary Hampton <zachary@zacharysproducts.com>"]
+homepage = "https://github.com/cullenwatson/JobSpy"
 readme = "README.md"
+keywords = [ "jobs-scraper", "linkedin", "indeed", "glassdoor", "ziprecruiter", "bayt"]

-[[tool.poetry.packages]]
-include = "jobspy"
+packages = [
+    { include = "jobspy", from = "src" }
+]
+
+[tool.black]
+line-length = 88

 [tool.poetry.dependencies]
-python = "^3.10"
+python = "^3.10 || ^3.12"
 requests = "^2.31.0"
-tls-client = "*"
 beautifulsoup4 = "^4.12.2"
 pandas = "^2.1.0"
-NUMPY = "1.24.2"
+NUMPY = "1.26.3"
 pydantic = "^2.3.0"
+tls-client = "^1.0.1"
+markdownify = "^0.13.1"
+regex = "^2024.4.28"

 [tool.poetry.group.dev.dependencies]
 pytest = "^7.4.1"
 jupyter = "^1.0.0"
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
+black = "*"
+pre-commit = "*"
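One practical effect of this change: sources move under src/ while the import path stays jobspy, and the distribution name remains python-jobspy. A quick sanity check (a sketch; assumes the package was installed from this repo, e.g. via poetry install or pip install .):

# Verify the installed distribution and the src-layout import both resolve.
import importlib.metadata

import jobspy  # served from src/jobspy under the new packages mapping

print(importlib.metadata.version("python-jobspy"))  # expected: 1.1.78
print(jobspy.__file__)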
requirements.txt (new file, 118 lines)
@@ -0,0 +1,118 @@
annotated-types==0.7.0
anyio==4.6.2.post1
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
asttokens==2.4.1
async-lru==2.0.4
attrs==24.2.0
babel==2.16.0
beautifulsoup4==4.12.3
black==24.10.0
bleach==6.1.0
certifi==2024.8.30
cffi==1.17.1
cfgv==3.4.0
charset-normalizer==3.4.0
click==8.1.7
comm==0.2.2
debugpy==1.8.7
decorator==5.1.1
defusedxml==0.7.1
distlib==0.3.9
executing==2.1.0
fastjsonschema==2.20.0
filelock==3.16.1
fqdn==1.5.1
h11==0.14.0
httpcore==1.0.6
httpx==0.27.2
identify==2.6.1
idna==3.10
ipykernel==6.29.5
ipython==8.28.0
ipywidgets==8.1.5
isoduration==20.11.0
jedi==0.19.1
Jinja2==3.1.4
json5==0.9.25
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter==1.1.1
jupyter-console==6.6.3
jupyter-events==0.10.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.14.2
jupyter_server_terminals==0.5.3
jupyterlab==4.2.5
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==3.0.13
markdownify==0.13.1
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
mistune==3.0.2
mypy-extensions==1.0.0
nbclient==0.10.0
nbconvert==7.16.4
nbformat==5.10.4
nest-asyncio==1.6.0
nodeenv==1.9.1
notebook==7.2.2
notebook_shim==0.2.4
numpy==1.26.3
overrides==7.7.0
packaging==24.1
pandas==2.2.3
pandocfilters==1.5.1
parso==0.8.4
pathspec==0.12.1
pexpect==4.9.0
platformdirs==4.3.6
pre_commit==4.0.1
prometheus_client==0.21.0
prompt_toolkit==3.0.48
psutil==6.1.0
ptyprocess==0.7.0
pure_eval==0.2.3
pycparser==2.22
pydantic==2.9.2
pydantic_core==2.23.4
Pygments==2.18.0
python-dateutil==2.9.0.post0
-e git+https://github.com/fakebranden/JobSpy@60819a8fcabbd3eaba7741b673023612dc3d3692#egg=python_jobspy
python-json-logger==2.0.7
pytz==2024.2
PyYAML==6.0.2
pyzmq==26.2.0
referencing==0.35.1
regex==2024.9.11
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.20.0
Send2Trash==1.8.3
setuptools==75.2.0
six==1.16.0
sniffio==1.3.1
soupsieve==2.6
stack-data==0.6.3
terminado==0.18.1
tinycss2==1.3.0
tls-client==1.0.1
tornado==6.4.1
traitlets==5.14.3
types-python-dateutil==2.9.0.20241003
typing_extensions==4.12.2
tzdata==2024.2
uri-template==1.3.0
urllib3==2.2.3
virtualenv==20.27.0
wcwidth==0.2.13
webcolors==24.8.0
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==4.0.13
jobspy/__init__.py (deleted)
@@ -1,186 +0,0 @@
|
||||
import pandas as pd
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from typing import Tuple, Optional

from .jobs import JobType, Location
from .scrapers.indeed import IndeedScraper
from .scrapers.ziprecruiter import ZipRecruiterScraper
from .scrapers.glassdoor import GlassdoorScraper
from .scrapers.linkedin import LinkedInScraper
from .scrapers import ScraperInput, Site, JobResponse, Country
from .scrapers.exceptions import (
    LinkedInException,
    IndeedException,
    ZipRecruiterException,
    GlassdoorException,
)

SCRAPER_MAPPING = {
    Site.LINKEDIN: LinkedInScraper,
    Site.INDEED: IndeedScraper,
    Site.ZIP_RECRUITER: ZipRecruiterScraper,
    Site.GLASSDOOR: GlassdoorScraper,
}


def _map_str_to_site(site_name: str) -> Site:
    return Site[site_name.upper()]


def scrape_jobs(
    site_name: str | list[str] | Site | list[Site],
    search_term: str,
    location: str = "",
    distance: Optional[int] = None,
    is_remote: bool = False,
    job_type: Optional[str] = None,
    easy_apply: bool = False,  # linkedin only
    results_wanted: int = 15,
    country_indeed: str = "usa",
    hyperlinks: bool = False,
    proxy: Optional[str] = None,
    full_description: Optional[bool] = False,
    offset: Optional[int] = 0,
) -> pd.DataFrame:
    """
    Simultaneously scrapes job data from multiple job sites.
    :return: pandas DataFrame containing job data
    """

    def get_enum_from_value(value_str):
        for jt in JobType:
            if value_str in jt.value:
                return jt
        raise ValueError(f"Invalid job type: {value_str}")

    job_type = get_enum_from_value(job_type) if job_type else None

    if isinstance(site_name, str):
        site_type = [_map_str_to_site(site_name)]
    else:  # a list of strings and/or Site values
        site_type = [
            _map_str_to_site(site) if isinstance(site, str) else site
            for site in site_name
        ]

    country_enum = Country.from_string(country_indeed)

    scraper_input = ScraperInput(
        site_type=site_type,
        country=country_enum,
        search_term=search_term,
        location=location,
        distance=distance,
        is_remote=is_remote,
        job_type=job_type,
        easy_apply=easy_apply,
        full_description=full_description,
        results_wanted=results_wanted,
        offset=offset,
    )

    def scrape_site(site: Site) -> Tuple[str, JobResponse]:
        scraper_class = SCRAPER_MAPPING[site]
        scraper = scraper_class(proxy=proxy)

        try:
            scraped_data: JobResponse = scraper.scrape(scraper_input)
        except (
            LinkedInException,
            IndeedException,
            ZipRecruiterException,
            GlassdoorException,
        ):
            raise  # already site-specific; propagate unchanged
        except Exception as e:
            # wrap any other failure in the exception type of the failing site
            if site == Site.LINKEDIN:
                raise LinkedInException(str(e))
            if site == Site.INDEED:
                raise IndeedException(str(e))
            if site == Site.ZIP_RECRUITER:
                raise ZipRecruiterException(str(e))
            if site == Site.GLASSDOOR:
                raise GlassdoorException(str(e))
            raise
        return site.value, scraped_data

    site_to_jobs_dict = {}

    def worker(site):
        site_val, scraped_info = scrape_site(site)
        return site_val, scraped_info

    with ThreadPoolExecutor() as executor:
        future_to_site = {
            executor.submit(worker, site): site for site in scraper_input.site_type
        }

        for future in concurrent.futures.as_completed(future_to_site):
            site_value, scraped_data = future.result()
            site_to_jobs_dict[site_value] = scraped_data

    jobs_dfs: list[pd.DataFrame] = []

    for site, job_response in site_to_jobs_dict.items():
        for job in job_response.jobs:
            job_data = job.dict()
            job_data[
                "job_url_hyper"
            ] = f'<a href="{job_data["job_url"]}">{job_data["job_url"]}</a>'
            job_data["site"] = site
            job_data["company"] = job_data["company_name"]
            job_data["job_type"] = (
                ", ".join(jt.value[0] for jt in job_data["job_type"])
                if job_data["job_type"]
                else None
            )
            job_data["emails"] = (
                ", ".join(job_data["emails"]) if job_data["emails"] else None
            )
            if job_data["location"]:
                job_data["location"] = Location(
                    **job_data["location"]
                ).display_location()

            compensation_obj = job_data.get("compensation")
            if compensation_obj and isinstance(compensation_obj, dict):
                job_data["interval"] = (
                    compensation_obj.get("interval").value
                    if compensation_obj.get("interval")
                    else None
                )
                job_data["min_amount"] = compensation_obj.get("min_amount")
                job_data["max_amount"] = compensation_obj.get("max_amount")
                job_data["currency"] = compensation_obj.get("currency", "USD")
            else:
                job_data["interval"] = None
                job_data["min_amount"] = None
                job_data["max_amount"] = None
                job_data["currency"] = None

            job_df = pd.DataFrame([job_data])
            jobs_dfs.append(job_df)

    if jobs_dfs:
        jobs_df = pd.concat(jobs_dfs, ignore_index=True)
        desired_order: list[str] = [
            "job_url_hyper" if hyperlinks else "job_url",
            "site",
            "title",
            "company",
            "company_url",
            "location",
            "job_type",
            "date_posted",
            "interval",
            "min_amount",
            "max_amount",
            "currency",
            "is_remote",
            "num_urgent_words",
            "benefits",
            "emails",
            "description",
        ]
        jobs_formatted_df = jobs_df[desired_order]
    else:
        jobs_formatted_df = pd.DataFrame()

    return jobs_formatted_df
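For reference, a minimal call against the scrape_jobs() signature above (the search term, location, and site list are illustrative, not taken from the repo's docs):

# Minimal usage sketch for the pre-refactor scrape_jobs() shown above.
from jobspy import scrape_jobs

jobs_df = scrape_jobs(
    site_name=["indeed", "glassdoor"],  # strings map to Site via Site[name.upper()]
    search_term="help desk",
    location="Washington, DC",
    results_wanted=20,
    country_indeed="usa",
)
print(jobs_df[["site", "title", "company", "location", "min_amount", "max_amount"]].head())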
jobspy/scrapers/__init__.py (deleted)
@@ -1,34 +0,0 @@
|
||||
from ..jobs import Enum, BaseModel, JobType, JobResponse, Country
from typing import List, Optional, Any


class Site(Enum):
    LINKEDIN = "linkedin"
    INDEED = "indeed"
    ZIP_RECRUITER = "zip_recruiter"
    GLASSDOOR = "glassdoor"


class ScraperInput(BaseModel):
    site_type: List[Site]
    search_term: str

    location: str = None
    country: Optional[Country] = Country.USA
    distance: Optional[int] = None
    is_remote: bool = False
    job_type: Optional[JobType] = None
    easy_apply: bool = None  # linkedin
    full_description: bool = False
    offset: int = 0

    results_wanted: int = 15


class Scraper:
    def __init__(self, site: Site, proxy: Optional[List[str]] = None):
        self.site = site
        self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        ...
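Every site module below follows this contract: subclass Scraper, pass its Site value to super().__init__(), and implement scrape(). A minimal sketch for illustration; ExampleScraper is hypothetical, while GlassdoorScraper and IndeedScraper below are the real implementations:

# Illustrative only: the shape every concrete scraper below follows.
class ExampleScraper(Scraper):
    def __init__(self, proxy: Optional[str] = None):
        super().__init__(Site.INDEED, proxy=proxy)  # site identity + proxy mapping

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        # A real implementation fetches and parses listings for scraper_input.
        return JobResponse(jobs=[])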
jobspy/scrapers/glassdoor.py (deleted)
@@ -1,333 +0,0 @@
|
||||
"""
|
||||
jobspy.scrapers.glassdoor
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This module contains routines to scrape Glassdoor.
|
||||
"""
|
||||
import json
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from typing import Optional
|
||||
from datetime import datetime, timedelta
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from ..utils import count_urgent_words, extract_emails_from_text
|
||||
|
||||
from .. import Scraper, ScraperInput, Site
|
||||
from ..exceptions import GlassdoorException
|
||||
from ..utils import create_session, modify_and_get_description
|
||||
from ...jobs import (
|
||||
JobPost,
|
||||
Compensation,
|
||||
CompensationInterval,
|
||||
Location,
|
||||
JobResponse,
|
||||
JobType,
|
||||
)
|
||||
|
||||
|
||||
class GlassdoorScraper(Scraper):
|
||||
def __init__(self, proxy: Optional[str] = None):
|
||||
"""
|
||||
Initializes GlassdoorScraper with the Glassdoor job search url
|
||||
"""
|
||||
site = Site(Site.GLASSDOOR)
|
||||
super().__init__(site, proxy=proxy)
|
||||
|
||||
self.url = None
|
||||
self.country = None
|
||||
self.jobs_per_page = 30
|
||||
self.seen_urls = set()
|
||||
|
||||
def fetch_jobs_page(
|
||||
self,
|
||||
scraper_input: ScraperInput,
|
||||
location_id: int,
|
||||
location_type: str,
|
||||
page_num: int,
|
||||
cursor: str | None,
|
||||
) -> (list[JobPost], str | None):
|
||||
"""
|
||||
Scrapes a page of Glassdoor for jobs with scraper_input criteria
|
||||
"""
|
||||
try:
|
||||
payload = self.add_payload(
|
||||
scraper_input, location_id, location_type, page_num, cursor
|
||||
)
|
||||
session = create_session(self.proxy, is_tls=False, has_retry=True)
|
||||
response = session.post(
|
||||
f"{self.url}/graph", headers=self.headers(), timeout=10, data=payload
|
||||
)
|
||||
if response.status_code != 200:
|
||||
raise GlassdoorException(
|
||||
f"bad response status code: {response.status_code}"
|
||||
)
|
||||
res_json = response.json()[0]
|
||||
if "errors" in res_json:
|
||||
raise ValueError("Error encountered in API response")
|
||||
except Exception as e:
|
||||
raise GlassdoorException(str(e))
|
||||
|
||||
jobs_data = res_json["data"]["jobListings"]["jobListings"]
|
||||
|
||||
jobs = []
|
||||
with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
|
||||
future_to_job_data = {executor.submit(self.process_job, job): job for job in jobs_data}
|
||||
for future in as_completed(future_to_job_data):
|
||||
job_data = future_to_job_data[future]
|
||||
try:
|
||||
job_post = future.result()
|
||||
if job_post:
|
||||
jobs.append(job_post)
|
||||
except Exception as exc:
|
||||
raise GlassdoorException(f'Glassdoor generated an exception: {exc}')
|
||||
|
||||
return jobs, self.get_cursor_for_page(
|
||||
res_json["data"]["jobListings"]["paginationCursors"], page_num + 1
|
||||
)
|
||||
|
||||
def process_job(self, job_data):
|
||||
"""Processes a single job and fetches its description."""
|
||||
job_id = job_data["jobview"]["job"]["listingId"]
|
||||
job_url = f'{self.url}job-listing/j?jl={job_id}'
|
||||
if job_url in self.seen_urls:
|
||||
return None
|
||||
self.seen_urls.add(job_url)
|
||||
job = job_data["jobview"]
|
||||
title = job["job"]["jobTitleText"]
|
||||
company_name = job["header"]["employerNameFromSearch"]
|
||||
company_id = job_data['jobview']['header']['employer']['id']
|
||||
location_name = job["header"].get("locationName", "")
|
||||
location_type = job["header"].get("locationType", "")
|
||||
age_in_days = job["header"].get("ageInDays")
|
||||
is_remote, location = False, None
|
||||
date_posted = (datetime.now() - timedelta(days=age_in_days)).date() if age_in_days else None
|
||||
|
||||
if location_type == "S":
|
||||
is_remote = True
|
||||
else:
|
||||
location = self.parse_location(location_name)
|
||||
|
||||
compensation = self.parse_compensation(job["header"])
|
||||
|
||||
try:
|
||||
description = self.fetch_job_description(job_id)
|
||||
except Exception:
|
||||
description = None
|
||||
|
||||
job_post = JobPost(
|
||||
title=title,
|
||||
company_url=f"{self.url}Overview/W-EI_IE{company_id}.htm" if company_id else None,
|
||||
company_name=company_name,
|
||||
date_posted=date_posted,
|
||||
job_url=job_url,
|
||||
location=location,
|
||||
compensation=compensation,
|
||||
is_remote=is_remote,
|
||||
description=description,
|
||||
emails=extract_emails_from_text(description) if description else None,
|
||||
num_urgent_words=count_urgent_words(description) if description else None,
|
||||
)
|
||||
return job_post
|
||||
|
||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
||||
"""
|
||||
Scrapes Glassdoor for jobs with scraper_input criteria.
|
||||
:param scraper_input: Information about job search criteria.
|
||||
:return: JobResponse containing a list of jobs.
|
||||
"""
|
||||
scraper_input.results_wanted = min(900, scraper_input.results_wanted)
|
||||
self.country = scraper_input.country
|
||||
self.url = self.country.get_url()
|
||||
|
||||
location_id, location_type = self.get_location(
|
||||
scraper_input.location, scraper_input.is_remote
|
||||
)
|
||||
all_jobs: list[JobPost] = []
|
||||
cursor = None
|
||||
max_pages = 30
|
||||
|
||||
try:
|
||||
for page in range(
|
||||
1 + (scraper_input.offset // self.jobs_per_page),
|
||||
min(
|
||||
(scraper_input.results_wanted // self.jobs_per_page) + 2,
|
||||
max_pages + 1,
|
||||
),
|
||||
):
|
||||
try:
|
||||
jobs, cursor = self.fetch_jobs_page(
|
||||
scraper_input, location_id, location_type, page, cursor
|
||||
)
|
||||
all_jobs.extend(jobs)
|
||||
if len(all_jobs) >= scraper_input.results_wanted:
|
||||
all_jobs = all_jobs[: scraper_input.results_wanted]
|
||||
break
|
||||
except Exception as e:
|
||||
raise GlassdoorException(str(e))
|
||||
except Exception as e:
|
||||
raise GlassdoorException(str(e))
|
||||
|
||||
return JobResponse(jobs=all_jobs)
|
||||
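To make the page-window arithmetic in scrape() concrete (a worked example using the constants above):

# With jobs_per_page = 30, max_pages = 30, offset = 0, results_wanted = 60:
#   start = 1 + (0 // 30)                 -> 1
#   stop  = min((60 // 30) + 2, 30 + 1)   -> 4 (exclusive)
# so pages 1-3 are requested, up to 90 listings before trimming to 60.
print(list(range(1 + (0 // 30), min((60 // 30) + 2, 30 + 1))))  # [1, 2, 3]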
|
||||
def fetch_job_description(self, job_id):
|
||||
"""Fetches the job description for a single job ID."""
|
||||
url = f"{self.url}/graph"
|
||||
body = [
|
||||
{
|
||||
"operationName": "JobDetailQuery",
|
||||
"variables": {
|
||||
"jl": job_id,
|
||||
"queryString": "q",
|
||||
"pageTypeEnum": "SERP"
|
||||
},
|
||||
"query": """
|
||||
query JobDetailQuery($jl: Long!, $queryString: String, $pageTypeEnum: PageTypeEnum) {
|
||||
jobview: jobView(
|
||||
listingId: $jl
|
||||
contextHolder: {queryString: $queryString, pageTypeEnum: $pageTypeEnum}
|
||||
) {
|
||||
job {
|
||||
description
|
||||
__typename
|
||||
}
|
||||
__typename
|
||||
}
|
||||
}
|
||||
"""
|
||||
}
|
||||
]
|
||||
response = requests.post(url, json=body, headers=GlassdoorScraper.headers())
|
||||
if response.status_code != 200:
|
||||
return None
|
||||
data = response.json()[0]
|
||||
desc = data['data']['jobview']['job']['description']
|
||||
soup = BeautifulSoup(desc, 'html.parser')
|
||||
return modify_and_get_description(soup)
|
||||
|
||||
@staticmethod
|
||||
def parse_compensation(data: dict) -> Optional[Compensation]:
|
||||
pay_period = data.get("payPeriod")
|
||||
adjusted_pay = data.get("payPeriodAdjustedPay")
|
||||
currency = data.get("payCurrency", "USD")
|
||||
|
||||
if not pay_period or not adjusted_pay:
|
||||
return None
|
||||
|
||||
interval = None
|
||||
if pay_period == "ANNUAL":
|
||||
interval = CompensationInterval.YEARLY
|
||||
elif pay_period:
|
||||
interval = CompensationInterval.get_interval(pay_period)
|
||||
min_amount = int(adjusted_pay.get("p10") // 1)
|
||||
max_amount = int(adjusted_pay.get("p90") // 1)
|
||||
|
||||
return Compensation(
|
||||
interval=interval,
|
||||
min_amount=min_amount,
|
||||
max_amount=max_amount,
|
||||
currency=currency,
|
||||
)
|
||||
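A worked example of parse_compensation() above; the header values are illustrative, not real Glassdoor data:

# Illustrative header fragment for parse_compensation(); numbers are made up.
header = {
    "payPeriod": "ANNUAL",
    "payPeriodAdjustedPay": {"p10": 53115.4, "p50": 62000.0, "p90": 73952.9},
    "payCurrency": "USD",
}
comp = GlassdoorScraper.parse_compensation(header)
# -> Compensation(interval=CompensationInterval.YEARLY,
#                 min_amount=53115, max_amount=73952, currency="USD")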
|
||||
def get_location(self, location: str, is_remote: bool) -> (int, str):
|
||||
if not location or is_remote:
|
||||
return "11047", "STATE" # remote options
|
||||
url = f"{self.url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
|
||||
session = create_session(self.proxy, has_retry=True)
|
||||
response = session.get(url)
|
||||
if response.status_code != 200:
|
||||
raise GlassdoorException(
|
||||
f"bad response status code: {response.status_code}"
|
||||
)
|
||||
items = response.json()
|
||||
if not items:
|
||||
raise ValueError(f"Location '{location}' not found on Glassdoor")
|
||||
location_type = items[0]["locationType"]
|
||||
if location_type == "C":
|
||||
location_type = "CITY"
|
||||
elif location_type == "S":
|
||||
location_type = "STATE"
|
||||
return int(items[0]["locationId"]), location_type
|
||||
|
||||
@staticmethod
|
||||
def add_payload(
|
||||
scraper_input,
|
||||
location_id: int,
|
||||
location_type: str,
|
||||
page_num: int,
|
||||
cursor: str | None = None,
|
||||
) -> str:
|
||||
payload = {
|
||||
"operationName": "JobSearchResultsQuery",
|
||||
"variables": {
|
||||
"excludeJobListingIds": [],
|
||||
"filterParams": [{"filterKey": "applicationType", "values": "1"}] if scraper_input.easy_apply else [],
|
||||
"keyword": scraper_input.search_term,
|
||||
"numJobsToShow": 30,
|
||||
"locationType": location_type,
|
||||
"locationId": int(location_id),
|
||||
"parameterUrlInput": f"IL.0,12_I{location_type}{location_id}",
|
||||
"pageNumber": page_num,
|
||||
"pageCursor": cursor,
|
||||
},
|
||||
"query": "query JobSearchResultsQuery($excludeJobListingIds: [Long!], $keyword: String, $locationId: Int, $locationType: LocationTypeEnum, $numJobsToShow: Int!, $pageCursor: String, $pageNumber: Int, $filterParams: [FilterParams], $originalPageUrl: String, $seoFriendlyUrlInput: String, $parameterUrlInput: String, $seoUrl: Boolean) {\n jobListings(\n contextHolder: {searchParams: {excludeJobListingIds: $excludeJobListingIds, keyword: $keyword, locationId: $locationId, locationType: $locationType, numPerPage: $numJobsToShow, pageCursor: $pageCursor, pageNumber: $pageNumber, filterParams: $filterParams, originalPageUrl: $originalPageUrl, seoFriendlyUrlInput: $seoFriendlyUrlInput, parameterUrlInput: $parameterUrlInput, seoUrl: $seoUrl, searchType: SR}}\n ) {\n companyFilterOptions {\n id\n shortName\n __typename\n }\n filterOptions\n indeedCtk\n jobListings {\n ...JobView\n __typename\n }\n jobListingSeoLinks {\n linkItems {\n position\n url\n __typename\n }\n __typename\n }\n jobSearchTrackingKey\n jobsPageSeoData {\n pageMetaDescription\n pageTitle\n __typename\n }\n paginationCursors {\n cursor\n pageNumber\n __typename\n }\n indexablePageForSeo\n searchResultsMetadata {\n searchCriteria {\n implicitLocation {\n id\n localizedDisplayName\n type\n __typename\n }\n keyword\n location {\n id\n shortName\n localizedShortName\n localizedDisplayName\n type\n __typename\n }\n __typename\n }\n footerVO {\n countryMenu {\n childNavigationLinks {\n id\n link\n textKey\n __typename\n }\n __typename\n }\n __typename\n }\n helpCenterDomain\n helpCenterLocale\n jobAlert {\n jobAlertExists\n __typename\n }\n jobSerpFaq {\n questions {\n answer\n question\n __typename\n }\n __typename\n }\n jobSerpJobOutlook {\n occupation\n paragraph\n __typename\n }\n showMachineReadableJobs\n __typename\n }\n serpSeoLinksVO {\n relatedJobTitlesResults\n searchedJobTitle\n searchedKeyword\n searchedLocationIdAsString\n searchedLocationSeoName\n searchedLocationType\n topCityIdsToNameResults {\n key\n value\n __typename\n }\n topEmployerIdsToNameResults {\n key\n value\n __typename\n }\n topEmployerNameResults\n topOccupationResults\n __typename\n }\n totalJobsCount\n __typename\n }\n}\n\nfragment JobView on JobListingSearchResult {\n jobview {\n header {\n adOrderId\n advertiserType\n adOrderSponsorshipLevel\n ageInDays\n divisionEmployerName\n easyApply\n employer {\n id\n name\n shortName\n __typename\n }\n employerNameFromSearch\n goc\n gocConfidence\n gocId\n jobCountryId\n jobLink\n jobResultTrackingKey\n jobTitleText\n locationName\n locationType\n locId\n needsCommission\n payCurrency\n payPeriod\n payPeriodAdjustedPay {\n p10\n p50\n p90\n __typename\n }\n rating\n salarySource\n savedJobId\n sponsored\n __typename\n }\n job {\n descriptionFragments\n importConfigId\n jobTitleId\n jobTitleText\n listingId\n __typename\n }\n jobListingAdminDetails {\n cpcVal\n importConfigId\n jobListingId\n jobSourceId\n userEligibleForAdminJobDetails\n __typename\n }\n overview {\n shortName\n squareLogoUrl\n __typename\n }\n __typename\n }\n __typename\n}\n",
|
||||
}
|
||||
|
||||
job_type_filters = {
|
||||
JobType.FULL_TIME: "fulltime",
|
||||
JobType.PART_TIME: "parttime",
|
||||
JobType.CONTRACT: "contract",
|
||||
JobType.INTERNSHIP: "internship",
|
||||
JobType.TEMPORARY: "temporary",
|
||||
}
|
||||
|
||||
if scraper_input.job_type in job_type_filters:
|
||||
filter_value = job_type_filters[scraper_input.job_type]
|
||||
payload["variables"]["filterParams"].append(
|
||||
{"filterKey": "jobType", "values": filter_value}
|
||||
)
|
||||
return json.dumps([payload])
|
||||
|
||||
@staticmethod
|
||||
def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
|
||||
for job_type in JobType:
|
||||
if job_type_str in job_type.value:
|
||||
return [job_type]
|
||||
|
||||
@staticmethod
|
||||
def parse_location(location_name: str) -> Location | None:
|
||||
if not location_name or location_name == "Remote":
|
||||
return
|
||||
city, _, state = location_name.partition(", ")
|
||||
return Location(city=city, state=state)
|
||||
|
||||
@staticmethod
|
||||
def get_cursor_for_page(pagination_cursors, page_num):
|
||||
for cursor_data in pagination_cursors:
|
||||
if cursor_data["pageNumber"] == page_num:
|
||||
return cursor_data["cursor"]
|
||||
|
||||
@staticmethod
|
||||
def headers() -> dict:
|
||||
"""
|
||||
Returns headers needed for requests
|
||||
:return: dict - Dictionary containing headers
|
||||
"""
|
||||
return {
|
||||
"authority": "www.glassdoor.com",
|
||||
"accept": "*/*",
|
||||
"accept-language": "en-US,en;q=0.9",
|
||||
"apollographql-client-name": "job-search-next",
|
||||
"apollographql-client-version": "4.65.5",
|
||||
"content-type": "application/json",
|
||||
"cookie": 'gdId=91e2dfc4-c8b5-4fa7-83d0-11512b80262c; G_ENABLED_IDPS=google; trs=https%3A%2F%2Fwww.redhat.com%2F:referral:referral:2023-07-05+09%3A50%3A14.862:undefined:undefined; g_state={"i_p":1688587331651,"i_l":1}; _cfuvid=.7llazxhYFZWi6EISSPdVjtqF0NMVwzxr_E.cB1jgLs-1697828392979-0-604800000; GSESSIONID=undefined; JSESSIONID=F03DD1B5EE02DB6D842FE42B142F88F3; cass=1; jobsClicked=true; indeedCtk=1hd77b301k79i801; asst=1697829114.2; G_AUTHUSER_H=0; uc=8013A8318C98C517FE6DD0024636DFDEF978FC33266D93A2FAFEF364EACA608949D8B8FA2DC243D62DE271D733EB189D809ABE5B08D7B1AE865D217BD4EEBB97C282F5DA5FEFE79C937E3F6110B2A3A0ADBBA3B4B6DF5A996FEE00516100A65FCB11DA26817BE8D1C1BF6CFE36B5B68A3FDC2CFEC83AB797F7841FBB157C202332FC7E077B56BD39B167BDF3D9866E3B; AWSALB=zxc/Yk1nbWXXT6HjNyn3H4h4950ckVsFV/zOrq5LSoChYLE1qV+hDI8Axi3fUa9rlskndcO0M+Fw+ZnJ+AQ2afBFpyOd1acouLMYgkbEpqpQaWhY6/Gv4QH1zBcJ; AWSALBCORS=zxc/Yk1nbWXXT6HjNyn3H4h4950ckVsFV/zOrq5LSoChYLE1qV+hDI8Axi3fUa9rlskndcO0M+Fw+ZnJ+AQ2afBFpyOd1acouLMYgkbEpqpQaWhY6/Gv4QH1zBcJ; gdsid=1697828393025:1697830776351:668396EDB9E6A832022D34414128093D; at=HkH8Hnqi9uaMC7eu0okqyIwqp07ht9hBvE1_St7E_hRqPvkO9pUeJ1Jcpds4F3g6LL5ADaCNlxrPn0o6DumGMfog8qI1-zxaV_jpiFs3pugntw6WpVyYWdfioIZ1IDKupyteeLQEM1AO4zhGjY_rPZynpsiZBPO_B1au94sKv64rv23yvP56OiWKKfI-8_9hhLACEwWvM-Az7X-4aE2QdFt93VJbXbbGVf07bdDZfimsIkTtgJCLSRhU1V0kEM1Efyu66vo3m77gFFaMW7lxyYnb36I5PdDtEXBm3aL-zR7-qa5ywd94ISEivgqQOA4FPItNhqIlX4XrfD1lxVz6rfPaoTIDi4DI6UMCUjwyPsuv8mn0rYqDfRnmJpZ97fJ5AnhrknAd_6ZWN5v1OrxJczHzcXd8LO820QPoqxzzG13bmSTXLwGSxMUCtSrVsq05hicimQ3jpRt0c1dA4OkTNqF7_770B9JfcHcM8cr8-C4IL56dnOjr9KBGfN1Q2IvZM2cOBRbV7okiNOzKVZ3qJ24AE34WA2F3U6Whiu6H8nIuGG5hSNkVygY6CtglNZfFF9p8pJAZm79PngrrBv-CXFBZmhYLFo46lmFetDkiJ6mirtez4tKpzTIYjIp4_JAkiZFwbLJ2QGH4mK8kyyW0lZiX1DTuQec50N_5wvRo0Gt7nlKxzLsApMnaNhuQeH5ygh_pa381ORo9mQGi0EYF9zk00pa2--z4PtjfQ8KFq36GgpxKy5-o4qgqygZj8F01L8r-FiX2G4C7PREMIpAyHX2A4-_JxA1IS2j12EyqKTLqE9VcP06qm2Z-YuIW3ctmpMxy5G9_KiEiGv17weizhSFnl6SbpAEY-2VSmQ5V6jm3hoMp2jemkuGCRkZeFstLDEPxlzFN7WM; __cf_bm=zGaVjIJw4irf40_7UVw54B6Ohm271RUX4Tc8KVScrbs-1697830777-0-AYv2GnKTnnCU+cY9xHbJunO0DwlLDO6SIBnC/s/qldpKsGK0rRAjD6y8lbyATT/KlS7g29OZaN4fbd0lrJg0KmWbIybZIzfWVLHSYePVuOhu; asst=1697829114.2; at=dFhXf64wsf2TlnWy41xLs7skJkuxgKToEGcjGtDfUvW4oEAJ4tTIR5dKQ8wbwT75aIaGgdCfvcb-da7vwrCGWscCncmfLFQpJ9l-LLwoRfk-pMsxHhd77wvf-W7I0HSm7-Q5lQJqI9WyNGRxOa-RpzBTf4L8_Et4-3FzjPaAoYY5pY1FhuwXbN5asGOAMW-p8cjpbfn3PumlIYuckguWnjrcY2F31YJ_1noeoHM9tCGpymANbqGXRkG6aXY7yCfVXtdgZU1K5SMeaSPZIuF_iLUxjc_corzpNiH6qq7BIAmh-e5Aa-g7cwpZcln1fmwTVw4uTMZf1eLIMTa9WzgqZNkvG-sGaq_XxKA_Wai6xTTkOHfRgm4632Ba2963wdJvkGmUUa3tb_L4_wTgk3eFnHp5JhghLfT2Pe3KidP-yX__vx8JOsqe3fndCkKXgVz7xQKe1Dur-sMNlGwi4LXfguTT2YUI8C5Miq3pj2IHc7dC97eyyAiAM4HvyGWfaXWZcei6oIGrOwMvYgy0AcwFry6SIP2SxLT5TrxinRRuem1r1IcOTJsMJyUPp1QsZ7bOyq9G_0060B4CPyovw5523hEuqLTM-R5e5yavY6C_1DHUyE15C3mrh7kdvmlGZeflnHqkFTEKwwOftm-Mv-CKD5Db9ABFGNxKB2FH7nDH67hfOvm4tGNMzceBPKYJ3wciTt9jK3wy39_7cOYVywfrZ-oLhw_XtsbGSSeGn3HytrfgSADAh2sT0Gg6eCC9Xy1vh-Za337SVLUDXZ73W2xJxxUHBkFzZs8L_Xndo5DsbpWhVs9IYUGyraJdqB3SLgDbAppIBCJl4fx6_DG8-xOQPBvuFMlTROe1JVdHOzXI1GElwFDTuH1pjkg4I2G0NhAbE06Y-1illQE; gdsid=1697828393025:1697831731408:99C30D94108AC3030D61C736DDCDF11C',
|
||||
"gd-csrf-token": "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok",
|
||||
"origin": "https://www.glassdoor.com",
|
||||
"referer": "https://www.glassdoor.com/",
|
||||
"sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
|
||||
"sec-ch-ua-mobile": "?0",
|
||||
"sec-ch-ua-platform": '"macOS"',
|
||||
"sec-fetch-dest": "empty",
|
||||
"sec-fetch-mode": "cors",
|
||||
"sec-fetch-site": "same-origin",
|
||||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
|
||||
}
|
||||
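For orientation, a minimal sketch of how a header dict like this would typically be attached to outgoing requests; the trimmed subset below is illustrative and not taken from this diff:

import requests

# Illustrative subset of the Glassdoor header dict assembled above.
headers = {
    "origin": "https://www.glassdoor.com",
    "referer": "https://www.glassdoor.com/",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
}
session = requests.Session()
session.headers.update(headers)  # every request made on this session now carries them
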
@@ -1,371 +0,0 @@
"""
jobspy.scrapers.indeed
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape Indeed.
"""
import re
import math
import io
import json
from typing import Any
from datetime import datetime

import urllib.parse
from bs4 import BeautifulSoup
from bs4.element import Tag
from concurrent.futures import ThreadPoolExecutor, Future

from ..exceptions import IndeedException
from ..utils import (
    count_urgent_words,
    extract_emails_from_text,
    create_session,
    get_enum_from_job_type,
    modify_and_get_description,
)
from ...jobs import (
    JobPost,
    Compensation,
    CompensationInterval,
    Location,
    JobResponse,
    JobType,
)
from .. import Scraper, ScraperInput, Site

class IndeedScraper(Scraper):
    def __init__(self, proxy: str | None = None):
        """
        Initializes IndeedScraper with the Indeed job search url
        """
        self.url = None
        self.country = None
        site = Site(Site.INDEED)
        super().__init__(site, proxy=proxy)

        self.jobs_per_page = 25
        self.seen_urls = set()

    def scrape_page(
        self, scraper_input: ScraperInput, page: int
    ) -> tuple[list[JobPost], int]:
        """
        Scrapes a page of Indeed for jobs with scraper_input criteria
        :param scraper_input:
        :param page:
        :return: jobs found on page, total number of jobs found for search
        """
        self.country = scraper_input.country
        domain = self.country.indeed_domain_value
        self.url = f"https://{domain}.indeed.com"

        try:
            session = create_session(self.proxy)
            response = session.get(
                f"{self.url}/m/jobs",
                headers=self.get_headers(),
                params=self.add_params(scraper_input, page),
                allow_redirects=True,
                timeout_seconds=10,
            )
            if response.status_code not in range(200, 400):
                raise IndeedException(
                    f"bad response with status code: {response.status_code}"
                )
        except Exception as e:
            if "Proxy responded with" in str(e):
                raise IndeedException("bad proxy")
            raise IndeedException(str(e))

        soup = BeautifulSoup(response.content, "html.parser")
        if "did not match any jobs" in response.text:
            raise IndeedException("Parsing exception: Search did not match any jobs")

        jobs = IndeedScraper.parse_jobs(
            soup
        )  #: can raise exception, handled by main scrape function
        total_num_jobs = IndeedScraper.total_jobs(soup)

        if (
            not jobs.get("metaData", {})
            .get("mosaicProviderJobCardsModel", {})
            .get("results")
        ):
            raise IndeedException("No jobs found.")

        def process_job(job: dict) -> JobPost | None:
            job_url = f'{self.url}/m/jobs/viewjob?jk={job["jobkey"]}'
            job_url_client = f'{self.url}/viewjob?jk={job["jobkey"]}'
            if job_url in self.seen_urls:
                return None

            extracted_salary = job.get("extractedSalary")
            compensation = None
            if extracted_salary:
                salary_snippet = job.get("salarySnippet")
                currency = salary_snippet.get("currency") if salary_snippet else None
                interval = (extracted_salary.get("type") or "").upper()
                if interval in CompensationInterval.__members__:
                    compensation = Compensation(
                        interval=CompensationInterval[interval],
                        min_amount=int(extracted_salary.get("min")),
                        max_amount=int(extracted_salary.get("max")),
                        currency=currency,
                    )

            job_type = IndeedScraper.get_job_type(job)
            timestamp_seconds = job["pubDate"] / 1000
            date_posted = datetime.fromtimestamp(timestamp_seconds)
            date_posted = date_posted.strftime("%Y-%m-%d")

            description = self.get_description(job_url) if scraper_input.full_description else None

            with io.StringIO(job["snippet"]) as f:
                soup_io = BeautifulSoup(f, "html.parser")
                li_elements = soup_io.find_all("li")
                if description is None and li_elements:
                    description = " ".join(li.text for li in li_elements)

            job_post = JobPost(
                title=job["normTitle"],
                description=description,
                company_name=job["company"],
                company_url=self.url + job["companyOverviewLink"] if "companyOverviewLink" in job else None,
                location=Location(
                    city=job.get("jobLocationCity"),
                    state=job.get("jobLocationState"),
                    country=self.country,
                ),
                job_type=job_type,
                compensation=compensation,
                date_posted=date_posted,
                job_url=job_url_client,
                emails=extract_emails_from_text(description) if description else None,
                num_urgent_words=count_urgent_words(description) if description else None,
                is_remote=self.is_remote_job(job),
            )
            return job_post

        workers = 10  # may need to be lowered when fetching full descriptions, based on feedback
        jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
        with ThreadPoolExecutor(max_workers=workers) as executor:
            job_results: list[Future] = [
                executor.submit(process_job, job) for job in jobs
            ]

        job_list = [result.result() for result in job_results if result.result()]

        return job_list, total_num_jobs

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes Indeed for jobs with scraper_input criteria
        :param scraper_input:
        :return: job_response
        """
        pages_to_process = (
            math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
        )

        #: get first page to initialize session
        job_list, total_results = self.scrape_page(scraper_input, 0)

        with ThreadPoolExecutor(max_workers=10) as executor:
            futures: list[Future] = [
                executor.submit(self.scrape_page, scraper_input, page)
                for page in range(1, pages_to_process + 1)
            ]

        for future in futures:
            jobs, _ = future.result()
            job_list += jobs

        if len(job_list) > scraper_input.results_wanted:
            job_list = job_list[: scraper_input.results_wanted]

        job_response = JobResponse(
            jobs=job_list,
            total_results=total_results,
        )
        return job_response

    def get_description(self, job_page_url: str) -> str | None:
        """
        Retrieves job description by going to the job page url
        :param job_page_url:
        :return: description
        """
        parsed_url = urllib.parse.urlparse(job_page_url)
        params = urllib.parse.parse_qs(parsed_url.query)
        jk_value = params.get("jk", [None])[0]
        formatted_url = f"{self.url}/m/viewjob?jk={jk_value}&spa=1"
        session = create_session(self.proxy)

        try:
            response = session.get(
                formatted_url,
                headers=self.get_headers(),
                allow_redirects=True,
                timeout_seconds=5,
            )
        except Exception:
            return None

        if response.status_code not in range(200, 400):
            return None

        try:
            soup = BeautifulSoup(response.text, "html.parser")
            script_tags = soup.find_all("script")

            job_description = ""
            for tag in script_tags:
                if "window._initialData" in tag.text:
                    json_str = tag.text
                    json_str = json_str.split("window._initialData=")[1]
                    json_str = json_str.rsplit(";", 1)[0]
                    data = json.loads(json_str)
                    job_description = data["jobInfoWrapperModel"]["jobInfoModel"]["sanitizedJobDescription"]
                    break
        except (KeyError, TypeError, IndexError, json.JSONDecodeError):
            return None

        soup = BeautifulSoup(job_description, "html.parser")
        return modify_and_get_description(soup)

    @staticmethod
    def get_job_type(job: dict) -> list[JobType] | None:
        """
        Parses the job to get list of job types
        :param job:
        :return:
        """
        job_types: list[JobType] = []
        for taxonomy in job["taxonomyAttributes"]:
            if taxonomy["label"] == "job-types":
                for attribute in taxonomy["attributes"]:
                    label = attribute.get("label")
                    if label:
                        job_type_str = label.replace("-", "").replace(" ", "").lower()
                        job_type = get_enum_from_job_type(job_type_str)
                        if job_type:
                            job_types.append(job_type)
        return job_types

    @staticmethod
    def parse_jobs(soup: BeautifulSoup) -> dict:
        """
        Parses the jobs from the soup object
        :param soup:
        :return: jobs
        """

        def find_mosaic_script() -> Tag | None:
            """
            Finds jobcards script tag
            :return: script_tag
            """
            script_tags = soup.find_all("script")

            for tag in script_tags:
                if (
                    tag.string
                    and "mosaic.providerData" in tag.string
                    and "mosaic-provider-jobcards" in tag.string
                ):
                    return tag
            return None

        script_tag = find_mosaic_script()

        if script_tag:
            script_str = script_tag.string
            pattern = r'window\.mosaic\.providerData\["mosaic-provider-jobcards"\]\s*=\s*({.*?});'
            p = re.compile(pattern, re.DOTALL)
            m = p.search(script_str)
            if m:
                jobs = json.loads(m.group(1).strip())
                return jobs
            else:
                raise IndeedException("Could not find mosaic provider job cards data")
        else:
            raise IndeedException(
                "Could not find any results for the search"
            )

    @staticmethod
    def total_jobs(soup: BeautifulSoup) -> int:
        """
        Parses the total jobs for that search from soup object
        :param soup:
        :return: total_num_jobs
        """
        script = soup.find("script", string=lambda t: t and "window._initialData" in t)

        total_num_jobs = 0
        if script:
            pattern = re.compile(r"window\._initialData\s*=\s*({.*})\s*;", re.DOTALL)
            match = pattern.search(script.string)
            if match:
                json_str = match.group(1)
                data = json.loads(json_str)
                total_num_jobs = int(data["searchTitleBarModel"]["totalNumResults"])
        return total_num_jobs

    @staticmethod
    def get_headers():
        return {
            'Host': 'www.indeed.com',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'sec-fetch-site': 'same-origin',
            'sec-fetch-dest': 'document',
            'accept-language': 'en-US,en;q=0.9',
            'sec-fetch-mode': 'navigate',
            'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 192.0',
            'referer': 'https://www.indeed.com/m/jobs?q=software%20intern&l=Dallas%2C%20TX&from=serpso&rq=1&rsIdx=3',
        }

    @staticmethod
    def is_remote_job(job: dict) -> bool:
        """
        :param job:
        :return: bool
        """
        for taxonomy in job.get("taxonomyAttributes", []):
            if taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0:
                return True
        return False

    @staticmethod
    def add_params(scraper_input: ScraperInput, page: int) -> dict[str, str | Any]:
        params = {
            "q": scraper_input.search_term,
            "l": scraper_input.location,
            "filter": 0,
            "start": scraper_input.offset + page * 10,
            "sort": "date",
        }
        if scraper_input.distance:
            params["radius"] = scraper_input.distance

        sc_values = []
        if scraper_input.is_remote:
            sc_values.append("attr(DSQF7)")
        if scraper_input.job_type:
            sc_values.append("jt({})".format(scraper_input.job_type.value))

        if sc_values:
            params["sc"] = "0kf:" + "".join(sc_values) + ";"

        if scraper_input.easy_apply:
            params['iafilter'] = 1

        return params
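For orientation, a hedged sketch of the query string add_params builds; SimpleNamespace stands in for a real ScraperInput here and is not part of this diff:

from types import SimpleNamespace

# Stand-in for ScraperInput; field names mirror the attributes add_params reads.
sample_input = SimpleNamespace(
    search_term="software engineer",
    location="Dallas, TX",
    distance=25,
    is_remote=True,
    job_type=None,
    easy_apply=False,
    offset=0,
)
print(IndeedScraper.add_params(sample_input, page=2))
# -> {'q': 'software engineer', 'l': 'Dallas, TX', 'filter': 0, 'start': 20,
#     'sort': 'date', 'radius': 25, 'sc': '0kf:attr(DSQF7);'}
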
@@ -1,306 +0,0 @@
"""
jobspy.scrapers.linkedin
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape LinkedIn.
"""
import time
import random
from typing import Optional
from datetime import datetime

import requests
from requests.exceptions import ProxyError
from threading import Lock
from bs4.element import Tag
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urlunparse

from .. import Scraper, ScraperInput, Site
from ..exceptions import LinkedInException
from ...jobs import (
    JobPost,
    Location,
    JobResponse,
    JobType,
    Country,
    Compensation,
)
from ..utils import (
    create_session,
    count_urgent_words,
    extract_emails_from_text,
    get_enum_from_job_type,
    currency_parser,
    modify_and_get_description,
)

class LinkedInScraper(Scraper):
    DELAY = 3

    def __init__(self, proxy: Optional[str] = None):
        """
        Initializes LinkedInScraper with the LinkedIn job search url
        """
        site = Site(Site.LINKEDIN)
        self.country = "worldwide"
        self.url = "https://www.linkedin.com"
        super().__init__(site, proxy=proxy)

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes LinkedIn for jobs with scraper_input criteria
        :param scraper_input:
        :return: job_response
        """
        job_list: list[JobPost] = []
        seen_urls = set()
        url_lock = Lock()
        page = (scraper_input.offset // 25 + 25) if scraper_input.offset else 0

        def job_type_code(job_type_enum):
            mapping = {
                JobType.FULL_TIME: "F",
                JobType.PART_TIME: "P",
                JobType.INTERNSHIP: "I",
                JobType.CONTRACT: "C",
                JobType.TEMPORARY: "T",
            }

            return mapping.get(job_type_enum, "")

        while len(job_list) < scraper_input.results_wanted and page < 1000:
            session = create_session(is_tls=False, has_retry=True, delay=5)
            params = {
                "keywords": scraper_input.search_term,
                "location": scraper_input.location,
                "distance": scraper_input.distance,
                "f_WT": 2 if scraper_input.is_remote else None,
                "f_JT": job_type_code(scraper_input.job_type)
                if scraper_input.job_type
                else None,
                "pageNum": 0,
                "start": page + scraper_input.offset,
                "f_AL": "true" if scraper_input.easy_apply else None,
            }

            params = {k: v for k, v in params.items() if v is not None}
            try:
                response = session.get(
                    f"{self.url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
                    params=params,
                    allow_redirects=True,
                    proxies=self.proxy,
                    headers=self.headers(),
                    timeout=10,
                )
                response.raise_for_status()

            except requests.HTTPError as e:
                raise LinkedInException(f"bad response status code: {e.response.status_code}")
            except ProxyError:
                raise LinkedInException("bad proxy")
            except Exception as e:
                raise LinkedInException(str(e))

            soup = BeautifulSoup(response.text, "html.parser")
            job_cards = soup.find_all("div", class_="base-search-card")
            if len(job_cards) == 0:
                return JobResponse(jobs=job_list)

            for job_card in job_cards:
                job_url = None
                href_tag = job_card.find("a", class_="base-card__full-link")
                if href_tag and "href" in href_tag.attrs:
                    href = href_tag.attrs["href"].split("?")[0]
                    job_id = href.split("-")[-1]
                    job_url = f"{self.url}/jobs/view/{job_id}"

                with url_lock:
                    if job_url in seen_urls:
                        continue
                    seen_urls.add(job_url)

                # Call process_job directly without threading
                try:
                    job_post = self.process_job(job_card, job_url, scraper_input.full_description)
                    if job_post:
                        job_list.append(job_post)
                except Exception as e:
                    raise LinkedInException("Exception occurred while processing jobs") from e

            page += 25
            time.sleep(random.uniform(LinkedInScraper.DELAY, LinkedInScraper.DELAY + 2))

        job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list)

    def process_job(self, job_card: Tag, job_url: str, full_descr: bool) -> Optional[JobPost]:
        salary_tag = job_card.find('span', class_='job-search-card__salary-info')

        compensation = None
        if salary_tag:
            salary_text = salary_tag.get_text(separator=' ').strip()
            salary_values = [currency_parser(value) for value in salary_text.split('-')]
            if len(salary_values) == 2:
                salary_min, salary_max = salary_values
                currency = salary_text[0] if salary_text[0] != '$' else 'USD'

                compensation = Compensation(
                    min_amount=int(salary_min),
                    max_amount=int(salary_max),
                    currency=currency,
                )

        title_tag = job_card.find("span", class_="sr-only")
        title = title_tag.get_text(strip=True) if title_tag else "N/A"

        company_tag = job_card.find("h4", class_="base-search-card__subtitle")
        company_a_tag = company_tag.find("a") if company_tag else None
        company_url = (
            urlunparse(urlparse(company_a_tag.get("href"))._replace(query=""))
            if company_a_tag and company_a_tag.has_attr("href")
            else ""
        )
        company = company_a_tag.get_text(strip=True) if company_a_tag else "N/A"

        metadata_card = job_card.find("div", class_="base-search-card__metadata")
        location = self.get_location(metadata_card)

        datetime_tag = (
            metadata_card.find("time", class_="job-search-card__listdate")
            if metadata_card
            else None
        )
        date_posted = description = job_type = None
        if datetime_tag and "datetime" in datetime_tag.attrs:
            datetime_str = datetime_tag["datetime"]
            try:
                date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
            except Exception:
                date_posted = None
        benefits_tag = job_card.find("span", class_="result-benefits__text")
        benefits = " ".join(benefits_tag.get_text().split()) if benefits_tag else None
        if full_descr:
            description, job_type = self.get_job_description(job_url)

        return JobPost(
            title=title,
            company_name=company,
            company_url=company_url,
            location=location,
            date_posted=date_posted,
            job_url=job_url,
            compensation=compensation,
            benefits=benefits,
            job_type=job_type,
            description=description,
            emails=extract_emails_from_text(description) if description else None,
            num_urgent_words=count_urgent_words(description) if description else None,
        )

    def get_job_description(
        self, job_page_url: str
    ) -> tuple[Optional[str], Optional[list[JobType]]]:
        """
        Retrieves the job description and job type by going to the job page url
        :param job_page_url:
        :return: description, job_type
        """
        try:
            session = create_session(is_tls=False, has_retry=True)
            response = session.get(job_page_url, timeout=5, proxies=self.proxy)
            response.raise_for_status()
        except Exception:
            return None, None
        if response.url == "https://www.linkedin.com/signup":
            return None, None

        soup = BeautifulSoup(response.text, "html.parser")
        div_content = soup.find(
            "div", class_=lambda x: x and "show-more-less-html__markup" in x
        )

        description = None
        if div_content:
            description = modify_and_get_description(div_content)

        def get_job_type(
            soup_job_type: BeautifulSoup,
        ) -> list[JobType] | None:
            """
            Gets the job type from job page
            :param soup_job_type:
            :return: JobType
            """
            h3_tag = soup_job_type.find(
                "h3",
                class_="description__job-criteria-subheader",
                string=lambda text: text and "Employment type" in text,
            )

            employment_type = None
            if h3_tag:
                employment_type_span = h3_tag.find_next_sibling(
                    "span",
                    class_="description__job-criteria-text description__job-criteria-text--criteria",
                )
                if employment_type_span:
                    employment_type = employment_type_span.get_text(strip=True)
                    employment_type = employment_type.lower()
                    employment_type = employment_type.replace("-", "")

            return [get_enum_from_job_type(employment_type)] if employment_type else []

        return description, get_job_type(soup)

    def get_location(self, metadata_card: Optional[Tag]) -> Location:
        """
        Extracts the location data from the job metadata card.
        :param metadata_card:
        :return: location
        """
        location = Location(country=Country.from_string(self.country))
        if metadata_card is not None:
            location_tag = metadata_card.find(
                "span", class_="job-search-card__location"
            )
            location_string = location_tag.text.strip() if location_tag else "N/A"
            parts = location_string.split(", ")
            if len(parts) == 2:
                city, state = parts
                location = Location(
                    city=city,
                    state=state,
                    country=Country.from_string(self.country),
                )
            elif len(parts) == 3:
                city, state, country = parts
                location = Location(
                    city=city,
                    state=state,
                    country=Country.from_string(country),
                )

        return location

    @staticmethod
    def headers() -> dict:
        return {
            'authority': 'www.linkedin.com',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'accept-language': 'en-US,en;q=0.9',
            'cache-control': 'max-age=0',
            'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
            # 'sec-ch-ua-mobile': '?0',
            # 'sec-ch-ua-platform': '"macOS"',
            # 'sec-fetch-dest': 'document',
            # 'sec-fetch-mode': 'navigate',
            # 'sec-fetch-site': 'none',
            # 'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        }
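To make the request shape above concrete, a hedged sketch of the guest-API URL that scrape() assembles for a remote, full-time search ("F" comes from the job_type_code mapping shown earlier; the parameter values are illustrative):

from urllib.parse import urlencode

params = {"keywords": "software engineer", "location": "Dallas, TX",
          "f_WT": 2, "f_JT": "F", "pageNum": 0, "start": 0}
url = ("https://www.linkedin.com/jobs-guest/jobs/api/seeMoreJobPostings/search?"
       + urlencode(params))
print(url)  # the HTML returned here is parsed for div.base-search-card job cards
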
@@ -1,96 +0,0 @@
import re
import numpy as np

import tls_client
import requests
from requests.adapters import HTTPAdapter, Retry

from ..jobs import JobType


def modify_and_get_description(soup):
    """Prefixes each list item with '- ' and returns the soup's text with collapsed newlines."""
    for li in soup.find_all('li'):
        li.string = "- " + li.get_text()

    description = soup.get_text(separator='\n').strip()
    description = re.sub(r'\n+', '\n', description)
    return description

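A quick usage sketch of the helper above, with a made-up HTML snippet for illustration:

from bs4 import BeautifulSoup

html = "<p>Perks</p><ul><li>Remote work</li><li>401k</li></ul>"
print(modify_and_get_description(BeautifulSoup(html, "html.parser")))
# Perks
# - Remote work
# - 401k
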
def count_urgent_words(description: str) -> int:
    """
    Count the number of urgent words or phrases in a job description.
    """
    urgent_patterns = re.compile(
        r"\burgen(t|cy)|\bimmediate(ly)?\b|start asap|\bhiring (now|immediate(ly)?)\b",
        re.IGNORECASE,
    )
    matches = urgent_patterns.findall(description)
    count = len(matches)

    return count


def extract_emails_from_text(text: str) -> list[str] | None:
    """Returns all email addresses found in the text, or None for empty input."""
    if not text:
        return None
    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
    return email_regex.findall(text)

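A small worked example for the two helpers above, using a made-up description string:

text = "Hiring now! Start ASAP. Questions: recruiting@example.com"
print(count_urgent_words(text))        # 2  ("Hiring now" and "Start ASAP" both match)
print(extract_emails_from_text(text))  # ['recruiting@example.com']
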
def create_session(proxy: dict | None = None, is_tls: bool = True, has_retry: bool = False, delay: int = 1):
    """
    Creates a requests or tls-client session with optional proxy and retry settings.

    :return: A session object
    """
    if is_tls:
        session = tls_client.Session(
            client_identifier="chrome112",
            random_tls_extension_order=True,
        )
        session.proxies = proxy
    else:
        session = requests.Session()
        session.allow_redirects = True
        if proxy:
            session.proxies.update(proxy)
        if has_retry:
            retries = Retry(
                total=3,
                connect=3,
                status=3,
                status_forcelist=[500, 502, 503, 504, 429],
                backoff_factor=delay,
            )
            adapter = HTTPAdapter(max_retries=retries)

            session.mount('http://', adapter)
            session.mount('https://', adapter)

    return session

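Two hedged usage sketches of create_session; the proxy URL is a placeholder, not a working value:

# A plain requests.Session with retry/backoff, no TLS fingerprinting:
session = create_session(is_tls=False, has_retry=True, delay=2)

# A tls-client session routed through a proxy (dict shape assumed from the code above):
session = create_session(proxy={"http": "http://user:pass@host:port",
                                "https": "http://user:pass@host:port"})
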
def get_enum_from_job_type(job_type_str: str) -> JobType | None:
    """
    Given a string, returns the corresponding JobType enum member if a match is found.
    """
    res = None
    for job_type in JobType:
        if job_type_str in job_type.value:
            res = job_type
    return res


def currency_parser(cur_str):
    # Remove any non-numerical characters
    # except for ',' '.' or '-' (e.g. EUR)
    cur_str = re.sub("[^-0-9.,]", '', cur_str)
    # Remove any 000s separators (either , or .)
    cur_str = re.sub("[.,]", '', cur_str[:-3]) + cur_str[-3:]

    if '.' in cur_str[-3:]:
        num = float(cur_str)
    elif ',' in cur_str[-3:]:
        num = float(cur_str.replace(',', '.'))
    else:
        num = float(cur_str)

    return np.round(num, 2)
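A few worked examples of currency_parser, traced from the logic above:

print(currency_parser("$120,000"))  # 120000.0  (thousands separator stripped)
print(currency_parser("1,234.56"))  # 1234.56   ('.' kept as the decimal mark)
print(currency_parser("1.234,56"))  # 1234.56   (trailing ',' converted to a decimal point)
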
@@ -1,217 +0,0 @@
"""
jobspy.scrapers.ziprecruiter
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape ZipRecruiter.
"""
import math
import time
import re
from datetime import datetime, date
from typing import Optional, Tuple, Any

from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor

from .. import Scraper, ScraperInput, Site
from ..exceptions import ZipRecruiterException
from ...jobs import JobPost, Compensation, Location, JobResponse, JobType, Country
from ..utils import count_urgent_words, extract_emails_from_text, create_session, modify_and_get_description

class ZipRecruiterScraper(Scraper):
    def __init__(self, proxy: Optional[str] = None):
        """
        Initializes ZipRecruiterScraper with the ZipRecruiter job search url
        """
        site = Site(Site.ZIP_RECRUITER)
        self.url = "https://www.ziprecruiter.com"
        self.session = create_session(proxy)
        self.get_cookies()
        super().__init__(site, proxy=proxy)

        self.jobs_per_page = 20
        self.seen_urls = set()

    def find_jobs_in_page(
        self, scraper_input: ScraperInput, continue_token: str | None = None
    ) -> Tuple[list[JobPost], Optional[str]]:
        """
        Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
        :param scraper_input:
        :param continue_token:
        :return: jobs found on page
        """
        params = self.add_params(scraper_input)
        if continue_token:
            params["continue"] = continue_token
        try:
            response = self.session.get(
                "https://api.ziprecruiter.com/jobs-app/jobs",
                headers=self.headers(),
                params=params,  # reuse the params built above so the continue token is sent
            )
            if response.status_code != 200:
                raise ZipRecruiterException(
                    f"bad response status code: {response.status_code}"
                )
        except Exception as e:
            if "Proxy responded with non 200 code" in str(e):
                raise ZipRecruiterException("bad proxy")
            raise ZipRecruiterException(str(e))

        time.sleep(5)
        response_data = response.json()
        jobs_list = response_data.get("jobs", [])
        next_continue_token = response_data.get("continue", None)

        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
            job_results = [executor.submit(self.process_job, job) for job in jobs_list]

        job_list = [result.result() for result in job_results if result.result()]
        return job_list, next_continue_token

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes ZipRecruiter for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        job_list: list[JobPost] = []
        continue_token = None

        max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)

        for page in range(1, max_pages + 1):
            if len(job_list) >= scraper_input.results_wanted:
                break

            jobs_on_page, continue_token = self.find_jobs_in_page(
                scraper_input, continue_token
            )
            if jobs_on_page:
                job_list.extend(jobs_on_page)

            if not continue_token:
                break

        if len(job_list) > scraper_input.results_wanted:
            job_list = job_list[: scraper_input.results_wanted]

        return JobResponse(jobs=job_list)

    @staticmethod
    def process_job(job: dict) -> JobPost:
        """Processes an individual job dict from the response"""
        title = job.get("name")
        job_url = job.get("job_url")

        job_description_html = job.get("job_description", "").strip()
        description_soup = BeautifulSoup(job_description_html, "html.parser")
        description = modify_and_get_description(description_soup)

        company = job["hiring_company"].get("name") if "hiring_company" in job else None
        country_value = "usa" if job.get("job_country") == "US" else "canada"
        country_enum = Country.from_string(country_value)

        location = Location(
            city=job.get("job_city"), state=job.get("job_state"), country=country_enum
        )
        job_type = ZipRecruiterScraper.get_job_type_enum(
            job.get("employment_type", "").replace("_", "").lower()
        )

        save_job_url = job.get("SaveJobURL", "")
        posted_time_match = re.search(
            r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url
        )
        if posted_time_match:
            date_time_str = posted_time_match.group(1)
            date_posted_obj = datetime.strptime(date_time_str, "%Y-%m-%dT%H:%M:%SZ")
            date_posted = date_posted_obj.date()
        else:
            date_posted = date.today()

        return JobPost(
            title=title,
            company_name=company,
            location=location,
            job_type=job_type,
            compensation=Compensation(
                interval="yearly"
                if job.get("compensation_interval") == "annual"
                else job.get("compensation_interval"),
                min_amount=int(job["compensation_min"])
                if "compensation_min" in job
                else None,
                max_amount=int(job["compensation_max"])
                if "compensation_max" in job
                else None,
                currency=job.get("compensation_currency"),
            ),
            date_posted=date_posted,
            job_url=job_url,
            description=description,
            emails=extract_emails_from_text(description) if description else None,
            num_urgent_words=count_urgent_words(description) if description else None,
        )

    def get_cookies(self):
        """Posts a session event to the jobs-app API so the session picks up the cookies it needs."""
        url = "https://api.ziprecruiter.com/jobs-app/event"
        data = "event_type=session&logged_in=false&number_of_retry=1&property=model%3AiPhone&property=os%3AiOS&property=locale%3Aen_us&property=app_build_number%3A4734&property=app_version%3A91.0&property=manufacturer%3AApple&property=timestamp%3A2024-01-12T12%3A04%3A42-06%3A00&property=screen_height%3A852&property=os_version%3A16.6.1&property=source%3Ainstall&property=screen_width%3A393&property=device_model%3AiPhone%2014%20Pro&property=brand%3AApple"
        self.session.post(url, data=data, headers=ZipRecruiterScraper.headers())

    @staticmethod
    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
        for job_type in JobType:
            if job_type_str in job_type.value:
                return [job_type]
        return None

    @staticmethod
    def add_params(scraper_input) -> dict[str, str | Any]:
        params = {
            "search": scraper_input.search_term,
            "location": scraper_input.location,
            "form": "jobs-landing",
        }
        job_type_value = None
        if scraper_input.job_type:
            if scraper_input.job_type.value == "fulltime":
                job_type_value = "full_time"
            elif scraper_input.job_type.value == "parttime":
                job_type_value = "part_time"
            else:
                job_type_value = scraper_input.job_type.value
        if scraper_input.easy_apply:
            params['zipapply'] = 1

        if job_type_value:
            params["refine_by_employment"] = f"employment_type:employment_type:{job_type_value}"

        if scraper_input.is_remote:
            params["refine_by_location_type"] = "only_remote"

        if scraper_input.distance:
            params["radius"] = scraper_input.distance

        return params

    @staticmethod
    def headers() -> dict:
        """
        Returns headers needed for requests
        :return: dict - Dictionary containing headers
        """
        return {
            "Host": "api.ziprecruiter.com",
            "accept": "*/*",
            "x-zr-zva-override": "100000000;vid:ZT1huzm_EQlDTVEc",
            "x-pushnotificationid": "0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0",
            "x-deviceid": "D77B3A92-E589-46A4-8A39-6EF6F1D86006",
            "user-agent": "Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)",
            "authorization": "Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==",
            "accept-language": "en-US,en;q=0.9",
        }
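As with the Indeed scraper, a hedged sketch of what this add_params produces; SimpleNamespace stands in for a real ScraperInput and is not part of this diff:

from types import SimpleNamespace

# Stand-in for ScraperInput, mirroring the attributes add_params reads.
sample_input = SimpleNamespace(
    search_term="software engineer",
    location="Dallas, TX",
    job_type=SimpleNamespace(value="fulltime"),
    easy_apply=False,
    is_remote=True,
    distance=25,
)
print(ZipRecruiterScraper.add_params(sample_input))
# -> {'search': 'software engineer', 'location': 'Dallas, TX', 'form': 'jobs-landing',
#     'refine_by_employment': 'employment_type:employment_type:full_time',
#     'refine_by_location_type': 'only_remote', 'radius': 25}
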
@@ -1,14 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_all():
    result = scrape_jobs(
        site_name=["linkedin", "indeed", "zip_recruiter", "glassdoor"],
        search_term="software engineer",
        results_wanted=5,
    )

    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"

@@ -1,11 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_glassdoor():
    result = scrape_jobs(
        site_name="glassdoor", search_term="software engineer", country_indeed="USA"
    )
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"

@@ -1,11 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_indeed():
    result = scrape_jobs(
        site_name="indeed", search_term="software engineer", country_indeed="usa"
    )
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"

@@ -1,12 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_linkedin():
    result = scrape_jobs(
        site_name="linkedin",
        search_term="software engineer",
    )
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"

@@ -1,13 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_ziprecruiter():
    result = scrape_jobs(
        site_name="zip_recruiter",
        search_term="software engineer",
    )

    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"