mirror of https://github.com/Bunsly/JobSpy
Compare commits
122 Commits
94d413bad1  61205bcc77  f1602eca70  d4d52d05f5  0946cb3373  051981689f  903b7e6f1b  6782b9884e
94c74d60f2  5463e5a664  ed139e7e6b  5bd199d0a5  4ec308a302  7cb0c518fc  df70d4bc2e  3006063875
1be009b8bc  81ed9b3ddf  11a9e9a56a  c6ade14784  13c74a0fed  333e9e6760  04032a0f91  496896d0b5
87ba1ad1bf  4e7ac9a583  e44d13e1cf  d52e366ef7  395ebf0017  63fddd9b7f  58956868ae  4fce836222
5ba25e7a7c  f7cb3e9206  3ad3f121f7  ff3c782912  338d854b96  811d4c40b4  dba92d22c2  10a3592a0f
b7905cc756  6867d58829  f6248c8386  f395597fdd  6372e41bd9  6c869decb8  9f4083380d  9207ab56f6
757a94853e  6bc191d5c7  0cc34287f7  923979093b  286f0e4487  f7b29d43a2  6f1490458c  6bb7d81ba8
0e046432d1  209e0e65b6  8570c0651e  8678b0bbe4  60d4d911c9  2a0cba8c7e  de70189fa2  b55c0eb86d
88c95c4ad5  d8d33d602f  6330c14879  48631ea271  edffe18e65  0988230a24  d000a81eb3  ccb0c17660
df339610fa  c501006bd8  89a3ee231c  6439f71433  7f6271b2e0  5cb7ffe5fd  cd29f79796  65d2e5e707
08d63a87a2  1ffdb1756f  1185693422  dcd7144318  bf73c061bd  8dd08ed9fd  5d3df732e6  86f858e06d
1089d1f0a5  3e93454738  0d150d519f  cc3497f929  5986f75346  4b7bdb9313  80213f28d2  ada38532c3
3b0017964c  94d8f555fd  e8b4b376b8  54ac1bad16  0a669e9ba8  a4f6851c32  db01bc6bbb  f8a4eccc6b
ba3a16b228  aeb1a50d2c  91b137ef86  2563c5ca08  32282305c8  ccbea51f3c  6ec7c24f7f  02caf1b38d
8e2ab277da  ce3bd84ee5  1ccf2290fe  ec2eefc58a  13c7694474  bbe46fe3f4  b97c73ffd6  5b3627b244
2ec3b04777  89a5264391
|
@@ -1,9 +1,13 @@
-name: Publish Python 🐍 distributions 📦 to PyPI
-on: push
+name: Publish JobSpy to PyPi
+on:
+  push:
+    branches:
+      - main
+  workflow_dispatch:

 jobs:
   build-n-publish:
-    name: Build and publish Python 🐍 distributions 📦 to PyPI
+    name: Build and publish JobSpy to PyPi
     runs-on: ubuntu-latest

     steps:
@@ -27,7 +31,7 @@ jobs:
         build
     - name: Publish distribution 📦 to PyPI
-      if: startsWith(github.ref, 'refs/tags')
+      if: startsWith(github.ref, 'refs/tags') || github.event_name == 'workflow_dispatch'
       uses: pypa/gh-action-pypi-publish@release/v1
       with:
         password: ${{ secrets.PYPI_API_TOKEN }}
|
|
@@ -0,0 +1,7 @@
+repos:
+  - repo: https://github.com/psf/black
+    rev: 24.2.0
+    hooks:
+      - id: black
+        language_version: python
+        args: [--line-length=88, --quiet]
|
233 README.md
|
@ -1,27 +1,19 @@
|
|||
<img src="https://github.com/cullenwatson/JobSpy/assets/78247585/ae185b7e-e444-4712-8bb9-fa97f53e896b" width="400">
|
||||
|
||||
**JobSpy** is a simple, yet comprehensive, job scraping library.
|
||||
|
||||
**Not technical?** Try out the web scraping tool on our site at [usejobspy.com](https://usejobspy.com).
|
||||
|
||||
*Looking to build a data-focused software product?* **[Book a call](https://bunsly.com/)** *to
|
||||
work with us.*
|
||||
**JobSpy** is a job scraping library with the goal of aggregating all the jobs from popular job boards with one tool.
|
||||
|
||||
## Features
|
||||
|
||||
- Scrapes job postings from **LinkedIn**, **Indeed**, **Glassdoor**, & **ZipRecruiter** simultaneously
|
||||
- Aggregates the job postings in a Pandas DataFrame
|
||||
- Proxy support (HTTP/S, SOCKS)
|
||||
|
||||
[Video Guide for JobSpy](https://www.youtube.com/watch?v=RuP1HrAZnxs&pp=ygUgam9icyBzY3JhcGVyIGJvdCBsaW5rZWRpbiBpbmRlZWQ%3D) -
|
||||
Updated for release v1.1.3
|
||||
- Scrapes job postings from **LinkedIn**, **Indeed**, **Glassdoor**, **Google**, **ZipRecruiter**, **Bayt** & **Naukri** concurrently
|
||||
- Aggregates the job postings in a dataframe
|
||||
- Proxies support to bypass blocking
|
||||
|
||||

|
||||
|
||||
### Installation
|
||||
|
||||
```
|
||||
pip install python-jobspy
|
||||
pip install -U python-jobspy
|
||||
```
|
||||
|
||||
_Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_
|
||||
|
@ -29,88 +21,120 @@ _Python version >= [3.10](https://www.python.org/downloads/release/python-3100/)
|
|||
### Usage
|
||||
|
||||
```python
|
||||
import csv
|
||||
from jobspy import scrape_jobs
|
||||
|
||||
jobs = scrape_jobs(
|
||||
site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
|
||||
site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor", "google", "bayt", "naukri"],
|
||||
search_term="software engineer",
|
||||
location="Dallas, TX",
|
||||
results_wanted=10,
|
||||
country_indeed='USA' # only needed for indeed / glassdoor
|
||||
google_search_term="software engineer jobs near San Francisco, CA since yesterday",
|
||||
location="San Francisco, CA",
|
||||
results_wanted=20,
|
||||
hours_old=72,
|
||||
country_indeed='USA',
|
||||
|
||||
# linkedin_fetch_description=True # gets more info such as description, direct job url (slower)
|
||||
# proxies=["208.195.175.46:65095", "208.195.175.45:65095", "localhost"],
|
||||
)
|
||||
print(f"Found {len(jobs)} jobs")
|
||||
print(jobs.head())
|
||||
jobs.to_csv("jobs.csv", index=False) # to_xlsx
|
||||
jobs.to_csv("jobs.csv", quoting=csv.QUOTE_NONNUMERIC, escapechar="\\", index=False) # to_excel
|
||||
```
|
||||
|
||||
### Output
|
||||
|
||||
```
|
||||
SITE TITLE COMPANY_NAME CITY STATE JOB_TYPE INTERVAL MIN_AMOUNT MAX_AMOUNT JOB_URL DESCRIPTION
|
||||
SITE TITLE COMPANY CITY STATE JOB_TYPE INTERVAL MIN_AMOUNT MAX_AMOUNT JOB_URL DESCRIPTION
|
||||
indeed Software Engineer AMERICAN SYSTEMS Arlington VA None yearly 200000 150000 https://www.indeed.com/viewjob?jk=5e409e577046... THIS POSITION COMES WITH A 10K SIGNING BONUS!...
|
||||
indeed Senior Software Engineer TherapyNotes.com Philadelphia PA fulltime yearly 135000 110000 https://www.indeed.com/viewjob?jk=da39574a40cb... About Us TherapyNotes is the national leader i...
|
||||
linkedin Software Engineer - Early Career Lockheed Martin Sunnyvale CA fulltime yearly None None https://www.linkedin.com/jobs/view/3693012711 Description:By bringing together people that u...
|
||||
linkedin Full-Stack Software Engineer Rain New York NY fulltime yearly None None https://www.linkedin.com/jobs/view/3696158877 Rain’s mission is to create the fastest and ea...
|
||||
zip_recruiter Software Engineer - New Grad ZipRecruiter Santa Monica CA fulltime yearly 130000 150000 https://www.ziprecruiter.com/jobs/ziprecruiter... We offer a hybrid work environment. Most US-ba...
|
||||
zip_recruiter Software Developer TEKsystems Phoenix AZ fulltime hourly 65 75 https://www.ziprecruiter.com/jobs/teksystems-0... Top Skills' Details• 6 years of Java developme...
|
||||
|
||||
```
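
Since the result is a regular pandas DataFrame, the columns shown above can be filtered and sorted with ordinary pandas operations. A minimal sketch (the DataFrame uses lowercase column names; the search values are placeholders):

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed"],
    search_term="software engineer",
    country_indeed="USA",
    results_wanted=20,
)

# keep postings that list a yearly salary, then sort by the minimum amount
salaried = jobs[(jobs["interval"] == "yearly") & jobs["min_amount"].notna()]
print(
    salaried[["site", "title", "company", "min_amount", "max_amount", "job_url"]]
    .sort_values("min_amount", ascending=False)
    .head()
)
```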
|
||||
|
||||
### Parameters for `scrape_jobs()`
|
||||
|
||||
```plaintext
|
||||
Required
|
||||
├── site_type (List[enum]): linkedin, zip_recruiter, indeed, glassdoor
|
||||
└── search_term (str)
|
||||
Optional
|
||||
├── location (int)
|
||||
├── distance (int): in miles
|
||||
├── job_type (enum): fulltime, parttime, internship, contract
|
||||
├── proxy (str): in format 'http://user:pass@host:port' or [https, socks]
|
||||
├── site_name (list|str):
|
||||
| linkedin, zip_recruiter, indeed, glassdoor, google, bayt
|
||||
| (default is all)
|
||||
│
|
||||
├── search_term (str)
|
||||
|
|
||||
├── google_search_term (str)
|
||||
| search term for google jobs. This is the only param for filtering google jobs.
|
||||
│
|
||||
├── location (str)
|
||||
│
|
||||
├── distance (int):
|
||||
| in miles, default 50
|
||||
│
|
||||
├── job_type (str):
|
||||
| fulltime, parttime, internship, contract
|
||||
│
|
||||
├── proxies (list):
|
||||
| in format ['user:pass@host:port', 'localhost']
|
||||
| each job board scraper will round robin through the proxies
|
||||
|
|
||||
├── is_remote (bool)
|
||||
├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
|
||||
├── easy_apply (bool): filters for jobs that are hosted on LinkedIn
|
||||
├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
|
||||
├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
|
||||
│
|
||||
├── results_wanted (int):
|
||||
| number of job results to retrieve for each site specified in 'site_name'
|
||||
│
|
||||
├── easy_apply (bool):
|
||||
| filters for jobs that are hosted on the job board site (LinkedIn easy apply filter no longer works)
|
||||
│
|
||||
├── description_format (str):
|
||||
| markdown, html (Format type of the job descriptions. Default is markdown.)
|
||||
│
|
||||
├── offset (int):
|
||||
| starts the search from an offset (e.g. 25 will start the search from the 25th result)
|
||||
│
|
||||
├── hours_old (int):
|
||||
| filters jobs by the number of hours since the job was posted
|
||||
| (ZipRecruiter and Glassdoor round up to next day.)
|
||||
│
|
||||
├── verbose (int) {0, 1, 2}:
|
||||
| Controls the verbosity of the runtime printouts
|
||||
| (0 prints only errors, 1 is errors+warnings, 2 is all logs. Default is 2.)
|
||||
|
||||
├── linkedin_fetch_description (bool):
|
||||
| fetches full description and direct job url for LinkedIn (Increases requests by O(n))
|
||||
│
|
||||
├── linkedin_company_ids (list[int]):
|
||||
| searches for linkedin jobs with specific company ids
|
||||
|
|
||||
├── country_indeed (str):
|
||||
| filters the country on Indeed & Glassdoor (see below for correct spelling)
|
||||
|
|
||||
├── enforce_annual_salary (bool):
|
||||
| converts wages to annual salary
|
||||
|
|
||||
├── ca_cert (str)
|
||||
| path to CA Certificate file for proxies
|
||||
```
|
||||
|
||||
### JobPost Schema
|
||||
|
||||
```plaintext
|
||||
JobPost
|
||||
├── title (str)
|
||||
├── company (str)
|
||||
├── job_url (str)
|
||||
├── location (object)
|
||||
│ ├── country (str)
|
||||
│ ├── city (str)
|
||||
│ ├── state (str)
|
||||
├── description (str)
|
||||
├── job_type (str): fulltime, parttime, internship, contract
|
||||
├── compensation (object)
|
||||
│ ├── interval (str): yearly, monthly, weekly, daily, hourly
|
||||
│ ├── min_amount (int)
|
||||
│ ├── max_amount (int)
|
||||
│ └── currency (enum)
|
||||
└── date_posted (date)
|
||||
└── emails (str)
|
||||
└── num_urgent_words (int)
|
||||
└── is_remote (bool)
|
||||
```
|
||||
|
||||
### Exceptions
|
||||
|
||||
The following exceptions may be raised when using JobSpy:
|
||||
|
||||
* `LinkedInException`
|
||||
* `IndeedException`
|
||||
* `ZipRecruiterException`
|
||||
* `GlassdoorException`
|
||||
├── Indeed limitations:
|
||||
| Only one from this list can be used in a search:
|
||||
| - hours_old
|
||||
| - job_type & is_remote
|
||||
| - easy_apply
|
||||
│
|
||||
└── LinkedIn limitations:
|
||||
| Only one from this list can be used in a search:
|
||||
| - hours_old
|
||||
| - easy_apply
|
||||
```
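
A rough sketch of how several of these optional parameters combine in one call (the search values are placeholders; note the Indeed limitation above on mixing `hours_old` with `job_type`/`is_remote` or `easy_apply`):

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed", "glassdoor"],
    search_term="data engineer",
    location="Austin, TX",
    distance=25,                    # miles
    results_wanted=30,
    hours_old=72,                   # so job_type / is_remote / easy_apply stay unset for Indeed
    description_format="markdown",
    country_indeed="USA",
    enforce_annual_salary=True,
    verbose=1,                      # errors + warnings only
    # proxies=["user:pass@host:port", "localhost"],
)
```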
|
||||
|
||||
## Supported Countries for Job Searching
|
||||
|
||||
### **LinkedIn**
|
||||
|
||||
LinkedIn searches globally & uses only the `location` parameter. You can only fetch 1000 jobs max from the LinkedIn endpoint we're using
|
||||
LinkedIn searches globally & uses only the `location` parameter.
|
||||
|
||||
### **ZipRecruiter**
|
||||
|
||||
|
@ -140,33 +164,94 @@ You can specify the following countries when searching on Indeed (use the exact
|
|||
| South Korea | Spain* | Sweden | Switzerland* |
|
||||
| Taiwan | Thailand | Turkey | Ukraine |
|
||||
| United Arab Emirates | UK* | USA* | Uruguay |
|
||||
| Venezuela | Vietnam | | |
|
||||
| Venezuela | Vietnam* | | |
|
||||
|
||||
### **Bayt**
|
||||
|
||||
Bayt currently uses only the `search_term` parameter and searches internationally.
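
For example, a Bayt-only search needs little more than the search term (a sketch with placeholder values):

```python
from jobspy import scrape_jobs

bayt_jobs = scrape_jobs(site_name=["bayt"], search_term="devops engineer", results_wanted=10)
```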
|
||||
|
||||
|
||||
Glassdoor can only fetch 900 jobs from the endpoint we're using on a given search.
|
||||
|
||||
## Notes
|
||||
* Indeed is the best scraper currently with no rate limiting.
|
||||
* All the job board endpoints are capped at around 1000 jobs on a given search.
|
||||
* LinkedIn is the most restrictive and usually rate limits around the 10th page with one IP. Proxies are essentially required for LinkedIn.
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
---
|
||||
**Q: Why is Indeed giving unrelated roles?**
|
||||
**A:** Indeed searches the description too.
|
||||
|
||||
**Q: Encountering issues with your queries?**
|
||||
**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems
|
||||
persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
|
||||
- use `-` to exclude words
- use `""` for an exact match
|
||||
|
||||
Example of a good Indeed query
|
||||
|
||||
```py
|
||||
search_term='"engineering intern" software summer (java OR python OR c++) 2025 -tax -marketing'
|
||||
```
|
||||
|
||||
This searches the description/title and must include software, summer, 2025, one of the languages, engineering intern exactly, no tax, no marketing.
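
Dropped into `scrape_jobs`, that query looks like this (a sketch; the other parameters are placeholders):

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed"],
    search_term='"engineering intern" software summer (java OR python OR c++) 2025 -tax -marketing',
    country_indeed="USA",
    results_wanted=25,
)
```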
|
||||
|
||||
---
|
||||
|
||||
**Q: No results when using "google"?**
|
||||
**A:** You have to use very specific search syntax. Search for Google jobs in your browser, apply your filters, and copy whatever appears in the Google Jobs search box into `google_search_term`.
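
For example, reusing the query from the usage example above (a sketch):

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["google"],
    google_search_term="software engineer jobs near San Francisco, CA since yesterday",
    results_wanted=20,
)
```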
|
||||
|
||||
---
|
||||
|
||||
**Q: Received a response code 429?**
|
||||
**A:** This indicates that you have been blocked by the job board site for sending too many requests. All of the job board sites are aggressive with blocking. We recommend:
|
||||
|
||||
- Waiting a few seconds between requests.
|
||||
- Trying a VPN or proxy to change your IP address.
|
||||
- Wait some time between scrapes (site-dependent).
|
||||
- Try using the proxies param to change your IP address.
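
A sketch of the `proxies` parameter in practice (the addresses below are placeholders; each scraper round-robins through the list, and `"localhost"` means your own IP):

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["linkedin"],
    search_term="software engineer",
    results_wanted=50,
    proxies=[
        "user:pass@198.51.100.10:8080",
        "user:pass@198.51.100.11:8080",
        "localhost",
    ],
)
```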
|
||||
|
||||
---
|
||||
|
||||
**Q: Experiencing a "Segmentation fault: 11" on macOS Catalina?**
|
||||
**A:** This is due to the `tls_client` dependency not supporting your architecture. Solutions and workarounds include:
|
||||
### JobPost Schema
|
||||
|
||||
- Upgrade to a newer version of MacOS
|
||||
- Reach out to the maintainers of [tls_client](https://github.com/bogdanfinn/tls-client) for fixes
|
||||
```plaintext
|
||||
JobPost
|
||||
├── title
|
||||
├── company
|
||||
├── company_url
|
||||
├── job_url
|
||||
├── location
|
||||
│ ├── country
|
||||
│ ├── city
|
||||
│ ├── state
|
||||
├── is_remote
|
||||
├── description
|
||||
├── job_type: fulltime, parttime, internship, contract
|
||||
├── job_function
|
||||
│ ├── interval: yearly, monthly, weekly, daily, hourly
|
||||
│ ├── min_amount
|
||||
│ ├── max_amount
|
||||
│ ├── currency
|
||||
│ └── salary_source: direct_data, description (parsed from posting)
|
||||
├── date_posted
|
||||
└── emails
|
||||
|
||||
Linkedin specific
|
||||
└── job_level
|
||||
|
||||
|
||||
Linkedin & Indeed specific
|
||||
└── company_industry
|
||||
|
||||
Indeed specific
|
||||
├── company_country
|
||||
├── company_addresses
|
||||
├── company_employees_label
|
||||
├── company_revenue_label
|
||||
├── company_description
|
||||
└── company_logo
|
||||
|
||||
Naukri specific
|
||||
├── skills
|
||||
├── experience_range
|
||||
├── company_rating
|
||||
├── company_reviews_count
|
||||
├── vacancy_count
|
||||
└── work_from_home_type
|
||||
```
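
Each field above maps to a DataFrame column, and board-specific fields are simply empty on rows scraped from other boards. A sketch assuming Naukri results are present:

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(site_name=["naukri"], search_term="backend developer", results_wanted=10)

# Naukri-specific columns such as skills and company_rating are populated here;
# they stay empty for rows that come from other job boards
print(jobs[["title", "company", "skills", "company_rating", "vacancy_count"]].head())
```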
|
||||
|
|
|
@ -1,167 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "00a94b47-f47b-420f-ba7e-714ef219c006",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from jobspy import scrape_jobs\n",
|
||||
"import pandas as pd\n",
|
||||
"from IPython.display import display, HTML"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9f773e6c-d9fc-42cc-b0ef-63b739e78435",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pd.set_option('display.max_columns', None)\n",
|
||||
"pd.set_option('display.max_rows', None)\n",
|
||||
"pd.set_option('display.width', None)\n",
|
||||
"pd.set_option('display.max_colwidth', 50)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1253c1f8-9437-492e-9dd3-e7fe51099420",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# example 1 (no hyperlinks, USA)\n",
|
||||
"jobs = scrape_jobs(\n",
|
||||
" site_name=[\"linkedin\"],\n",
|
||||
" location='san francisco',\n",
|
||||
" search_term=\"engineer\",\n",
|
||||
" results_wanted=5,\n",
|
||||
"\n",
|
||||
" # use if you want to use a proxy\n",
|
||||
" # proxy=\"socks5://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
|
||||
" proxy=\"http://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
|
||||
" #proxy=\"https://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
|
||||
")\n",
|
||||
"display(jobs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6a581b2d-f7da-4fac-868d-9efe143ee20a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# example 2 - remote USA & hyperlinks\n",
|
||||
"jobs = scrape_jobs(\n",
|
||||
" site_name=[\"linkedin\", \"zip_recruiter\", \"indeed\"],\n",
|
||||
" # location='san francisco',\n",
|
||||
" search_term=\"software engineer\",\n",
|
||||
" country_indeed=\"USA\",\n",
|
||||
" hyperlinks=True,\n",
|
||||
" is_remote=True,\n",
|
||||
" results_wanted=5, \n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fe8289bc-5b64-4202-9a64-7c117c83fd9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# use if hyperlinks=True\n",
|
||||
"html = jobs.to_html(escape=False)\n",
|
||||
"# change max-width: 200px to show more or less of the content\n",
|
||||
"truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
|
||||
"display(HTML(truncate_width))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "951c2fe1-52ff-407d-8bb1-068049b36777",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# example 3 - with hyperlinks, international - linkedin (no zip_recruiter)\n",
|
||||
"jobs = scrape_jobs(\n",
|
||||
" site_name=[\"linkedin\"],\n",
|
||||
" location='berlin',\n",
|
||||
" search_term=\"engineer\",\n",
|
||||
" hyperlinks=True,\n",
|
||||
" results_wanted=5,\n",
|
||||
" easy_apply=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1e37a521-caef-441c-8fc2-2eb5b2e7da62",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# use if hyperlinks=True\n",
|
||||
"html = jobs.to_html(escape=False)\n",
|
||||
"# change max-width: 200px to show more or less of the content\n",
|
||||
"truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
|
||||
"display(HTML(truncate_width))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0650e608-0b58-4bf5-ae86-68348035b16a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# example 4 - international indeed (no zip_recruiter)\n",
|
||||
"jobs = scrape_jobs(\n",
|
||||
" site_name=[\"indeed\"],\n",
|
||||
" search_term=\"engineer\",\n",
|
||||
" country_indeed = \"China\",\n",
|
||||
" hyperlinks=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "40913ac8-3f8a-4d7e-ac47-afb88316432b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# use if hyperlinks=True\n",
|
||||
"html = jobs.to_html(escape=False)\n",
|
||||
"# change max-width: 200px to show more or less of the content\n",
|
||||
"truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
|
||||
"display(HTML(truncate_width))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
|
@ -1,31 +0,0 @@
|
|||
from jobspy import scrape_jobs
|
||||
import pandas as pd
|
||||
|
||||
jobs: pd.DataFrame = scrape_jobs(
|
||||
site_name=["indeed", "linkedin", "zip_recruiter"],
|
||||
search_term="software engineer",
|
||||
location="Dallas, TX",
|
||||
results_wanted=50, # be wary: the higher it is, the more likely you'll get blocked (a rotating proxy should help, though)
|
||||
country_indeed="USA",
|
||||
offset=25 # start jobs from an offset (use if search failed and want to continue)
|
||||
# proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
|
||||
)
|
||||
|
||||
# formatting for pandas
|
||||
pd.set_option("display.max_columns", None)
|
||||
pd.set_option("display.max_rows", None)
|
||||
pd.set_option("display.width", None)
|
||||
pd.set_option("display.max_colwidth", 50) # set to 0 to see full job url / desc
|
||||
|
||||
# 1: output to console
|
||||
print(jobs)
|
||||
|
||||
# 2: output to .csv
|
||||
jobs.to_csv("./jobs.csv", index=False)
|
||||
print("outputted to jobs.csv")
|
||||
|
||||
# 3: output to .xlsx
|
||||
# jobs.to_xlsx('jobs.xlsx', index=False)
|
||||
|
||||
# 4: display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
|
||||
# display(jobs)
|
|
@ -0,0 +1,215 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from typing import Tuple
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from jobspy.bayt import BaytScraper
|
||||
from jobspy.glassdoor import Glassdoor
|
||||
from jobspy.google import Google
|
||||
from jobspy.indeed import Indeed
|
||||
from jobspy.linkedin import LinkedIn
|
||||
from jobspy.naukri import Naukri
|
||||
from jobspy.model import JobType, Location, JobResponse, Country
|
||||
from jobspy.model import SalarySource, ScraperInput, Site
|
||||
from jobspy.util import (
|
||||
set_logger_level,
|
||||
extract_salary,
|
||||
create_logger,
|
||||
get_enum_from_value,
|
||||
map_str_to_site,
|
||||
convert_to_annual,
|
||||
desired_order,
|
||||
)
|
||||
from jobspy.ziprecruiter import ZipRecruiter
|
||||
|
||||
|
||||
def scrape_jobs(
|
||||
site_name: str | list[str] | Site | list[Site] | None = None,
|
||||
search_term: str | None = None,
|
||||
google_search_term: str | None = None,
|
||||
location: str | None = None,
|
||||
distance: int | None = 50,
|
||||
is_remote: bool = False,
|
||||
job_type: str | None = None,
|
||||
easy_apply: bool | None = None,
|
||||
results_wanted: int = 15,
|
||||
country_indeed: str = "usa",
|
||||
proxies: list[str] | str | None = None,
|
||||
ca_cert: str | None = None,
|
||||
description_format: str = "markdown",
|
||||
linkedin_fetch_description: bool | None = False,
|
||||
linkedin_company_ids: list[int] | None = None,
|
||||
offset: int | None = 0,
|
||||
hours_old: int = None,
|
||||
enforce_annual_salary: bool = False,
|
||||
verbose: int = 0,
|
||||
**kwargs,
|
||||
) -> pd.DataFrame:
|
||||
"""
|
||||
Scrapes job data from job boards concurrently
|
||||
:return: Pandas DataFrame containing job data
|
||||
"""
|
||||
SCRAPER_MAPPING = {
|
||||
Site.LINKEDIN: LinkedIn,
|
||||
Site.INDEED: Indeed,
|
||||
Site.ZIP_RECRUITER: ZipRecruiter,
|
||||
Site.GLASSDOOR: Glassdoor,
|
||||
Site.GOOGLE: Google,
|
||||
Site.BAYT: BaytScraper,
|
||||
Site.NAUKRI: Naukri,
|
||||
}
|
||||
set_logger_level(verbose)
|
||||
job_type = get_enum_from_value(job_type) if job_type else None
|
||||
|
||||
def get_site_type():
|
||||
site_types = list(Site)
|
||||
if isinstance(site_name, str):
|
||||
site_types = [map_str_to_site(site_name)]
|
||||
elif isinstance(site_name, Site):
|
||||
site_types = [site_name]
|
||||
elif isinstance(site_name, list):
|
||||
site_types = [
|
||||
map_str_to_site(site) if isinstance(site, str) else site
|
||||
for site in site_name
|
||||
]
|
||||
return site_types
|
||||
|
||||
country_enum = Country.from_string(country_indeed)
|
||||
|
||||
scraper_input = ScraperInput(
|
||||
site_type=get_site_type(),
|
||||
country=country_enum,
|
||||
search_term=search_term,
|
||||
google_search_term=google_search_term,
|
||||
location=location,
|
||||
distance=distance,
|
||||
is_remote=is_remote,
|
||||
job_type=job_type,
|
||||
easy_apply=easy_apply,
|
||||
description_format=description_format,
|
||||
linkedin_fetch_description=linkedin_fetch_description,
|
||||
results_wanted=results_wanted,
|
||||
linkedin_company_ids=linkedin_company_ids,
|
||||
offset=offset,
|
||||
hours_old=hours_old,
|
||||
)
|
||||
|
||||
def scrape_site(site: Site) -> Tuple[str, JobResponse]:
|
||||
scraper_class = SCRAPER_MAPPING[site]
|
||||
scraper = scraper_class(proxies=proxies, ca_cert=ca_cert)
|
||||
scraped_data: JobResponse = scraper.scrape(scraper_input)
|
||||
cap_name = site.value.capitalize()
|
||||
site_name = "ZipRecruiter" if cap_name == "Zip_recruiter" else cap_name
|
||||
create_logger(site_name).info(f"finished scraping")
|
||||
return site.value, scraped_data
|
||||
|
||||
site_to_jobs_dict = {}
|
||||
|
||||
def worker(site):
|
||||
site_val, scraped_info = scrape_site(site)
|
||||
return site_val, scraped_info
|
||||
|
||||
with ThreadPoolExecutor() as executor:
|
||||
future_to_site = {
|
||||
executor.submit(worker, site): site for site in scraper_input.site_type
|
||||
}
|
||||
|
||||
for future in as_completed(future_to_site):
|
||||
site_value, scraped_data = future.result()
|
||||
site_to_jobs_dict[site_value] = scraped_data
|
||||
|
||||
jobs_dfs: list[pd.DataFrame] = []
|
||||
|
||||
for site, job_response in site_to_jobs_dict.items():
|
||||
for job in job_response.jobs:
|
||||
job_data = job.dict()
|
||||
job_url = job_data["job_url"]
|
||||
job_data["site"] = site
|
||||
job_data["company"] = job_data["company_name"]
|
||||
job_data["job_type"] = (
|
||||
", ".join(job_type.value[0] for job_type in job_data["job_type"])
|
||||
if job_data["job_type"]
|
||||
else None
|
||||
)
|
||||
job_data["emails"] = (
|
||||
", ".join(job_data["emails"]) if job_data["emails"] else None
|
||||
)
|
||||
if job_data["location"]:
|
||||
job_data["location"] = Location(
|
||||
**job_data["location"]
|
||||
).display_location()
|
||||
|
||||
# Handle compensation
|
||||
compensation_obj = job_data.get("compensation")
|
||||
if compensation_obj and isinstance(compensation_obj, dict):
|
||||
job_data["interval"] = (
|
||||
compensation_obj.get("interval").value
|
||||
if compensation_obj.get("interval")
|
||||
else None
|
||||
)
|
||||
job_data["min_amount"] = compensation_obj.get("min_amount")
|
||||
job_data["max_amount"] = compensation_obj.get("max_amount")
|
||||
job_data["currency"] = compensation_obj.get("currency", "USD")
|
||||
job_data["salary_source"] = SalarySource.DIRECT_DATA.value
|
||||
if enforce_annual_salary and (
|
||||
job_data["interval"]
|
||||
and job_data["interval"] != "yearly"
|
||||
and job_data["min_amount"]
|
||||
and job_data["max_amount"]
|
||||
):
|
||||
convert_to_annual(job_data)
|
||||
else:
|
||||
if country_enum == Country.USA:
|
||||
(
|
||||
job_data["interval"],
|
||||
job_data["min_amount"],
|
||||
job_data["max_amount"],
|
||||
job_data["currency"],
|
||||
) = extract_salary(
|
||||
job_data["description"],
|
||||
enforce_annual_salary=enforce_annual_salary,
|
||||
)
|
||||
job_data["salary_source"] = SalarySource.DESCRIPTION.value
|
||||
|
||||
job_data["salary_source"] = (
|
||||
job_data["salary_source"]
|
||||
if "min_amount" in job_data and job_data["min_amount"]
|
||||
else None
|
||||
)
|
||||
|
||||
#naukri-specific fields
|
||||
job_data["skills"] = (
|
||||
", ".join(job_data["skills"]) if job_data["skills"] else None
|
||||
)
|
||||
job_data["experience_range"] = job_data.get("experience_range")
|
||||
job_data["company_rating"] = job_data.get("company_rating")
|
||||
job_data["company_reviews_count"] = job_data.get("company_reviews_count")
|
||||
job_data["vacancy_count"] = job_data.get("vacancy_count")
|
||||
job_data["work_from_home_type"] = job_data.get("work_from_home_type")
|
||||
|
||||
job_df = pd.DataFrame([job_data])
|
||||
jobs_dfs.append(job_df)
|
||||
|
||||
if jobs_dfs:
|
||||
# Step 1: Filter out all-NA columns from each DataFrame before concatenation
|
||||
filtered_dfs = [df.dropna(axis=1, how="all") for df in jobs_dfs]
|
||||
|
||||
# Step 2: Concatenate the filtered DataFrames
|
||||
jobs_df = pd.concat(filtered_dfs, ignore_index=True)
|
||||
|
||||
# Step 3: Ensure all desired columns are present, adding missing ones as empty
|
||||
for column in desired_order:
|
||||
if column not in jobs_df.columns:
|
||||
jobs_df[column] = None # Add missing columns as empty
|
||||
|
||||
# Reorder the DataFrame according to the desired order
|
||||
jobs_df = jobs_df[desired_order]
|
||||
|
||||
# Step 4: Sort the DataFrame as required
|
||||
return jobs_df.sort_values(
|
||||
by=["site", "date_posted"], ascending=[True, False]
|
||||
).reset_index(drop=True)
|
||||
else:
|
||||
return pd.DataFrame()
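
A brief usage sketch against the function above: `get_site_type()` accepts `Site` enum members as well as strings, so callers may pass the enum directly (search values are placeholders).

```python
from jobspy import scrape_jobs
from jobspy.model import Site

# mixing enum members and defaults; both forms are normalized by get_site_type()
jobs = scrape_jobs(site_name=[Site.INDEED, Site.GOOGLE], search_term="ml engineer", results_wanted=10)
```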
|
|
@ -0,0 +1,145 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import random
|
||||
import time
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
from jobspy.model import (
|
||||
Scraper,
|
||||
ScraperInput,
|
||||
Site,
|
||||
JobPost,
|
||||
JobResponse,
|
||||
Location,
|
||||
Country,
|
||||
)
|
||||
from jobspy.util import create_logger, create_session
|
||||
|
||||
log = create_logger("Bayt")
|
||||
|
||||
|
||||
class BaytScraper(Scraper):
|
||||
base_url = "https://www.bayt.com"
|
||||
delay = 2
|
||||
band_delay = 3
|
||||
|
||||
def __init__(
|
||||
self, proxies: list[str] | str | None = None, ca_cert: str | None = None
|
||||
):
|
||||
super().__init__(Site.BAYT, proxies=proxies, ca_cert=ca_cert)
|
||||
self.scraper_input = None
|
||||
self.session = None
|
||||
self.country = "worldwide"
|
||||
|
||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
||||
self.scraper_input = scraper_input
|
||||
self.session = create_session(
|
||||
proxies=self.proxies, ca_cert=self.ca_cert, is_tls=False, has_retry=True
|
||||
)
|
||||
job_list: list[JobPost] = []
|
||||
page = 1
|
||||
results_wanted = (
|
||||
scraper_input.results_wanted if scraper_input.results_wanted else 10
|
||||
)
|
||||
|
||||
while len(job_list) < results_wanted:
|
||||
log.info(f"Fetching Bayt jobs page {page}")
|
||||
job_elements = self._fetch_jobs(self.scraper_input.search_term, page)
|
||||
if not job_elements:
|
||||
break
|
||||
|
||||
if job_elements:
|
||||
log.debug(
|
||||
"First job element snippet:\n" + job_elements[0].prettify()[:500]
|
||||
)
|
||||
|
||||
initial_count = len(job_list)
|
||||
for job in job_elements:
|
||||
try:
|
||||
job_post = self._extract_job_info(job)
|
||||
if job_post:
|
||||
job_list.append(job_post)
|
||||
if len(job_list) >= results_wanted:
|
||||
break
|
||||
else:
|
||||
log.debug(
|
||||
"Extraction returned None. Job snippet:\n"
|
||||
+ job.prettify()[:500]
|
||||
)
|
||||
except Exception as e:
|
||||
log.error(f"Bayt: Error extracting job info: {str(e)}")
|
||||
continue
|
||||
|
||||
if len(job_list) == initial_count:
|
||||
log.info(f"No new jobs found on page {page}. Ending pagination.")
|
||||
break
|
||||
|
||||
page += 1
|
||||
time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
|
||||
|
||||
job_list = job_list[: scraper_input.results_wanted]
|
||||
return JobResponse(jobs=job_list)
|
||||
|
||||
def _fetch_jobs(self, query: str, page: int) -> list | None:
|
||||
"""
|
||||
Grabs the job results for the given query and page number.
|
||||
"""
|
||||
try:
|
||||
url = f"{self.base_url}/en/international/jobs/{query}-jobs/?page={page}"
|
||||
response = self.session.get(url)
|
||||
response.raise_for_status()
|
||||
soup = BeautifulSoup(response.text, "html.parser")
|
||||
job_listings = soup.find_all("li", attrs={"data-js-job": ""})
|
||||
log.debug(f"Found {len(job_listings)} job listing elements")
|
||||
return job_listings
|
||||
except Exception as e:
|
||||
log.error(f"Bayt: Error fetching jobs - {str(e)}")
|
||||
return None
|
||||
|
||||
def _extract_job_info(self, job: BeautifulSoup) -> JobPost | None:
|
||||
"""
|
||||
Extracts the job information from a single job listing.
|
||||
"""
|
||||
# Find the h2 element holding the title and link (no class filtering)
|
||||
job_general_information = job.find("h2")
|
||||
if not job_general_information:
|
||||
return
|
||||
|
||||
job_title = job_general_information.get_text(strip=True)
|
||||
job_url = self._extract_job_url(job_general_information)
|
||||
if not job_url:
|
||||
return
|
||||
|
||||
# Extract company name using the original approach:
|
||||
company_tag = job.find("div", class_="t-nowrap p10l")
|
||||
company_name = (
|
||||
company_tag.find("span").get_text(strip=True)
|
||||
if company_tag and company_tag.find("span")
|
||||
else None
|
||||
)
|
||||
|
||||
# Extract location using the original approach:
|
||||
location_tag = job.find("div", class_="t-mute t-small")
|
||||
location = location_tag.get_text(strip=True) if location_tag else None
|
||||
|
||||
job_id = f"bayt-{abs(hash(job_url))}"
|
||||
location_obj = Location(
|
||||
city=location,
|
||||
country=Country.from_string(self.country),
|
||||
)
|
||||
return JobPost(
|
||||
id=job_id,
|
||||
title=job_title,
|
||||
company_name=company_name,
|
||||
location=location_obj,
|
||||
job_url=job_url,
|
||||
)
|
||||
|
||||
def _extract_job_url(self, job_general_information: BeautifulSoup) -> str | None:
|
||||
"""
|
||||
Pulls the job URL from the 'a' within the h2 element.
|
||||
"""
|
||||
a_tag = job_general_information.find("a")
|
||||
if a_tag and a_tag.has_attr("href"):
|
||||
return self.base_url + a_tag["href"].strip()
|
|
@@ -1,5 +1,5 @@
 """
-jobspy.scrapers.exceptions
+jobspy.jobboard.exceptions
 ~~~~~~~~~~~~~~~~~~~

 This module contains the set of Scrapers' exceptions.
@@ -24,3 +24,17 @@ class ZipRecruiterException(Exception):
 class GlassdoorException(Exception):
     def __init__(self, message=None):
         super().__init__(message or "An error occurred with Glassdoor")
+
+
+class GoogleJobsException(Exception):
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with Google Jobs")
+
+
+class BaytException(Exception):
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with Bayt")
+
+
+class NaukriException(Exception):
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with Naukri")
|
|
@ -0,0 +1,320 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import json
|
||||
import requests
|
||||
from typing import Tuple
|
||||
from datetime import datetime, timedelta
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
from jobspy.glassdoor.constant import fallback_token, query_template, headers
|
||||
from jobspy.glassdoor.util import (
|
||||
get_cursor_for_page,
|
||||
parse_compensation,
|
||||
parse_location,
|
||||
)
|
||||
from jobspy.util import (
|
||||
extract_emails_from_text,
|
||||
create_logger,
|
||||
create_session,
|
||||
markdown_converter,
|
||||
)
|
||||
from jobspy.exception import GlassdoorException
|
||||
from jobspy.model import (
|
||||
JobPost,
|
||||
JobResponse,
|
||||
DescriptionFormat,
|
||||
Scraper,
|
||||
ScraperInput,
|
||||
Site,
|
||||
)
|
||||
|
||||
log = create_logger("Glassdoor")
|
||||
|
||||
|
||||
class Glassdoor(Scraper):
|
||||
def __init__(
|
||||
self, proxies: list[str] | str | None = None, ca_cert: str | None = None
|
||||
):
|
||||
"""
|
||||
Initializes GlassdoorScraper with the Glassdoor job search url
|
||||
"""
|
||||
site = Site(Site.GLASSDOOR)
|
||||
super().__init__(site, proxies=proxies, ca_cert=ca_cert)
|
||||
|
||||
self.base_url = None
|
||||
self.country = None
|
||||
self.session = None
|
||||
self.scraper_input = None
|
||||
self.jobs_per_page = 30
|
||||
self.max_pages = 30
|
||||
self.seen_urls = set()
|
||||
|
||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
||||
"""
|
||||
Scrapes Glassdoor for jobs with scraper_input criteria.
|
||||
:param scraper_input: Information about job search criteria.
|
||||
:return: JobResponse containing a list of jobs.
|
||||
"""
|
||||
self.scraper_input = scraper_input
|
||||
self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)
|
||||
self.base_url = self.scraper_input.country.get_glassdoor_url()
|
||||
|
||||
self.session = create_session(
|
||||
proxies=self.proxies, ca_cert=self.ca_cert, has_retry=True
|
||||
)
|
||||
token = self._get_csrf_token()
|
||||
headers["gd-csrf-token"] = token if token else fallback_token
|
||||
self.session.headers.update(headers)
|
||||
|
||||
location_id, location_type = self._get_location(
|
||||
scraper_input.location, scraper_input.is_remote
|
||||
)
|
||||
if location_type is None:
|
||||
log.error("Glassdoor: location not parsed")
|
||||
return JobResponse(jobs=[])
|
||||
job_list: list[JobPost] = []
|
||||
cursor = None
|
||||
|
||||
range_start = 1 + (scraper_input.offset // self.jobs_per_page)
|
||||
tot_pages = (scraper_input.results_wanted // self.jobs_per_page) + 2
|
||||
range_end = min(tot_pages, self.max_pages + 1)
|
||||
for page in range(range_start, range_end):
|
||||
log.info(f"search page: {page} / {range_end - 1}")
|
||||
try:
|
||||
jobs, cursor = self._fetch_jobs_page(
|
||||
scraper_input, location_id, location_type, page, cursor
|
||||
)
|
||||
job_list.extend(jobs)
|
||||
if not jobs or len(job_list) >= scraper_input.results_wanted:
|
||||
job_list = job_list[: scraper_input.results_wanted]
|
||||
break
|
||||
except Exception as e:
|
||||
log.error(f"Glassdoor: {str(e)}")
|
||||
break
|
||||
return JobResponse(jobs=job_list)
|
||||
|
||||
def _fetch_jobs_page(
|
||||
self,
|
||||
scraper_input: ScraperInput,
|
||||
location_id: int,
|
||||
location_type: str,
|
||||
page_num: int,
|
||||
cursor: str | None,
|
||||
) -> Tuple[list[JobPost], str | None]:
|
||||
"""
|
||||
Scrapes a page of Glassdoor for jobs with scraper_input criteria
|
||||
"""
|
||||
jobs = []
|
||||
self.scraper_input = scraper_input
|
||||
try:
|
||||
payload = self._add_payload(location_id, location_type, page_num, cursor)
|
||||
response = self.session.post(
|
||||
f"{self.base_url}/graph",
|
||||
timeout_seconds=15,
|
||||
data=payload,
|
||||
)
|
||||
if response.status_code != 200:
|
||||
exc_msg = f"bad response status code: {response.status_code}"
|
||||
raise GlassdoorException(exc_msg)
|
||||
res_json = response.json()[0]
|
||||
if "errors" in res_json:
|
||||
raise ValueError("Error encountered in API response")
|
||||
except (
|
||||
requests.exceptions.ReadTimeout,
|
||||
GlassdoorException,
|
||||
ValueError,
|
||||
Exception,
|
||||
) as e:
|
||||
log.error(f"Glassdoor: {str(e)}")
|
||||
return jobs, None
|
||||
|
||||
jobs_data = res_json["data"]["jobListings"]["jobListings"]
|
||||
|
||||
with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
|
||||
future_to_job_data = {
|
||||
executor.submit(self._process_job, job): job for job in jobs_data
|
||||
}
|
||||
for future in as_completed(future_to_job_data):
|
||||
try:
|
||||
job_post = future.result()
|
||||
if job_post:
|
||||
jobs.append(job_post)
|
||||
except Exception as exc:
|
||||
raise GlassdoorException(f"Glassdoor generated an exception: {exc}")
|
||||
|
||||
return jobs, get_cursor_for_page(
|
||||
res_json["data"]["jobListings"]["paginationCursors"], page_num + 1
|
||||
)
|
||||
|
||||
def _get_csrf_token(self):
|
||||
"""
|
||||
Fetches csrf token needed for API by visiting a generic page
|
||||
"""
|
||||
res = self.session.get(f"{self.base_url}/Job/computer-science-jobs.htm")
|
||||
pattern = r'"token":\s*"([^"]+)"'
|
||||
matches = re.findall(pattern, res.text)
|
||||
token = None
|
||||
if matches:
|
||||
token = matches[0]
|
||||
return token
|
||||
|
||||
def _process_job(self, job_data):
|
||||
"""
|
||||
Processes a single job and fetches its description.
|
||||
"""
|
||||
job_id = job_data["jobview"]["job"]["listingId"]
|
||||
job_url = f"{self.base_url}job-listing/j?jl={job_id}"
|
||||
if job_url in self.seen_urls:
|
||||
return None
|
||||
self.seen_urls.add(job_url)
|
||||
job = job_data["jobview"]
|
||||
title = job["job"]["jobTitleText"]
|
||||
company_name = job["header"]["employerNameFromSearch"]
|
||||
company_id = job_data["jobview"]["header"]["employer"]["id"]
|
||||
location_name = job["header"].get("locationName", "")
|
||||
location_type = job["header"].get("locationType", "")
|
||||
age_in_days = job["header"].get("ageInDays")
|
||||
is_remote, location = False, None
|
||||
date_diff = (datetime.now() - timedelta(days=age_in_days)).date()
|
||||
date_posted = date_diff if age_in_days is not None else None
|
||||
|
||||
if location_type == "S":
|
||||
is_remote = True
|
||||
else:
|
||||
location = parse_location(location_name)
|
||||
|
||||
compensation = parse_compensation(job["header"])
|
||||
try:
|
||||
description = self._fetch_job_description(job_id)
|
||||
except:
|
||||
description = None
|
||||
company_url = f"{self.base_url}Overview/W-EI_IE{company_id}.htm"
|
||||
company_logo = (
|
||||
job_data["jobview"].get("overview", {}).get("squareLogoUrl", None)
|
||||
)
|
||||
listing_type = (
|
||||
job_data["jobview"]
|
||||
.get("header", {})
|
||||
.get("adOrderSponsorshipLevel", "")
|
||||
.lower()
|
||||
)
|
||||
return JobPost(
|
||||
id=f"gd-{job_id}",
|
||||
title=title,
|
||||
company_url=company_url if company_id else None,
|
||||
company_name=company_name,
|
||||
date_posted=date_posted,
|
||||
job_url=job_url,
|
||||
location=location,
|
||||
compensation=compensation,
|
||||
is_remote=is_remote,
|
||||
description=description,
|
||||
emails=extract_emails_from_text(description) if description else None,
|
||||
company_logo=company_logo,
|
||||
listing_type=listing_type,
|
||||
)
|
||||
|
||||
def _fetch_job_description(self, job_id):
|
||||
"""
|
||||
Fetches the job description for a single job ID.
|
||||
"""
|
||||
url = f"{self.base_url}/graph"
|
||||
body = [
|
||||
{
|
||||
"operationName": "JobDetailQuery",
|
||||
"variables": {
|
||||
"jl": job_id,
|
||||
"queryString": "q",
|
||||
"pageTypeEnum": "SERP",
|
||||
},
|
||||
"query": """
|
||||
query JobDetailQuery($jl: Long!, $queryString: String, $pageTypeEnum: PageTypeEnum) {
|
||||
jobview: jobView(
|
||||
listingId: $jl
|
||||
contextHolder: {queryString: $queryString, pageTypeEnum: $pageTypeEnum}
|
||||
) {
|
||||
job {
|
||||
description
|
||||
__typename
|
||||
}
|
||||
__typename
|
||||
}
|
||||
}
|
||||
""",
|
||||
}
|
||||
]
|
||||
res = requests.post(url, json=body, headers=headers)
|
||||
if res.status_code != 200:
|
||||
return None
|
||||
data = res.json()[0]
|
||||
desc = data["data"]["jobview"]["job"]["description"]
|
||||
if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
|
||||
desc = markdown_converter(desc)
|
||||
return desc
|
||||
|
||||
def _get_location(self, location: str, is_remote: bool) -> (int, str):
|
||||
if not location or is_remote:
|
||||
return "11047", "STATE" # remote options
|
||||
url = f"{self.base_url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
|
||||
res = self.session.get(url)
|
||||
if res.status_code != 200:
|
||||
if res.status_code == 429:
|
||||
err = f"429 Response - Blocked by Glassdoor for too many requests"
|
||||
log.error(err)
|
||||
return None, None
|
||||
else:
|
||||
err = f"Glassdoor response status code {res.status_code}"
|
||||
err += f" - {res.text}"
|
||||
log.error(f"Glassdoor response status code {res.status_code}")
|
||||
return None, None
|
||||
items = res.json()
|
||||
|
||||
if not items:
|
||||
raise ValueError(f"Location '{location}' not found on Glassdoor")
|
||||
location_type = items[0]["locationType"]
|
||||
if location_type == "C":
|
||||
location_type = "CITY"
|
||||
elif location_type == "S":
|
||||
location_type = "STATE"
|
||||
elif location_type == "N":
|
||||
location_type = "COUNTRY"
|
||||
return int(items[0]["locationId"]), location_type
|
||||
|
||||
def _add_payload(
|
||||
self,
|
||||
location_id: int,
|
||||
location_type: str,
|
||||
page_num: int,
|
||||
cursor: str | None = None,
|
||||
) -> str:
|
||||
fromage = None
|
||||
if self.scraper_input.hours_old:
|
||||
fromage = max(self.scraper_input.hours_old // 24, 1)
|
||||
filter_params = []
|
||||
if self.scraper_input.easy_apply:
|
||||
filter_params.append({"filterKey": "applicationType", "values": "1"})
|
||||
if fromage:
|
||||
filter_params.append({"filterKey": "fromAge", "values": str(fromage)})
|
||||
payload = {
|
||||
"operationName": "JobSearchResultsQuery",
|
||||
"variables": {
|
||||
"excludeJobListingIds": [],
|
||||
"filterParams": filter_params,
|
||||
"keyword": self.scraper_input.search_term,
|
||||
"numJobsToShow": 30,
|
||||
"locationType": location_type,
|
||||
"locationId": int(location_id),
|
||||
"parameterUrlInput": f"IL.0,12_I{location_type}{location_id}",
|
||||
"pageNumber": page_num,
|
||||
"pageCursor": cursor,
|
||||
"fromage": fromage,
|
||||
"sort": "date",
|
||||
},
|
||||
"query": query_template,
|
||||
}
|
||||
if self.scraper_input.job_type:
|
||||
payload["variables"]["filterParams"].append(
|
||||
{"filterKey": "jobType", "values": self.scraper_input.job_type.value[0]}
|
||||
)
|
||||
return json.dumps([payload])
|
|
@ -0,0 +1,184 @@
|
|||
headers = {
|
||||
"authority": "www.glassdoor.com",
|
||||
"accept": "*/*",
|
||||
"accept-language": "en-US,en;q=0.9",
|
||||
"apollographql-client-name": "job-search-next",
|
||||
"apollographql-client-version": "4.65.5",
|
||||
"content-type": "application/json",
|
||||
"origin": "https://www.glassdoor.com",
|
||||
"referer": "https://www.glassdoor.com/",
|
||||
"sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
|
||||
"sec-ch-ua-mobile": "?0",
|
||||
"sec-ch-ua-platform": '"macOS"',
|
||||
"sec-fetch-dest": "empty",
|
||||
"sec-fetch-mode": "cors",
|
||||
"sec-fetch-site": "same-origin",
|
||||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
|
||||
}
|
||||
query_template = """
|
||||
query JobSearchResultsQuery(
|
||||
$excludeJobListingIds: [Long!],
|
||||
$keyword: String,
|
||||
$locationId: Int,
|
||||
$locationType: LocationTypeEnum,
|
||||
$numJobsToShow: Int!,
|
||||
$pageCursor: String,
|
||||
$pageNumber: Int,
|
||||
$filterParams: [FilterParams],
|
||||
$originalPageUrl: String,
|
||||
$seoFriendlyUrlInput: String,
|
||||
$parameterUrlInput: String,
|
||||
$seoUrl: Boolean
|
||||
) {
|
||||
jobListings(
|
||||
contextHolder: {
|
||||
searchParams: {
|
||||
excludeJobListingIds: $excludeJobListingIds,
|
||||
keyword: $keyword,
|
||||
locationId: $locationId,
|
||||
locationType: $locationType,
|
||||
numPerPage: $numJobsToShow,
|
||||
pageCursor: $pageCursor,
|
||||
pageNumber: $pageNumber,
|
||||
filterParams: $filterParams,
|
||||
originalPageUrl: $originalPageUrl,
|
||||
seoFriendlyUrlInput: $seoFriendlyUrlInput,
|
||||
parameterUrlInput: $parameterUrlInput,
|
||||
seoUrl: $seoUrl,
|
||||
searchType: SR
|
||||
}
|
||||
}
|
||||
) {
|
||||
companyFilterOptions {
|
||||
id
|
||||
shortName
|
||||
__typename
|
||||
}
|
||||
filterOptions
|
||||
indeedCtk
|
||||
jobListings {
|
||||
...JobView
|
||||
__typename
|
||||
}
|
||||
jobListingSeoLinks {
|
||||
linkItems {
|
||||
position
|
||||
url
|
||||
__typename
|
||||
}
|
||||
__typename
|
||||
}
|
||||
jobSearchTrackingKey
|
||||
jobsPageSeoData {
|
||||
pageMetaDescription
|
||||
pageTitle
|
||||
__typename
|
||||
}
|
||||
paginationCursors {
|
||||
cursor
|
||||
pageNumber
|
||||
__typename
|
||||
}
|
||||
indexablePageForSeo
|
||||
searchResultsMetadata {
|
||||
searchCriteria {
|
||||
implicitLocation {
|
||||
id
|
||||
localizedDisplayName
|
||||
type
|
||||
__typename
|
||||
}
|
||||
keyword
|
||||
location {
|
||||
id
|
||||
shortName
|
||||
localizedShortName
|
||||
localizedDisplayName
|
||||
type
|
||||
__typename
|
||||
}
|
||||
__typename
|
||||
}
|
||||
helpCenterDomain
|
||||
helpCenterLocale
|
||||
jobSerpJobOutlook {
|
||||
occupation
|
||||
paragraph
|
||||
__typename
|
||||
}
|
||||
showMachineReadableJobs
|
||||
__typename
|
||||
}
|
||||
totalJobsCount
|
||||
__typename
|
||||
}
|
||||
}
|
||||
|
||||
fragment JobView on JobListingSearchResult {
|
||||
jobview {
|
||||
header {
|
||||
adOrderId
|
||||
advertiserType
|
||||
adOrderSponsorshipLevel
|
||||
ageInDays
|
||||
divisionEmployerName
|
||||
easyApply
|
||||
employer {
|
||||
id
|
||||
name
|
||||
shortName
|
||||
__typename
|
||||
}
|
||||
employerNameFromSearch
|
||||
goc
|
||||
gocConfidence
|
||||
gocId
|
||||
jobCountryId
|
||||
jobLink
|
||||
jobResultTrackingKey
|
||||
jobTitleText
|
||||
locationName
|
||||
locationType
|
||||
locId
|
||||
needsCommission
|
||||
payCurrency
|
||||
payPeriod
|
||||
payPeriodAdjustedPay {
|
||||
p10
|
||||
p50
|
||||
p90
|
||||
__typename
|
||||
}
|
||||
rating
|
||||
salarySource
|
||||
savedJobId
|
||||
sponsored
|
||||
__typename
|
||||
}
|
||||
job {
|
||||
description
|
||||
importConfigId
|
||||
jobTitleId
|
||||
jobTitleText
|
||||
listingId
|
||||
__typename
|
||||
}
|
||||
jobListingAdminDetails {
|
||||
cpcVal
|
||||
importConfigId
|
||||
jobListingId
|
||||
jobSourceId
|
||||
userEligibleForAdminJobDetails
|
||||
__typename
|
||||
}
|
||||
overview {
|
||||
shortName
|
||||
squareLogoUrl
|
||||
__typename
|
||||
}
|
||||
__typename
|
||||
}
|
||||
__typename
|
||||
}
|
||||
"""
|
||||
fallback_token = "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok"
|
|
@ -0,0 +1,42 @@
|
|||
from jobspy.model import Compensation, CompensationInterval, Location, JobType
|
||||
|
||||
|
||||
def parse_compensation(data: dict) -> Compensation | None:
|
||||
pay_period = data.get("payPeriod")
|
||||
adjusted_pay = data.get("payPeriodAdjustedPay")
|
||||
currency = data.get("payCurrency", "USD")
|
||||
if not pay_period or not adjusted_pay:
|
||||
return None
|
||||
|
||||
interval = None
|
||||
if pay_period == "ANNUAL":
|
||||
interval = CompensationInterval.YEARLY
|
||||
elif pay_period:
|
||||
interval = CompensationInterval.get_interval(pay_period)
|
||||
min_amount = int(adjusted_pay.get("p10") // 1)
|
||||
max_amount = int(adjusted_pay.get("p90") // 1)
|
||||
return Compensation(
|
||||
interval=interval,
|
||||
min_amount=min_amount,
|
||||
max_amount=max_amount,
|
||||
currency=currency,
|
||||
)
|
||||
|
||||
|
||||
def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
|
||||
for job_type in JobType:
|
||||
if job_type_str in job_type.value:
|
||||
return [job_type]
|
||||
|
||||
|
||||
def parse_location(location_name: str) -> Location | None:
|
||||
if not location_name or location_name == "Remote":
|
||||
return
|
||||
city, _, state = location_name.partition(", ")
|
||||
return Location(city=city, state=state)
|
||||
|
||||
|
||||
def get_cursor_for_page(pagination_cursors, page_num):
|
||||
for cursor_data in pagination_cursors:
|
||||
if cursor_data["pageNumber"] == page_num:
|
||||
return cursor_data["cursor"]
|
|
@ -0,0 +1,202 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import re
|
||||
import json
|
||||
from typing import Tuple
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
from jobspy.google.constant import headers_jobs, headers_initial, async_param
|
||||
from jobspy.model import (
|
||||
Scraper,
|
||||
ScraperInput,
|
||||
Site,
|
||||
JobPost,
|
||||
JobResponse,
|
||||
Location,
|
||||
JobType,
|
||||
)
|
||||
from jobspy.util import extract_emails_from_text, extract_job_type, create_session
|
||||
from jobspy.google.util import log, find_job_info_initial_page, find_job_info
|
||||
|
||||
|
||||
class Google(Scraper):
|
||||
def __init__(
|
||||
self, proxies: list[str] | str | None = None, ca_cert: str | None = None
|
||||
):
|
||||
"""
|
||||
Initializes Google Scraper with the Google jobs search url
|
||||
"""
|
||||
site = Site(Site.GOOGLE)
|
||||
super().__init__(site, proxies=proxies, ca_cert=ca_cert)
|
||||
|
||||
self.country = None
|
||||
self.session = None
|
||||
self.scraper_input = None
|
||||
self.jobs_per_page = 10
|
||||
self.seen_urls = set()
|
||||
self.url = "https://www.google.com/search"
|
||||
self.jobs_url = "https://www.google.com/async/callback:550"
|
||||
|
||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
||||
"""
|
||||
Scrapes Google for jobs with scraper_input criteria.
|
||||
:param scraper_input: Information about job search criteria.
|
||||
:return: JobResponse containing a list of jobs.
|
||||
"""
|
||||
self.scraper_input = scraper_input
|
||||
self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)
|
||||
|
||||
self.session = create_session(
|
||||
proxies=self.proxies, ca_cert=self.ca_cert, is_tls=False, has_retry=True
|
||||
)
|
||||
forward_cursor, job_list = self._get_initial_cursor_and_jobs()
|
||||
if forward_cursor is None:
|
||||
log.warning(
|
||||
"initial cursor not found, try changing your query or there was at most 10 results"
|
||||
)
|
||||
return JobResponse(jobs=job_list)
|
||||
|
||||
page = 1
|
||||
|
||||
while (
|
||||
len(self.seen_urls) < scraper_input.results_wanted + scraper_input.offset
|
||||
and forward_cursor
|
||||
):
|
||||
log.info(
|
||||
f"search page: {page} / {math.ceil(scraper_input.results_wanted / self.jobs_per_page)}"
|
||||
)
|
||||
try:
|
||||
jobs, forward_cursor = self._get_jobs_next_page(forward_cursor)
|
||||
except Exception as e:
|
||||
log.error(f"failed to get jobs on page: {page}, {e}")
|
||||
break
|
||||
if not jobs:
|
||||
log.info(f"found no jobs on page: {page}")
|
||||
break
|
||||
job_list += jobs
|
||||
page += 1
|
||||
return JobResponse(
|
||||
jobs=job_list[
|
||||
scraper_input.offset : scraper_input.offset
|
||||
+ scraper_input.results_wanted
|
||||
]
|
||||
)
|
||||
|
||||
def _get_initial_cursor_and_jobs(self) -> Tuple[str, list[JobPost]]:
|
||||
"""Gets initial cursor and jobs to paginate through job listings"""
|
||||
query = f"{self.scraper_input.search_term} jobs"
|
||||
|
||||
def get_time_range(hours_old):
|
||||
if hours_old <= 24:
|
||||
return "since yesterday"
|
||||
elif hours_old <= 72:
|
||||
return "in the last 3 days"
|
||||
elif hours_old <= 168:
|
||||
return "in the last week"
|
||||
else:
|
||||
return "in the last month"
|
||||
|
||||
job_type_mapping = {
|
||||
JobType.FULL_TIME: "Full time",
|
||||
JobType.PART_TIME: "Part time",
|
||||
JobType.INTERNSHIP: "Internship",
|
||||
JobType.CONTRACT: "Contract",
|
||||
}
|
||||
|
||||
if self.scraper_input.job_type in job_type_mapping:
|
||||
query += f" {job_type_mapping[self.scraper_input.job_type]}"
|
||||
|
||||
if self.scraper_input.location:
|
||||
query += f" near {self.scraper_input.location}"
|
||||
|
||||
if self.scraper_input.hours_old:
|
||||
time_filter = get_time_range(self.scraper_input.hours_old)
|
||||
query += f" {time_filter}"
|
||||
|
||||
if self.scraper_input.is_remote:
|
||||
query += " remote"
|
||||
|
||||
if self.scraper_input.google_search_term:
|
||||
query = self.scraper_input.google_search_term
|
||||
|
||||
params = {"q": query, "udm": "8"}
|
||||
response = self.session.get(self.url, headers=headers_initial, params=params)
|
||||
|
||||
pattern_fc = r'<div jsname="Yust4d"[^>]+data-async-fc="([^"]+)"'
|
||||
match_fc = re.search(pattern_fc, response.text)
|
||||
data_async_fc = match_fc.group(1) if match_fc else None
|
||||
jobs_raw = find_job_info_initial_page(response.text)
|
||||
jobs = []
|
||||
for job_raw in jobs_raw:
|
||||
job_post = self._parse_job(job_raw)
|
||||
if job_post:
|
||||
jobs.append(job_post)
|
||||
return data_async_fc, jobs
|
||||
|
||||
def _get_jobs_next_page(self, forward_cursor: str) -> Tuple[list[JobPost], str]:
|
||||
params = {"fc": [forward_cursor], "fcv": ["3"], "async": [async_param]}
|
||||
response = self.session.get(self.jobs_url, headers=headers_jobs, params=params)
|
||||
return self._parse_jobs(response.text)
|
||||
|
||||
def _parse_jobs(self, job_data: str) -> Tuple[list[JobPost], str]:
|
||||
"""
|
||||
Parses jobs on a page with next page cursor
|
||||
"""
|
||||
start_idx = job_data.find("[[[")
|
||||
end_idx = job_data.rindex("]]]") + 3
|
||||
s = job_data[start_idx:end_idx]
|
||||
parsed = json.loads(s)[0]
|
||||
|
||||
pattern_fc = r'data-async-fc="([^"]+)"'
|
||||
match_fc = re.search(pattern_fc, job_data)
|
||||
data_async_fc = match_fc.group(1) if match_fc else None
|
||||
jobs_on_page = []
|
||||
for array in parsed:
|
||||
_, job_data = array
|
||||
if not job_data.startswith("[[["):
|
||||
continue
|
||||
job_d = json.loads(job_data)
|
||||
|
||||
job_info = find_job_info(job_d)
|
||||
job_post = self._parse_job(job_info)
|
||||
if job_post:
|
||||
jobs_on_page.append(job_post)
|
||||
return jobs_on_page, data_async_fc
|
||||
|
||||
def _parse_job(self, job_info: list):
|
||||
job_url = job_info[3][0][0] if job_info[3] and job_info[3][0] else None
|
||||
if job_url in self.seen_urls:
|
||||
return
|
||||
self.seen_urls.add(job_url)
|
||||
|
||||
title = job_info[0]
|
||||
company_name = job_info[1]
|
||||
location = city = job_info[2]
|
||||
state = country = date_posted = None
|
||||
if location and "," in location:
|
||||
city, state, *country = [*map(lambda x: x.strip(), location.split(","))]
|
||||
|
||||
days_ago_str = job_info[12]
|
||||
if isinstance(days_ago_str, str):
|
||||
match = re.search(r"\d+", days_ago_str)
|
||||
days_ago = int(match.group()) if match else None
|
||||
date_posted = (datetime.now() - timedelta(days=days_ago)).date() if days_ago is not None else None
|
||||
|
||||
description = job_info[19]
|
||||
|
||||
job_post = JobPost(
|
||||
id=f"go-{job_info[28]}",
|
||||
title=title,
|
||||
company_name=company_name,
|
||||
location=Location(
|
||||
city=city, state=state, country=country[0] if country else None
|
||||
),
|
||||
job_url=job_url,
|
||||
date_posted=date_posted,
|
||||
is_remote="remote" in description.lower() or "wfh" in description.lower(),
|
||||
description=description,
|
||||
emails=extract_emails_from_text(description),
|
||||
job_type=extract_job_type(description),
|
||||
)
|
||||
return job_post
|
|
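For orientation, here is a minimal standalone sketch of the search string that `_get_initial_cursor_and_jobs` composes before requesting `https://www.google.com/search`. The inputs below are hypothetical; when `google_search_term` is set it replaces the composed query entirely.

```python
# Hypothetical inputs, mirroring the query-building branches above.
search_term = "software engineer"
location = "Austin, TX"

query = f"{search_term} jobs"      # base query
query += " Full time"              # job_type == JobType.FULL_TIME
query += f" near {location}"       # location filter
query += " in the last 3 days"     # get_time_range(72) when hours_old=72
query += " remote"                 # is_remote=True

params = {"q": query, "udm": "8"}  # same params the scraper sends
print(params["q"])
# software engineer jobs Full time near Austin, TX in the last 3 days remote
```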
@ -0,0 +1,52 @@
|
|||
headers_initial = {
|
||||
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
|
||||
"accept-language": "en-US,en;q=0.9",
|
||||
"priority": "u=0, i",
|
||||
"referer": "https://www.google.com/",
|
||||
"sec-ch-prefers-color-scheme": "dark",
|
||||
"sec-ch-ua": '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
|
||||
"sec-ch-ua-arch": '"arm"',
|
||||
"sec-ch-ua-bitness": '"64"',
|
||||
"sec-ch-ua-form-factors": '"Desktop"',
|
||||
"sec-ch-ua-full-version": '"130.0.6723.58"',
|
||||
"sec-ch-ua-full-version-list": '"Chromium";v="130.0.6723.58", "Google Chrome";v="130.0.6723.58", "Not?A_Brand";v="99.0.0.0"',
|
||||
"sec-ch-ua-mobile": "?0",
|
||||
"sec-ch-ua-model": '""',
|
||||
"sec-ch-ua-platform": '"macOS"',
|
||||
"sec-ch-ua-platform-version": '"15.0.1"',
|
||||
"sec-ch-ua-wow64": "?0",
|
||||
"sec-fetch-dest": "document",
|
||||
"sec-fetch-mode": "navigate",
|
||||
"sec-fetch-site": "same-origin",
|
||||
"sec-fetch-user": "?1",
|
||||
"upgrade-insecure-requests": "1",
|
||||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
|
||||
"x-browser-channel": "stable",
|
||||
"x-browser-copyright": "Copyright 2024 Google LLC. All rights reserved.",
|
||||
"x-browser-year": "2024",
|
||||
}
|
||||
|
||||
headers_jobs = {
|
||||
"accept": "*/*",
|
||||
"accept-language": "en-US,en;q=0.9",
|
||||
"priority": "u=1, i",
|
||||
"referer": "https://www.google.com/",
|
||||
"sec-ch-prefers-color-scheme": "dark",
|
||||
"sec-ch-ua": '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
|
||||
"sec-ch-ua-arch": '"arm"',
|
||||
"sec-ch-ua-bitness": '"64"',
|
||||
"sec-ch-ua-form-factors": '"Desktop"',
|
||||
"sec-ch-ua-full-version": '"130.0.6723.58"',
|
||||
"sec-ch-ua-full-version-list": '"Chromium";v="130.0.6723.58", "Google Chrome";v="130.0.6723.58", "Not?A_Brand";v="99.0.0.0"',
|
||||
"sec-ch-ua-mobile": "?0",
|
||||
"sec-ch-ua-model": '""',
|
||||
"sec-ch-ua-platform": '"macOS"',
|
||||
"sec-ch-ua-platform-version": '"15.0.1"',
|
||||
"sec-ch-ua-wow64": "?0",
|
||||
"sec-fetch-dest": "empty",
|
||||
"sec-fetch-mode": "cors",
|
||||
"sec-fetch-site": "same-origin",
|
||||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
|
||||
}
|
||||
|
||||
async_param = "_basejs:/xjs/_/js/k=xjs.s.en_US.JwveA-JiKmg.2018.O/am=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAACAAAoICAAAAAAAKMAfAAAAIAQAAAAAAAAAAAAACCAAAEJDAAACAAAAAGABAIAAARBAAABAAAAAgAgQAABAASKAfv8JAAABAAAAAAwAQAQACQAAAAAAcAEAQABoCAAAABAAAIABAACAAAAEAAAAFAAAAAAAAAAAAAAAAAAAAAAAAACAQADoBwAAAAAAAAAAAAAQBAAAAATQAAoACOAHAAAAAAAAAQAAAIIAAAA_ZAACAAAAAAAAcB8APB4wHFJ4AAAAAAAAAAAAAAAACECCYA5If0EACAAAAAAAAAAAAAAAAAAAUgRNXG4AMAE/dg=0/br=1/rs=ACT90oGxMeaFMCopIHq5tuQM-6_3M_VMjQ,_basecss:/xjs/_/ss/k=xjs.s.IwsGu62EDtU.L.B1.O/am=QOoQIAQAAAQAREADEBAAAAAAAAAAAAAAAAAAAAAgAQAAIAAAgAQAAAIAIAIAoEwCAADIC8AfsgEAawwAPkAAjgoAGAAAAAAAAEADAAAAAAIgAECHAAAAAAAAAAABAQAggAARQAAAQCEAAAAAIAAAABgAAAAAIAQIACCAAfB-AAFIQABoCEA_CgEAAIABAACEgHAEwwAEFQAM4CgAAAAAAAAAAAAACABCAAAAQEAAABAgAMCPAAA4AoE2BAEAggSAAIoAQAAAAAgAAAAACCAQAAAxEwA_ZAACAAAAAAAAAAkAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAQAEAAAAAAAAAAAAAAAAAAAAAQA/br=1/rs=ACT90oGZc36t3uUQkj0srnIvvbHjO2hgyg,_basecomb:/xjs/_/js/k=xjs.s.en_US.JwveA-JiKmg.2018.O/ck=xjs.s.IwsGu62EDtU.L.B1.O/am=QOoQIAQAAAQAREADEBAAAAAAAAAAAAAAAAAAAAAgAQAAIAAAgAQAAAKAIAoIqEwCAADIK8AfsgEAawwAPkAAjgoAGAAACCAAAEJDAAACAAIgAGCHAIAAARBAAABBAQAggAgRQABAQSOAfv8JIAABABgAAAwAYAQICSCAAfB-cAFIQABoCEA_ChEAAIABAACEgHAEwwAEFQAM4CgAAAAAAAAAAAAACABCAACAQEDoBxAgAMCPAAA4AoE2BAEAggTQAIoASOAHAAgAAAAACSAQAIIxEwA_ZAACAAAAAAAAcB8APB4wHFJ4AAAAAAAAAAAAAAAACECCYA5If0EACAAAAAAAAAAAAAAAAAAAUgRNXG4AMAE/d=1/ed=1/dg=0/br=1/ujg=1/rs=ACT90oFNLTjPzD_OAqhhtXwe2pg1T3WpBg,_fmt:prog,_id:fc_5FwaZ86OKsfdwN4P4La3yA4_2"
|
|
@ -0,0 +1,41 @@
import json
import re

from jobspy.util import create_logger

log = create_logger("Google")


def find_job_info(jobs_data: list | dict) -> list | None:
    """Iterates through the JSON data to find the job listings"""
    if isinstance(jobs_data, dict):
        for key, value in jobs_data.items():
            if key == "520084652" and isinstance(value, list):
                return value
            else:
                result = find_job_info(value)
                if result:
                    return result
    elif isinstance(jobs_data, list):
        for item in jobs_data:
            result = find_job_info(item)
            if result:
                return result
    return None


def find_job_info_initial_page(html_text: str):
    pattern = '520084652":(' + r"\[.*?\]\s*])\s*}\s*]\s*]\s*]\s*]\s*]"
    results = []
    matches = re.finditer(pattern, html_text)

    for match in matches:
        try:
            parsed_data = json.loads(match.group(1))
            results.append(parsed_data)
        except json.JSONDecodeError as e:
            log.error(f"Failed to parse match: {str(e)}")
            results.append({"raw_match": match.group(0), "error": str(e)})
    return results
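A small self-contained example of how `find_job_info` walks an arbitrarily nested payload until it finds the `"520084652"` key; the nesting below is made up for illustration:

```python
nested = [
    {"meta": {"ver": 1}},
    [{"520084652": [["Backend Engineer", "Acme", "Remote"]]}],
]

print(find_job_info(nested))
# [['Backend Engineer', 'Acme', 'Remote']]
```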
@ -0,0 +1,260 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
from datetime import datetime
|
||||
from typing import Tuple
|
||||
|
||||
from jobspy.indeed.constant import job_search_query, api_headers
|
||||
from jobspy.indeed.util import is_job_remote, get_compensation, get_job_type
|
||||
from jobspy.model import (
|
||||
Scraper,
|
||||
ScraperInput,
|
||||
Site,
|
||||
JobPost,
|
||||
Location,
|
||||
JobResponse,
|
||||
JobType,
|
||||
DescriptionFormat,
|
||||
)
|
||||
from jobspy.util import (
|
||||
extract_emails_from_text,
|
||||
markdown_converter,
|
||||
create_session,
|
||||
create_logger,
|
||||
)
|
||||
|
||||
log = create_logger("Indeed")
|
||||
|
||||
|
||||
class Indeed(Scraper):
|
||||
def __init__(
|
||||
self, proxies: list[str] | str | None = None, ca_cert: str | None = None
|
||||
):
|
||||
"""
|
||||
Initializes IndeedScraper with the Indeed API url
|
||||
"""
|
||||
super().__init__(Site.INDEED, proxies=proxies)
|
||||
|
||||
self.session = create_session(
|
||||
proxies=self.proxies, ca_cert=ca_cert, is_tls=False
|
||||
)
|
||||
self.scraper_input = None
|
||||
self.jobs_per_page = 100
|
||||
self.num_workers = 10
|
||||
self.seen_urls = set()
|
||||
self.headers = None
|
||||
self.api_country_code = None
|
||||
self.base_url = None
|
||||
self.api_url = "https://apis.indeed.com/graphql"
|
||||
|
||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
||||
"""
|
||||
Scrapes Indeed for jobs with scraper_input criteria
|
||||
:param scraper_input:
|
||||
:return: job_response
|
||||
"""
|
||||
self.scraper_input = scraper_input
|
||||
domain, self.api_country_code = self.scraper_input.country.indeed_domain_value
|
||||
self.base_url = f"https://{domain}.indeed.com"
|
||||
self.headers = api_headers.copy()
|
||||
self.headers["indeed-co"] = self.scraper_input.country.indeed_domain_value
|
||||
job_list = []
|
||||
page = 1
|
||||
|
||||
cursor = None
|
||||
|
||||
while len(self.seen_urls) < scraper_input.results_wanted + scraper_input.offset:
|
||||
log.info(
|
||||
f"search page: {page} / {math.ceil(scraper_input.results_wanted / self.jobs_per_page)}"
|
||||
)
|
||||
jobs, cursor = self._scrape_page(cursor)
|
||||
if not jobs:
|
||||
log.info(f"found no jobs on page: {page}")
|
||||
break
|
||||
job_list += jobs
|
||||
page += 1
|
||||
return JobResponse(
|
||||
jobs=job_list[
|
||||
scraper_input.offset : scraper_input.offset
|
||||
+ scraper_input.results_wanted
|
||||
]
|
||||
)
|
||||
|
||||
def _scrape_page(self, cursor: str | None) -> Tuple[list[JobPost], str | None]:
|
||||
"""
|
||||
Scrapes a page of Indeed for jobs with scraper_input criteria
|
||||
:param cursor:
|
||||
:return: jobs found on page, next page cursor
|
||||
"""
|
||||
jobs = []
|
||||
new_cursor = None
|
||||
filters = self._build_filters()
|
||||
search_term = (
|
||||
self.scraper_input.search_term.replace('"', '\\"')
|
||||
if self.scraper_input.search_term
|
||||
else ""
|
||||
)
|
||||
query = job_search_query.format(
|
||||
what=(f'what: "{search_term}"' if search_term else ""),
|
||||
location=(
|
||||
f'location: {{where: "{self.scraper_input.location}", radius: {self.scraper_input.distance}, radiusUnit: MILES}}'
|
||||
if self.scraper_input.location
|
||||
else ""
|
||||
),
|
||||
dateOnIndeed=self.scraper_input.hours_old,
|
||||
cursor=f'cursor: "{cursor}"' if cursor else "",
|
||||
filters=filters,
|
||||
)
|
||||
payload = {
|
||||
"query": query,
|
||||
}
|
||||
api_headers_temp = api_headers.copy()
|
||||
api_headers_temp["indeed-co"] = self.api_country_code
|
||||
response = self.session.post(
|
||||
self.api_url,
|
||||
headers=api_headers_temp,
|
||||
json=payload,
|
||||
timeout=10,
|
||||
verify=False,
|
||||
)
|
||||
if not response.ok:
|
||||
log.info(
|
||||
f"responded with status code: {response.status_code} (submit GitHub issue if this appears to be a bug)"
|
||||
)
|
||||
return jobs, new_cursor
|
||||
data = response.json()
|
||||
jobs = data["data"]["jobSearch"]["results"]
|
||||
new_cursor = data["data"]["jobSearch"]["pageInfo"]["nextCursor"]
|
||||
|
||||
job_list = []
|
||||
for job in jobs:
|
||||
processed_job = self._process_job(job["job"])
|
||||
if processed_job:
|
||||
job_list.append(processed_job)
|
||||
|
||||
return job_list, new_cursor
|
||||
|
||||
def _build_filters(self):
|
||||
"""
|
||||
Builds the filters dict for job type/is_remote. If hours_old is provided, it takes precedence and the composite job_type/is_remote filter cannot be applied.
|
||||
IndeedApply: filters: { keyword: { field: "indeedApplyScope", keys: ["DESKTOP"] } }
|
||||
"""
|
||||
filters_str = ""
|
||||
if self.scraper_input.hours_old:
|
||||
filters_str = """
|
||||
filters: {{
|
||||
date: {{
|
||||
field: "dateOnIndeed",
|
||||
start: "{start}h"
|
||||
}}
|
||||
}}
|
||||
""".format(
|
||||
start=self.scraper_input.hours_old
|
||||
)
|
||||
elif self.scraper_input.easy_apply:
|
||||
filters_str = """
|
||||
filters: {
|
||||
keyword: {
|
||||
field: "indeedApplyScope",
|
||||
keys: ["DESKTOP"]
|
||||
}
|
||||
}
|
||||
"""
|
||||
elif self.scraper_input.job_type or self.scraper_input.is_remote:
|
||||
job_type_key_mapping = {
|
||||
JobType.FULL_TIME: "CF3CP",
|
||||
JobType.PART_TIME: "75GKK",
|
||||
JobType.CONTRACT: "NJXCK",
|
||||
JobType.INTERNSHIP: "VDTG7",
|
||||
}
|
||||
|
||||
keys = []
|
||||
if self.scraper_input.job_type:
|
||||
key = job_type_key_mapping[self.scraper_input.job_type]
|
||||
keys.append(key)
|
||||
|
||||
if self.scraper_input.is_remote:
|
||||
keys.append("DSQF7")
|
||||
|
||||
if keys:
|
||||
keys_str = '", "'.join(keys)
|
||||
filters_str = f"""
|
||||
filters: {{
|
||||
composite: {{
|
||||
filters: [{{
|
||||
keyword: {{
|
||||
field: "attributes",
|
||||
keys: ["{keys_str}"]
|
||||
}}
|
||||
}}]
|
||||
}}
|
||||
}}
|
||||
"""
|
||||
return filters_str
|
||||
|
||||
def _process_job(self, job: dict) -> JobPost | None:
|
||||
"""
|
||||
Parses the job dict into JobPost model
|
||||
:param job: dict to parse
|
||||
:return: JobPost if it's a new job
|
||||
"""
|
||||
job_url = f'{self.base_url}/viewjob?jk={job["key"]}'
|
||||
if job_url in self.seen_urls:
|
||||
return
|
||||
self.seen_urls.add(job_url)
|
||||
description = job["description"]["html"]
|
||||
if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
|
||||
description = markdown_converter(description)
|
||||
|
||||
job_type = get_job_type(job["attributes"])
|
||||
timestamp_seconds = job["datePublished"] / 1000
|
||||
date_posted = datetime.fromtimestamp(timestamp_seconds).strftime("%Y-%m-%d")
|
||||
employer = job["employer"].get("dossier") if job["employer"] else None
|
||||
employer_details = employer.get("employerDetails", {}) if employer else {}
|
||||
rel_url = job["employer"]["relativeCompanyPageUrl"] if job["employer"] else None
|
||||
return JobPost(
|
||||
id=f'in-{job["key"]}',
|
||||
title=job["title"],
|
||||
description=description,
|
||||
company_name=job["employer"].get("name") if job.get("employer") else None,
|
||||
company_url=(f"{self.base_url}{rel_url}" if job["employer"] else None),
|
||||
company_url_direct=(
|
||||
employer["links"]["corporateWebsite"] if employer else None
|
||||
),
|
||||
location=Location(
|
||||
city=job.get("location", {}).get("city"),
|
||||
state=job.get("location", {}).get("admin1Code"),
|
||||
country=job.get("location", {}).get("countryCode"),
|
||||
),
|
||||
job_type=job_type,
|
||||
compensation=get_compensation(job["compensation"]),
|
||||
date_posted=date_posted,
|
||||
job_url=job_url,
|
||||
job_url_direct=(
|
||||
job["recruit"].get("viewJobUrl") if job.get("recruit") else None
|
||||
),
|
||||
emails=extract_emails_from_text(description) if description else None,
|
||||
is_remote=is_job_remote(job, description),
|
||||
company_addresses=(
|
||||
employer_details["addresses"][0]
|
||||
if employer_details.get("addresses")
|
||||
else None
|
||||
),
|
||||
company_industry=(
|
||||
employer_details["industry"]
|
||||
.replace("Iv1", "")
|
||||
.replace("_", " ")
|
||||
.title()
|
||||
.strip()
|
||||
if employer_details.get("industry")
|
||||
else None
|
||||
),
|
||||
company_num_employees=employer_details.get("employeesLocalizedLabel"),
|
||||
company_revenue=employer_details.get("revenueLocalizedLabel"),
|
||||
company_description=employer_details.get("briefDescription"),
|
||||
company_logo=(
|
||||
employer["images"].get("squareLogoUrl")
|
||||
if employer and employer.get("images")
|
||||
else None
|
||||
),
|
||||
)
|
|
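Stripped of the class machinery, the request `_scrape_page` issues is a plain GraphQL POST. A rough sketch using `requests` directly (hypothetical search term; no proxies, retries, or error handling):

```python
import requests

from jobspy.indeed.constant import api_headers, job_search_query

headers = api_headers.copy()
headers["indeed-co"] = "US"  # API country code, e.g. Country.USA.indeed_domain_value[1]

query = job_search_query.format(
    what='what: "python developer"',
    location='location: {where: "Austin, TX", radius: 50, radiusUnit: MILES}',
    cursor="",
    filters="",
)
resp = requests.post(
    "https://apis.indeed.com/graphql",
    headers=headers,
    json={"query": query},
    timeout=10,
)
job_search = resp.json()["data"]["jobSearch"]
results, next_cursor = job_search["results"], job_search["pageInfo"]["nextCursor"]
```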
@ -0,0 +1,109 @@
|
|||
job_search_query = """
|
||||
query GetJobData {{
|
||||
jobSearch(
|
||||
{what}
|
||||
{location}
|
||||
limit: 100
|
||||
{cursor}
|
||||
sort: RELEVANCE
|
||||
{filters}
|
||||
) {{
|
||||
pageInfo {{
|
||||
nextCursor
|
||||
}}
|
||||
results {{
|
||||
trackingKey
|
||||
job {{
|
||||
source {{
|
||||
name
|
||||
}}
|
||||
key
|
||||
title
|
||||
datePublished
|
||||
dateOnIndeed
|
||||
description {{
|
||||
html
|
||||
}}
|
||||
location {{
|
||||
countryName
|
||||
countryCode
|
||||
admin1Code
|
||||
city
|
||||
postalCode
|
||||
streetAddress
|
||||
formatted {{
|
||||
short
|
||||
long
|
||||
}}
|
||||
}}
|
||||
compensation {{
|
||||
estimated {{
|
||||
currencyCode
|
||||
baseSalary {{
|
||||
unitOfWork
|
||||
range {{
|
||||
... on Range {{
|
||||
min
|
||||
max
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
baseSalary {{
|
||||
unitOfWork
|
||||
range {{
|
||||
... on Range {{
|
||||
min
|
||||
max
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
currencyCode
|
||||
}}
|
||||
attributes {{
|
||||
key
|
||||
label
|
||||
}}
|
||||
employer {{
|
||||
relativeCompanyPageUrl
|
||||
name
|
||||
dossier {{
|
||||
employerDetails {{
|
||||
addresses
|
||||
industry
|
||||
employeesLocalizedLabel
|
||||
revenueLocalizedLabel
|
||||
briefDescription
|
||||
ceoName
|
||||
ceoPhotoUrl
|
||||
}}
|
||||
images {{
|
||||
headerImageUrl
|
||||
squareLogoUrl
|
||||
}}
|
||||
links {{
|
||||
corporateWebsite
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
recruit {{
|
||||
viewJobUrl
|
||||
detailedSalary
|
||||
workSchedule
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
"""
|
||||
|
||||
api_headers = {
|
||||
"Host": "apis.indeed.com",
|
||||
"content-type": "application/json",
|
||||
"indeed-api-key": "161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8",
|
||||
"accept": "application/json",
|
||||
"indeed-locale": "en-US",
|
||||
"accept-language": "en-US,en;q=0.9",
|
||||
"user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1",
|
||||
"indeed-app-info": "appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone",
|
||||
}
|
|
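When `_build_filters` (shown earlier) takes its composite branch for a full-time, remote search, the `{filters}` placeholder in `job_search_query` above ends up holding a fragment equivalent to the following (attribute keys taken from the mapping in the scraper):

```python
filters_fragment = """
filters: {
  composite: {
    filters: [{
      keyword: {
        field: "attributes",
        keys: ["CF3CP", "DSQF7"]
      }
    }]
  }
}
"""
```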
@ -0,0 +1,83 @@
from jobspy.model import CompensationInterval, JobType, Compensation
from jobspy.util import get_enum_from_job_type


def get_job_type(attributes: list) -> list[JobType]:
    """
    Parses the attributes to get list of job types
    :param attributes:
    :return: list of JobType
    """
    job_types: list[JobType] = []
    for attribute in attributes:
        job_type_str = attribute["label"].replace("-", "").replace(" ", "").lower()
        job_type = get_enum_from_job_type(job_type_str)
        if job_type:
            job_types.append(job_type)
    return job_types


def get_compensation(compensation: dict) -> Compensation | None:
    """
    Parses the job to get compensation
    :param compensation:
    :return: compensation object
    """
    if not compensation["baseSalary"] and not compensation["estimated"]:
        return None
    comp = (
        compensation["baseSalary"]
        if compensation["baseSalary"]
        else compensation["estimated"]["baseSalary"]
    )
    if not comp:
        return None
    interval = get_compensation_interval(comp["unitOfWork"])
    if not interval:
        return None
    min_range = comp["range"].get("min")
    max_range = comp["range"].get("max")
    return Compensation(
        interval=interval,
        min_amount=int(min_range) if min_range is not None else None,
        max_amount=int(max_range) if max_range is not None else None,
        currency=(
            compensation["estimated"]["currencyCode"]
            if compensation["estimated"]
            else compensation["currencyCode"]
        ),
    )


def is_job_remote(job: dict, description: str) -> bool:
    """
    Searches the description, location, and attributes to check if job is remote
    """
    remote_keywords = ["remote", "work from home", "wfh"]
    is_remote_in_attributes = any(
        any(keyword in attr["label"].lower() for keyword in remote_keywords)
        for attr in job["attributes"]
    )
    is_remote_in_description = any(
        keyword in description.lower() for keyword in remote_keywords
    )
    is_remote_in_location = any(
        keyword in job["location"]["formatted"]["long"].lower()
        for keyword in remote_keywords
    )
    return is_remote_in_attributes or is_remote_in_description or is_remote_in_location


def get_compensation_interval(interval: str) -> CompensationInterval:
    interval_mapping = {
        "DAY": "DAILY",
        "YEAR": "YEARLY",
        "HOUR": "HOURLY",
        "WEEK": "WEEKLY",
        "MONTH": "MONTHLY",
    }
    mapped_interval = interval_mapping.get(interval.upper(), None)
    if mapped_interval and mapped_interval in CompensationInterval.__members__:
        return CompensationInterval[mapped_interval]
    else:
        raise ValueError(f"Unsupported interval: {interval}")
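A quick worked example of `get_compensation` on the shape the GraphQL API returns (values made up):

```python
compensation = {
    "baseSalary": {"unitOfWork": "YEAR", "range": {"min": 90000, "max": 120000}},
    "estimated": None,
    "currencyCode": "USD",
}

comp = get_compensation(compensation)
print(comp.interval, comp.min_amount, comp.max_amount, comp.currency)
# CompensationInterval.YEARLY 90000 120000 USD
```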
@ -0,0 +1,339 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import random
|
||||
import time
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
from urllib.parse import urlparse, urlunparse, unquote
|
||||
|
||||
import regex as re
|
||||
from bs4 import BeautifulSoup
|
||||
from bs4.element import Tag
|
||||
|
||||
from jobspy.exception import LinkedInException
|
||||
from jobspy.linkedin.constant import headers
|
||||
from jobspy.linkedin.util import (
|
||||
is_job_remote,
|
||||
job_type_code,
|
||||
parse_job_type,
|
||||
parse_job_level,
|
||||
parse_company_industry
|
||||
)
|
||||
from jobspy.model import (
|
||||
JobPost,
|
||||
Location,
|
||||
JobResponse,
|
||||
Country,
|
||||
Compensation,
|
||||
DescriptionFormat,
|
||||
Scraper,
|
||||
ScraperInput,
|
||||
Site,
|
||||
)
|
||||
from jobspy.util import (
|
||||
extract_emails_from_text,
|
||||
currency_parser,
|
||||
markdown_converter,
|
||||
create_session,
|
||||
remove_attributes,
|
||||
create_logger,
|
||||
)
|
||||
|
||||
log = create_logger("LinkedIn")
|
||||
|
||||
|
||||
class LinkedIn(Scraper):
|
||||
base_url = "https://www.linkedin.com"
|
||||
delay = 3
|
||||
band_delay = 4
|
||||
jobs_per_page = 25
|
||||
|
||||
def __init__(
|
||||
self, proxies: list[str] | str | None = None, ca_cert: str | None = None
|
||||
):
|
||||
"""
|
||||
Initializes LinkedInScraper with the LinkedIn job search url
|
||||
"""
|
||||
super().__init__(Site.LINKEDIN, proxies=proxies, ca_cert=ca_cert)
|
||||
self.session = create_session(
|
||||
proxies=self.proxies,
|
||||
ca_cert=ca_cert,
|
||||
is_tls=False,
|
||||
has_retry=True,
|
||||
delay=5,
|
||||
clear_cookies=True,
|
||||
)
|
||||
self.session.headers.update(headers)
|
||||
self.scraper_input = None
|
||||
self.country = "worldwide"
|
||||
self.job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')
|
||||
|
||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
||||
"""
|
||||
Scrapes LinkedIn for jobs with scraper_input criteria
|
||||
:param scraper_input:
|
||||
:return: job_response
|
||||
"""
|
||||
self.scraper_input = scraper_input
|
||||
job_list: list[JobPost] = []
|
||||
seen_ids = set()
|
||||
start = scraper_input.offset // 10 * 10 if scraper_input.offset else 0
|
||||
request_count = 0
|
||||
seconds_old = (
|
||||
scraper_input.hours_old * 3600 if scraper_input.hours_old else None
|
||||
)
|
||||
continue_search = (
|
||||
lambda: len(job_list) < scraper_input.results_wanted and start < 1000
|
||||
)
|
||||
while continue_search():
|
||||
request_count += 1
|
||||
log.info(
|
||||
f"search page: {request_count} / {math.ceil(scraper_input.results_wanted / 10)}"
|
||||
)
|
||||
params = {
|
||||
"keywords": scraper_input.search_term,
|
||||
"location": scraper_input.location,
|
||||
"distance": scraper_input.distance,
|
||||
"f_WT": 2 if scraper_input.is_remote else None,
|
||||
"f_JT": (
|
||||
job_type_code(scraper_input.job_type)
|
||||
if scraper_input.job_type
|
||||
else None
|
||||
),
|
||||
"pageNum": 0,
|
||||
"start": start,
|
||||
"f_AL": "true" if scraper_input.easy_apply else None,
|
||||
"f_C": (
|
||||
",".join(map(str, scraper_input.linkedin_company_ids))
|
||||
if scraper_input.linkedin_company_ids
|
||||
else None
|
||||
),
|
||||
}
|
||||
if seconds_old is not None:
|
||||
params["f_TPR"] = f"r{seconds_old}"
|
||||
|
||||
params = {k: v for k, v in params.items() if v is not None}
|
||||
try:
|
||||
response = self.session.get(
|
||||
f"{self.base_url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
|
||||
params=params,
|
||||
timeout=10,
|
||||
)
|
||||
if response.status_code not in range(200, 400):
|
||||
if response.status_code == 429:
|
||||
err = (
|
||||
f"429 Response - Blocked by LinkedIn for too many requests"
|
||||
)
|
||||
else:
|
||||
err = f"LinkedIn response status code {response.status_code}"
|
||||
err += f" - {response.text}"
|
||||
log.error(err)
|
||||
return JobResponse(jobs=job_list)
|
||||
except Exception as e:
|
||||
if "Proxy responded with" in str(e):
|
||||
log.error(f"LinkedIn: Bad proxy")
|
||||
else:
|
||||
log.error(f"LinkedIn: {str(e)}")
|
||||
return JobResponse(jobs=job_list)
|
||||
|
||||
soup = BeautifulSoup(response.text, "html.parser")
|
||||
job_cards = soup.find_all("div", class_="base-search-card")
|
||||
if len(job_cards) == 0:
|
||||
return JobResponse(jobs=job_list)
|
||||
|
||||
for job_card in job_cards:
|
||||
href_tag = job_card.find("a", class_="base-card__full-link")
|
||||
if href_tag and "href" in href_tag.attrs:
|
||||
href = href_tag.attrs["href"].split("?")[0]
|
||||
job_id = href.split("-")[-1]
|
||||
|
||||
if job_id in seen_ids:
|
||||
continue
|
||||
seen_ids.add(job_id)
|
||||
|
||||
try:
|
||||
fetch_desc = scraper_input.linkedin_fetch_description
|
||||
job_post = self._process_job(job_card, job_id, fetch_desc)
|
||||
if job_post:
|
||||
job_list.append(job_post)
|
||||
if not continue_search():
|
||||
break
|
||||
except Exception as e:
|
||||
raise LinkedInException(str(e))
|
||||
|
||||
if continue_search():
|
||||
time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
|
||||
start += len(job_list)
|
||||
|
||||
job_list = job_list[: scraper_input.results_wanted]
|
||||
return JobResponse(jobs=job_list)
|
||||
|
||||
def _process_job(
|
||||
self, job_card: Tag, job_id: str, full_descr: bool
|
||||
) -> Optional[JobPost]:
|
||||
salary_tag = job_card.find("span", class_="job-search-card__salary-info")
|
||||
|
||||
compensation = description = None
|
||||
if salary_tag:
|
||||
salary_text = salary_tag.get_text(separator=" ").strip()
|
||||
salary_values = [currency_parser(value) for value in salary_text.split("-")]
|
||||
salary_min = salary_values[0]
|
||||
salary_max = salary_values[1]
|
||||
currency = salary_text[0] if salary_text[0] != "$" else "USD"
|
||||
|
||||
compensation = Compensation(
|
||||
min_amount=int(salary_min),
|
||||
max_amount=int(salary_max),
|
||||
currency=currency,
|
||||
)
|
||||
|
||||
title_tag = job_card.find("span", class_="sr-only")
|
||||
title = title_tag.get_text(strip=True) if title_tag else "N/A"
|
||||
|
||||
company_tag = job_card.find("h4", class_="base-search-card__subtitle")
|
||||
company_a_tag = company_tag.find("a") if company_tag else None
|
||||
company_url = (
|
||||
urlunparse(urlparse(company_a_tag.get("href"))._replace(query=""))
|
||||
if company_a_tag and company_a_tag.has_attr("href")
|
||||
else ""
|
||||
)
|
||||
company = company_a_tag.get_text(strip=True) if company_a_tag else "N/A"
|
||||
|
||||
metadata_card = job_card.find("div", class_="base-search-card__metadata")
|
||||
location = self._get_location(metadata_card)
|
||||
|
||||
datetime_tag = (
|
||||
metadata_card.find("time", class_="job-search-card__listdate")
|
||||
if metadata_card
|
||||
else None
|
||||
)
|
||||
date_posted = None
|
||||
if datetime_tag and "datetime" in datetime_tag.attrs:
|
||||
datetime_str = datetime_tag["datetime"]
|
||||
try:
|
||||
date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
|
||||
except:
|
||||
date_posted = None
|
||||
job_details = {}
|
||||
if full_descr:
|
||||
job_details = self._get_job_details(job_id)
|
||||
description = job_details.get("description")
|
||||
is_remote = is_job_remote(title, description, location)
|
||||
|
||||
return JobPost(
|
||||
id=f"li-{job_id}",
|
||||
title=title,
|
||||
company_name=company,
|
||||
company_url=company_url,
|
||||
location=location,
|
||||
is_remote=is_remote,
|
||||
date_posted=date_posted,
|
||||
job_url=f"{self.base_url}/jobs/view/{job_id}",
|
||||
compensation=compensation,
|
||||
job_type=job_details.get("job_type"),
|
||||
job_level=(job_details.get("job_level") or "").lower(),
|
||||
company_industry=job_details.get("company_industry"),
|
||||
description=job_details.get("description"),
|
||||
job_url_direct=job_details.get("job_url_direct"),
|
||||
emails=extract_emails_from_text(description),
|
||||
company_logo=job_details.get("company_logo"),
|
||||
job_function=job_details.get("job_function"),
|
||||
)
|
||||
|
||||
def _get_job_details(self, job_id: str) -> dict:
|
||||
"""
|
||||
Retrieves job description and other job details by going to the job page url
|
||||
:param job_id:
|
||||
:return: dict
|
||||
"""
|
||||
try:
|
||||
response = self.session.get(
|
||||
f"{self.base_url}/jobs/view/{job_id}", timeout=5
|
||||
)
|
||||
response.raise_for_status()
|
||||
except:
|
||||
return {}
|
||||
if "linkedin.com/signup" in response.url:
|
||||
return {}
|
||||
|
||||
soup = BeautifulSoup(response.text, "html.parser")
|
||||
div_content = soup.find(
|
||||
"div", class_=lambda x: x and "show-more-less-html__markup" in x
|
||||
)
|
||||
description = None
|
||||
if div_content is not None:
|
||||
div_content = remove_attributes(div_content)
|
||||
description = div_content.prettify(formatter="html")
|
||||
if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
|
||||
description = markdown_converter(description)
|
||||
|
||||
h3_tag = soup.find(
|
||||
"h3", text=lambda text: text and "Job function" in text.strip()
|
||||
)
|
||||
|
||||
job_function = None
|
||||
if h3_tag:
|
||||
job_function_span = h3_tag.find_next(
|
||||
"span", class_="description__job-criteria-text"
|
||||
)
|
||||
if job_function_span:
|
||||
job_function = job_function_span.text.strip()
|
||||
|
||||
company_logo = (
|
||||
logo_image.get("data-delayed-url")
|
||||
if (logo_image := soup.find("img", {"class": "artdeco-entity-image"}))
|
||||
else None
|
||||
)
|
||||
return {
|
||||
"description": description,
|
||||
"job_level": parse_job_level(soup),
|
||||
"company_industry": parse_company_industry(soup),
|
||||
"job_type": parse_job_type(soup),
|
||||
"job_url_direct": self._parse_job_url_direct(soup),
|
||||
"company_logo": company_logo,
|
||||
"job_function": job_function,
|
||||
}
|
||||
|
||||
def _get_location(self, metadata_card: Optional[Tag]) -> Location:
|
||||
"""
|
||||
Extracts the location data from the job metadata card.
|
||||
:param metadata_card
|
||||
:return: location
|
||||
"""
|
||||
location = Location(country=Country.from_string(self.country))
|
||||
if metadata_card is not None:
|
||||
location_tag = metadata_card.find(
|
||||
"span", class_="job-search-card__location"
|
||||
)
|
||||
location_string = location_tag.text.strip() if location_tag else "N/A"
|
||||
parts = location_string.split(", ")
|
||||
if len(parts) == 2:
|
||||
city, state = parts
|
||||
location = Location(
|
||||
city=city,
|
||||
state=state,
|
||||
country=Country.from_string(self.country),
|
||||
)
|
||||
elif len(parts) == 3:
|
||||
city, state, country = parts
|
||||
country = Country.from_string(country)
|
||||
location = Location(city=city, state=state, country=country)
|
||||
return location
|
||||
|
||||
def _parse_job_url_direct(self, soup: BeautifulSoup) -> str | None:
|
||||
"""
|
||||
Gets the job url direct from job page
|
||||
:param soup:
|
||||
:return: str
|
||||
"""
|
||||
job_url_direct = None
|
||||
job_url_direct_content = soup.find("code", id="applyUrl")
|
||||
if job_url_direct_content:
|
||||
job_url_direct_match = self.job_url_direct_regex.search(
|
||||
job_url_direct_content.decode_contents().strip()
|
||||
)
|
||||
if job_url_direct_match:
|
||||
job_url_direct = unquote(job_url_direct_match.group())
|
||||
|
||||
return job_url_direct
|
|
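Outside the class, the guest search endpoint used by `scrape` can be exercised in a few lines. A sketch with a hypothetical query, using `requests` directly rather than the library's `create_session`:

```python
import requests
from bs4 import BeautifulSoup

from jobspy.linkedin.constant import headers

params = {
    "keywords": "data engineer",
    "location": "Berlin",
    "start": 0,
    "pageNum": 0,
    "f_TPR": "r86400",  # only postings from the last 24 hours (hours_old=24)
}
resp = requests.get(
    "https://www.linkedin.com/jobs-guest/jobs/api/seeMoreJobPostings/search?",
    headers=headers,
    params=params,
    timeout=10,
)
soup = BeautifulSoup(resp.text, "html.parser")
for card in soup.find_all("div", class_="base-search-card"):
    link = card.find("a", class_="base-card__full-link")
    if link and "href" in link.attrs:
        print(link.attrs["href"].split("?")[0].split("-")[-1])  # job id
```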
@ -0,0 +1,8 @@
headers = {
    "authority": "www.linkedin.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "cache-control": "max-age=0",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
}
@ -0,0 +1,96 @@
from bs4 import BeautifulSoup

from jobspy.model import JobType, Location
from jobspy.util import get_enum_from_job_type


def job_type_code(job_type_enum: JobType) -> str:
    return {
        JobType.FULL_TIME: "F",
        JobType.PART_TIME: "P",
        JobType.INTERNSHIP: "I",
        JobType.CONTRACT: "C",
        JobType.TEMPORARY: "T",
    }.get(job_type_enum, "")


def parse_job_type(soup_job_type: BeautifulSoup) -> list[JobType] | None:
    """
    Gets the job type from the job page
    :param soup_job_type:
    :return: list of JobType
    """
    h3_tag = soup_job_type.find(
        "h3",
        class_="description__job-criteria-subheader",
        string=lambda text: "Employment type" in text,
    )
    employment_type = None
    if h3_tag:
        employment_type_span = h3_tag.find_next_sibling(
            "span",
            class_="description__job-criteria-text description__job-criteria-text--criteria",
        )
        if employment_type_span:
            employment_type = employment_type_span.get_text(strip=True)
            employment_type = employment_type.lower()
            employment_type = employment_type.replace("-", "")

    return [get_enum_from_job_type(employment_type)] if employment_type else []


def parse_job_level(soup_job_level: BeautifulSoup) -> str | None:
    """
    Gets the job level from the job page
    :param soup_job_level:
    :return: str
    """
    h3_tag = soup_job_level.find(
        "h3",
        class_="description__job-criteria-subheader",
        string=lambda text: "Seniority level" in text,
    )
    job_level = None
    if h3_tag:
        job_level_span = h3_tag.find_next_sibling(
            "span",
            class_="description__job-criteria-text description__job-criteria-text--criteria",
        )
        if job_level_span:
            job_level = job_level_span.get_text(strip=True)

    return job_level


def parse_company_industry(soup_industry: BeautifulSoup) -> str | None:
    """
    Gets the company industry from the job page
    :param soup_industry:
    :return: str
    """
    h3_tag = soup_industry.find(
        "h3",
        class_="description__job-criteria-subheader",
        string=lambda text: "Industries" in text,
    )
    industry = None
    if h3_tag:
        industry_span = h3_tag.find_next_sibling(
            "span",
            class_="description__job-criteria-text description__job-criteria-text--criteria",
        )
        if industry_span:
            industry = industry_span.get_text(strip=True)

    return industry


def is_job_remote(title: str, description: str, location: Location) -> bool:
    """
    Searches the title, location, and description to check if the job is remote
    """
    remote_keywords = ["remote", "work from home", "wfh"]
    location = location.display_location()
    full_string = f"{title} {description} {location}".lower()
    is_remote = any(keyword in full_string for keyword in remote_keywords)
    return is_remote
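For example, `job_type_code` is what turns a `JobType` filter into the `f_JT` query parameter used in `scrape`:

```python
from jobspy.model import JobType

print(job_type_code(JobType.FULL_TIME))  # F
print(job_type_code(JobType.CONTRACT))   # C
```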
@ -1,7 +1,10 @@
|
|||
from typing import Union, Optional
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Optional
|
||||
from datetime import date
|
||||
from enum import Enum
|
||||
from pydantic import BaseModel, validator
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class JobType(Enum):
|
||||
|
@ -57,7 +60,7 @@ class JobType(Enum):
|
|||
class Country(Enum):
|
||||
"""
|
||||
Gets the subdomain for Indeed and Glassdoor.
|
||||
The second item in the tuple is the subdomain for Indeed
|
||||
The second item in the tuple is the subdomain (and API country code if there's a ':' separator) for Indeed
|
||||
The third item in the tuple is the subdomain (and tld if there's a ':' separator) for Glassdoor
|
||||
"""
|
||||
|
||||
|
@ -66,16 +69,20 @@ class Country(Enum):
|
|||
AUSTRIA = ("austria", "at", "at")
|
||||
BAHRAIN = ("bahrain", "bh")
|
||||
BELGIUM = ("belgium", "be", "fr:be")
|
||||
BULGARIA = ("bulgaria", "bg")
|
||||
BRAZIL = ("brazil", "br", "com.br")
|
||||
CANADA = ("canada", "ca", "ca")
|
||||
CHILE = ("chile", "cl")
|
||||
CHINA = ("china", "cn")
|
||||
COLOMBIA = ("colombia", "co")
|
||||
COSTARICA = ("costa rica", "cr")
|
||||
CROATIA = ("croatia", "hr")
|
||||
CYPRUS = ("cyprus", "cy")
|
||||
CZECHREPUBLIC = ("czech republic,czechia", "cz")
|
||||
DENMARK = ("denmark", "dk")
|
||||
ECUADOR = ("ecuador", "ec")
|
||||
EGYPT = ("egypt", "eg")
|
||||
ESTONIA = ("estonia", "ee")
|
||||
FINLAND = ("finland", "fi")
|
||||
FRANCE = ("france", "fr", "fr")
|
||||
GERMANY = ("germany", "de", "de")
|
||||
|
@ -89,8 +96,11 @@ class Country(Enum):
|
|||
ITALY = ("italy", "it", "it")
|
||||
JAPAN = ("japan", "jp")
|
||||
KUWAIT = ("kuwait", "kw")
|
||||
LATVIA = ("latvia", "lv")
|
||||
LITHUANIA = ("lithuania", "lt")
|
||||
LUXEMBOURG = ("luxembourg", "lu")
|
||||
MALAYSIA = ("malaysia", "malaysia")
|
||||
MALAYSIA = ("malaysia", "malaysia:my", "com")
|
||||
MALTA = ("malta", "malta:mt", "mt")
|
||||
MEXICO = ("mexico", "mx", "com.mx")
|
||||
MOROCCO = ("morocco", "ma")
|
||||
NETHERLANDS = ("netherlands", "nl", "nl")
|
||||
|
@ -108,6 +118,8 @@ class Country(Enum):
|
|||
ROMANIA = ("romania", "ro")
|
||||
SAUDIARABIA = ("saudi arabia", "sa")
|
||||
SINGAPORE = ("singapore", "sg", "sg")
|
||||
SLOVAKIA = ("slovakia", "sk")
|
||||
SLOVENIA = ("slovenia", "sl")
|
||||
SOUTHAFRICA = ("south africa", "za")
|
||||
SOUTHKOREA = ("south korea", "kr")
|
||||
SPAIN = ("spain", "es", "es")
|
||||
|
@ -115,14 +127,14 @@ class Country(Enum):
|
|||
SWITZERLAND = ("switzerland", "ch", "de:ch")
|
||||
TAIWAN = ("taiwan", "tw")
|
||||
THAILAND = ("thailand", "th")
|
||||
TURKEY = ("turkey", "tr")
|
||||
TURKEY = ("türkiye,turkey", "tr")
|
||||
UKRAINE = ("ukraine", "ua")
|
||||
UNITEDARABEMIRATES = ("united arab emirates", "ae")
|
||||
UK = ("uk,united kingdom", "uk", "co.uk")
|
||||
USA = ("usa,us,united states", "www", "com")
|
||||
UK = ("uk,united kingdom", "uk:gb", "co.uk")
|
||||
USA = ("usa,us,united states", "www:us", "com")
|
||||
URUGUAY = ("uruguay", "uy")
|
||||
VENEZUELA = ("venezuela", "ve")
|
||||
VIETNAM = ("vietnam", "vn")
|
||||
VIETNAM = ("vietnam", "vn", "com")
|
||||
|
||||
# internal for ziprecruiter
|
||||
US_CANADA = ("usa/ca", "www")
|
||||
|
@ -132,7 +144,10 @@ class Country(Enum):
|
|||
|
||||
@property
|
||||
def indeed_domain_value(self):
|
||||
return self.value[1]
|
||||
subdomain, _, api_country_code = self.value[1].partition(":")
|
||||
if subdomain and api_country_code:
|
||||
return subdomain, api_country_code.upper()
|
||||
return self.value[1], self.value[1].upper()
|
||||
|
||||
@property
|
||||
def glassdoor_domain_value(self):
|
||||
|
@ -145,7 +160,7 @@ class Country(Enum):
|
|||
else:
|
||||
raise Exception(f"Glassdoor is not available for {self.name}")
|
||||
|
||||
def get_url(self):
|
||||
def get_glassdoor_url(self):
|
||||
return f"https://{self.glassdoor_domain_value}/"
|
||||
|
||||
@classmethod
|
||||
|
@ -153,7 +168,7 @@ class Country(Enum):
|
|||
"""Convert a string to the corresponding Country enum."""
|
||||
country_str = country_str.strip().lower()
|
||||
for country in cls:
|
||||
country_names = country.value[0].split(',')
|
||||
country_names = country.value[0].split(",")
|
||||
if country_str in country_names:
|
||||
return country
|
||||
valid_countries = [country.value for country in cls]
|
||||
|
@ -163,7 +178,7 @@ class Country(Enum):
|
|||
|
||||
|
||||
class Location(BaseModel):
|
||||
country: Country | None = None
|
||||
country: Country | str | None = None
|
||||
city: Optional[str] = None
|
||||
state: Optional[str] = None
|
||||
|
||||
|
@ -173,7 +188,12 @@ class Location(BaseModel):
|
|||
location_parts.append(self.city)
|
||||
if self.state:
|
||||
location_parts.append(self.state)
|
||||
if self.country and self.country not in (Country.US_CANADA, Country.WORLDWIDE):
|
||||
if isinstance(self.country, str):
|
||||
location_parts.append(self.country)
|
||||
elif self.country and self.country not in (
|
||||
Country.US_CANADA,
|
||||
Country.WORLDWIDE,
|
||||
):
|
||||
country_name = self.country.value[0]
|
||||
if "," in country_name:
|
||||
country_name = country_name.split(",")[0]
|
||||
|
@ -193,34 +213,118 @@ class CompensationInterval(Enum):
|
|||
|
||||
@classmethod
|
||||
def get_interval(cls, pay_period):
|
||||
return cls[pay_period].value if pay_period in cls.__members__ else None
|
||||
interval_mapping = {
|
||||
"YEAR": cls.YEARLY,
|
||||
"HOUR": cls.HOURLY,
|
||||
}
|
||||
if pay_period in interval_mapping:
|
||||
return interval_mapping[pay_period].value
|
||||
else:
|
||||
return cls[pay_period].value if pay_period in cls.__members__ else None
|
||||
|
||||
|
||||
class Compensation(BaseModel):
|
||||
interval: Optional[CompensationInterval] = None
|
||||
min_amount: int | None = None
|
||||
max_amount: int | None = None
|
||||
min_amount: float | None = None
|
||||
max_amount: float | None = None
|
||||
currency: Optional[str] = "USD"
|
||||
|
||||
|
||||
class DescriptionFormat(Enum):
|
||||
MARKDOWN = "markdown"
|
||||
HTML = "html"
|
||||
|
||||
|
||||
class JobPost(BaseModel):
|
||||
id: str | None = None
|
||||
title: str
|
||||
company_name: str
|
||||
company_name: str | None
|
||||
job_url: str
|
||||
job_url_direct: str | None = None
|
||||
location: Optional[Location]
|
||||
|
||||
description: str | None = None
|
||||
company_url: str | None = None
|
||||
company_url_direct: str | None = None
|
||||
|
||||
job_type: list[JobType] | None = None
|
||||
compensation: Compensation | None = None
|
||||
date_posted: date | None = None
|
||||
benefits: str | None = None
|
||||
emails: list[str] | None = None
|
||||
num_urgent_words: int | None = None
|
||||
is_remote: bool | None = None
|
||||
# company_industry: str | None = None
|
||||
listing_type: str | None = None
|
||||
|
||||
# LinkedIn specific
|
||||
job_level: str | None = None
|
||||
|
||||
# LinkedIn and Indeed specific
|
||||
company_industry: str | None = None
|
||||
|
||||
# Indeed specific
|
||||
company_addresses: str | None = None
|
||||
company_num_employees: str | None = None
|
||||
company_revenue: str | None = None
|
||||
company_description: str | None = None
|
||||
company_logo: str | None = None
|
||||
banner_photo_url: str | None = None
|
||||
|
||||
# LinkedIn only atm
|
||||
job_function: str | None = None
|
||||
|
||||
# Naukri specific
|
||||
skills: list[str] | None = None #from tagsAndSkills
|
||||
experience_range: str | None = None #from experienceText
|
||||
company_rating: float | None = None #from ambitionBoxData.AggregateRating
|
||||
company_reviews_count: int | None = None #from ambitionBoxData.ReviewsCount
|
||||
vacancy_count: int | None = None #from vacancy
|
||||
work_from_home_type: str | None = None #from clusters.wfhType (e.g., "Hybrid", "Remote")
|
||||
|
||||
class JobResponse(BaseModel):
|
||||
jobs: list[JobPost] = []
|
||||
|
||||
|
||||
class Site(Enum):
|
||||
LINKEDIN = "linkedin"
|
||||
INDEED = "indeed"
|
||||
ZIP_RECRUITER = "zip_recruiter"
|
||||
GLASSDOOR = "glassdoor"
|
||||
GOOGLE = "google"
|
||||
BAYT = "bayt"
|
||||
NAUKRI = "naukri"
|
||||
|
||||
|
||||
class SalarySource(Enum):
|
||||
DIRECT_DATA = "direct_data"
|
||||
DESCRIPTION = "description"
|
||||
|
||||
|
||||
class ScraperInput(BaseModel):
|
||||
site_type: list[Site]
|
||||
search_term: str | None = None
|
||||
google_search_term: str | None = None
|
||||
|
||||
location: str | None = None
|
||||
country: Country | None = Country.USA
|
||||
distance: int | None = None
|
||||
is_remote: bool = False
|
||||
job_type: JobType | None = None
|
||||
easy_apply: bool | None = None
|
||||
offset: int = 0
|
||||
linkedin_fetch_description: bool = False
|
||||
linkedin_company_ids: list[int] | None = None
|
||||
description_format: DescriptionFormat | None = DescriptionFormat.MARKDOWN
|
||||
|
||||
results_wanted: int = 15
|
||||
hours_old: int | None = None
|
||||
|
||||
|
||||
class Scraper(ABC):
|
||||
def __init__(
|
||||
self, site: Site, proxies: list[str] | None = None, ca_cert: str | None = None
|
||||
):
|
||||
self.site = site
|
||||
self.proxies = proxies
|
||||
self.ca_cert = ca_cert
|
||||
|
||||
@abstractmethod
|
||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse: ...
|
|
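The practical effect of the `Country` changes above is that the second tuple entry can now carry both an Indeed subdomain and an API country code, split on a colon:

```python
from jobspy.model import Country

print(Country.from_string("uk").indeed_domain_value)       # ('uk', 'GB')
print(Country.from_string("usa").indeed_domain_value)      # ('www', 'US')
print(Country.from_string("germany").indeed_domain_value)  # ('de', 'DE')
```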
@ -0,0 +1,301 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import random
|
||||
import time
|
||||
from datetime import datetime, date, timedelta
|
||||
from typing import Optional
|
||||
|
||||
import regex as re
|
||||
import requests
|
||||
|
||||
from jobspy.exception import NaukriException
|
||||
from jobspy.naukri.constant import headers as naukri_headers
|
||||
from jobspy.naukri.util import (
|
||||
is_job_remote,
|
||||
parse_job_type,
|
||||
parse_company_industry,
|
||||
)
|
||||
from jobspy.model import (
|
||||
JobPost,
|
||||
Location,
|
||||
JobResponse,
|
||||
Country,
|
||||
Compensation,
|
||||
DescriptionFormat,
|
||||
Scraper,
|
||||
ScraperInput,
|
||||
Site,
|
||||
)
|
||||
from jobspy.util import (
|
||||
extract_emails_from_text,
|
||||
currency_parser,
|
||||
markdown_converter,
|
||||
create_session,
|
||||
create_logger,
|
||||
)
|
||||
|
||||
log = create_logger("Naukri")
|
||||
|
||||
class Naukri(Scraper):
|
||||
base_url = "https://www.naukri.com/jobapi/v3/search"
|
||||
delay = 3
|
||||
band_delay = 4
|
||||
jobs_per_page = 20
|
||||
|
||||
def __init__(
|
||||
self, proxies: list[str] | str | None = None, ca_cert: str | None = None
|
||||
):
|
||||
"""
|
||||
Initializes NaukriScraper with the Naukri API URL
|
||||
"""
|
||||
super().__init__(Site.NAUKRI, proxies=proxies, ca_cert=ca_cert)
|
||||
self.session = create_session(
|
||||
proxies=self.proxies,
|
||||
ca_cert=ca_cert,
|
||||
is_tls=False,
|
||||
has_retry=True,
|
||||
delay=5,
|
||||
clear_cookies=True,
|
||||
)
|
||||
self.session.headers.update(naukri_headers)
|
||||
self.scraper_input = None
|
||||
self.country = "India" #naukri is india-focused by default
|
||||
log.info("Naukri scraper initialized")
|
||||
|
||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
||||
"""
|
||||
Scrapes Naukri API for jobs with scraper_input criteria
|
||||
:param scraper_input:
|
||||
:return: job_response
|
||||
"""
|
||||
self.scraper_input = scraper_input
|
||||
job_list: list[JobPost] = []
|
||||
seen_ids = set()
|
||||
start = scraper_input.offset or 0
|
||||
page = (start // self.jobs_per_page) + 1
|
||||
request_count = 0
|
||||
seconds_old = (
|
||||
scraper_input.hours_old * 3600 if scraper_input.hours_old else None
|
||||
)
|
||||
continue_search = (
|
||||
lambda: len(job_list) < scraper_input.results_wanted and page <= 50 # Arbitrary limit
|
||||
)
|
||||
|
||||
while continue_search():
|
||||
request_count += 1
|
||||
log.info(
|
||||
f"Scraping page {request_count} / {math.ceil(scraper_input.results_wanted / self.jobs_per_page)} "
|
||||
f"for search term: {scraper_input.search_term}"
|
||||
)
|
||||
params = {
|
||||
"noOfResults": self.jobs_per_page,
|
||||
"urlType": "search_by_keyword",
|
||||
"searchType": "adv",
|
||||
"keyword": scraper_input.search_term,
|
||||
"pageNo": page,
|
||||
"k": scraper_input.search_term,
|
||||
"seoKey": f"{scraper_input.search_term.lower().replace(' ', '-')}-jobs",
|
||||
"src": "jobsearchDesk",
|
||||
"latLong": "",
|
||||
"location": scraper_input.location,
|
||||
"remote": "true" if scraper_input.is_remote else None,
|
||||
}
|
||||
if seconds_old:
|
||||
params["days"] = seconds_old // 86400 # Convert to days
|
||||
|
||||
params = {k: v for k, v in params.items() if v is not None}
|
||||
try:
|
||||
log.debug(f"Sending request to {self.base_url} with params: {params}")
|
||||
response = self.session.get(self.base_url, params=params, timeout=10)
|
||||
if response.status_code not in range(200, 400):
|
||||
err = f"Naukri API response status code {response.status_code} - {response.text}"
|
||||
log.error(err)
|
||||
return JobResponse(jobs=job_list)
|
||||
data = response.json()
|
||||
job_details = data.get("jobDetails", [])
|
||||
log.info(f"Received {len(job_details)} job entries from API")
|
||||
if not job_details:
|
||||
log.warning("No job details found in API response")
|
||||
break
|
||||
except Exception as e:
|
||||
log.error(f"Naukri API request failed: {str(e)}")
|
||||
return JobResponse(jobs=job_list)
|
||||
|
||||
for job in job_details:
|
||||
job_id = job.get("jobId")
|
||||
if not job_id or job_id in seen_ids:
|
||||
continue
|
||||
seen_ids.add(job_id)
|
||||
log.debug(f"Processing job ID: {job_id}")
|
||||
|
||||
try:
|
||||
fetch_desc = scraper_input.linkedin_fetch_description
|
||||
job_post = self._process_job(job, job_id, fetch_desc)
|
||||
if job_post:
|
||||
job_list.append(job_post)
|
||||
log.info(f"Added job: {job_post.title} (ID: {job_id})")
|
||||
if not continue_search():
|
||||
break
|
||||
except Exception as e:
|
||||
log.error(f"Error processing job ID {job_id}: {str(e)}")
|
||||
raise NaukriException(str(e))
|
||||
|
||||
if continue_search():
|
||||
time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
|
||||
page += 1
|
||||
|
||||
job_list = job_list[:scraper_input.results_wanted]
|
||||
log.info(f"Scraping completed. Total jobs collected: {len(job_list)}")
|
||||
return JobResponse(jobs=job_list)
|
||||
|
||||
def _process_job(
|
||||
self, job: dict, job_id: str, full_descr: bool
|
||||
) -> Optional[JobPost]:
|
||||
"""
|
||||
Processes a single job from API response into a JobPost object
|
||||
"""
|
||||
title = job.get("title", "N/A")
|
||||
company = job.get("companyName", "N/A")
|
||||
company_url = f"https://www.naukri.com/{job.get('staticUrl', '')}" if job.get("staticUrl") else None
|
||||
|
||||
location = self._get_location(job.get("placeholders", []))
|
||||
compensation = self._get_compensation(job.get("placeholders", []))
|
||||
date_posted = self._parse_date(job.get("footerPlaceholderLabel"), job.get("createdDate"))
|
||||
|
||||
job_url = f"https://www.naukri.com{job.get('jdURL', f'/job/{job_id}')}"
|
||||
description = job.get("jobDescription") if full_descr else None
|
||||
if description and self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
|
||||
description = markdown_converter(description)
|
||||
|
||||
job_type = parse_job_type(description) if description else None
|
||||
company_industry = parse_company_industry(description) if description else None
|
||||
is_remote = is_job_remote(title, description or "", location)
|
||||
company_logo = job.get("logoPathV3") or job.get("logoPath")
|
||||
|
||||
# Naukri-specific fields
|
||||
skills = job.get("tagsAndSkills", "").split(",") if job.get("tagsAndSkills") else None
|
||||
experience_range = job.get("experienceText")
|
||||
ambition_box = job.get("ambitionBoxData", {})
|
||||
company_rating = float(ambition_box.get("AggregateRating")) if ambition_box.get("AggregateRating") else None
|
||||
company_reviews_count = ambition_box.get("ReviewsCount")
|
||||
vacancy_count = job.get("vacancy")
|
||||
work_from_home_type = self._infer_work_from_home_type(job.get("placeholders", []), title, description or "")
|
||||
|
||||
job_post = JobPost(
|
||||
id=f"nk-{job_id}",
|
||||
title=title,
|
||||
company_name=company,
|
||||
company_url=company_url,
|
||||
location=location,
|
||||
is_remote=is_remote,
|
||||
date_posted=date_posted,
|
||||
job_url=job_url,
|
||||
compensation=compensation,
|
||||
job_type=job_type,
|
||||
company_industry=company_industry,
|
||||
description=description,
|
||||
emails=extract_emails_from_text(description or ""),
|
||||
company_logo=company_logo,
|
||||
skills=skills,
|
||||
experience_range=experience_range,
|
||||
company_rating=company_rating,
|
||||
company_reviews_count=company_reviews_count,
|
||||
vacancy_count=vacancy_count,
|
||||
work_from_home_type=work_from_home_type,
|
||||
)
|
||||
log.debug(f"Processed job: {title} at {company}")
|
||||
return job_post
|
||||
|
||||
def _get_location(self, placeholders: list[dict]) -> Location:
|
||||
"""
|
||||
Extracts location data from placeholders
|
||||
"""
|
||||
location = Location(country=Country.INDIA)
|
||||
for placeholder in placeholders:
|
||||
if placeholder.get("type") == "location":
|
||||
location_str = placeholder.get("label", "")
|
||||
parts = location_str.split(", ")
|
||||
city = parts[0] if parts else None
|
||||
state = parts[1] if len(parts) > 1 else None
|
||||
location = Location(city=city, state=state, country=Country.INDIA)
|
||||
log.debug(f"Parsed location: {location.display_location()}")
|
||||
break
|
||||
return location
|
||||
|
||||
def _get_compensation(self, placeholders: list[dict]) -> Optional[Compensation]:
|
||||
"""
|
||||
Extracts compensation data from placeholders, handling Indian salary formats (Lakhs, Crores)
|
||||
"""
|
||||
for placeholder in placeholders:
|
||||
if placeholder.get("type") == "salary":
|
||||
salary_text = placeholder.get("label", "").strip()
|
||||
if salary_text == "Not disclosed":
|
||||
log.debug("Salary not disclosed")
|
||||
return None
|
||||
|
||||
# Handle Indian salary formats (e.g., "12-16 Lacs P.A.", "1-5 Cr")
|
||||
salary_match = re.match(r"(\d+(?:\.\d+)?)\s*-\s*(\d+(?:\.\d+)?)\s*(Lacs|Lakh|Cr)\s*(P\.A\.)?", salary_text, re.IGNORECASE)
|
||||
if salary_match:
|
||||
min_salary, max_salary, unit = salary_match.groups()[:3]
|
||||
min_salary, max_salary = float(min_salary), float(max_salary)
|
||||
currency = "INR"
|
||||
|
||||
# Convert to base units (INR)
|
||||
if unit.lower() in ("lacs", "lakh"):
|
||||
min_salary *= 100000 # 1 Lakh = 100,000 INR
|
||||
max_salary *= 100000
|
||||
elif unit.lower() == "cr":
|
||||
min_salary *= 10000000 # 1 Crore = 10,000,000 INR
|
||||
max_salary *= 10000000
|
||||
|
||||
log.debug(f"Parsed salary: {min_salary} - {max_salary} INR")
|
||||
return Compensation(
|
||||
min_amount=int(min_salary),
|
||||
max_amount=int(max_salary),
|
||||
currency=currency,
|
||||
)
|
||||
else:
|
||||
log.debug(f"Could not parse salary: {salary_text}")
|
||||
return None
|
||||
return None
|
||||
|
||||
def _parse_date(self, label: str, created_date: int) -> Optional[date]:
|
||||
"""
|
||||
Parses date from footerPlaceholderLabel or createdDate, returning a date object
|
||||
"""
|
||||
today = datetime.now()
|
||||
if not label:
|
||||
if created_date:
|
||||
return datetime.fromtimestamp(created_date / 1000).date() # Convert to date
|
||||
return None
|
||||
label = label.lower()
|
||||
if "today" in label or "just now" in label or "few hours" in label:
|
||||
log.debug("Date parsed as today")
|
||||
return today.date()
|
||||
elif "ago" in label:
|
||||
match = re.search(r"(\d+)\s*day", label)
|
||||
if match:
|
||||
days = int(match.group(1))
|
||||
parsed_date = (today - timedelta(days=days)).date()
|
||||
log.debug(f"Date parsed: {days} days ago -> {parsed_date}")
|
||||
return parsed_date
|
||||
elif created_date:
|
||||
parsed_date = datetime.fromtimestamp(created_date / 1000).date()
|
||||
log.debug(f"Date parsed from timestamp: {parsed_date}")
|
||||
return parsed_date
|
||||
log.debug("No date parsed")
|
||||
return None
|
||||
|
||||
def _infer_work_from_home_type(self, placeholders: list[dict], title: str, description: str) -> Optional[str]:
|
||||
"""
|
||||
Infers work-from-home type from job data (e.g., 'Hybrid', 'Remote', 'Work from office')
|
||||
"""
|
||||
location_str = next((p["label"] for p in placeholders if p["type"] == "location"), "").lower()
|
||||
if "hybrid" in location_str or "hybrid" in title.lower() or "hybrid" in description.lower():
|
||||
return "Hybrid"
|
||||
elif "remote" in location_str or "remote" in title.lower() or "remote" in description.lower():
|
||||
return "Remote"
|
||||
elif "work from office" in description.lower() or not ("remote" in description.lower() or "hybrid" in description.lower()):
|
||||
return "Work from office"
|
||||
return None
|
|
@@ -0,0 +1,11 @@
headers = {
    "authority": "www.naukri.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "cache-control": "max-age=0",
    "upgrade-insecure-requests": "1",
    "appid": "109",
    "systemid": "Naukri",
    "Nkparam": "Ppy0YK9uSHqPtG3bEejYc04RTpUN2CjJOrqA68tzQt0SKJHXZKzz9M8cZtKLVkoOuQmfe4cTb1r2CwfHaxW5Tg==",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
}
@@ -0,0 +1,34 @@
from __future__ import annotations

from bs4 import BeautifulSoup
from jobspy.model import JobType, Location
from jobspy.util import get_enum_from_job_type


def parse_job_type(soup: BeautifulSoup) -> list[JobType] | None:
    """
    Gets the job type from the job page
    """
    job_type_tag = soup.find("span", class_="job-type")
    if job_type_tag:
        job_type_str = job_type_tag.get_text(strip=True).lower().replace("-", "")
        return [get_enum_from_job_type(job_type_str)] if job_type_str else None
    return None


def parse_company_industry(soup: BeautifulSoup) -> str | None:
    """
    Gets the company industry from the job page
    """
    industry_tag = soup.find("span", class_="industry")
    return industry_tag.get_text(strip=True) if industry_tag else None


def is_job_remote(title: str, description: str, location: Location) -> bool:
    """
    Searches the title, description, and location to check if the job is remote
    """
    remote_keywords = ["remote", "work from home", "wfh"]
    location_str = location.display_location()
    full_string = f"{title} {description} {location_str}".lower()
    return any(keyword in full_string for keyword in remote_keywords)
@@ -0,0 +1,354 @@
from __future__ import annotations

import logging
import re
from itertools import cycle

import numpy as np
import requests
import tls_client
import urllib3
from markdownify import markdownify as md
from requests.adapters import HTTPAdapter, Retry

from jobspy.model import CompensationInterval, JobType, Site

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def create_logger(name: str):
    logger = logging.getLogger(f"JobSpy:{name}")
    logger.propagate = False
    if not logger.handlers:
        logger.setLevel(logging.INFO)
        console_handler = logging.StreamHandler()
        format = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
        formatter = logging.Formatter(format)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    return logger


class RotatingProxySession:
    def __init__(self, proxies=None):
        if isinstance(proxies, str):
            self.proxy_cycle = cycle([self.format_proxy(proxies)])
        elif isinstance(proxies, list):
            self.proxy_cycle = (
                cycle([self.format_proxy(proxy) for proxy in proxies])
                if proxies
                else None
            )
        else:
            self.proxy_cycle = None

    @staticmethod
    def format_proxy(proxy):
        """Utility method to format a proxy string into a dictionary."""
        if proxy.startswith("http://") or proxy.startswith("https://"):
            return {"http": proxy, "https": proxy}
        if proxy.startswith("socks5://"):
            return {"http": proxy, "https": proxy}
        return {"http": f"http://{proxy}", "https": f"http://{proxy}"}
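For reference, format_proxy accepts either a fully-qualified proxy URL or a bare user:pass@host:port string. A minimal sketch of the resulting requests-style proxy dictionaries (the proxy values are made-up placeholders):

from jobspy.util import RotatingProxySession

# Bare credentials/host strings are assumed to be HTTP proxies
print(RotatingProxySession.format_proxy("user:pass@203.0.113.1:8080"))
# {'http': 'http://user:pass@203.0.113.1:8080', 'https': 'http://user:pass@203.0.113.1:8080'}

# Fully-qualified URLs (http://, https://, socks5://) are passed through unchanged
print(RotatingProxySession.format_proxy("socks5://203.0.113.1:1080"))
# {'http': 'socks5://203.0.113.1:1080', 'https': 'socks5://203.0.113.1:1080'}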


class RequestsRotating(RotatingProxySession, requests.Session):
    def __init__(self, proxies=None, has_retry=False, delay=1, clear_cookies=False):
        RotatingProxySession.__init__(self, proxies=proxies)
        requests.Session.__init__(self)
        self.clear_cookies = clear_cookies
        self.allow_redirects = True
        self.setup_session(has_retry, delay)

    def setup_session(self, has_retry, delay):
        if has_retry:
            retries = Retry(
                total=3,
                connect=3,
                status=3,
                status_forcelist=[500, 502, 503, 504, 429],
                backoff_factor=delay,
            )
            adapter = HTTPAdapter(max_retries=retries)
            self.mount("http://", adapter)
            self.mount("https://", adapter)

    def request(self, method, url, **kwargs):
        if self.clear_cookies:
            self.cookies.clear()

        if self.proxy_cycle:
            next_proxy = next(self.proxy_cycle)
            if next_proxy["http"] != "http://localhost":
                self.proxies = next_proxy
            else:
                self.proxies = {}
        return requests.Session.request(self, method, url, **kwargs)


class TLSRotating(RotatingProxySession, tls_client.Session):
    def __init__(self, proxies=None):
        RotatingProxySession.__init__(self, proxies=proxies)
        tls_client.Session.__init__(self, random_tls_extension_order=True)

    def execute_request(self, *args, **kwargs):
        if self.proxy_cycle:
            next_proxy = next(self.proxy_cycle)
            if next_proxy["http"] != "http://localhost":
                self.proxies = next_proxy
            else:
                self.proxies = {}
        response = tls_client.Session.execute_request(self, *args, **kwargs)
        response.ok = response.status_code in range(200, 400)
        return response


def create_session(
    *,
    proxies: dict | str | None = None,
    ca_cert: str | None = None,
    is_tls: bool = True,
    has_retry: bool = False,
    delay: int = 1,
    clear_cookies: bool = False,
) -> requests.Session:
    """
    Creates a requests session with optional tls, proxy, and retry settings.
    :return: A session object
    """
    if is_tls:
        session = TLSRotating(proxies=proxies)
    else:
        session = RequestsRotating(
            proxies=proxies,
            has_retry=has_retry,
            delay=delay,
            clear_cookies=clear_cookies,
        )

    if ca_cert:
        session.verify = ca_cert

    return session
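A minimal sketch of how a scraper might obtain a session from create_session; the proxy value is a placeholder and the retry settings shown are just one reasonable combination, not prescribed defaults:

from jobspy.util import create_session

# TLS-fingerprinted session with a single (placeholder) rotating proxy
session = create_session(proxies="user:pass@203.0.113.1:8080", is_tls=True)

# Plain requests session with retries/backoff on 429s and 5xx responses
plain = create_session(is_tls=False, has_retry=True, delay=2, clear_cookies=True)
resp = plain.get("https://example.com")
print(resp.status_code)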


def set_logger_level(verbose: int):
    """
    Adjusts the logger's level. This function allows the logging level to be changed at runtime.

    Parameters:
    - verbose: int {0, 1, 2} (default=2, all logs)
    """
    if verbose is None:
        return
    level_name = {2: "INFO", 1: "WARNING", 0: "ERROR"}.get(verbose, "INFO")
    level = getattr(logging, level_name.upper(), None)
    if level is not None:
        for logger_name in logging.root.manager.loggerDict:
            if logger_name.startswith("JobSpy:"):
                logging.getLogger(logger_name).setLevel(level)
    else:
        raise ValueError(f"Invalid log level: {level_name}")


def markdown_converter(description_html: str):
    if description_html is None:
        return None
    markdown = md(description_html)
    return markdown.strip()


def extract_emails_from_text(text: str) -> list[str] | None:
    if not text:
        return None
    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
    return email_regex.findall(text)


def get_enum_from_job_type(job_type_str: str) -> JobType | None:
    """
    Given a string, returns the corresponding JobType enum member if a match is found.
    """
    res = None
    for job_type in JobType:
        if job_type_str in job_type.value:
            res = job_type
    return res


def currency_parser(cur_str):
    # Remove any non-numerical characters
    # except for ',' '.' or '-' (e.g. EUR)
    cur_str = re.sub("[^-0-9.,]", "", cur_str)
    # Remove any 000s separators (either , or .)
    cur_str = re.sub("[.,]", "", cur_str[:-3]) + cur_str[-3:]

    if "." in list(cur_str[-3:]):
        num = float(cur_str)
    elif "," in list(cur_str[-3:]):
        num = float(cur_str.replace(",", "."))
    else:
        num = float(cur_str)

    return np.round(num, 2)
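To make the thousands-separator handling above concrete, a quick sketch of what currency_parser returns for a few typical inputs (expected values shown as comments):

from jobspy.util import currency_parser

print(currency_parser("€1.234,56"))   # 1234.56 -> trailing comma treated as the decimal mark
print(currency_parser("$1,234.56"))   # 1234.56 -> trailing dot treated as the decimal mark
print(currency_parser("1 250"))       # 1250.0  -> everything but digits, '.', ',' and '-' is stripped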


def remove_attributes(tag):
    for attr in list(tag.attrs):
        del tag[attr]
    return tag


def extract_salary(
    salary_str,
    lower_limit=1000,
    upper_limit=700000,
    hourly_threshold=350,
    monthly_threshold=30000,
    enforce_annual_salary=False,
):
    """
    Extracts salary information from a string and returns the salary interval, min and max salary values, and currency.
    (TODO: Needs test cases as the regex is complicated and may not cover all edge cases)
    """
    if not salary_str:
        return None, None, None, None

    annual_max_salary = None
    min_max_pattern = r"\$(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)\s*[-—–]\s*(?:\$)?(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)"

    def to_int(s):
        return int(float(s.replace(",", "")))

    def convert_hourly_to_annual(hourly_wage):
        return hourly_wage * 2080

    def convert_monthly_to_annual(monthly_wage):
        return monthly_wage * 12

    match = re.search(min_max_pattern, salary_str)

    if match:
        min_salary = to_int(match.group(1))
        max_salary = to_int(match.group(3))
        # Handle 'k' suffix for min and max salaries independently
        if "k" in match.group(2).lower() or "k" in match.group(4).lower():
            min_salary *= 1000
            max_salary *= 1000

        # Convert to annual if less than the hourly threshold
        if min_salary < hourly_threshold:
            interval = CompensationInterval.HOURLY.value
            annual_min_salary = convert_hourly_to_annual(min_salary)
            if max_salary < hourly_threshold:
                annual_max_salary = convert_hourly_to_annual(max_salary)

        elif min_salary < monthly_threshold:
            interval = CompensationInterval.MONTHLY.value
            annual_min_salary = convert_monthly_to_annual(min_salary)
            if max_salary < monthly_threshold:
                annual_max_salary = convert_monthly_to_annual(max_salary)

        else:
            interval = CompensationInterval.YEARLY.value
            annual_min_salary = min_salary
            annual_max_salary = max_salary

        # Ensure salary range is within specified limits
        if not annual_max_salary:
            return None, None, None, None
        if (
            lower_limit <= annual_min_salary <= upper_limit
            and lower_limit <= annual_max_salary <= upper_limit
            and annual_min_salary < annual_max_salary
        ):
            if enforce_annual_salary:
                return interval, annual_min_salary, annual_max_salary, "USD"
            else:
                return interval, min_salary, max_salary, "USD"
    return None, None, None, None
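As a rough worked example of the thresholds above: a minimum below 350 is treated as an hourly wage (annualized over 2,080 hours), anything below 30,000 as monthly, and the rest as yearly, with the annualized range then checked against the 1,000-700,000 limits. Assuming CompensationInterval string values of "hourly" and "yearly", the calls below should produce:

from jobspy.util import extract_salary

print(extract_salary("$30 - $45"))
# ('hourly', 30, 45, 'USD')  -> annualized 62,400-93,600 passes the limits

print(extract_salary("$90k - $120k", enforce_annual_salary=True))
# ('yearly', 90000, 120000, 'USD')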


def extract_job_type(description: str):
    if not description:
        return []

    keywords = {
        JobType.FULL_TIME: r"full\s?time",
        JobType.PART_TIME: r"part\s?time",
        JobType.INTERNSHIP: r"internship",
        JobType.CONTRACT: r"contract",
    }

    listing_types = []
    for key, pattern in keywords.items():
        if re.search(pattern, description, re.IGNORECASE):
            listing_types.append(key)

    return listing_types if listing_types else None


def map_str_to_site(site_name: str) -> Site:
    return Site[site_name.upper()]


def get_enum_from_value(value_str):
    for job_type in JobType:
        if value_str in job_type.value:
            return job_type
    raise Exception(f"Invalid job type: {value_str}")


def convert_to_annual(job_data: dict):
    if job_data["interval"] == "hourly":
        job_data["min_amount"] *= 2080
        job_data["max_amount"] *= 2080
    if job_data["interval"] == "monthly":
        job_data["min_amount"] *= 12
        job_data["max_amount"] *= 12
    if job_data["interval"] == "weekly":
        job_data["min_amount"] *= 52
        job_data["max_amount"] *= 52
    if job_data["interval"] == "daily":
        job_data["min_amount"] *= 260
        job_data["max_amount"] *= 260
    job_data["interval"] = "yearly"
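convert_to_annual mutates a row-like dict in place, scaling by 2,080 hours, 12 months, 52 weeks, or 260 working days depending on the interval. A quick sketch:

from jobspy.util import convert_to_annual

row = {"interval": "hourly", "min_amount": 25, "max_amount": 40}
convert_to_annual(row)
print(row)  # {'interval': 'yearly', 'min_amount': 52000, 'max_amount': 83200}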


desired_order = [
    "id",
    "site",
    "job_url",
    "job_url_direct",
    "title",
    "company",
    "location",
    "date_posted",
    "job_type",
    "salary_source",
    "interval",
    "min_amount",
    "max_amount",
    "currency",
    "is_remote",
    "job_level",
    "job_function",
    "listing_type",
    "emails",
    "description",
    "company_industry",
    "company_url",
    "company_logo",
    "company_url_direct",
    "company_addresses",
    "company_num_employees",
    "company_revenue",
    "company_description",
    # naukri-specific fields
    "skills",
    "experience_range",
    "company_rating",
    "company_reviews_count",
    "vacancy_count",
    "work_from_home_type",
]
@@ -0,0 +1,219 @@
from __future__ import annotations

import json
import math
import re
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime

from bs4 import BeautifulSoup

from jobspy.ziprecruiter.constant import headers, get_cookie_data
from jobspy.util import (
    extract_emails_from_text,
    create_session,
    markdown_converter,
    remove_attributes,
    create_logger,
)
from jobspy.model import (
    JobPost,
    Compensation,
    Location,
    JobResponse,
    Country,
    DescriptionFormat,
    Scraper,
    ScraperInput,
    Site,
)
from jobspy.ziprecruiter.util import get_job_type_enum, add_params

log = create_logger("ZipRecruiter")


class ZipRecruiter(Scraper):
    base_url = "https://www.ziprecruiter.com"
    api_url = "https://api.ziprecruiter.com"

    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes ZipRecruiterScraper with the ZipRecruiter job search url
        """
        super().__init__(Site.ZIP_RECRUITER, proxies=proxies)

        self.scraper_input = None
        self.session = create_session(proxies=proxies, ca_cert=ca_cert)
        self.session.headers.update(headers)
        self._get_cookies()

        self.delay = 5
        self.jobs_per_page = 20
        self.seen_urls = set()

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes ZipRecruiter for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        self.scraper_input = scraper_input
        job_list: list[JobPost] = []
        continue_token = None

        max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)
        for page in range(1, max_pages + 1):
            if len(job_list) >= scraper_input.results_wanted:
                break
            if page > 1:
                time.sleep(self.delay)
            log.info(f"search page: {page} / {max_pages}")
            jobs_on_page, continue_token = self._find_jobs_in_page(
                scraper_input, continue_token
            )
            if jobs_on_page:
                job_list.extend(jobs_on_page)
            else:
                break
            if not continue_token:
                break
        return JobResponse(jobs=job_list[: scraper_input.results_wanted])

    def _find_jobs_in_page(
        self, scraper_input: ScraperInput, continue_token: str | None = None
    ) -> tuple[list[JobPost], str | None]:
        """
        Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
        :param scraper_input:
        :param continue_token:
        :return: jobs found on page
        """
        jobs_list = []
        params = add_params(scraper_input)
        if continue_token:
            params["continue_from"] = continue_token
        try:
            res = self.session.get(f"{self.api_url}/jobs-app/jobs", params=params)
            if res.status_code not in range(200, 400):
                if res.status_code == 429:
                    err = "429 Response - Blocked by ZipRecruiter for too many requests"
                else:
                    err = f"ZipRecruiter response status code {res.status_code}"
                    err += f" with response: {res.text}"  # ZipRecruiter likely not available in EU
                log.error(err)
                return jobs_list, ""
        except Exception as e:
            if "Proxy responded with" in str(e):
                log.error("ZipRecruiter: Bad proxy")
            else:
                log.error(f"ZipRecruiter: {str(e)}")
            return jobs_list, ""

        res_data = res.json()
        jobs_list = res_data.get("jobs", [])
        next_continue_token = res_data.get("continue", None)
        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
            job_results = [
                executor.submit(self._process_job, job) for job in jobs_list
            ]

        job_list = list(filter(None, (result.result() for result in job_results)))
        return job_list, next_continue_token

    def _process_job(self, job: dict) -> JobPost | None:
        """
        Processes an individual job dict from the response
        """
        title = job.get("name")
        job_url = f"{self.base_url}/jobs//j?lvk={job['listing_key']}"
        if job_url in self.seen_urls:
            return
        self.seen_urls.add(job_url)

        description = job.get("job_description", "").strip()
        listing_type = job.get("buyer_type", "")
        description = (
            markdown_converter(description)
            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN
            else description
        )
        company = job.get("hiring_company", {}).get("name")
        country_value = "usa" if job.get("job_country") == "US" else "canada"
        country_enum = Country.from_string(country_value)

        location = Location(
            city=job.get("job_city"), state=job.get("job_state"), country=country_enum
        )
        job_type = get_job_type_enum(
            job.get("employment_type", "").replace("_", "").lower()
        )
        date_posted = datetime.fromisoformat(job["posted_time"].rstrip("Z")).date()
        comp_interval = job.get("compensation_interval")
        comp_interval = "yearly" if comp_interval == "annual" else comp_interval
        comp_min = int(job["compensation_min"]) if "compensation_min" in job else None
        comp_max = int(job["compensation_max"]) if "compensation_max" in job else None
        comp_currency = job.get("compensation_currency")
        description_full, job_url_direct = self._get_descr(job_url)

        return JobPost(
            id=f'zr-{job["listing_key"]}',
            title=title,
            company_name=company,
            location=location,
            job_type=job_type,
            compensation=Compensation(
                interval=comp_interval,
                min_amount=comp_min,
                max_amount=comp_max,
                currency=comp_currency,
            ),
            date_posted=date_posted,
            job_url=job_url,
            description=description_full if description_full else description,
            emails=extract_emails_from_text(description) if description else None,
            job_url_direct=job_url_direct,
            listing_type=listing_type,
        )

    def _get_descr(self, job_url):
        res = self.session.get(job_url, allow_redirects=True)
        description_full = job_url_direct = None
        if res.ok:
            soup = BeautifulSoup(res.text, "html.parser")
            job_descr_div = soup.find("div", class_="job_description")
            company_descr_section = soup.find("section", class_="company_description")
            job_description_clean = (
                remove_attributes(job_descr_div).prettify(formatter="html")
                if job_descr_div
                else ""
            )
            company_description_clean = (
                remove_attributes(company_descr_section).prettify(formatter="html")
                if company_descr_section
                else ""
            )
            description_full = job_description_clean + company_description_clean

            try:
                script_tag = soup.find("script", type="application/json")
                if script_tag:
                    job_json = json.loads(script_tag.string)
                    job_url_val = job_json["model"].get("saveJobURL", "")
                    m = re.search(r"job_url=(.+)", job_url_val)
                    if m:
                        job_url_direct = m.group(1)
            except:
                job_url_direct = None

        if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
            description_full = markdown_converter(description_full)

        return description_full, job_url_direct

    def _get_cookies(self):
        """
        Sends a session event to the API with device properties.
        """
        url = f"{self.api_url}/jobs-app/event"
        self.session.post(url, data=get_cookie_data)
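A minimal sketch of driving this scraper directly (most users go through scrape_jobs instead). The ScraperInput fields shown are assumed from how they are read above (search_term, location, results_wanted) and from the older ScraperInput model later in this diff; the proxy string is a placeholder.

from jobspy.model import ScraperInput, Site
from jobspy.ziprecruiter import ZipRecruiter

scraper = ZipRecruiter(proxies="user:pass@203.0.113.1:8080")
response = scraper.scrape(
    ScraperInput(
        site_type=[Site.ZIP_RECRUITER],
        search_term="software engineer",
        location="Austin, TX",
        results_wanted=20,
    )
)
print(len(response.jobs))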
@@ -0,0 +1,29 @@
headers = {
    "Host": "api.ziprecruiter.com",
    "accept": "*/*",
    "x-zr-zva-override": "100000000;vid:ZT1huzm_EQlDTVEc",
    "x-pushnotificationid": "0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0",
    "x-deviceid": "D77B3A92-E589-46A4-8A39-6EF6F1D86006",
    "user-agent": "Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)",
    "authorization": "Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==",
    "accept-language": "en-US,en;q=0.9",
}

get_cookie_data = [
    ("event_type", "session"),
    ("logged_in", "false"),
    ("number_of_retry", "1"),
    ("property", "model:iPhone"),
    ("property", "os:iOS"),
    ("property", "locale:en_us"),
    ("property", "app_build_number:4734"),
    ("property", "app_version:91.0"),
    ("property", "manufacturer:Apple"),
    ("property", "timestamp:2025-01-12T12:04:42-06:00"),
    ("property", "screen_height:852"),
    ("property", "os_version:16.6.1"),
    ("property", "source:install"),
    ("property", "screen_width:393"),
    ("property", "device_model:iPhone 14 Pro"),
    ("property", "brand:Apple"),
]
@@ -0,0 +1,31 @@
from jobspy.model import JobType


def add_params(scraper_input) -> dict[str, str | int]:
    params: dict[str, str | int] = {
        "search": scraper_input.search_term,
        "location": scraper_input.location,
    }
    if scraper_input.hours_old:
        params["days"] = max(scraper_input.hours_old // 24, 1)

    job_type_map = {JobType.FULL_TIME: "full_time", JobType.PART_TIME: "part_time"}
    if scraper_input.job_type:
        job_type = scraper_input.job_type
        params["employment_type"] = job_type_map.get(job_type, job_type.value[0])

    if scraper_input.easy_apply:
        params["zipapply"] = 1
    if scraper_input.is_remote:
        params["remote"] = 1
    if scraper_input.distance:
        params["radius"] = scraper_input.distance

    return {k: v for k, v in params.items() if v is not None}
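For instance, a remote full-time search limited to the last three days maps to the query parameters below. The SimpleNamespace stands in for a real ScraperInput and is only for illustration:

from types import SimpleNamespace
from jobspy.model import JobType
from jobspy.ziprecruiter.util import add_params

fake_input = SimpleNamespace(
    search_term="data engineer",
    location="Remote",
    hours_old=72,
    job_type=JobType.FULL_TIME,
    easy_apply=False,
    is_remote=True,
    distance=None,
)
print(add_params(fake_input))
# {'search': 'data engineer', 'location': 'Remote', 'days': 3, 'employment_type': 'full_time', 'remote': 1}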


def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
    for job_type in JobType:
        if job_type_str in job_type.value:
            return [job_type]
    return None
File diff suppressed because it is too large
@@ -1,29 +1,33 @@
[build-system]
requires = [ "poetry-core",]
build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "python-jobspy"
version = "1.1.33"
description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
homepage = "https://github.com/Bunsly/JobSpy"
version = "1.1.80"
description = "Job scraper for LinkedIn, Indeed, Glassdoor, ZipRecruiter & Bayt"
authors = ["Cullen Watson <cullen@cullenwatson.com>", "Zachary Hampton <zachary@zacharysproducts.com>"]
homepage = "https://github.com/cullenwatson/JobSpy"
readme = "README.md"
keywords = [ "jobs-scraper", "linkedin", "indeed", "glassdoor", "ziprecruiter", "bayt", "naukri"]
[[tool.poetry.packages]]
include = "jobspy"

packages = [
    { include = "jobspy", from = "src" }
]

[tool.black]
line-length = 88

[tool.poetry.dependencies]
python = "^3.10"
requests = "^2.31.0"
tls-client = "^0.2.1"
beautifulsoup4 = "^4.12.2"
pandas = "^2.1.0"
numpy = "1.24.2"
numpy = "1.26.3"
pydantic = "^2.3.0"

tls-client = "^1.0.1"
markdownify = "^0.13.1"
regex = "^2024.4.28"

[tool.poetry.group.dev.dependencies]
pytest = "^7.4.1"
jupyter = "^1.0.0"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
black = "*"
pre-commit = "*"

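The metadata above is what publishes the library to PyPI as python-jobspy. A minimal usage sketch; the keyword arguments shown appear in the scrape_jobs signature elsewhere in this diff, though the exact set may differ slightly between the versions being compared here:

# pip install python-jobspy
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed", "zip_recruiter"],
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=20,
)
print(jobs.head())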
@@ -1,184 +0,0 @@
import pandas as pd
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from typing import Tuple, Optional

from .jobs import JobType, Location
from .scrapers.indeed import IndeedScraper
from .scrapers.ziprecruiter import ZipRecruiterScraper
from .scrapers.glassdoor import GlassdoorScraper
from .scrapers.linkedin import LinkedInScraper
from .scrapers import ScraperInput, Site, JobResponse, Country
from .scrapers.exceptions import (
    LinkedInException,
    IndeedException,
    ZipRecruiterException,
    GlassdoorException,
)

SCRAPER_MAPPING = {
    Site.LINKEDIN: LinkedInScraper,
    Site.INDEED: IndeedScraper,
    Site.ZIP_RECRUITER: ZipRecruiterScraper,
    Site.GLASSDOOR: GlassdoorScraper,
}


def _map_str_to_site(site_name: str) -> Site:
    return Site[site_name.upper()]


def scrape_jobs(
    site_name: str | list[str] | Site | list[Site],
    search_term: str,
    location: str = "",
    distance: int = None,
    is_remote: bool = False,
    job_type: str = None,
    easy_apply: bool = False,  # linkedin
    results_wanted: int = 15,
    country_indeed: str = "usa",
    hyperlinks: bool = False,
    proxy: Optional[str] = None,
    offset: Optional[int] = 0,
) -> pd.DataFrame:
    """
    Simultaneously scrapes job data from multiple job sites.
    :return: results_wanted: pandas dataframe containing job data
    """

    def get_enum_from_value(value_str):
        for job_type in JobType:
            if value_str in job_type.value:
                return job_type
        raise Exception(f"Invalid job type: {value_str}")

    job_type = get_enum_from_value(job_type) if job_type else None

    if type(site_name) == str:
        site_type = [_map_str_to_site(site_name)]
    else:  #: if type(site_name) == list
        site_type = [
            _map_str_to_site(site) if type(site) == str else site_name
            for site in site_name
        ]

    country_enum = Country.from_string(country_indeed)

    scraper_input = ScraperInput(
        site_type=site_type,
        country=country_enum,
        search_term=search_term,
        location=location,
        distance=distance,
        is_remote=is_remote,
        job_type=job_type,
        easy_apply=easy_apply,
        results_wanted=results_wanted,
        offset=offset,
    )

    def scrape_site(site: Site) -> Tuple[str, JobResponse]:
        scraper_class = SCRAPER_MAPPING[site]
        scraper = scraper_class(proxy=proxy)

        try:
            scraped_data: JobResponse = scraper.scrape(scraper_input)
        except (LinkedInException, IndeedException, ZipRecruiterException) as lie:
            raise lie
        except Exception as e:
            if site == Site.LINKEDIN:
                raise LinkedInException(str(e))
            if site == Site.INDEED:
                raise IndeedException(str(e))
            if site == Site.ZIP_RECRUITER:
                raise ZipRecruiterException(str(e))
            if site == Site.GLASSDOOR:
                raise GlassdoorException(str(e))
            else:
                raise e
        return site.value, scraped_data

    site_to_jobs_dict = {}

    def worker(site):
        site_val, scraped_info = scrape_site(site)
        return site_val, scraped_info

    with ThreadPoolExecutor() as executor:
        future_to_site = {
            executor.submit(worker, site): site for site in scraper_input.site_type
        }

        for future in concurrent.futures.as_completed(future_to_site):
            site_value, scraped_data = future.result()
            site_to_jobs_dict[site_value] = scraped_data

    jobs_dfs: list[pd.DataFrame] = []

    for site, job_response in site_to_jobs_dict.items():
        for job in job_response.jobs:
            job_data = job.dict()
            job_data[
                "job_url_hyper"
            ] = f'<a href="{job_data["job_url"]}">{job_data["job_url"]}</a>'
            job_data["site"] = site
            job_data["company"] = job_data["company_name"]
            job_data["job_type"] = (
                ", ".join(job_type.value[0] for job_type in job_data["job_type"])
                if job_data["job_type"]
                else None
            )
            job_data["emails"] = (
                ", ".join(job_data["emails"]) if job_data["emails"] else None
            )
            if job_data["location"]:
                job_data["location"] = Location(
                    **job_data["location"]
                ).display_location()

            compensation_obj = job_data.get("compensation")
            if compensation_obj and isinstance(compensation_obj, dict):
                job_data["interval"] = (
                    compensation_obj.get("interval").value
                    if compensation_obj.get("interval")
                    else None
                )
                job_data["min_amount"] = compensation_obj.get("min_amount")
                job_data["max_amount"] = compensation_obj.get("max_amount")
                job_data["currency"] = compensation_obj.get("currency", "USD")
            else:
                job_data["interval"] = None
                job_data["min_amount"] = None
                job_data["max_amount"] = None
                job_data["currency"] = None

            job_df = pd.DataFrame([job_data])
            jobs_dfs.append(job_df)

    if jobs_dfs:
        jobs_df = pd.concat(jobs_dfs, ignore_index=True)
        desired_order: list[str] = [
            "job_url_hyper" if hyperlinks else "job_url",
            "site",
            "title",
            "company",
            "company_url",
            "location",
            "job_type",
            "date_posted",
            "interval",
            "min_amount",
            "max_amount",
            "currency",
            "is_remote",
            "num_urgent_words",
            "benefits",
            "emails",
            "description",
        ]
        jobs_formatted_df = jobs_df[desired_order]
    else:
        jobs_formatted_df = pd.DataFrame()

    return jobs_formatted_df
@@ -1,33 +0,0 @@
from ..jobs import Enum, BaseModel, JobType, JobResponse, Country
from typing import List, Optional, Any


class Site(Enum):
    LINKEDIN = "linkedin"
    INDEED = "indeed"
    ZIP_RECRUITER = "zip_recruiter"
    GLASSDOOR = "glassdoor"


class ScraperInput(BaseModel):
    site_type: List[Site]
    search_term: str

    location: str = None
    country: Optional[Country] = Country.USA
    distance: Optional[int] = None
    is_remote: bool = False
    job_type: Optional[JobType] = None
    easy_apply: bool = None  # linkedin
    offset: int = 0

    results_wanted: int = 15


class Scraper:
    def __init__(self, site: Site, proxy: Optional[List[str]] = None):
        self.site = site
        self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        ...
@ -1,274 +0,0 @@
|
|||
"""
|
||||
jobspy.scrapers.glassdoor
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This module contains routines to scrape Glassdoor.
|
||||
"""
|
||||
import json
|
||||
from typing import Optional, Any
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
from .. import Scraper, ScraperInput, Site
|
||||
from ..exceptions import GlassdoorException
|
||||
from ..utils import create_session
|
||||
from ...jobs import (
|
||||
JobPost,
|
||||
Compensation,
|
||||
CompensationInterval,
|
||||
Location,
|
||||
JobResponse,
|
||||
JobType,
|
||||
)
|
||||
|
||||
|
||||
class GlassdoorScraper(Scraper):
|
||||
def __init__(self, proxy: Optional[str] = None):
|
||||
"""
|
||||
Initializes GlassdoorScraper with the Glassdoor job search url
|
||||
"""
|
||||
site = Site(Site.GLASSDOOR)
|
||||
super().__init__(site, proxy=proxy)
|
||||
|
||||
self.url = None
|
||||
self.country = None
|
||||
self.jobs_per_page = 30
|
||||
self.seen_urls = set()
|
||||
|
||||
def fetch_jobs_page(
|
||||
self,
|
||||
scraper_input: ScraperInput,
|
||||
location_id: int,
|
||||
location_type: str,
|
||||
page_num: int,
|
||||
cursor: str | None,
|
||||
) -> (list[JobPost], str | None):
|
||||
"""
|
||||
Scrapes a page of Glassdoor for jobs with scraper_input criteria
|
||||
"""
|
||||
try:
|
||||
payload = self.add_payload(
|
||||
scraper_input, location_id, location_type, page_num, cursor
|
||||
)
|
||||
session = create_session(self.proxy, is_tls=False, has_retry=True)
|
||||
response = session.post(
|
||||
f"{self.url}/graph", headers=self.headers(), timeout=10, data=payload
|
||||
)
|
||||
if response.status_code != 200:
|
||||
raise GlassdoorException(
|
||||
f"bad response status code: {response.status_code}"
|
||||
)
|
||||
res_json = response.json()[0]
|
||||
if "errors" in res_json:
|
||||
raise ValueError("Error encountered in API response")
|
||||
except Exception as e:
|
||||
raise GlassdoorException(str(e))
|
||||
|
||||
jobs_data = res_json["data"]["jobListings"]["jobListings"]
|
||||
|
||||
jobs = []
|
||||
for i, job in enumerate(jobs_data):
|
||||
job_url = res_json["data"]["jobListings"]["jobListingSeoLinks"][
|
||||
"linkItems"
|
||||
][i]["url"]
|
||||
if job_url in self.seen_urls:
|
||||
continue
|
||||
self.seen_urls.add(job_url)
|
||||
job = job["jobview"]
|
||||
title = job["job"]["jobTitleText"]
|
||||
company_name = job["header"]["employerNameFromSearch"]
|
||||
location_name = job["header"].get("locationName", "")
|
||||
location_type = job["header"].get("locationType", "")
|
||||
age_in_days = job["header"].get("ageInDays")
|
||||
is_remote, location = False, None
|
||||
date_posted = (datetime.now() - timedelta(days=age_in_days)).date() if age_in_days else None
|
||||
|
||||
if location_type == "S":
|
||||
is_remote = True
|
||||
else:
|
||||
location = self.parse_location(location_name)
|
||||
|
||||
compensation = self.parse_compensation(job["header"])
|
||||
|
||||
job = JobPost(
|
||||
title=title,
|
||||
company_name=company_name,
|
||||
date_posted=date_posted,
|
||||
job_url=job_url,
|
||||
location=location,
|
||||
compensation=compensation,
|
||||
is_remote=is_remote
|
||||
)
|
||||
jobs.append(job)
|
||||
|
||||
return jobs, self.get_cursor_for_page(
|
||||
res_json["data"]["jobListings"]["paginationCursors"], page_num + 1
|
||||
)
|
||||
|
||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
||||
"""
|
||||
Scrapes Glassdoor for jobs with scraper_input criteria.
|
||||
:param scraper_input: Information about job search criteria.
|
||||
:return: JobResponse containing a list of jobs.
|
||||
"""
|
||||
self.country = scraper_input.country
|
||||
self.url = self.country.get_url()
|
||||
|
||||
location_id, location_type = self.get_location(
|
||||
scraper_input.location, scraper_input.is_remote
|
||||
)
|
||||
all_jobs: list[JobPost] = []
|
||||
cursor = None
|
||||
max_pages = 30
|
||||
|
||||
try:
|
||||
for page in range(
|
||||
1 + (scraper_input.offset // self.jobs_per_page),
|
||||
min(
|
||||
(scraper_input.results_wanted // self.jobs_per_page) + 2,
|
||||
max_pages + 1,
|
||||
),
|
||||
):
|
||||
try:
|
||||
jobs, cursor = self.fetch_jobs_page(
|
||||
scraper_input, location_id, location_type, page, cursor
|
||||
)
|
||||
all_jobs.extend(jobs)
|
||||
if len(all_jobs) >= scraper_input.results_wanted:
|
||||
all_jobs = all_jobs[: scraper_input.results_wanted]
|
||||
break
|
||||
except Exception as e:
|
||||
raise GlassdoorException(str(e))
|
||||
except Exception as e:
|
||||
raise GlassdoorException(str(e))
|
||||
|
||||
return JobResponse(jobs=all_jobs)
|
||||
|
||||
@staticmethod
|
||||
def parse_compensation(data: dict) -> Optional[Compensation]:
|
||||
pay_period = data.get("payPeriod")
|
||||
adjusted_pay = data.get("payPeriodAdjustedPay")
|
||||
currency = data.get("payCurrency", "USD")
|
||||
|
||||
if not pay_period or not adjusted_pay:
|
||||
return None
|
||||
|
||||
interval = None
|
||||
if pay_period == "ANNUAL":
|
||||
interval = CompensationInterval.YEARLY
|
||||
elif pay_period:
|
||||
interval = CompensationInterval.get_interval(pay_period)
|
||||
min_amount = int(adjusted_pay.get("p10") // 1)
|
||||
max_amount = int(adjusted_pay.get("p90") // 1)
|
||||
|
||||
return Compensation(
|
||||
interval=interval,
|
||||
min_amount=min_amount,
|
||||
max_amount=max_amount,
|
||||
currency=currency,
|
||||
)
|
||||
|
||||
def get_location(self, location: str, is_remote: bool) -> (int, str):
|
||||
if not location or is_remote:
|
||||
return "11047", "STATE" # remote options
|
||||
url = f"{self.url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
|
||||
session = create_session(self.proxy, has_retry=True)
|
||||
response = session.get(url)
|
||||
if response.status_code != 200:
|
||||
raise GlassdoorException(
|
||||
f"bad response status code: {response.status_code}"
|
||||
)
|
||||
items = response.json()
|
||||
if not items:
|
||||
raise ValueError(f"Location '{location}' not found on Glassdoor")
|
||||
location_type = items[0]["locationType"]
|
||||
if location_type == "C":
|
||||
location_type = "CITY"
|
||||
elif location_type == "S":
|
||||
location_type = "STATE"
|
||||
return int(items[0]["locationId"]), location_type
|
||||
|
||||
@staticmethod
|
||||
def add_payload(
|
||||
scraper_input,
|
||||
location_id: int,
|
||||
location_type: str,
|
||||
page_num: int,
|
||||
cursor: str | None = None,
|
||||
) -> str:
|
||||
payload = {
|
||||
"operationName": "JobSearchResultsQuery",
|
||||
"variables": {
|
||||
"excludeJobListingIds": [],
|
||||
"filterParams": [],
|
||||
"keyword": scraper_input.search_term,
|
||||
"numJobsToShow": 30,
|
||||
"locationType": location_type,
|
||||
"locationId": int(location_id),
|
||||
"parameterUrlInput": f"IL.0,12_I{location_type}{location_id}",
|
||||
"pageNumber": page_num,
|
||||
"pageCursor": cursor,
|
||||
},
|
||||
"query": "query JobSearchResultsQuery($excludeJobListingIds: [Long!], $keyword: String, $locationId: Int, $locationType: LocationTypeEnum, $numJobsToShow: Int!, $pageCursor: String, $pageNumber: Int, $filterParams: [FilterParams], $originalPageUrl: String, $seoFriendlyUrlInput: String, $parameterUrlInput: String, $seoUrl: Boolean) {\n jobListings(\n contextHolder: {searchParams: {excludeJobListingIds: $excludeJobListingIds, keyword: $keyword, locationId: $locationId, locationType: $locationType, numPerPage: $numJobsToShow, pageCursor: $pageCursor, pageNumber: $pageNumber, filterParams: $filterParams, originalPageUrl: $originalPageUrl, seoFriendlyUrlInput: $seoFriendlyUrlInput, parameterUrlInput: $parameterUrlInput, seoUrl: $seoUrl, searchType: SR}}\n ) {\n companyFilterOptions {\n id\n shortName\n __typename\n }\n filterOptions\n indeedCtk\n jobListings {\n ...JobView\n __typename\n }\n jobListingSeoLinks {\n linkItems {\n position\n url\n __typename\n }\n __typename\n }\n jobSearchTrackingKey\n jobsPageSeoData {\n pageMetaDescription\n pageTitle\n __typename\n }\n paginationCursors {\n cursor\n pageNumber\n __typename\n }\n indexablePageForSeo\n searchResultsMetadata {\n searchCriteria {\n implicitLocation {\n id\n localizedDisplayName\n type\n __typename\n }\n keyword\n location {\n id\n shortName\n localizedShortName\n localizedDisplayName\n type\n __typename\n }\n __typename\n }\n footerVO {\n countryMenu {\n childNavigationLinks {\n id\n link\n textKey\n __typename\n }\n __typename\n }\n __typename\n }\n helpCenterDomain\n helpCenterLocale\n jobAlert {\n jobAlertExists\n __typename\n }\n jobSerpFaq {\n questions {\n answer\n question\n __typename\n }\n __typename\n }\n jobSerpJobOutlook {\n occupation\n paragraph\n __typename\n }\n showMachineReadableJobs\n __typename\n }\n serpSeoLinksVO {\n relatedJobTitlesResults\n searchedJobTitle\n searchedKeyword\n searchedLocationIdAsString\n searchedLocationSeoName\n searchedLocationType\n topCityIdsToNameResults {\n key\n value\n __typename\n }\n topEmployerIdsToNameResults {\n key\n value\n __typename\n }\n topEmployerNameResults\n topOccupationResults\n __typename\n }\n totalJobsCount\n __typename\n }\n}\n\nfragment JobView on JobListingSearchResult {\n jobview {\n header {\n adOrderId\n advertiserType\n adOrderSponsorshipLevel\n ageInDays\n divisionEmployerName\n easyApply\n employer {\n id\n name\n shortName\n __typename\n }\n employerNameFromSearch\n goc\n gocConfidence\n gocId\n jobCountryId\n jobLink\n jobResultTrackingKey\n jobTitleText\n locationName\n locationType\n locId\n needsCommission\n payCurrency\n payPeriod\n payPeriodAdjustedPay {\n p10\n p50\n p90\n __typename\n }\n rating\n salarySource\n savedJobId\n sponsored\n __typename\n }\n job {\n descriptionFragments\n importConfigId\n jobTitleId\n jobTitleText\n listingId\n __typename\n }\n jobListingAdminDetails {\n cpcVal\n importConfigId\n jobListingId\n jobSourceId\n userEligibleForAdminJobDetails\n __typename\n }\n overview {\n shortName\n squareLogoUrl\n __typename\n }\n __typename\n }\n __typename\n}\n",
|
||||
}
|
||||
|
||||
job_type_filters = {
|
||||
JobType.FULL_TIME: "fulltime",
|
||||
JobType.PART_TIME: "parttime",
|
||||
JobType.CONTRACT: "contract",
|
||||
JobType.INTERNSHIP: "internship",
|
||||
JobType.TEMPORARY: "temporary",
|
||||
}
|
||||
|
||||
if scraper_input.job_type in job_type_filters:
|
||||
filter_value = job_type_filters[scraper_input.job_type]
|
||||
payload["variables"]["filterParams"].append(
|
||||
{"filterKey": "jobType", "values": filter_value}
|
||||
)
|
||||
return json.dumps([payload])
|
||||
|
||||
@staticmethod
|
||||
def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
|
||||
for job_type in JobType:
|
||||
if job_type_str in job_type.value:
|
||||
return [job_type]
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def parse_location(location_name: str) -> Location:
|
||||
if not location_name or location_name == "Remote":
|
||||
return None
|
||||
city, _, state = location_name.partition(", ")
|
||||
return Location(city=city, state=state)
|
||||
|
||||
@staticmethod
|
||||
def get_cursor_for_page(pagination_cursors, page_num):
|
||||
for cursor_data in pagination_cursors:
|
||||
if cursor_data["pageNumber"] == page_num:
|
||||
return cursor_data["cursor"]
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def headers() -> dict:
|
||||
"""
|
||||
Returns headers needed for requests
|
||||
:return: dict - Dictionary containing headers
|
||||
"""
|
||||
return {
|
||||
"authority": "www.glassdoor.com",
|
||||
"accept": "*/*",
|
||||
"accept-language": "en-US,en;q=0.9",
|
||||
"apollographql-client-name": "job-search-next",
|
||||
"apollographql-client-version": "4.65.5",
|
||||
"content-type": "application/json",
|
||||
"cookie": 'gdId=91e2dfc4-c8b5-4fa7-83d0-11512b80262c; G_ENABLED_IDPS=google; trs=https%3A%2F%2Fwww.redhat.com%2F:referral:referral:2023-07-05+09%3A50%3A14.862:undefined:undefined; g_state={"i_p":1688587331651,"i_l":1}; _cfuvid=.7llazxhYFZWi6EISSPdVjtqF0NMVwzxr_E.cB1jgLs-1697828392979-0-604800000; GSESSIONID=undefined; JSESSIONID=F03DD1B5EE02DB6D842FE42B142F88F3; cass=1; jobsClicked=true; indeedCtk=1hd77b301k79i801; asst=1697829114.2; G_AUTHUSER_H=0; uc=8013A8318C98C517FE6DD0024636DFDEF978FC33266D93A2FAFEF364EACA608949D8B8FA2DC243D62DE271D733EB189D809ABE5B08D7B1AE865D217BD4EEBB97C282F5DA5FEFE79C937E3F6110B2A3A0ADBBA3B4B6DF5A996FEE00516100A65FCB11DA26817BE8D1C1BF6CFE36B5B68A3FDC2CFEC83AB797F7841FBB157C202332FC7E077B56BD39B167BDF3D9866E3B; AWSALB=zxc/Yk1nbWXXT6HjNyn3H4h4950ckVsFV/zOrq5LSoChYLE1qV+hDI8Axi3fUa9rlskndcO0M+Fw+ZnJ+AQ2afBFpyOd1acouLMYgkbEpqpQaWhY6/Gv4QH1zBcJ; AWSALBCORS=zxc/Yk1nbWXXT6HjNyn3H4h4950ckVsFV/zOrq5LSoChYLE1qV+hDI8Axi3fUa9rlskndcO0M+Fw+ZnJ+AQ2afBFpyOd1acouLMYgkbEpqpQaWhY6/Gv4QH1zBcJ; gdsid=1697828393025:1697830776351:668396EDB9E6A832022D34414128093D; at=HkH8Hnqi9uaMC7eu0okqyIwqp07ht9hBvE1_St7E_hRqPvkO9pUeJ1Jcpds4F3g6LL5ADaCNlxrPn0o6DumGMfog8qI1-zxaV_jpiFs3pugntw6WpVyYWdfioIZ1IDKupyteeLQEM1AO4zhGjY_rPZynpsiZBPO_B1au94sKv64rv23yvP56OiWKKfI-8_9hhLACEwWvM-Az7X-4aE2QdFt93VJbXbbGVf07bdDZfimsIkTtgJCLSRhU1V0kEM1Efyu66vo3m77gFFaMW7lxyYnb36I5PdDtEXBm3aL-zR7-qa5ywd94ISEivgqQOA4FPItNhqIlX4XrfD1lxVz6rfPaoTIDi4DI6UMCUjwyPsuv8mn0rYqDfRnmJpZ97fJ5AnhrknAd_6ZWN5v1OrxJczHzcXd8LO820QPoqxzzG13bmSTXLwGSxMUCtSrVsq05hicimQ3jpRt0c1dA4OkTNqF7_770B9JfcHcM8cr8-C4IL56dnOjr9KBGfN1Q2IvZM2cOBRbV7okiNOzKVZ3qJ24AE34WA2F3U6Whiu6H8nIuGG5hSNkVygY6CtglNZfFF9p8pJAZm79PngrrBv-CXFBZmhYLFo46lmFetDkiJ6mirtez4tKpzTIYjIp4_JAkiZFwbLJ2QGH4mK8kyyW0lZiX1DTuQec50N_5wvRo0Gt7nlKxzLsApMnaNhuQeH5ygh_pa381ORo9mQGi0EYF9zk00pa2--z4PtjfQ8KFq36GgpxKy5-o4qgqygZj8F01L8r-FiX2G4C7PREMIpAyHX2A4-_JxA1IS2j12EyqKTLqE9VcP06qm2Z-YuIW3ctmpMxy5G9_KiEiGv17weizhSFnl6SbpAEY-2VSmQ5V6jm3hoMp2jemkuGCRkZeFstLDEPxlzFN7WM; __cf_bm=zGaVjIJw4irf40_7UVw54B6Ohm271RUX4Tc8KVScrbs-1697830777-0-AYv2GnKTnnCU+cY9xHbJunO0DwlLDO6SIBnC/s/qldpKsGK0rRAjD6y8lbyATT/KlS7g29OZaN4fbd0lrJg0KmWbIybZIzfWVLHSYePVuOhu; asst=1697829114.2; at=dFhXf64wsf2TlnWy41xLs7skJkuxgKToEGcjGtDfUvW4oEAJ4tTIR5dKQ8wbwT75aIaGgdCfvcb-da7vwrCGWscCncmfLFQpJ9l-LLwoRfk-pMsxHhd77wvf-W7I0HSm7-Q5lQJqI9WyNGRxOa-RpzBTf4L8_Et4-3FzjPaAoYY5pY1FhuwXbN5asGOAMW-p8cjpbfn3PumlIYuckguWnjrcY2F31YJ_1noeoHM9tCGpymANbqGXRkG6aXY7yCfVXtdgZU1K5SMeaSPZIuF_iLUxjc_corzpNiH6qq7BIAmh-e5Aa-g7cwpZcln1fmwTVw4uTMZf1eLIMTa9WzgqZNkvG-sGaq_XxKA_Wai6xTTkOHfRgm4632Ba2963wdJvkGmUUa3tb_L4_wTgk3eFnHp5JhghLfT2Pe3KidP-yX__vx8JOsqe3fndCkKXgVz7xQKe1Dur-sMNlGwi4LXfguTT2YUI8C5Miq3pj2IHc7dC97eyyAiAM4HvyGWfaXWZcei6oIGrOwMvYgy0AcwFry6SIP2SxLT5TrxinRRuem1r1IcOTJsMJyUPp1QsZ7bOyq9G_0060B4CPyovw5523hEuqLTM-R5e5yavY6C_1DHUyE15C3mrh7kdvmlGZeflnHqkFTEKwwOftm-Mv-CKD5Db9ABFGNxKB2FH7nDH67hfOvm4tGNMzceBPKYJ3wciTt9jK3wy39_7cOYVywfrZ-oLhw_XtsbGSSeGn3HytrfgSADAh2sT0Gg6eCC9Xy1vh-Za337SVLUDXZ73W2xJxxUHBkFzZs8L_Xndo5DsbpWhVs9IYUGyraJdqB3SLgDbAppIBCJl4fx6_DG8-xOQPBvuFMlTROe1JVdHOzXI1GElwFDTuH1pjkg4I2G0NhAbE06Y-1illQE; gdsid=1697828393025:1697831731408:99C30D94108AC3030D61C736DDCDF11C',
|
||||
"gd-csrf-token": "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok",
|
||||
"origin": "https://www.glassdoor.com",
|
||||
"referer": "https://www.glassdoor.com/",
|
||||
"sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
|
||||
"sec-ch-ua-mobile": "?0",
|
||||
"sec-ch-ua-platform": '"macOS"',
|
||||
"sec-fetch-dest": "empty",
|
||||
"sec-fetch-mode": "cors",
|
||||
"sec-fetch-site": "same-origin",
|
||||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
|
||||
}
|
|
@ -1,356 +0,0 @@
|
|||
"""
|
||||
jobspy.scrapers.indeed
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This module contains routines to scrape Indeed.
|
||||
"""
|
||||
import re
|
||||
import math
|
||||
import io
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
import urllib.parse
|
||||
from bs4 import BeautifulSoup
|
||||
from bs4.element import Tag
|
||||
from concurrent.futures import ThreadPoolExecutor, Future
|
||||
|
||||
from ..exceptions import IndeedException
|
||||
from ..utils import (
|
||||
count_urgent_words,
|
||||
extract_emails_from_text,
|
||||
create_session,
|
||||
get_enum_from_job_type,
|
||||
)
|
||||
from ...jobs import (
|
||||
JobPost,
|
||||
Compensation,
|
||||
CompensationInterval,
|
||||
Location,
|
||||
JobResponse,
|
||||
JobType,
|
||||
)
|
||||
from .. import Scraper, ScraperInput, Site
|
||||
|
||||
|
||||
class IndeedScraper(Scraper):
|
||||
def __init__(self, proxy: str | None = None):
|
||||
"""
|
||||
Initializes IndeedScraper with the Indeed job search url
|
||||
"""
|
||||
self.url = None
|
||||
self.country = None
|
||||
site = Site(Site.INDEED)
|
||||
super().__init__(site, proxy=proxy)
|
||||
|
||||
self.jobs_per_page = 15
|
||||
self.seen_urls = set()
|
||||
|
||||
def scrape_page(
|
||||
self, scraper_input: ScraperInput, page: int
|
||||
) -> tuple[list[JobPost], int]:
|
||||
"""
|
||||
Scrapes a page of Indeed for jobs with scraper_input criteria
|
||||
:param scraper_input:
|
||||
:param page:
|
||||
:return: jobs found on page, total number of jobs found for search
|
||||
"""
|
||||
self.country = scraper_input.country
|
||||
domain = self.country.indeed_domain_value
|
||||
self.url = f"https://{domain}.indeed.com"
|
||||
|
||||
params = {
|
||||
"q": scraper_input.search_term,
|
||||
"l": scraper_input.location,
|
||||
"filter": 0,
|
||||
"start": scraper_input.offset + page * 10,
|
||||
"sort": "date"
|
||||
}
|
||||
if scraper_input.distance:
|
||||
params["radius"] = scraper_input.distance
|
||||
|
||||
sc_values = []
|
||||
if scraper_input.is_remote:
|
||||
sc_values.append("attr(DSQF7)")
|
||||
if scraper_input.job_type:
|
||||
sc_values.append("jt({})".format(scraper_input.job_type.value))
|
||||
|
||||
if sc_values:
|
||||
params["sc"] = "0kf:" + "".join(sc_values) + ";"
|
||||
try:
|
||||
session = create_session(self.proxy, is_tls=True)
|
||||
response = session.get(
|
||||
f"{self.url}/jobs",
|
||||
headers=self.get_headers(),
|
||||
params=params,
|
||||
allow_redirects=True,
|
||||
timeout_seconds=10,
|
||||
)
|
||||
if response.status_code not in range(200, 400):
|
||||
raise IndeedException(
|
||||
f"bad response with status code: {response.status_code}"
|
||||
)
|
||||
except Exception as e:
|
||||
if "Proxy responded with" in str(e):
|
||||
raise IndeedException("bad proxy")
|
||||
raise IndeedException(str(e))
|
||||
|
||||
soup = BeautifulSoup(response.content, "html.parser")
|
||||
if "did not match any jobs" in response.text:
|
||||
raise IndeedException("Parsing exception: Search did not match any jobs")
|
||||
|
||||
jobs = IndeedScraper.parse_jobs(
|
||||
soup
|
||||
) #: can raise exception, handled by main scrape function
|
||||
total_num_jobs = IndeedScraper.total_jobs(soup)
|
||||
|
||||
if (
|
||||
not jobs.get("metaData", {})
|
||||
.get("mosaicProviderJobCardsModel", {})
|
||||
.get("results")
|
||||
):
|
||||
raise IndeedException("No jobs found.")
|
||||
|
||||
def process_job(job) -> JobPost | None:
|
||||
job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}'
|
||||
job_url_client = f'{self.url}/viewjob?jk={job["jobkey"]}'
|
||||
if job_url in self.seen_urls:
|
||||
return None
|
||||
|
||||
extracted_salary = job.get("extractedSalary")
|
||||
compensation = None
|
||||
if extracted_salary:
|
||||
salary_snippet = job.get("salarySnippet")
|
||||
currency = salary_snippet.get("currency") if salary_snippet else None
|
||||
interval = (extracted_salary.get("type"),)
|
||||
if isinstance(interval, tuple):
|
||||
interval = interval[0]
|
||||
|
||||
interval = interval.upper()
|
||||
if interval in CompensationInterval.__members__:
|
||||
compensation = Compensation(
|
||||
interval=CompensationInterval[interval],
|
||||
min_amount=int(extracted_salary.get("min")),
|
||||
max_amount=int(extracted_salary.get("max")),
|
||||
currency=currency,
|
||||
)
|
||||
|
||||
job_type = IndeedScraper.get_job_type(job)
|
||||
timestamp_seconds = job["pubDate"] / 1000
|
||||
date_posted = datetime.fromtimestamp(timestamp_seconds)
|
||||
date_posted = date_posted.strftime("%Y-%m-%d")
|
||||
|
||||
description = self.get_description(job_url)
|
||||
with io.StringIO(job["snippet"]) as f:
|
||||
soup_io = BeautifulSoup(f, "html.parser")
|
||||
li_elements = soup_io.find_all("li")
|
||||
if description is None and li_elements:
|
||||
description = " ".join(li.text for li in li_elements)
|
||||
|
||||
job_post = JobPost(
|
||||
title=job["normTitle"],
|
||||
description=description,
|
||||
company_name=job["company"],
|
||||
company_url=self.url + job["companyOverviewLink"] if "companyOverviewLink" in job else None,
                location=Location(
                    city=job.get("jobLocationCity"),
                    state=job.get("jobLocationState"),
                    country=self.country,
                ),
                job_type=job_type,
                compensation=compensation,
                date_posted=date_posted,
                job_url=job_url_client,
                emails=extract_emails_from_text(description) if description else None,
                num_urgent_words=count_urgent_words(description)
                if description
                else None,
                is_remote=self.is_remote_job(job),
            )
            return job_post

        jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
        with ThreadPoolExecutor(max_workers=1) as executor:
            job_results: list[Future] = [
                executor.submit(process_job, job) for job in jobs
            ]

        job_list = [result.result() for result in job_results if result.result()]

        return job_list, total_num_jobs

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes Indeed for jobs with scraper_input criteria
        :param scraper_input:
        :return: job_response
        """
        pages_to_process = (
            math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
        )

        #: get first page to initialize session
        job_list, total_results = self.scrape_page(scraper_input, 0)

        with ThreadPoolExecutor(max_workers=1) as executor:
            futures: list[Future] = [
                executor.submit(self.scrape_page, scraper_input, page)
                for page in range(1, pages_to_process + 1)
            ]

            for future in futures:
                jobs, _ = future.result()

                job_list += jobs

        if len(job_list) > scraper_input.results_wanted:
            job_list = job_list[: scraper_input.results_wanted]

        job_response = JobResponse(
            jobs=job_list,
            total_results=total_results,
        )
        return job_response

    def get_description(self, job_page_url: str) -> str | None:
        """
        Retrieves job description by going to the job page url
        :param job_page_url:
        :return: description
        """
        parsed_url = urllib.parse.urlparse(job_page_url)
        params = urllib.parse.parse_qs(parsed_url.query)
        jk_value = params.get("jk", [None])[0]
        formatted_url = f"{self.url}/viewjob?jk={jk_value}&spa=1"
        session = create_session(self.proxy)

        try:
            response = session.get(
                formatted_url,
                headers=self.get_headers(),
                allow_redirects=True,
                timeout_seconds=5,
            )
        except Exception as e:
            return None

        if response.status_code not in range(200, 400):
            return None

        try:
            data = json.loads(response.text)
            job_description = data["body"]["jobInfoWrapperModel"]["jobInfoModel"][
                "sanitizedJobDescription"
            ]
        except (KeyError, TypeError, IndexError):
            return None

        soup = BeautifulSoup(job_description, "html.parser")
        text_content = " ".join(soup.get_text(separator=" ").split()).strip()

        return text_content

    @staticmethod
    def get_job_type(job: dict) -> list[JobType] | None:
        """
        Parses the job to get list of job types
        :param job:
        :return:
        """
        job_types: list[JobType] = []
        for taxonomy in job["taxonomyAttributes"]:
            if taxonomy["label"] == "job-types":
                for i in range(len(taxonomy["attributes"])):
                    label = taxonomy["attributes"][i].get("label")
                    if label:
                        job_type_str = label.replace("-", "").replace(" ", "").lower()
                        job_type = get_enum_from_job_type(job_type_str)
                        if job_type:
                            job_types.append(job_type)
        return job_types

    @staticmethod
    def parse_jobs(soup: BeautifulSoup) -> dict:
        """
        Parses the jobs from the soup object
        :param soup:
        :return: jobs
        """

        def find_mosaic_script() -> Tag | None:
            """
            Finds jobcards script tag
            :return: script_tag
            """
            script_tags = soup.find_all("script")

            for tag in script_tags:
                if (
                    tag.string
                    and "mosaic.providerData" in tag.string
                    and "mosaic-provider-jobcards" in tag.string
                ):
                    return tag
            return None

        script_tag = find_mosaic_script()

        if script_tag:
            script_str = script_tag.string
            pattern = r'window.mosaic.providerData\["mosaic-provider-jobcards"\]\s*=\s*({.*?});'
            p = re.compile(pattern, re.DOTALL)
            m = p.search(script_str)
            if m:
                jobs = json.loads(m.group(1).strip())
                return jobs
            else:
                raise IndeedException("Could not find mosaic provider job cards data")
        else:
            raise IndeedException("Could not find any results for the search")

    @staticmethod
    def total_jobs(soup: BeautifulSoup) -> int:
        """
        Parses the total jobs for that search from soup object
        :param soup:
        :return: total_num_jobs
        """
        script = soup.find("script", string=lambda t: t and "window._initialData" in t)

        pattern = re.compile(r"window._initialData\s*=\s*({.*})\s*;", re.DOTALL)
        match = pattern.search(script.string)
        total_num_jobs = 0
        if match:
            json_str = match.group(1)
            data = json.loads(json_str)
            total_num_jobs = int(data["searchTitleBarModel"]["totalNumResults"])
        return total_num_jobs

    @staticmethod
    def get_headers():
        return {
            "authority": "www.indeed.com",
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "referer": "https://www.indeed.com/viewjob?jk=fe6182337d72c7b1&tk=1hcbfcmd0k62t802&from=serp&vjs=3&advn=8132938064490989&adid=408692607&ad=-6NYlbfkN0A3Osc99MJFDKjquSk4WOGT28ALb_ad4QMtrHreCb9ICg6MiSVy9oDAp3evvOrI7Q-O9qOtQTg1EPbthP9xWtBN2cOuVeHQijxHjHpJC65TjDtftH3AXeINjBvAyDrE8DrRaAXl8LD3Fs1e_xuDHQIssdZ2Mlzcav8m5jHrA0fA64ZaqJV77myldaNlM7-qyQpy4AsJQfvg9iR2MY7qeC5_FnjIgjKIy_lNi9OPMOjGRWXA94CuvC7zC6WeiJmBQCHISl8IOBxf7EdJZlYdtzgae3593TFxbkd6LUwbijAfjax39aAuuCXy3s9C4YgcEP3TwEFGQoTpYu9Pmle-Ae1tHGPgsjxwXkgMm7Cz5mBBdJioglRCj9pssn-1u1blHZM4uL1nK9p1Y6HoFgPUU9xvKQTHjKGdH8d4y4ETyCMoNF4hAIyUaysCKdJKitC8PXoYaWhDqFtSMR4Jys8UPqUV&xkcb=SoDD-_M3JLQfWnQTDh0LbzkdCdPP&xpse=SoBa6_I3JLW9FlWZlB0PbzkdCdPP&sjdu=i6xVERweJM_pVUvgf-MzuaunBTY7G71J5eEX6t4DrDs5EMPQdODrX7Nn-WIPMezoqr5wA_l7Of-3CtoiUawcHw",
            "sec-ch-ua": '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
        }

    @staticmethod
    def is_remote_job(job: dict) -> bool:
        """
        :param job:
        :return: bool
        """
        for taxonomy in job.get("taxonomyAttributes", []):
            if taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0:
                return True
        return False
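A quick sketch of the paging arithmetic in `scrape` above: page 0 is fetched synchronously to initialize the session, and the remaining pages are submitted to the executor. The `jobs_per_page` value of 15 below is only an assumption for illustration; the real value is set in the scraper's `__init__`, which is outside this excerpt.

import math

# Hypothetical page size, for illustration only.
jobs_per_page = 15

for results_wanted in (10, 15, 40):
    extra_pages = math.ceil(results_wanted / jobs_per_page) - 1
    print(f"{results_wanted} results wanted -> page 0 plus {extra_pages} additional page(s)")
# 10 -> 0 extra pages, 15 -> 0, 40 -> 2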
@ -1,294 +0,0 @@
"""
jobspy.scrapers.linkedin
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape LinkedIn.
"""
import random
from typing import Optional
from datetime import datetime

import requests
import time
from requests.exceptions import ProxyError
from bs4 import BeautifulSoup
from bs4.element import Tag
from threading import Lock
from urllib.parse import urlparse, urlunparse

from .. import Scraper, ScraperInput, Site
from ..exceptions import LinkedInException
from ..utils import create_session
from ...jobs import JobPost, Location, JobResponse, JobType, Country, Compensation
from ..utils import count_urgent_words, extract_emails_from_text, get_enum_from_job_type, currency_parser


class LinkedInScraper(Scraper):
    DELAY = 3

    def __init__(self, proxy: Optional[str] = None):
        """
        Initializes LinkedInScraper with the LinkedIn job search url
        """
        site = Site(Site.LINKEDIN)
        self.country = "worldwide"
        self.url = "https://www.linkedin.com"
        super().__init__(site, proxy=proxy)

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes LinkedIn for jobs with scraper_input criteria
        :param scraper_input:
        :return: job_response
        """
        job_list: list[JobPost] = []
        seen_urls = set()
        url_lock = Lock()
        page = scraper_input.offset // 25 + 25 if scraper_input.offset else 0

        def job_type_code(job_type_enum):
            mapping = {
                JobType.FULL_TIME: "F",
                JobType.PART_TIME: "P",
                JobType.INTERNSHIP: "I",
                JobType.CONTRACT: "C",
                JobType.TEMPORARY: "T",
            }

            return mapping.get(job_type_enum, "")

        while len(job_list) < scraper_input.results_wanted and page < 1000:
            session = create_session(is_tls=False, has_retry=True, delay=5)
            params = {
                "keywords": scraper_input.search_term,
                "location": scraper_input.location,
                "distance": scraper_input.distance,
                "f_WT": 2 if scraper_input.is_remote else None,
                "f_JT": job_type_code(scraper_input.job_type)
                if scraper_input.job_type
                else None,
                "pageNum": 0,
                "start": page + scraper_input.offset,
                "f_AL": "true" if scraper_input.easy_apply else None,
            }

            params = {k: v for k, v in params.items() if v is not None}
            try:
                response = session.get(
                    f"{self.url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
                    params=params,
                    allow_redirects=True,
                    proxies=self.proxy,
                    headers=self.headers(),
                    timeout=10,
                )
                response.raise_for_status()

            except requests.HTTPError as e:
                raise LinkedInException(
                    f"bad response status code: {e.response.status_code}"
                )
            except ProxyError as e:
                raise LinkedInException("bad proxy")
            except Exception as e:
                raise LinkedInException(str(e))

            soup = BeautifulSoup(response.text, "html.parser")
            job_cards = soup.find_all("div", class_="base-search-card")
            if len(job_cards) == 0:
                return JobResponse(jobs=job_list)

            for job_card in job_cards:
                job_url = None
                href_tag = job_card.find("a", class_="base-card__full-link")
                if href_tag and "href" in href_tag.attrs:
                    href = href_tag.attrs["href"].split("?")[0]
                    job_id = href.split("-")[-1]
                    job_url = f"{self.url}/jobs/view/{job_id}"

                with url_lock:
                    if job_url in seen_urls:
                        continue
                    seen_urls.add(job_url)

                # Call process_job directly without threading
                try:
                    job_post = self.process_job(job_card, job_url)
                    if job_post:
                        job_list.append(job_post)
                except Exception as e:
                    raise LinkedInException("Exception occurred while processing jobs")

            page += 25
            time.sleep(random.uniform(LinkedInScraper.DELAY, LinkedInScraper.DELAY + 2))

        job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list)

    def process_job(self, job_card: Tag, job_url: str) -> Optional[JobPost]:
        salary_tag = job_card.find('span', class_='job-search-card__salary-info')

        compensation = None
        if salary_tag:
            salary_text = salary_tag.get_text(separator=' ').strip()
            salary_values = [currency_parser(value) for value in salary_text.split('-')]
            salary_min = salary_values[0]
            salary_max = salary_values[1]
            currency = salary_text[0] if salary_text[0] != '$' else 'USD'

            compensation = Compensation(
                min_amount=int(salary_min),
                max_amount=int(salary_max),
                currency=currency,
            )

        title_tag = job_card.find("span", class_="sr-only")
        title = title_tag.get_text(strip=True) if title_tag else "N/A"

        company_tag = job_card.find("h4", class_="base-search-card__subtitle")
        company_a_tag = company_tag.find("a") if company_tag else None
        company_url = (
            urlunparse(urlparse(company_a_tag.get("href"))._replace(query=""))
            if company_a_tag and company_a_tag.has_attr("href")
            else ""
        )
        company = company_a_tag.get_text(strip=True) if company_a_tag else "N/A"

        metadata_card = job_card.find("div", class_="base-search-card__metadata")
        location = self.get_location(metadata_card)

        datetime_tag = (
            metadata_card.find("time", class_="job-search-card__listdate")
            if metadata_card
            else None
        )
        date_posted = None
        if datetime_tag and "datetime" in datetime_tag.attrs:
            datetime_str = datetime_tag["datetime"]
            try:
                date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
            except Exception as e:
                date_posted = None
        benefits_tag = job_card.find("span", class_="result-benefits__text")
        benefits = " ".join(benefits_tag.get_text().split()) if benefits_tag else None

        # removed to speed up scraping
        # description, job_type = self.get_job_description(job_url)

        return JobPost(
            title=title,
            company_name=company,
            company_url=company_url,
            location=location,
            date_posted=date_posted,
            job_url=job_url,
            compensation=compensation,
            benefits=benefits,
            # job_type=job_type,
            # description=description,
            # emails=extract_emails_from_text(description) if description else None,
            # num_urgent_words=count_urgent_words(description) if description else None,
        )

    def get_job_description(
        self, job_page_url: str
    ) -> tuple[None, None] | tuple[str | None, tuple[str | None, JobType | None]]:
        """
        Retrieves job description by going to the job page url
        :param job_page_url:
        :return: description or None
        """
        try:
            session = create_session(is_tls=False, has_retry=True)
            response = session.get(job_page_url, timeout=5, proxies=self.proxy)
            response.raise_for_status()
        except requests.HTTPError as e:
            return None, None
        except Exception as e:
            return None, None
        if response.url == "https://www.linkedin.com/signup":
            return None, None

        soup = BeautifulSoup(response.text, "html.parser")
        div_content = soup.find(
            "div", class_=lambda x: x and "show-more-less-html__markup" in x
        )

        description = None
        if div_content:
            description = " ".join(div_content.get_text().split()).strip()

        def get_job_type(
            soup_job_type: BeautifulSoup,
        ) -> list[JobType] | None:
            """
            Gets the job type from job page
            :param soup_job_type:
            :return: JobType
            """
            h3_tag = soup_job_type.find(
                "h3",
                class_="description__job-criteria-subheader",
                string=lambda text: "Employment type" in text,
            )

            employment_type = None
            if h3_tag:
                employment_type_span = h3_tag.find_next_sibling(
                    "span",
                    class_="description__job-criteria-text description__job-criteria-text--criteria",
                )
                if employment_type_span:
                    employment_type = employment_type_span.get_text(strip=True)
                    employment_type = employment_type.lower()
                    employment_type = employment_type.replace("-", "")

            return [get_enum_from_job_type(employment_type)] if employment_type else []

        return description, get_job_type(soup)

    def get_location(self, metadata_card: Optional[Tag]) -> Location:
        """
        Extracts the location data from the job metadata card.
        :param metadata_card
        :return: location
        """
        location = Location(country=Country.from_string(self.country))
        if metadata_card is not None:
            location_tag = metadata_card.find(
                "span", class_="job-search-card__location"
            )
            location_string = location_tag.text.strip() if location_tag else "N/A"
            parts = location_string.split(", ")
            if len(parts) == 2:
                city, state = parts
                location = Location(
                    city=city,
                    state=state,
                    country=Country.from_string(self.country),
                )
            elif len(parts) == 3:
                city, state, country = parts
                location = Location(
                    city=city,
                    state=state,
                    country=Country.from_string(country),
                )

        return location

    @staticmethod
    def headers() -> dict:
        return {
            'authority': 'www.linkedin.com',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'accept-language': 'en-US,en;q=0.9',
            'cache-control': 'max-age=0',
            'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
            # 'sec-ch-ua-mobile': '?0',
            # 'sec-ch-ua-platform': '"macOS"',
            # 'sec-fetch-dest': 'document',
            # 'sec-fetch-mode': 'navigate',
            # 'sec-fetch-site': 'none',
            # 'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        }
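For reference, a minimal sketch of the request that `scrape` above assembles for one page of LinkedIn's public guest-search endpoint. The search term, location, and offset below are made-up example values; the letter code mirrors `job_type_code`, and pagination advances in steps of 25.

from urllib.parse import urlencode

# Example values only; in the scraper these come from ScraperInput.
params = {
    "keywords": "software engineer",
    "location": "San Francisco, CA",
    "f_WT": 2,        # remote-only filter
    "f_JT": "F",      # full-time, per job_type_code()
    "pageNum": 0,
    "start": 25,      # second page of results
}
url = "https://www.linkedin.com/jobs-guest/jobs/api/seeMoreJobPostings/search?" + urlencode(params)
print(url)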
@ -1,87 +0,0 @@
import re
import numpy as np

import tls_client
import requests
from requests.adapters import HTTPAdapter, Retry

from ..jobs import JobType


def count_urgent_words(description: str) -> int:
    """
    Count the number of urgent words or phrases in a job description.
    """
    urgent_patterns = re.compile(
        r"\burgen(t|cy)|\bimmediate(ly)?\b|start asap|\bhiring (now|immediate(ly)?)\b",
        re.IGNORECASE,
    )
    matches = re.findall(urgent_patterns, description)
    count = len(matches)

    return count


def extract_emails_from_text(text: str) -> list[str] | None:
    if not text:
        return None
    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
    return email_regex.findall(text)


def create_session(
    proxy: dict | None = None,
    is_tls: bool = True,
    has_retry: bool = False,
    delay: int = 1,
) -> requests.Session:
    """
    Creates a requests session with optional tls, proxy, and retry settings.

    :return: A session object
    """
    if is_tls:
        session = tls_client.Session(
            client_identifier="chrome112",
            random_tls_extension_order=True,
        )
        session.proxies = proxy
    else:
        session = requests.Session()
        session.allow_redirects = True
        if proxy:
            session.proxies.update(proxy)
        if has_retry:
            retries = Retry(
                total=3,
                connect=3,
                status=3,
                status_forcelist=[500, 502, 503, 504, 429],
                backoff_factor=delay,
            )
            adapter = HTTPAdapter(max_retries=retries)

            session.mount('http://', adapter)
            session.mount('https://', adapter)

    return session


def get_enum_from_job_type(job_type_str: str) -> JobType | None:
    """
    Given a string, returns the corresponding JobType enum member if a match is found.
    """
    res = None
    for job_type in JobType:
        if job_type_str in job_type.value:
            res = job_type
    return res


def currency_parser(cur_str):
    # Remove any non-numerical characters
    # except for ',' '.' or '-' (e.g. EUR)
    cur_str = re.sub("[^-0-9.,]", '', cur_str)
    # Remove any 000s separators (either , or .)
    cur_str = re.sub("[.,]", '', cur_str[:-3]) + cur_str[-3:]

    if '.' in list(cur_str[-3:]):
        num = float(cur_str)
    elif ',' in list(cur_str[-3:]):
        num = float(cur_str.replace(',', '.'))
    else:
        num = float(cur_str)

    return np.round(num, 2)
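A few illustrative checks of `currency_parser` (these inputs are mine, not from the repo's tests): everything except digits, separators, and the sign is stripped, thousands separators are removed, and a separator appearing in the last three characters is treated as the decimal mark.

assert currency_parser("$1,200.50") == 1200.5   # US-style: comma thousands, dot decimal
assert currency_parser("1.200,50") == 1200.5    # EU-style: dot thousands, comma decimal
assert currency_parser("90000") == 90000.0      # no separators at all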
@ -1,206 +0,0 @@
"""
jobspy.scrapers.ziprecruiter
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape ZipRecruiter.
"""
import math
import time
import re
from datetime import datetime, date
from typing import Optional, Tuple, Any

from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor

from .. import Scraper, ScraperInput, Site
from ..exceptions import ZipRecruiterException
from ..utils import count_urgent_words, extract_emails_from_text, create_session
from ...jobs import JobPost, Compensation, Location, JobResponse, JobType, Country


class ZipRecruiterScraper(Scraper):
    def __init__(self, proxy: Optional[str] = None):
        """
        Initializes ZipRecruiterScraper with the ZipRecruiter job search url
        """
        site = Site(Site.ZIP_RECRUITER)
        self.url = "https://www.ziprecruiter.com"
        super().__init__(site, proxy=proxy)

        self.jobs_per_page = 20
        self.seen_urls = set()

    def find_jobs_in_page(
        self, scraper_input: ScraperInput, continue_token: str | None = None
    ) -> Tuple[list[JobPost], Optional[str]]:
        """
        Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
        :param scraper_input:
        :param continue_token:
        :return: jobs found on page
        """
        params = self.add_params(scraper_input)
        if continue_token:
            params["continue"] = continue_token
        try:
            session = create_session(self.proxy, is_tls=True)
            response = session.get(
                f"https://api.ziprecruiter.com/jobs-app/jobs",
                headers=self.headers(),
                params=params,  # includes the continue token on subsequent pages
                timeout_seconds=10,
            )
            if response.status_code != 200:
                raise ZipRecruiterException(
                    f"bad response status code: {response.status_code}"
                )
        except Exception as e:
            if "Proxy responded with non 200 code" in str(e):
                raise ZipRecruiterException("bad proxy")
            raise ZipRecruiterException(str(e))

        time.sleep(5)
        response_data = response.json()
        jobs_list = response_data.get("jobs", [])
        next_continue_token = response_data.get("continue", None)

        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
            job_results = [executor.submit(self.process_job, job) for job in jobs_list]

        job_list = [result.result() for result in job_results if result.result()]
        return job_list, next_continue_token

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes ZipRecruiter for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        job_list: list[JobPost] = []
        continue_token = None

        max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)

        for page in range(1, max_pages + 1):
            if len(job_list) >= scraper_input.results_wanted:
                break

            jobs_on_page, continue_token = self.find_jobs_in_page(
                scraper_input, continue_token
            )
            if jobs_on_page:
                job_list.extend(jobs_on_page)

            if not continue_token:
                break

        if len(job_list) > scraper_input.results_wanted:
            job_list = job_list[: scraper_input.results_wanted]

        return JobResponse(jobs=job_list)

    @staticmethod
    def process_job(job: dict) -> JobPost:
        """Processes an individual job dict from the response"""
        title = job.get("name")
        job_url = job.get("job_url")

        description = BeautifulSoup(
            job.get("job_description", "").strip(), "html.parser"
        ).get_text()

        company = job["hiring_company"].get("name") if "hiring_company" in job else None
        country_value = "usa" if job.get("job_country") == "US" else "canada"
        country_enum = Country.from_string(country_value)

        location = Location(
            city=job.get("job_city"), state=job.get("job_state"), country=country_enum
        )
        job_type = ZipRecruiterScraper.get_job_type_enum(
            job.get("employment_type", "").replace("_", "").lower()
        )

        save_job_url = job.get("SaveJobURL", "")
        posted_time_match = re.search(
            r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url
        )
        if posted_time_match:
            date_time_str = posted_time_match.group(1)
            date_posted_obj = datetime.strptime(date_time_str, "%Y-%m-%dT%H:%M:%SZ")
            date_posted = date_posted_obj.date()
        else:
            date_posted = date.today()

        return JobPost(
            title=title,
            company_name=company,
            location=location,
            job_type=job_type,
            compensation=Compensation(
                interval="yearly"
                if job.get("compensation_interval") == "annual"
                else job.get("compensation_interval"),
                min_amount=int(job["compensation_min"])
                if "compensation_min" in job
                else None,
                max_amount=int(job["compensation_max"])
                if "compensation_max" in job
                else None,
                currency=job.get("compensation_currency"),
            ),
            date_posted=date_posted,
            job_url=job_url,
            description=description,
            emails=extract_emails_from_text(description) if description else None,
            num_urgent_words=count_urgent_words(description) if description else None,
        )

    @staticmethod
    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
        for job_type in JobType:
            if job_type_str in job_type.value:
                return [job_type]
        return None

    @staticmethod
    def add_params(scraper_input) -> dict[str, str | Any]:
        params = {
            "search": scraper_input.search_term,
            "location": scraper_input.location,
            "form": "jobs-landing",
        }
        job_type_value = None
        if scraper_input.job_type:
            if scraper_input.job_type.value == "fulltime":
                job_type_value = "full_time"
            elif scraper_input.job_type.value == "parttime":
                job_type_value = "part_time"
            else:
                job_type_value = scraper_input.job_type.value

        if job_type_value:
            params[
                "refine_by_employment"
            ] = f"employment_type:employment_type:{job_type_value}"

        if scraper_input.is_remote:
            params["refine_by_location_type"] = "only_remote"

        if scraper_input.distance:
            params["radius"] = scraper_input.distance

        return params

    @staticmethod
    def headers() -> dict:
        """
        Returns headers needed for ZipRecruiter API requests
        :return: dict - Dictionary containing headers
        """
        return {
            'Host': 'api.ziprecruiter.com',
            'accept': '*/*',
            'authorization': 'Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==',
            'Cookie': '__cf_bm=DZ7eJOw6lka.Bwy5jLeDqWanaZ8BJlVAwaXrmcbYnxM-1701505132-0-AfGaVIfTA2kJlmleK14o722vbVwpZ+4UxFznsWv+guvzXSpD9KVEy/+pNzvEZUx88yaEShJwGt3/EVjhHirX/ASustKxg47V/aXRd2XIO2QN; zglobalid=61f94830-1990-4130-b222-d9d0e09c7825.57da9ea9581c.656ae86b; ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38; zva=100000000%3Bvid%3AZWroa0x_F1KEeGeU'
        }
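A hedged sketch of what `add_params` above returns for a remote, full-time search; the `SimpleNamespace` stands in for the real `ScraperInput` model purely for illustration, and the values are example data.

from types import SimpleNamespace

# Stand-in object for ScraperInput, illustration only.
example_input = SimpleNamespace(
    search_term="data analyst",
    location="Austin, TX",
    job_type=SimpleNamespace(value="fulltime"),
    is_remote=True,
    distance=25,
)

print(ZipRecruiterScraper.add_params(example_input))
# {'search': 'data analyst', 'location': 'Austin, TX', 'form': 'jobs-landing',
#  'refine_by_employment': 'employment_type:employment_type:full_time',
#  'refine_by_location_type': 'only_remote', 'radius': 25}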
@ -1,14 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_all():
    result = scrape_jobs(
        site_name=["linkedin", "indeed", "zip_recruiter", "glassdoor"],
        search_term="software engineer",
        results_wanted=5,
    )

    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
@ -1,11 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_glassdoor():
    result = scrape_jobs(
        site_name="glassdoor", search_term="software engineer", country_indeed="USA"
    )
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
@ -1,11 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_indeed():
    result = scrape_jobs(
        site_name="indeed", search_term="software engineer", country_indeed="usa"
    )
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
@ -1,12 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_linkedin():
    result = scrape_jobs(
        site_name="linkedin",
        search_term="software engineer",
    )
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
@ -1,13 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_ziprecruiter():
    result = scrape_jobs(
        site_name="zip_recruiter",
        search_term="software engineer",
    )

    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"