mirror of https://github.com/Bunsly/JobSpy.git
synced 2026-03-04 19:44:30 -08:00
Compare commits
79 Commits
| SHA1 |
|---|
| 8e2ab277da |
| ce3bd84ee5 |
| 1ccf2290fe |
| ec2eefc58a |
| 13c7694474 |
| bbe46fe3f4 |
| b97c73ffd6 |
| 5b3627b244 |
| 2ec3b04777 |
| 89a5264391 |
| a7ad616567 |
| 53bc33a43a |
| 22870438c7 |
| aeb93b99f5 |
| a5916edcdd |
| 33d442bf1e |
| 6587e464fa |
| eed7fca300 |
| dfb8c18c51 |
| 81f70ff8a5 |
| cc9e7866b7 |
| a2c8fe046e |
| 2b7fea40a5 |
| d37f86e1b9 |
| 0302ab14f5 |
| 3f2b582445 |
| 93223b6a38 |
| e3fc222eb5 |
| b303b3f841 |
| 1a0c75f323 |
| e2f6885d61 |
| 8d65d1b652 |
| 216d3fd39f |
| d3bfdc0a6e |
| ba5ed803ca |
| ff1eb0f7b0 |
| f2cc74b7f2 |
| 5e71866630 |
| 4e67c6e5a3 |
| caf655525a |
| 90fa4a4c4f |
| e5353e604d |
| 628f4dee9c |
| 2e59ab03e3 |
| 008ca61e12 |
| 8fc4c3bf90 |
| bff39a2625 |
| c676050dc0 |
| 37976f7ec2 |
| 9fb2fdd80f |
| af07c1ecbd |
| 286b9e1256 |
| 162dd40b0f |
| 558e352939 |
| efad1a1b7d |
| eaa481c2f4 |
| b914aa6449 |
| 6adbfb8b29 |
| a3b9dd50ff |
| d3ba3a4878 |
| f524789d74 |
| f3890d4830 |
| 60c9728691 |
| f79d975e5f |
| d6368f909b |
| 6fcf7f666e |
| 4406f9350f |
| ca5155f234 |
| 822a55783e |
| 59f739018a |
| a37e7f235e |
| 690739e858 |
| 43eb2fe0e8 |
| e50227bba6 |
| 45c2d76e15 |
| fd883178be |
| 70e2218c67 |
| d6947ecdd7 |
| 5191658562 |
42 .github/workflows/publish-to-pypi.yml (vendored)
@@ -7,27 +7,27 @@ jobs:
     runs-on: ubuntu-latest

     steps:
       - uses: actions/checkout@v3
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
           python-version: "3.10"

       - name: Install poetry
         run: >-
           python3 -m
           pip install
           poetry
           --user

       - name: Build distribution 📦
         run: >-
           python3 -m
           poetry
           build

       - name: Publish distribution 📦 to PyPI
         if: startsWith(github.ref, 'refs/tags')
         uses: pypa/gh-action-pypi-publish@release/v1
         with:
           password: ${{ secrets.PYPI_API_TOKEN }}
10 .gitignore (vendored)
@@ -1,10 +1,10 @@
-/.idea
-**/.DS_Store
-/venv/
-/ven/
-/.ipynb_checkpoints/
-dist
+/.idea
+**/__pycache__/
+**/.pytest_cache/
+/.ipynb_checkpoints/
+**/output/
+**/.DS_Store
+*.pyc
+.env
+dist
1304 JobSpy_Demo.ipynb (file diff suppressed because it is too large)
204 README.md
@@ -1,53 +1,50 @@
 <img src="https://github.com/cullenwatson/JobSpy/assets/78247585/ae185b7e-e444-4712-8bb9-fa97f53e896b" width="400">

 **JobSpy** is a simple, yet comprehensive, job scraping library.

 **Not technical?** Try out the web scraping tool on our site at [usejobspy.com](https://usejobspy.com).

 *Looking to build a data-focused software product?* **[Book a call](https://bunsly.com/)** *to work with us.*

 ## Features

-- Scrapes job postings from **LinkedIn**, **Indeed** & **ZipRecruiter** simultaneously
+- Scrapes job postings from **LinkedIn**, **Indeed**, **Glassdoor**, & **ZipRecruiter** simultaneously
 - Aggregates the job postings in a Pandas DataFrame
+- Proxy support (HTTP/S, SOCKS)

 [Video Guide for JobSpy](https://www.youtube.com/watch?v=RuP1HrAZnxs&pp=ygUgam9icyBzY3JhcGVyIGJvdCBsaW5rZWRpbiBpbmRlZWQ%3D) - Updated for release v1.1.3

 ![jobspy](https://github.com/cullenwatson/JobSpy/assets/78247585/ec7ef355-05f6-4fd3-8161-a817e31c5c57)

 ### Installation
-`pip install python-jobspy`
-
-_Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_
+```
+pip install python-jobspy
+```
+
+_Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_

 ### Usage

 ```python
 from jobspy import scrape_jobs
 import pandas as pd

-jobs: pd.DataFrame = scrape_jobs(
-    site_name=["indeed", "linkedin", "zip_recruiter"],
+jobs = scrape_jobs(
+    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
     search_term="software engineer",
     location="Dallas, TX",
     results_wanted=10,

-    # country: only needed for indeed
-    country='USA'
+    country_indeed='USA'  # only needed for indeed / glassdoor
 )

-if jobs.empty:
-    print("No jobs found.")
-else:
-    #1 print
-    pd.set_option('display.max_columns', None)
-    pd.set_option('display.max_rows', None)
-    pd.set_option('display.width', None)
-    pd.set_option('display.max_colwidth', 50)  # set to 0 to see full job url / desc
-    print(jobs)
-
-    #2 display in Jupyter Notebook
-    #display(jobs)
-
-    #3 output to .csv
-    #jobs.to_csv('jobs.csv', index=False)
+print(f"Found {len(jobs)} jobs")
+print(jobs.head())
+jobs.to_csv("jobs.csv", index=False)  # or jobs.to_excel("jobs.xlsx", index=False)
 ```

 ### Output

 ```
 SITE           TITLE                         COMPANY_NAME      CITY          STATE  JOB_TYPE  INTERVAL  MIN_AMOUNT  MAX_AMOUNT  JOB_URL                                            DESCRIPTION
 indeed         Software Engineer             AMERICAN SYSTEMS  Arlington     VA     None      yearly    200000      150000      https://www.indeed.com/viewjob?jk=5e409e577046...  THIS POSITION COMES WITH A 10K SIGNING BONUS!...
@@ -57,139 +54,116 @@ linkedin       Full-Stack Software Engineer  Rain              New York
 zip_recruiter  Software Engineer - New Grad  ZipRecruiter      Santa Monica  CA     fulltime  yearly    130000      150000      https://www.ziprecruiter.com/jobs/ziprecruiter...  We offer a hybrid work environment. Most US-ba...
 zip_recruiter  Software Developer            TEKsystems        Phoenix       AZ     fulltime  hourly    65          75          https://www.ziprecruiter.com/jobs/teksystems-0...  Top Skills' Details• 6 years of Java developme...
 ```

 ### Parameters for `scrape_jobs()`

 ```plaintext
 Required
-├── site_type (List[enum]): linkedin, zip_recruiter, indeed
+├── site_type (List[enum]): linkedin, zip_recruiter, indeed, glassdoor
 └── search_term (str)

 Optional
 ├── location (str)
 ├── distance (int): in miles
 ├── job_type (enum): fulltime, parttime, internship, contract
 ├── proxy (str): in format 'http://user:pass@host:port' or [https, socks]
 ├── is_remote (bool)
 ├── full_description (bool): fetches full description for Indeed / LinkedIn (much slower)
 ├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
-├── easy_apply (bool): filters for jobs on LinkedIn that have the 'Easy Apply' option
-├── country (enum): uses the corresponding subdomain on Indeed (e.g. Canada on Indeed is ca.indeed.com)
+├── easy_apply (bool): filters for jobs that are hosted on the job board site
+├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
 ├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
 ```
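A quick sketch of the optional filters in action (parameter names as listed above; the search values and the commented proxy URL are illustrative):

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed"],
    search_term="data engineer",
    location="Austin, TX",
    distance=25,           # miles
    job_type="fulltime",
    is_remote=False,
    results_wanted=20,
    offset=20,             # skip the first 20 results
    country_indeed="USA",
    # proxy="http://user:pass@host:port",  # optional, in the documented format
)
```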

 ### JobPost Schema

 ```plaintext
 JobPost
 ├── title (str)
-├── company_name (str)
+├── company (str)
 ├── company_url (str)
 ├── job_url (str)
 ├── location (object)
 │   ├── country (str)
 │   ├── city (str)
 │   ├── state (str)
 ├── description (str)
-├── job_type (enum)
+├── job_type (str): fulltime, parttime, internship, contract
 ├── compensation (object)
-│   ├── interval (CompensationInterval): yearly, monthly, weekly, daily, hourly
+│   ├── interval (str): yearly, monthly, weekly, daily, hourly
 │   ├── min_amount (int)
 │   ├── max_amount (int)
-│   └── currency (str)
-└── date_posted (datetime)
+│   └── currency (enum)
+└── date_posted (date)
+└── emails (str)
+└── num_urgent_words (int)
+└── is_remote (bool)
 ```
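In the returned DataFrame the nested fields above are flattened into columns, so a row can be read like this (column names as produced by `scrape_jobs`; the values depend on your search):

```python
row = jobs.iloc[0]  # first JobPost as a flattened DataFrame row
print(row["title"], row["company"], row["location"])
# compensation is split into separate columns
print(row["interval"], row["min_amount"], row["max_amount"], row["currency"])
```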

 ### Exceptions

 The following exceptions may be raised when using JobSpy:

 * `LinkedInException`
 * `IndeedException`
 * `ZipRecruiterException`
 * `GlassdoorException`
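A minimal sketch of catching them (the classes are defined in `src/jobspy/scrapers/exceptions.py` further down in this diff; the import path is assumed from the package layout):

```python
from jobspy import scrape_jobs
from jobspy.scrapers.exceptions import (
    LinkedInException, IndeedException, ZipRecruiterException, GlassdoorException,
)

try:
    jobs = scrape_jobs(site_name=["linkedin"], search_term="engineer")
except LinkedInException as e:
    print(f"LinkedIn blocked or errored: {e}")
except (IndeedException, ZipRecruiterException, GlassdoorException) as e:
    print(f"Another site failed: {e}")
```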
 ## Supported Countries for Job Searching

 ### **LinkedIn**

-LinkedIn searches globally. Use the `location` parameter
+LinkedIn searches globally & uses only the `location` parameter. You can only fetch 1000 jobs max from the LinkedIn endpoint we're using.

 ### **ZipRecruiter**

-ZipRecruiter searches for jobs in US/Canada. Use the `location` parameter
+ZipRecruiter searches for jobs in **US/Canada** & uses only the `location` parameter.

 ### **Indeed / Glassdoor**

 Indeed & Glassdoor support most countries, but the `country_indeed` parameter is required. Additionally, use the `location` parameter to narrow down the location, e.g. city & state if necessary.

 You can specify the following countries when searching on Indeed (use the exact name, * indicates support for Glassdoor):

 |                      |              |            |                |
 |----------------------|--------------|------------|----------------|
 | Argentina            | Australia*   | Austria*   | Bahrain        |
 | Belgium*             | Brazil*      | Canada*    | Chile          |
 | China                | Colombia     | Costa Rica | Czech Republic |
 | Denmark              | Ecuador      | Egypt      | Finland        |
 | France*              | Germany*     | Greece     | Hong Kong*     |
 | Hungary              | India*       | Indonesia  | Ireland*       |
 | Israel               | Italy*       | Japan      | Kuwait         |
 | Luxembourg           | Malaysia     | Mexico*    | Morocco        |
 | Netherlands*         | New Zealand* | Nigeria    | Norway         |
 | Oman                 | Pakistan     | Panama     | Peru           |
 | Philippines          | Poland       | Portugal   | Qatar          |
 | Romania              | Saudi Arabia | Singapore* | South Africa   |
 | South Korea          | Spain*       | Sweden     | Switzerland*   |
 | Taiwan               | Thailand     | Turkey     | Ukraine        |
 | United Arab Emirates | UK*          | USA*       | Uruguay        |
 | Venezuela            | Vietnam      |            |                |
-### **Indeed**
-For Indeed, use the `location` param along with the `country` param.
-
-You can specify the following countries when searching on Indeed (use the exact name):
-
-- Argentina
-- Australia
-- Austria
-- Bahrain
-- Belgium
-- Brazil
-- Canada
-- Chile
-- China
-- Colombia
-- Costa Rica
-- Czech Republic
-- Denmark
-- Ecuador
-- Egypt
-- Finland
-- France
-- Germany
-- Greece
-- Hong Kong
-- Hungary
-- India
-- Indonesia
-- Ireland
-- Israel
-- Italy
-- Japan
-- Kuwait
-- Luxembourg
-- Malaysia
-- Mexico
-- Morocco
-- Netherlands
-- New Zealand
-- Nigeria
-- Norway
-- Oman
-- Pakistan
-- Panama
-- Peru
-- Philippines
-- Poland
-- Portugal
-- Qatar
-- Romania
-- Saudi Arabia
-- Singapore
-- South Africa
-- South Korea
-- Spain
-- Sweden
-- Switzerland
-- Taiwan
-- Thailand
-- Turkey
-- Ukraine
-- United Arab Emirates
-- UK
-- USA
-- Uruguay
-- Venezuela
-- Vietnam

+Glassdoor can only fetch 900 jobs from the endpoint we're using on a given search.
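For example, an international Indeed / Glassdoor search using the table above (a sketch; the search values are illustrative):

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed"],
    search_term="engineer",
    location="Berlin",         # narrows the search within the country
    country_indeed="Germany",  # exact name from the table above
    results_wanted=10,
)
```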
 ## Frequently Asked Questions

 ---

 **Q: Encountering issues with your queries?**
-**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems persist, [submit an issue](#).
+**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).

 ---

 **Q: Received a response code 429?**
-**A:** This indicates that you have been blocked by the job board site for sending too many requests. Currently, **ZipRecruiter** is particularly aggressive with blocking. We recommend:
+**A:** This indicates that you have been blocked by the job board site for sending too many requests. All of the job board sites are aggressive with blocking. We recommend:

-- Waiting a few seconds between requests.
-- Trying a VPN to change your IP address.
-
-**Note:** Proxy support is in development and coming soon!
+- Waiting some time between scrapes (site-dependent).
+- Trying a VPN or proxy to change your IP address.

 ---
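A sketch of the recommended backoff between scrapes (the delay value is illustrative; tune it per site):

```python
import time
from jobspy import scrape_jobs

for term in ["python developer", "data analyst"]:
    jobs = scrape_jobs(
        site_name=["zip_recruiter"],
        search_term=term,
        results_wanted=10,
        # proxy="http://user:pass@host:port",  # optional: rotate your IP
    )
    print(f"{term}: {len(jobs)} jobs")
    time.sleep(60)  # wait between scrapes to avoid 429s (site-dependent)
```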
30 examples/JobSpy_AllSites.py (Normal file)
@@ -0,0 +1,30 @@
from jobspy import scrape_jobs
import pandas as pd

jobs: pd.DataFrame = scrape_jobs(
    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=25,  # be wary: the higher it is, the more likely you'll get blocked (a rotating proxy can help)
    country_indeed="USA",
    # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
)

# formatting for pandas
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", 50)  # set to 0 to see full job url / desc

# 1: output to console
print(jobs)

# 2: output to .csv
jobs.to_csv("./jobs.csv", index=False)
print("outputted to jobs.csv")

# 3: output to .xlsx
# jobs.to_excel("jobs.xlsx", index=False)

# 4: display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
# display(jobs)
167 examples/JobSpy_Demo.ipynb (Normal file)
@@ -0,0 +1,167 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00a94b47-f47b-420f-ba7e-714ef219c006",
   "metadata": {},
   "outputs": [],
   "source": [
    "from jobspy import scrape_jobs\n",
    "import pandas as pd\n",
    "from IPython.display import display, HTML"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9f773e6c-d9fc-42cc-b0ef-63b739e78435",
   "metadata": {},
   "outputs": [],
   "source": [
    "pd.set_option('display.max_columns', None)\n",
    "pd.set_option('display.max_rows', None)\n",
    "pd.set_option('display.width', None)\n",
    "pd.set_option('display.max_colwidth', 50)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1253c1f8-9437-492e-9dd3-e7fe51099420",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 1 (no hyperlinks, USA)\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"linkedin\"],\n",
    "    location='san francisco',\n",
    "    search_term=\"engineer\",\n",
    "    results_wanted=5,\n",
    "\n",
    "    # use if you want to use a proxy\n",
    "    # proxy=\"socks5://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
    "    proxy=\"http://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
    "    #proxy=\"https://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
    ")\n",
    "display(jobs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6a581b2d-f7da-4fac-868d-9efe143ee20a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 2 - remote USA & hyperlinks\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"linkedin\", \"zip_recruiter\", \"indeed\"],\n",
    "    # location='san francisco',\n",
    "    search_term=\"software engineer\",\n",
    "    country_indeed=\"USA\",\n",
    "    hyperlinks=True,\n",
    "    is_remote=True,\n",
    "    results_wanted=5, \n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe8289bc-5b64-4202-9a64-7c117c83fd9a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# use if hyperlinks=True\n",
    "html = jobs.to_html(escape=False)\n",
    "# change max-width: 200px to show more or less of the content\n",
    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
    "display(HTML(truncate_width))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "951c2fe1-52ff-407d-8bb1-068049b36777",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 3 - with hyperlinks, international - linkedin (no zip_recruiter)\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"linkedin\"],\n",
    "    location='berlin',\n",
    "    search_term=\"engineer\",\n",
    "    hyperlinks=True,\n",
    "    results_wanted=5,\n",
    "    easy_apply=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1e37a521-caef-441c-8fc2-2eb5b2e7da62",
   "metadata": {},
   "outputs": [],
   "source": [
    "# use if hyperlinks=True\n",
    "html = jobs.to_html(escape=False)\n",
    "# change max-width: 200px to show more or less of the content\n",
    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
    "display(HTML(truncate_width))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0650e608-0b58-4bf5-ae86-68348035b16a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 4 - international indeed (no zip_recruiter)\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"indeed\"],\n",
    "    search_term=\"engineer\",\n",
    "    country_indeed = \"China\",\n",
    "    hyperlinks=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "40913ac8-3f8a-4d7e-ac47-afb88316432b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# use if hyperlinks=True\n",
    "html = jobs.to_html(escape=False)\n",
    "# change max-width: 200px to show more or less of the content\n",
    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
    "display(HTML(truncate_width))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
77 examples/JobSpy_LongScrape.py (Normal file)
@@ -0,0 +1,77 @@
from jobspy import scrape_jobs
import pandas as pd
import os
import time

# creates a new csv filename if jobs.csv already exists
csv_filename = "jobs.csv"
counter = 1
while os.path.exists(csv_filename):
    csv_filename = f"jobs_{counter}.csv"
    counter += 1

# results wanted and offset
results_wanted = 1000
offset = 0

all_jobs = []

# max retries
max_retries = 3

# number of results at each iteration
results_in_each_iteration = 30

while len(all_jobs) < results_wanted:
    retry_count = 0
    while retry_count < max_retries:
        print("Doing from", offset, "to", offset + results_in_each_iteration, "jobs")
        try:
            jobs = scrape_jobs(
                site_name=["indeed"],
                search_term="software engineer",
                # New York, NY
                # Dallas, TX
                # Los Angeles, CA
                location="Los Angeles, CA",
                results_wanted=min(results_in_each_iteration, results_wanted - len(all_jobs)),
                country_indeed="USA",
                offset=offset,
                # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
            )

            # Add the scraped jobs to the list
            all_jobs.extend(jobs.to_dict('records'))

            # Increment the offset for the next page of results
            offset += results_in_each_iteration

            # Add a delay to avoid rate limiting (you can adjust the delay time as needed)
            print(f"Scraped {len(all_jobs)} jobs")
            print("Sleeping secs", 100 * (retry_count + 1))
            time.sleep(100 * (retry_count + 1))  # sleep between requests; scales with the retry count

            break  # Break out of the retry loop if successful
        except Exception as e:
            print(f"Error: {e}")
            retry_count += 1
            print("Sleeping secs before retry", 100 * (retry_count + 1))
            time.sleep(100 * (retry_count + 1))
            if retry_count >= max_retries:
                print("Max retries reached. Exiting.")
                break

# DataFrame from the collected job data
jobs_df = pd.DataFrame(all_jobs)

# Formatting
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", 50)

print(jobs_df)

jobs_df.to_csv(csv_filename, index=False)
print(f"Outputted to {csv_filename}")
65 poetry.lock (generated)
@@ -1243,36 +1243,39 @@ test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"

 [[package]]
 name = "numpy"
-version = "1.25.2"
+version = "1.24.2"
 description = "Fundamental package for array computing in Python"
 optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.8"
 files = [
-    {file = "numpy-1.25.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:db3ccc4e37a6873045580d413fe79b68e47a681af8db2e046f1dacfa11f86eb3"},
-    {file = "numpy-1.25.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:90319e4f002795ccfc9050110bbbaa16c944b1c37c0baeea43c5fb881693ae1f"},
-    {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfe4a913e29b418d096e696ddd422d8a5d13ffba4ea91f9f60440a3b759b0187"},
-    {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f08f2e037bba04e707eebf4bc934f1972a315c883a9e0ebfa8a7756eabf9e357"},
-    {file = "numpy-1.25.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bec1e7213c7cb00d67093247f8c4db156fd03075f49876957dca4711306d39c9"},
-    {file = "numpy-1.25.2-cp310-cp310-win32.whl", hash = "sha256:7dc869c0c75988e1c693d0e2d5b26034644399dd929bc049db55395b1379e044"},
-    {file = "numpy-1.25.2-cp310-cp310-win_amd64.whl", hash = "sha256:834b386f2b8210dca38c71a6e0f4fd6922f7d3fcff935dbe3a570945acb1b545"},
-    {file = "numpy-1.25.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5462d19336db4560041517dbb7759c21d181a67cb01b36ca109b2ae37d32418"},
-    {file = "numpy-1.25.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c5652ea24d33585ea39eb6a6a15dac87a1206a692719ff45d53c5282e66d4a8f"},
-    {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d60fbae8e0019865fc4784745814cff1c421df5afee233db6d88ab4f14655a2"},
-    {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e7f0f7f6d0eee8364b9a6304c2845b9c491ac706048c7e8cf47b83123b8dbf"},
-    {file = "numpy-1.25.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bb33d5a1cf360304754913a350edda36d5b8c5331a8237268c48f91253c3a364"},
-    {file = "numpy-1.25.2-cp311-cp311-win32.whl", hash = "sha256:5883c06bb92f2e6c8181df7b39971a5fb436288db58b5a1c3967702d4278691d"},
-    {file = "numpy-1.25.2-cp311-cp311-win_amd64.whl", hash = "sha256:5c97325a0ba6f9d041feb9390924614b60b99209a71a69c876f71052521d42a4"},
-    {file = "numpy-1.25.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b79e513d7aac42ae918db3ad1341a015488530d0bb2a6abcbdd10a3a829ccfd3"},
-    {file = "numpy-1.25.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb942bfb6f84df5ce05dbf4b46673ffed0d3da59f13635ea9b926af3deb76926"},
-    {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e0746410e73384e70d286f93abf2520035250aad8c5714240b0492a7302fdca"},
-    {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7806500e4f5bdd04095e849265e55de20d8cc4b661b038957354327f6d9b295"},
-    {file = "numpy-1.25.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8b77775f4b7df768967a7c8b3567e309f617dd5e99aeb886fa14dc1a0791141f"},
-    {file = "numpy-1.25.2-cp39-cp39-win32.whl", hash = "sha256:2792d23d62ec51e50ce4d4b7d73de8f67a2fd3ea710dcbc8563a51a03fb07b01"},
-    {file = "numpy-1.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:76b4115d42a7dfc5d485d358728cdd8719be33cc5ec6ec08632a5d6fca2ed380"},
-    {file = "numpy-1.25.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1a1329e26f46230bf77b02cc19e900db9b52f398d6722ca853349a782d4cff55"},
-    {file = "numpy-1.25.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3abc71e8b6edba80a01a52e66d83c5d14433cbcd26a40c329ec7ed09f37901"},
-    {file = "numpy-1.25.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1b9735c27cea5d995496f46a8b1cd7b408b3f34b6d50459d9ac8fe3a20cc17bf"},
-    {file = "numpy-1.25.2.tar.gz", hash = "sha256:fd608e19c8d7c55021dffd43bfe5492fab8cc105cc8986f813f8c3c048b38760"},
+    {file = "numpy-1.24.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eef70b4fc1e872ebddc38cddacc87c19a3709c0e3e5d20bf3954c147b1dd941d"},
+    {file = "numpy-1.24.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d2859428712785e8a8b7d2b3ef0a1d1565892367b32f915c4a4df44d0e64f5"},
+    {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6524630f71631be2dabe0c541e7675db82651eb998496bbe16bc4f77f0772253"},
+    {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a51725a815a6188c662fb66fb32077709a9ca38053f0274640293a14fdd22978"},
+    {file = "numpy-1.24.2-cp310-cp310-win32.whl", hash = "sha256:2620e8592136e073bd12ee4536149380695fbe9ebeae845b81237f986479ffc9"},
+    {file = "numpy-1.24.2-cp310-cp310-win_amd64.whl", hash = "sha256:97cf27e51fa078078c649a51d7ade3c92d9e709ba2bfb97493007103c741f1d0"},
+    {file = "numpy-1.24.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7de8fdde0003f4294655aa5d5f0a89c26b9f22c0a58790c38fae1ed392d44a5a"},
+    {file = "numpy-1.24.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4173bde9fa2a005c2c6e2ea8ac1618e2ed2c1c6ec8a7657237854d42094123a0"},
+    {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cecaed30dc14123020f77b03601559fff3e6cd0c048f8b5289f4eeabb0eb281"},
+    {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a23f8440561a633204a67fb44617ce2a299beecf3295f0d13c495518908e910"},
+    {file = "numpy-1.24.2-cp311-cp311-win32.whl", hash = "sha256:e428c4fbfa085f947b536706a2fc349245d7baa8334f0c5723c56a10595f9b95"},
+    {file = "numpy-1.24.2-cp311-cp311-win_amd64.whl", hash = "sha256:557d42778a6869c2162deb40ad82612645e21d79e11c1dc62c6e82a2220ffb04"},
+    {file = "numpy-1.24.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d0a2db9d20117bf523dde15858398e7c0858aadca7c0f088ac0d6edd360e9ad2"},
+    {file = "numpy-1.24.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c72a6b2f4af1adfe193f7beb91ddf708ff867a3f977ef2ec53c0ffb8283ab9f5"},
+    {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29e6bd0ec49a44d7690ecb623a8eac5ab8a923bce0bea6293953992edf3a76a"},
+    {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eabd64ddb96a1239791da78fa5f4e1693ae2dadc82a76bc76a14cbb2b966e96"},
+    {file = "numpy-1.24.2-cp38-cp38-win32.whl", hash = "sha256:e3ab5d32784e843fc0dd3ab6dcafc67ef806e6b6828dc6af2f689be0eb4d781d"},
+    {file = "numpy-1.24.2-cp38-cp38-win_amd64.whl", hash = "sha256:76807b4063f0002c8532cfeac47a3068a69561e9c8715efdad3c642eb27c0756"},
+    {file = "numpy-1.24.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4199e7cfc307a778f72d293372736223e39ec9ac096ff0a2e64853b866a8e18a"},
+    {file = "numpy-1.24.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:adbdce121896fd3a17a77ab0b0b5eedf05a9834a18699db6829a64e1dfccca7f"},
+    {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889b2cc88b837d86eda1b17008ebeb679d82875022200c6e8e4ce6cf549b7acb"},
+    {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64bb98ac59b3ea3bf74b02f13836eb2e24e48e0ab0145bbda646295769bd780"},
+    {file = "numpy-1.24.2-cp39-cp39-win32.whl", hash = "sha256:63e45511ee4d9d976637d11e6c9864eae50e12dc9598f531c035265991910468"},
+    {file = "numpy-1.24.2-cp39-cp39-win_amd64.whl", hash = "sha256:a77d3e1163a7770164404607b7ba3967fb49b24782a6ef85d9b5f54126cc39e5"},
+    {file = "numpy-1.24.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92011118955724465fb6853def593cf397b4a1367495e0b59a7e69d40c4eb71d"},
+    {file = "numpy-1.24.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9006288bcf4895917d02583cf3411f98631275bc67cce355a7f39f8c14338fa"},
+    {file = "numpy-1.24.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:150947adbdfeceec4e5926d956a06865c1c690f2fd902efede4ca6fe2e657c3f"},
+    {file = "numpy-1.24.2.tar.gz", hash = "sha256:003a9f530e880cb2cd177cba1af7220b9aa42def9c4afc2a2fc3ee6be7eb2b22"},
 ]

 [[package]]
@@ -2257,13 +2260,13 @@ test = ["flake8", "isort", "pytest"]

 [[package]]
 name = "tls-client"
-version = "0.2.1"
+version = "1.0"
 description = "Advanced Python HTTP Client."
 optional = false
 python-versions = "*"
 files = [
-    {file = "tls_client-0.2.1-py3-none-any.whl", hash = "sha256:124a710952b979d5e20b4e2b7879b7958d6e48a259d0f5b83101055eb173f0bd"},
-    {file = "tls_client-0.2.1.tar.gz", hash = "sha256:473fb4c671d9d4ca6b818548ab6e955640dd589767bfce520830c5618c2f2e2b"},
+    {file = "tls_client-1.0-py3-none-any.whl", hash = "sha256:f1183f5e18cb31914bd62d11b350a33ea0293ea80fb91d69a3072821dece3e66"},
+    {file = "tls_client-1.0.tar.gz", hash = "sha256:7f6de48ad4a0ef69b72682c76ce604155971e07b4bfb2148a36276194ae3e7a0"},
 ]

 [[package]]
@@ -2432,4 +2435,4 @@ files = [
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "0c50057af9ebbbe5c124c81758b41f05c05636739c3d1747e1bac74e75a046cb"
+content-hash = "404a77d78066cbb2ef71015562baf44aa11d12aac29a191c1ccc7758bfda598a"
pyproject.toml
@@ -1,8 +1,9 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.0"
-description = "Job scraper for LinkedIn, Indeed & ZipRecruiter"
-authors = ["Zachary Hampton <zachary@zacharysproducts.com>", "Cullen Watson <cullen@cullen.ai>"]
+version = "1.1.40"
+description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
+authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"
 readme = "README.md"

 packages = [
@@ -12,9 +13,10 @@ packages = [
 [tool.poetry.dependencies]
 python = "^3.10"
 requests = "^2.31.0"
-tls-client = "^0.2.1"
+tls-client = "*"
 beautifulsoup4 = "^4.12.2"
 pandas = "^2.1.0"
+NUMPY = "1.24.2"
 pydantic = "^2.3.0"
src/jobspy/__init__.py
@@ -1,17 +1,26 @@
 import pandas as pd
-from typing import List, Tuple
+import concurrent.futures
+from concurrent.futures import ThreadPoolExecutor
+from typing import Tuple, Optional

 from .jobs import JobType, Location
 from .scrapers.indeed import IndeedScraper
 from .scrapers.ziprecruiter import ZipRecruiterScraper
+from .scrapers.glassdoor import GlassdoorScraper
 from .scrapers.linkedin import LinkedInScraper
 from .scrapers import ScraperInput, Site, JobResponse, Country
+
+from .scrapers.exceptions import (
+    LinkedInException,
+    IndeedException,
+    ZipRecruiterException,
+    GlassdoorException,
+)

 SCRAPER_MAPPING = {
     Site.LINKEDIN: LinkedInScraper,
     Site.INDEED: IndeedScraper,
     Site.ZIP_RECRUITER: ZipRecruiterScraper,
+    Site.GLASSDOOR: GlassdoorScraper,
 }

@@ -20,27 +29,43 @@ def _map_str_to_site(site_name: str) -> Site:

 def scrape_jobs(
-    site_name: str | Site | List[Site],
+    site_name: str | list[str] | Site | list[Site],
     search_term: str,
     location: str = "",
     distance: int = None,
     is_remote: bool = False,
-    job_type: JobType = None,
+    job_type: str = None,
     easy_apply: bool = False,  # linkedin
     results_wanted: int = 15,
-    country: str = "usa",
+    country_indeed: str = "usa",
+    hyperlinks: bool = False,
+    proxy: Optional[str] = None,
+    full_description: Optional[bool] = False,
+    offset: Optional[int] = 0,
 ) -> pd.DataFrame:
     """
-    Asynchronously scrapes job data from multiple job sites.
+    Simultaneously scrapes job data from multiple job sites.
     :return: results_wanted: pandas dataframe containing job data
     """

+    def get_enum_from_value(value_str):
+        for job_type in JobType:
+            if value_str in job_type.value:
+                return job_type
+        raise Exception(f"Invalid job type: {value_str}")
+
+    job_type = get_enum_from_value(job_type) if job_type else None
+
     if type(site_name) == str:
-        site_name = _map_str_to_site(site_name)
+        site_type = [_map_str_to_site(site_name)]
+    else:  #: if type(site_name) == list
+        site_type = [
+            _map_str_to_site(site) if type(site) == str else site_name
+            for site in site_name
+        ]

-    country_enum = Country.from_string(country)
+    country_enum = Country.from_string(country_indeed)

-    site_type = [site_name] if type(site_name) == Site else site_name
     scraper_input = ScraperInput(
         site_type=site_type,
         country=country_enum,
@@ -50,72 +75,112 @@ def scrape_jobs(
         is_remote=is_remote,
         job_type=job_type,
         easy_apply=easy_apply,
+        full_description=full_description,
         results_wanted=results_wanted,
+        offset=offset,
     )

     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
         scraper_class = SCRAPER_MAPPING[site]
-        scraper = scraper_class()
-        scraped_data: JobResponse = scraper.scrape(scraper_input)
+        scraper = scraper_class(proxy=proxy)
+
+        try:
+            scraped_data: JobResponse = scraper.scrape(scraper_input)
+        except (LinkedInException, IndeedException, ZipRecruiterException) as lie:
+            raise lie
+        except Exception as e:
+            if site == Site.LINKEDIN:
+                raise LinkedInException(str(e))
+            if site == Site.INDEED:
+                raise IndeedException(str(e))
+            if site == Site.ZIP_RECRUITER:
+                raise ZipRecruiterException(str(e))
+            if site == Site.GLASSDOOR:
+                raise GlassdoorException(str(e))
+            else:
+                raise e
         return site.value, scraped_data

-    results = {}
-    for site in scraper_input.site_type:
-        site_value, scraped_data = scrape_site(site)
-        results[site_value] = scraped_data
+    site_to_jobs_dict = {}

-    dfs = []
+    def worker(site):
+        site_val, scraped_info = scrape_site(site)
+        return site_val, scraped_info

-    for site, job_response in results.items():
+    with ThreadPoolExecutor() as executor:
+        future_to_site = {
+            executor.submit(worker, site): site for site in scraper_input.site_type
+        }
+
+        for future in concurrent.futures.as_completed(future_to_site):
+            site_value, scraped_data = future.result()
+            site_to_jobs_dict[site_value] = scraped_data
+
+    jobs_dfs: list[pd.DataFrame] = []
+
+    for site, job_response in site_to_jobs_dict.items():
         for job in job_response.jobs:
-            data = job.dict()
-            data["site"] = site
-            data["company"] = data["company_name"]
-            if data["job_type"]:
-                # Take the first value from the job type tuple
-                data["job_type"] = data["job_type"].value[0]
-            else:
-                data["job_type"] = None
-
-            data["location"] = Location(**data["location"]).display_location()
+            job_data = job.dict()
+            job_data[
+                "job_url_hyper"
+            ] = f'<a href="{job_data["job_url"]}">{job_data["job_url"]}</a>'
+            job_data["site"] = site
+            job_data["company"] = job_data["company_name"]
+            job_data["job_type"] = (
+                ", ".join(job_type.value[0] for job_type in job_data["job_type"])
+                if job_data["job_type"]
+                else None
+            )
+            job_data["emails"] = (
+                ", ".join(job_data["emails"]) if job_data["emails"] else None
+            )
+            if job_data["location"]:
+                job_data["location"] = Location(
+                    **job_data["location"]
+                ).display_location()

-            compensation_obj = data.get("compensation")
+            compensation_obj = job_data.get("compensation")
             if compensation_obj and isinstance(compensation_obj, dict):
-                data["interval"] = (
+                job_data["interval"] = (
                     compensation_obj.get("interval").value
                     if compensation_obj.get("interval")
                     else None
                 )
-                data["min_amount"] = compensation_obj.get("min_amount")
-                data["max_amount"] = compensation_obj.get("max_amount")
-                data["currency"] = compensation_obj.get("currency", "USD")
+                job_data["min_amount"] = compensation_obj.get("min_amount")
+                job_data["max_amount"] = compensation_obj.get("max_amount")
+                job_data["currency"] = compensation_obj.get("currency", "USD")
             else:
-                data["interval"] = None
-                data["min_amount"] = None
-                data["max_amount"] = None
-                data["currency"] = None
+                job_data["interval"] = None
+                job_data["min_amount"] = None
+                job_data["max_amount"] = None
+                job_data["currency"] = None

-            job_df = pd.DataFrame([data])
-            dfs.append(job_df)
+            job_df = pd.DataFrame([job_data])
+            jobs_dfs.append(job_df)

-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
-        desired_order = [
+    if jobs_dfs:
+        jobs_df = pd.concat(jobs_dfs, ignore_index=True)
+        desired_order: list[str] = [
+            "job_url_hyper" if hyperlinks else "job_url",
             "site",
             "title",
             "company",
+            "company_url",
             "location",
             "job_type",
             "date_posted",
             "interval",
             "min_amount",
             "max_amount",
             "currency",
-            "job_url",
+            "is_remote",
+            "num_urgent_words",
+            "benefits",
+            "emails",
             "description",
         ]
-        df = df[desired_order]
+        jobs_formatted_df = jobs_df[desired_order]
     else:
-        df = pd.DataFrame()
+        jobs_formatted_df = pd.DataFrame()

-    return df
+    return jobs_formatted_df
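The concurrency change above is the standard scatter-gather idiom; a minimal self-contained sketch of the same pattern (stand-in function and sites, not part of the module):

```python
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor

def fetch(site: str) -> tuple[str, int]:
    # stand-in for scrape_site(); returns (site, a fake job count)
    return site, len(site)

results = {}
with ThreadPoolExecutor() as executor:
    # submit one worker per site, then collect results as each finishes
    future_to_site = {executor.submit(fetch, s): s for s in ["indeed", "linkedin"]}
    for future in concurrent.futures.as_completed(future_to_site):
        site, count = future.result()  # re-raises any worker exception here
        results[site] = count
print(results)
```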
src/jobspy/jobs.py
@@ -1,8 +1,7 @@
-from typing import Union, Optional
+from typing import Optional
 from datetime import date
 from enum import Enum

-from pydantic import BaseModel, validator
+from pydantic import BaseModel

 class JobType(Enum):
@@ -37,10 +36,16 @@ class JobType(Enum):
         "повназайнятість",
         "toànthờigian",
     )
-    PART_TIME = ("parttime", "teilzeit")
+    PART_TIME = ("parttime", "teilzeit", "částečnýúvazek", "deltid")
     CONTRACT = ("contract", "contractor")
     TEMPORARY = ("temporary",)
-    INTERNSHIP = ("internship", "prácticas", "ojt(onthejobtraining)", "praktikum")
+    INTERNSHIP = (
+        "internship",
+        "prácticas",
+        "ojt(onthejobtraining)",
+        "praktikum",
+        "praktik",
+    )

     PER_DIEM = ("perdiem",)
     NIGHTS = ("nights",)
@@ -50,40 +55,46 @@ class JobType(Enum):

 class Country(Enum):
-    ARGENTINA = ("argentina", "ar")
-    AUSTRALIA = ("australia", "au")
-    AUSTRIA = ("austria", "at")
+    """
+    Gets the subdomain for Indeed and Glassdoor.
+    The second item in the tuple is the subdomain for Indeed.
+    The third item in the tuple is the subdomain (and tld if there's a ':' separator) for Glassdoor.
+    """
+
+    ARGENTINA = ("argentina", "ar", "com.ar")
+    AUSTRALIA = ("australia", "au", "com.au")
+    AUSTRIA = ("austria", "at", "at")
     BAHRAIN = ("bahrain", "bh")
-    BELGIUM = ("belgium", "be")
-    BRAZIL = ("brazil", "br")
-    CANADA = ("canada", "ca")
+    BELGIUM = ("belgium", "be", "fr:be")
+    BRAZIL = ("brazil", "br", "com.br")
+    CANADA = ("canada", "ca", "ca")
     CHILE = ("chile", "cl")
     CHINA = ("china", "cn")
     COLOMBIA = ("colombia", "co")
     COSTARICA = ("costa rica", "cr")
-    CZECHREPUBLIC = ("czech republic", "cz")
+    CZECHREPUBLIC = ("czech republic,czechia", "cz")
     DENMARK = ("denmark", "dk")
     ECUADOR = ("ecuador", "ec")
     EGYPT = ("egypt", "eg")
     FINLAND = ("finland", "fi")
-    FRANCE = ("france", "fr")
-    GERMANY = ("germany", "de")
+    FRANCE = ("france", "fr", "fr")
+    GERMANY = ("germany", "de", "de")
     GREECE = ("greece", "gr")
-    HONGKONG = ("hong kong", "hk")
+    HONGKONG = ("hong kong", "hk", "com.hk")
     HUNGARY = ("hungary", "hu")
-    INDIA = ("india", "in")
+    INDIA = ("india", "in", "co.in")
     INDONESIA = ("indonesia", "id")
-    IRELAND = ("ireland", "ie")
+    IRELAND = ("ireland", "ie", "ie")
     ISRAEL = ("israel", "il")
-    ITALY = ("italy", "it")
+    ITALY = ("italy", "it", "it")
     JAPAN = ("japan", "jp")
     KUWAIT = ("kuwait", "kw")
     LUXEMBOURG = ("luxembourg", "lu")
     MALAYSIA = ("malaysia", "malaysia")
-    MEXICO = ("mexico", "mx")
+    MEXICO = ("mexico", "mx", "com.mx")
     MOROCCO = ("morocco", "ma")
-    NETHERLANDS = ("netherlands", "nl")
-    NEWZEALAND = ("new zealand", "nz")
+    NETHERLANDS = ("netherlands", "nl", "nl")
+    NEWZEALAND = ("new zealand", "nz", "co.nz")
     NIGERIA = ("nigeria", "ng")
     NORWAY = ("norway", "no")
     OMAN = ("oman", "om")
@@ -96,19 +107,19 @@ class Country(Enum):
     QATAR = ("qatar", "qa")
     ROMANIA = ("romania", "ro")
     SAUDIARABIA = ("saudi arabia", "sa")
-    SINGAPORE = ("singapore", "sg")
+    SINGAPORE = ("singapore", "sg", "sg")
     SOUTHAFRICA = ("south africa", "za")
     SOUTHKOREA = ("south korea", "kr")
-    SPAIN = ("spain", "es")
+    SPAIN = ("spain", "es", "es")
     SWEDEN = ("sweden", "se")
-    SWITZERLAND = ("switzerland", "ch")
+    SWITZERLAND = ("switzerland", "ch", "de:ch")
     TAIWAN = ("taiwan", "tw")
     THAILAND = ("thailand", "th")
     TURKEY = ("turkey", "tr")
     UKRAINE = ("ukraine", "ua")
     UNITEDARABEMIRATES = ("united arab emirates", "ae")
-    UK = ("uk", "uk")
-    USA = ("usa", "www")
+    UK = ("uk,united kingdom", "uk", "co.uk")
+    USA = ("usa,us,united states", "www", "com")
     URUGUAY = ("uruguay", "uy")
     VENEZUELA = ("venezuela", "ve")
     VIETNAM = ("vietnam", "vn")
@@ -116,34 +127,43 @@ class Country(Enum):
     # internal for ziprecruiter
     US_CANADA = ("usa/ca", "www")

-    # internal for linkeind
+    # internal for linkedin
     WORLDWIDE = ("worldwide", "www")

-    def __new__(cls, country, domain):
-        obj = object.__new__(cls)
-        obj._value_ = country
-        obj.domain = domain
-        return obj
+    @property
+    def indeed_domain_value(self):
+        return self.value[1]

     @property
-    def domain_value(self):
-        return self.domain
+    def glassdoor_domain_value(self):
+        if len(self.value) == 3:
+            subdomain, _, domain = self.value[2].partition(":")
+            if subdomain and domain:
+                return f"{subdomain}.glassdoor.{domain}"
+            else:
+                return f"www.glassdoor.{self.value[2]}"
+        else:
+            raise Exception(f"Glassdoor is not available for {self.name}")

+    def get_url(self):
+        return f"https://{self.glassdoor_domain_value}/"

     @classmethod
     def from_string(cls, country_str: str):
         """Convert a string to the corresponding Country enum."""
         country_str = country_str.strip().lower()
         for country in cls:
-            if country.value == country_str:
+            country_names = country.value[0].split(',')
+            if country_str in country_names:
                 return country
         valid_countries = [country.value for country in cls]
         raise ValueError(
-            f"Invalid country string: '{country_str}'. Valid countries (only include this param for Indeed) are: {', '.join(valid_countries)}"
+            f"Invalid country string: '{country_str}'. Valid countries are: {', '.join([country[0] for country in valid_countries])}"
         )

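    # How the tuple scheme above resolves, in brief (an illustrative annotation,
    # not part of this module; values taken from the enum entries in this diff):
    #   Country.from_string("united kingdom")    -> Country.UK (aliases split on ',')
    #   Country.UK.indeed_domain_value           -> "uk"               (uk.indeed.com)
    #   Country.USA.glassdoor_domain_value       -> "www.glassdoor.com"
    #   Country.BELGIUM.glassdoor_domain_value   -> "fr.glassdoor.be"  ("fr:be" splits on ':')
    #   Country.CHILE.glassdoor_domain_value     -> raises, since the tuple has no third item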
 class Location(BaseModel):
-    country: Country = None
+    country: Country | None = None
     city: Optional[str] = None
     state: Optional[str] = None

@@ -154,10 +174,13 @@ class Location(BaseModel):
         if self.state:
             location_parts.append(self.state)
         if self.country and self.country not in (Country.US_CANADA, Country.WORLDWIDE):
-            if self.country.value in ("usa", "uk"):
-                location_parts.append(self.country.value.upper())
+            country_name = self.country.value[0]
+            if "," in country_name:
+                country_name = country_name.split(",")[0]
+            if country_name in ("usa", "uk"):
+                location_parts.append(country_name.upper())
             else:
-                location_parts.append(self.country.value.title())
+                location_parts.append(country_name.title())
         return ", ".join(location_parts)
@@ -168,11 +191,15 @@ class CompensationInterval(Enum):
     DAILY = "daily"
     HOURLY = "hourly"

+    @classmethod
+    def get_interval(cls, pay_period):
+        return cls[pay_period].value if pay_period in cls.__members__ else None

 class Compensation(BaseModel):
-    interval: CompensationInterval
-    min_amount: int = None
-    max_amount: int = None
+    interval: Optional[CompensationInterval] = None
+    min_amount: int | None = None
+    max_amount: int | None = None
     currency: Optional[str] = "USD"
@@ -182,29 +209,18 @@ class JobPost(BaseModel):
     job_url: str
     location: Optional[Location]

-    description: Optional[str] = None
-    job_type: Optional[JobType] = None
-    compensation: Optional[Compensation] = None
-    date_posted: Optional[date] = None
+    description: str | None = None
+    company_url: str | None = None
+
+    job_type: list[JobType] | None = None
+    compensation: Compensation | None = None
+    date_posted: date | None = None
+    benefits: str | None = None
+    emails: list[str] | None = None
+    num_urgent_words: int | None = None
+    is_remote: bool | None = None
+    # company_industry: str | None = None

 class JobResponse(BaseModel):
-    success: bool
-    error: str = None
-
-    total_results: Optional[int] = None
-
     jobs: list[JobPost] = []
-
-    returned_results: int = None
-
-    @validator("returned_results", pre=True, always=True)
-    def set_returned_results(cls, v, values):
-        jobs_list = values.get("jobs")
-
-        if v is None:
-            if jobs_list is not None:
-                return len(jobs_list)
-            else:
-                return 0
-        return v
src/jobspy/scrapers/__init__.py
@@ -2,15 +2,11 @@ from ..jobs import Enum, BaseModel, JobType, JobResponse, Country
 from typing import List, Optional, Any

-class StatusException(Exception):
-    def __init__(self, status_code: int):
-        self.status_code = status_code

 class Site(Enum):
     LINKEDIN = "linkedin"
     INDEED = "indeed"
     ZIP_RECRUITER = "zip_recruiter"
+    GLASSDOOR = "glassdoor"

 class ScraperInput(BaseModel):
@@ -23,21 +19,16 @@ class ScraperInput(BaseModel):
     is_remote: bool = False
     job_type: Optional[JobType] = None
     easy_apply: bool = None  # linkedin
+    full_description: bool = False
+    offset: int = 0

     results_wanted: int = 15

-class CommonResponse(BaseModel):
-    status: Optional[str]
-    error: Optional[str]
-    linkedin: Optional[Any] = None
-    indeed: Optional[Any] = None
-    zip_recruiter: Optional[Any] = None

 class Scraper:
-    def __init__(self, site: Site):
+    def __init__(self, site: Site, proxy: Optional[List[str]] = None):
         self.site = site
+        self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)

     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         ...
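The one-line lambda in `Scraper.__init__` just builds the requests-style proxies mapping; written out as an equivalent sketch:

```python
from typing import Optional

def build_proxies(proxy: Optional[str]) -> Optional[dict]:
    # same behavior as the lambda above: route both schemes through the
    # single proxy URL, or disable proxying when none is given
    if proxy:
        return {"http": proxy, "https": proxy}
    return None

assert build_proxies("http://user:pass@host:port") == {
    "http": "http://user:pass@host:port",
    "https": "http://user:pass@host:port",
}
assert build_proxies(None) is None
```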
26 src/jobspy/scrapers/exceptions.py (Normal file)
@@ -0,0 +1,26 @@
"""
jobspy.scrapers.exceptions
~~~~~~~~~~~~~~~~~~~

This module contains the set of Scrapers' exceptions.
"""


class LinkedInException(Exception):
    def __init__(self, message=None):
        super().__init__(message or "An error occurred with LinkedIn")


class IndeedException(Exception):
    def __init__(self, message=None):
        super().__init__(message or "An error occurred with Indeed")


class ZipRecruiterException(Exception):
    def __init__(self, message=None):
        super().__init__(message or "An error occurred with ZipRecruiter")


class GlassdoorException(Exception):
    def __init__(self, message=None):
        super().__init__(message or "An error occurred with Glassdoor")
333 src/jobspy/scrapers/glassdoor/__init__.py (Normal file)
@@ -0,0 +1,333 @@
"""
jobspy.scrapers.glassdoor
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape Glassdoor.
"""
import json
import requests
from bs4 import BeautifulSoup
from typing import Optional
from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor, as_completed
from ..utils import count_urgent_words, extract_emails_from_text

from .. import Scraper, ScraperInput, Site
from ..exceptions import GlassdoorException
from ..utils import create_session, modify_and_get_description
from ...jobs import (
    JobPost,
    Compensation,
    CompensationInterval,
    Location,
    JobResponse,
    JobType,
)


class GlassdoorScraper(Scraper):
    def __init__(self, proxy: Optional[str] = None):
        """
        Initializes GlassdoorScraper with the Glassdoor job search url
        """
        site = Site(Site.GLASSDOOR)
        super().__init__(site, proxy=proxy)

        self.url = None
        self.country = None
        self.jobs_per_page = 30
        self.seen_urls = set()

    def fetch_jobs_page(
        self,
        scraper_input: ScraperInput,
        location_id: int,
        location_type: str,
        page_num: int,
        cursor: str | None,
    ) -> (list[JobPost], str | None):
        """
        Scrapes a page of Glassdoor for jobs with scraper_input criteria
        """
        try:
            payload = self.add_payload(
                scraper_input, location_id, location_type, page_num, cursor
            )
            session = create_session(self.proxy, is_tls=False, has_retry=True)
            response = session.post(
                f"{self.url}/graph", headers=self.headers(), timeout=10, data=payload
            )
            if response.status_code != 200:
                raise GlassdoorException(
                    f"bad response status code: {response.status_code}"
                )
            res_json = response.json()[0]
            if "errors" in res_json:
                raise ValueError("Error encountered in API response")
        except Exception as e:
            raise GlassdoorException(str(e))

        jobs_data = res_json["data"]["jobListings"]["jobListings"]

        jobs = []
        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
            future_to_job_data = {executor.submit(self.process_job, job): job for job in jobs_data}
            for future in as_completed(future_to_job_data):
                job_data = future_to_job_data[future]
                try:
                    job_post = future.result()
                    if job_post:
                        jobs.append(job_post)
                except Exception as exc:
                    raise GlassdoorException(f'Glassdoor generated an exception: {exc}')

        return jobs, self.get_cursor_for_page(
            res_json["data"]["jobListings"]["paginationCursors"], page_num + 1
        )

    def process_job(self, job_data):
        """Processes a single job and fetches its description."""
        job_id = job_data["jobview"]["job"]["listingId"]
        job_url = f'{self.url}job-listing/j?jl={job_id}'
        if job_url in self.seen_urls:
            return None
        self.seen_urls.add(job_url)
        job = job_data["jobview"]
        title = job["job"]["jobTitleText"]
        company_name = job["header"]["employerNameFromSearch"]
        company_id = job_data['jobview']['header']['employer']['id']
        location_name = job["header"].get("locationName", "")
        location_type = job["header"].get("locationType", "")
        age_in_days = job["header"].get("ageInDays")
        is_remote, location = False, None
        date_posted = (datetime.now() - timedelta(days=age_in_days)).date() if age_in_days else None

        if location_type == "S":
            is_remote = True
        else:
            location = self.parse_location(location_name)

        compensation = self.parse_compensation(job["header"])

        try:
            description = self.fetch_job_description(job_id)
        except Exception as e:
            description = None

        job_post = JobPost(
            title=title,
            company_url=f"{self.url}Overview/W-EI_IE{company_id}.htm" if company_id else None,
            company_name=company_name,
            date_posted=date_posted,
            job_url=job_url,
            location=location,
            compensation=compensation,
            is_remote=is_remote,
            description=description,
            emails=extract_emails_from_text(description) if description else None,
            num_urgent_words=count_urgent_words(description) if description else None,
        )
        return job_post

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes Glassdoor for jobs with scraper_input criteria.
        :param scraper_input: Information about job search criteria.
        :return: JobResponse containing a list of jobs.
        """
        scraper_input.results_wanted = min(900, scraper_input.results_wanted)
        self.country = scraper_input.country
        self.url = self.country.get_url()

        location_id, location_type = self.get_location(
            scraper_input.location, scraper_input.is_remote
        )
        all_jobs: list[JobPost] = []
        cursor = None
        max_pages = 30

        try:
            for page in range(
                1 + (scraper_input.offset // self.jobs_per_page),
                min(
                    (scraper_input.results_wanted // self.jobs_per_page) + 2,
                    max_pages + 1,
                ),
            ):
                try:
                    jobs, cursor = self.fetch_jobs_page(
                        scraper_input, location_id, location_type, page, cursor
                    )
                    all_jobs.extend(jobs)
                    if len(all_jobs) >= scraper_input.results_wanted:
                        all_jobs = all_jobs[: scraper_input.results_wanted]
                        break
                except Exception as e:
                    raise GlassdoorException(str(e))
        except Exception as e:
            raise GlassdoorException(str(e))

        return JobResponse(jobs=all_jobs)
def fetch_job_description(self, job_id):
|
||||
"""Fetches the job description for a single job ID."""
|
||||
url = f"{self.url}/graph"
|
||||
body = [
|
||||
{
|
||||
"operationName": "JobDetailQuery",
|
||||
"variables": {
|
||||
"jl": job_id,
|
||||
"queryString": "q",
|
||||
"pageTypeEnum": "SERP"
|
||||
},
|
||||
"query": """
|
||||
query JobDetailQuery($jl: Long!, $queryString: String, $pageTypeEnum: PageTypeEnum) {
|
||||
jobview: jobView(
|
||||
listingId: $jl
|
||||
contextHolder: {queryString: $queryString, pageTypeEnum: $pageTypeEnum}
|
||||
) {
|
||||
job {
|
||||
description
|
||||
__typename
|
||||
}
|
||||
__typename
|
||||
}
|
||||
}
|
||||
"""
|
||||
}
|
||||
]
|
||||
response = requests.post(url, json=body, headers=GlassdoorScraper.headers())
|
||||
if response.status_code != 200:
|
||||
return None
|
||||
data = response.json()[0]
|
||||
desc = data['data']['jobview']['job']['description']
|
||||
soup = BeautifulSoup(desc, 'html.parser')
|
||||
return modify_and_get_description(soup)
|
||||
|
||||
@staticmethod
|
||||
def parse_compensation(data: dict) -> Optional[Compensation]:
|
||||
pay_period = data.get("payPeriod")
|
||||
adjusted_pay = data.get("payPeriodAdjustedPay")
|
||||
currency = data.get("payCurrency", "USD")
|
||||
|
||||
if not pay_period or not adjusted_pay:
|
||||
return None
|
||||
|
||||
interval = None
|
||||
if pay_period == "ANNUAL":
|
||||
interval = CompensationInterval.YEARLY
|
||||
elif pay_period:
|
||||
interval = CompensationInterval.get_interval(pay_period)
|
||||
min_amount = int(adjusted_pay.get("p10") // 1)
|
||||
max_amount = int(adjusted_pay.get("p90") // 1)
|
||||
|
||||
return Compensation(
|
||||
interval=interval,
|
||||
min_amount=min_amount,
|
||||
max_amount=max_amount,
|
||||
currency=currency,
|
||||
)
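
    # Example with illustrative values: a header like
    #   {"payPeriod": "ANNUAL", "payCurrency": "USD",
    #    "payPeriodAdjustedPay": {"p10": 90000.0, "p50": 110000.0, "p90": 135000.0}}
    # yields Compensation(interval=YEARLY, min_amount=90000, max_amount=135000,
    # currency="USD"): Glassdoor's 10th/90th percentile pay estimates become the
    # advertised range bounds, truncated to whole units by the `// 1`.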

    def get_location(self, location: str, is_remote: bool) -> (int, str):
        if not location or is_remote:
            return "11047", "STATE"  # remote options
        url = f"{self.url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
        session = create_session(self.proxy, has_retry=True)
        response = session.get(url)
        if response.status_code != 200:
            raise GlassdoorException(
                f"bad response status code: {response.status_code}"
            )
        items = response.json()
        if not items:
            raise ValueError(f"Location '{location}' not found on Glassdoor")
        location_type = items[0]["locationType"]
        if location_type == "C":
            location_type = "CITY"
        elif location_type == "S":
            location_type = "STATE"
        return int(items[0]["locationId"]), location_type

    @staticmethod
    def add_payload(
        scraper_input,
        location_id: int,
        location_type: str,
        page_num: int,
        cursor: str | None = None,
    ) -> str:
        payload = {
            "operationName": "JobSearchResultsQuery",
            "variables": {
                "excludeJobListingIds": [],
                "filterParams": [{"filterKey": "applicationType", "values": "1"}] if scraper_input.easy_apply else [],
                "keyword": scraper_input.search_term,
                "numJobsToShow": 30,
                "locationType": location_type,
                "locationId": int(location_id),
                "parameterUrlInput": f"IL.0,12_I{location_type}{location_id}",
                "pageNumber": page_num,
                "pageCursor": cursor,
            },
"query": "query JobSearchResultsQuery($excludeJobListingIds: [Long!], $keyword: String, $locationId: Int, $locationType: LocationTypeEnum, $numJobsToShow: Int!, $pageCursor: String, $pageNumber: Int, $filterParams: [FilterParams], $originalPageUrl: String, $seoFriendlyUrlInput: String, $parameterUrlInput: String, $seoUrl: Boolean) {\n jobListings(\n contextHolder: {searchParams: {excludeJobListingIds: $excludeJobListingIds, keyword: $keyword, locationId: $locationId, locationType: $locationType, numPerPage: $numJobsToShow, pageCursor: $pageCursor, pageNumber: $pageNumber, filterParams: $filterParams, originalPageUrl: $originalPageUrl, seoFriendlyUrlInput: $seoFriendlyUrlInput, parameterUrlInput: $parameterUrlInput, seoUrl: $seoUrl, searchType: SR}}\n ) {\n companyFilterOptions {\n id\n shortName\n __typename\n }\n filterOptions\n indeedCtk\n jobListings {\n ...JobView\n __typename\n }\n jobListingSeoLinks {\n linkItems {\n position\n url\n __typename\n }\n __typename\n }\n jobSearchTrackingKey\n jobsPageSeoData {\n pageMetaDescription\n pageTitle\n __typename\n }\n paginationCursors {\n cursor\n pageNumber\n __typename\n }\n indexablePageForSeo\n searchResultsMetadata {\n searchCriteria {\n implicitLocation {\n id\n localizedDisplayName\n type\n __typename\n }\n keyword\n location {\n id\n shortName\n localizedShortName\n localizedDisplayName\n type\n __typename\n }\n __typename\n }\n footerVO {\n countryMenu {\n childNavigationLinks {\n id\n link\n textKey\n __typename\n }\n __typename\n }\n __typename\n }\n helpCenterDomain\n helpCenterLocale\n jobAlert {\n jobAlertExists\n __typename\n }\n jobSerpFaq {\n questions {\n answer\n question\n __typename\n }\n __typename\n }\n jobSerpJobOutlook {\n occupation\n paragraph\n __typename\n }\n showMachineReadableJobs\n __typename\n }\n serpSeoLinksVO {\n relatedJobTitlesResults\n searchedJobTitle\n searchedKeyword\n searchedLocationIdAsString\n searchedLocationSeoName\n searchedLocationType\n topCityIdsToNameResults {\n key\n value\n __typename\n }\n topEmployerIdsToNameResults {\n key\n value\n __typename\n }\n topEmployerNameResults\n topOccupationResults\n __typename\n }\n totalJobsCount\n __typename\n }\n}\n\nfragment JobView on JobListingSearchResult {\n jobview {\n header {\n adOrderId\n advertiserType\n adOrderSponsorshipLevel\n ageInDays\n divisionEmployerName\n easyApply\n employer {\n id\n name\n shortName\n __typename\n }\n employerNameFromSearch\n goc\n gocConfidence\n gocId\n jobCountryId\n jobLink\n jobResultTrackingKey\n jobTitleText\n locationName\n locationType\n locId\n needsCommission\n payCurrency\n payPeriod\n payPeriodAdjustedPay {\n p10\n p50\n p90\n __typename\n }\n rating\n salarySource\n savedJobId\n sponsored\n __typename\n }\n job {\n descriptionFragments\n importConfigId\n jobTitleId\n jobTitleText\n listingId\n __typename\n }\n jobListingAdminDetails {\n cpcVal\n importConfigId\n jobListingId\n jobSourceId\n userEligibleForAdminJobDetails\n __typename\n }\n overview {\n shortName\n squareLogoUrl\n __typename\n }\n __typename\n }\n __typename\n}\n",
        }

        job_type_filters = {
            JobType.FULL_TIME: "fulltime",
            JobType.PART_TIME: "parttime",
            JobType.CONTRACT: "contract",
            JobType.INTERNSHIP: "internship",
            JobType.TEMPORARY: "temporary",
        }

        if scraper_input.job_type in job_type_filters:
            filter_value = job_type_filters[scraper_input.job_type]
            payload["variables"]["filterParams"].append(
                {"filterKey": "jobType", "values": filter_value}
            )
        return json.dumps([payload])
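
    # Usage sketch (the location id is made up for illustration): the payload is
    # wrapped in a one-element list because Glassdoor's /graph endpoint expects a
    # batched GraphQL envelope, so the serialized body looks like
    #   GlassdoorScraper.add_payload(scraper_input, 1147401, "CITY", 1)
    #   -> '[{"operationName": "JobSearchResultsQuery", "variables": {...}, "query": "..."}]'
    # Real ids come from get_location() above.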

    @staticmethod
    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
        for job_type in JobType:
            if job_type_str in job_type.value:
                return [job_type]

    @staticmethod
    def parse_location(location_name: str) -> Location | None:
        if not location_name or location_name == "Remote":
            return
        city, _, state = location_name.partition(", ")
        return Location(city=city, state=state)

    @staticmethod
    def get_cursor_for_page(pagination_cursors, page_num):
        for cursor_data in pagination_cursors:
            if cursor_data["pageNumber"] == page_num:
                return cursor_data["cursor"]

    @staticmethod
    def headers() -> dict:
        """
        Returns headers needed for requests
        :return: dict - Dictionary containing headers
        """
        return {
            "authority": "www.glassdoor.com",
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "apollographql-client-name": "job-search-next",
            "apollographql-client-version": "4.65.5",
            "content-type": "application/json",
"cookie": 'gdId=91e2dfc4-c8b5-4fa7-83d0-11512b80262c; G_ENABLED_IDPS=google; trs=https%3A%2F%2Fwww.redhat.com%2F:referral:referral:2023-07-05+09%3A50%3A14.862:undefined:undefined; g_state={"i_p":1688587331651,"i_l":1}; _cfuvid=.7llazxhYFZWi6EISSPdVjtqF0NMVwzxr_E.cB1jgLs-1697828392979-0-604800000; GSESSIONID=undefined; JSESSIONID=F03DD1B5EE02DB6D842FE42B142F88F3; cass=1; jobsClicked=true; indeedCtk=1hd77b301k79i801; asst=1697829114.2; G_AUTHUSER_H=0; uc=8013A8318C98C517FE6DD0024636DFDEF978FC33266D93A2FAFEF364EACA608949D8B8FA2DC243D62DE271D733EB189D809ABE5B08D7B1AE865D217BD4EEBB97C282F5DA5FEFE79C937E3F6110B2A3A0ADBBA3B4B6DF5A996FEE00516100A65FCB11DA26817BE8D1C1BF6CFE36B5B68A3FDC2CFEC83AB797F7841FBB157C202332FC7E077B56BD39B167BDF3D9866E3B; AWSALB=zxc/Yk1nbWXXT6HjNyn3H4h4950ckVsFV/zOrq5LSoChYLE1qV+hDI8Axi3fUa9rlskndcO0M+Fw+ZnJ+AQ2afBFpyOd1acouLMYgkbEpqpQaWhY6/Gv4QH1zBcJ; AWSALBCORS=zxc/Yk1nbWXXT6HjNyn3H4h4950ckVsFV/zOrq5LSoChYLE1qV+hDI8Axi3fUa9rlskndcO0M+Fw+ZnJ+AQ2afBFpyOd1acouLMYgkbEpqpQaWhY6/Gv4QH1zBcJ; gdsid=1697828393025:1697830776351:668396EDB9E6A832022D34414128093D; at=HkH8Hnqi9uaMC7eu0okqyIwqp07ht9hBvE1_St7E_hRqPvkO9pUeJ1Jcpds4F3g6LL5ADaCNlxrPn0o6DumGMfog8qI1-zxaV_jpiFs3pugntw6WpVyYWdfioIZ1IDKupyteeLQEM1AO4zhGjY_rPZynpsiZBPO_B1au94sKv64rv23yvP56OiWKKfI-8_9hhLACEwWvM-Az7X-4aE2QdFt93VJbXbbGVf07bdDZfimsIkTtgJCLSRhU1V0kEM1Efyu66vo3m77gFFaMW7lxyYnb36I5PdDtEXBm3aL-zR7-qa5ywd94ISEivgqQOA4FPItNhqIlX4XrfD1lxVz6rfPaoTIDi4DI6UMCUjwyPsuv8mn0rYqDfRnmJpZ97fJ5AnhrknAd_6ZWN5v1OrxJczHzcXd8LO820QPoqxzzG13bmSTXLwGSxMUCtSrVsq05hicimQ3jpRt0c1dA4OkTNqF7_770B9JfcHcM8cr8-C4IL56dnOjr9KBGfN1Q2IvZM2cOBRbV7okiNOzKVZ3qJ24AE34WA2F3U6Whiu6H8nIuGG5hSNkVygY6CtglNZfFF9p8pJAZm79PngrrBv-CXFBZmhYLFo46lmFetDkiJ6mirtez4tKpzTIYjIp4_JAkiZFwbLJ2QGH4mK8kyyW0lZiX1DTuQec50N_5wvRo0Gt7nlKxzLsApMnaNhuQeH5ygh_pa381ORo9mQGi0EYF9zk00pa2--z4PtjfQ8KFq36GgpxKy5-o4qgqygZj8F01L8r-FiX2G4C7PREMIpAyHX2A4-_JxA1IS2j12EyqKTLqE9VcP06qm2Z-YuIW3ctmpMxy5G9_KiEiGv17weizhSFnl6SbpAEY-2VSmQ5V6jm3hoMp2jemkuGCRkZeFstLDEPxlzFN7WM; __cf_bm=zGaVjIJw4irf40_7UVw54B6Ohm271RUX4Tc8KVScrbs-1697830777-0-AYv2GnKTnnCU+cY9xHbJunO0DwlLDO6SIBnC/s/qldpKsGK0rRAjD6y8lbyATT/KlS7g29OZaN4fbd0lrJg0KmWbIybZIzfWVLHSYePVuOhu; asst=1697829114.2; at=dFhXf64wsf2TlnWy41xLs7skJkuxgKToEGcjGtDfUvW4oEAJ4tTIR5dKQ8wbwT75aIaGgdCfvcb-da7vwrCGWscCncmfLFQpJ9l-LLwoRfk-pMsxHhd77wvf-W7I0HSm7-Q5lQJqI9WyNGRxOa-RpzBTf4L8_Et4-3FzjPaAoYY5pY1FhuwXbN5asGOAMW-p8cjpbfn3PumlIYuckguWnjrcY2F31YJ_1noeoHM9tCGpymANbqGXRkG6aXY7yCfVXtdgZU1K5SMeaSPZIuF_iLUxjc_corzpNiH6qq7BIAmh-e5Aa-g7cwpZcln1fmwTVw4uTMZf1eLIMTa9WzgqZNkvG-sGaq_XxKA_Wai6xTTkOHfRgm4632Ba2963wdJvkGmUUa3tb_L4_wTgk3eFnHp5JhghLfT2Pe3KidP-yX__vx8JOsqe3fndCkKXgVz7xQKe1Dur-sMNlGwi4LXfguTT2YUI8C5Miq3pj2IHc7dC97eyyAiAM4HvyGWfaXWZcei6oIGrOwMvYgy0AcwFry6SIP2SxLT5TrxinRRuem1r1IcOTJsMJyUPp1QsZ7bOyq9G_0060B4CPyovw5523hEuqLTM-R5e5yavY6C_1DHUyE15C3mrh7kdvmlGZeflnHqkFTEKwwOftm-Mv-CKD5Db9ABFGNxKB2FH7nDH67hfOvm4tGNMzceBPKYJ3wciTt9jK3wy39_7cOYVywfrZ-oLhw_XtsbGSSeGn3HytrfgSADAh2sT0Gg6eCC9Xy1vh-Za337SVLUDXZ73W2xJxxUHBkFzZs8L_Xndo5DsbpWhVs9IYUGyraJdqB3SLgDbAppIBCJl4fx6_DG8-xOQPBvuFMlTROe1JVdHOzXI1GElwFDTuH1pjkg4I2G0NhAbE06Y-1illQE; gdsid=1697828393025:1697831731408:99C30D94108AC3030D61C736DDCDF11C',
            "gd-csrf-token": "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok",
            "origin": "https://www.glassdoor.com",
            "referer": "https://www.glassdoor.com/",
            "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"macOS"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
        }
@@ -1,17 +1,29 @@
"""
jobspy.scrapers.indeed
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape Indeed.
"""
import re
import math
import io
import json
import traceback
from typing import Any
from datetime import datetime
from typing import Optional

import tls_client
import urllib.parse
from bs4 import BeautifulSoup
from bs4.element import Tag
from concurrent.futures import ThreadPoolExecutor, Future

from ..exceptions import IndeedException
from ..utils import (
    count_urgent_words,
    extract_emails_from_text,
    create_session,
    get_enum_from_job_type,
    modify_and_get_description
)
from ...jobs import (
    JobPost,
    Compensation,
@@ -20,68 +32,56 @@ from ...jobs import (
    JobResponse,
    JobType,
)
from .. import Scraper, ScraperInput, Site, Country, StatusException


class ParsingException(Exception):
    pass
from .. import Scraper, ScraperInput, Site


class IndeedScraper(Scraper):
    def __init__(self):
    def __init__(self, proxy: str | None = None):
        """
        Initializes IndeedScraper with the Indeed job search url
        """
        self.url = None
        self.country = None
        site = Site(Site.INDEED)
        super().__init__(site)
        super().__init__(site, proxy=proxy)

        self.jobs_per_page = 15
        self.jobs_per_page = 25
        self.seen_urls = set()

    def scrape_page(
        self, scraper_input: ScraperInput, page: int, session: tls_client.Session
        self, scraper_input: ScraperInput, page: int
    ) -> tuple[list[JobPost], int]:
        """
        Scrapes a page of Indeed for jobs with scraper_input criteria
        :param scraper_input:
        :param page:
        :param session:
        :return: jobs found on page, total number of jobs found for search
        """
        self.country = scraper_input.country
        domain = self.country.domain_value
        domain = self.country.indeed_domain_value
        self.url = f"https://{domain}.indeed.com"

        job_list = []

        params = {
            "q": scraper_input.search_term,
            "l": scraper_input.location,
            "filter": 0,
            "start": 0 + page * 10,
        }
        if scraper_input.distance:
            params["radius"] = scraper_input.distance

        sc_values = []
        if scraper_input.is_remote:
            sc_values.append("attr(DSQF7)")
        if scraper_input.job_type:
            sc_values.append("jt({})".format(scraper_input.job_type.value))

        if sc_values:
            params["sc"] = "0kf:" + "".join(sc_values) + ";"
        response = session.get(self.url + "/jobs", params=params, allow_redirects=True)
        # print(response.status_code)

        if response.status_code not in range(200, 400):
            raise StatusException(response.status_code)
        try:
            session = create_session(self.proxy)
            response = session.get(
                f"{self.url}/m/jobs",
                headers=self.get_headers(),
                params=self.add_params(scraper_input, page),
                allow_redirects=True,
                timeout_seconds=10,
            )
            if response.status_code not in range(200, 400):
                raise IndeedException(
                    f"bad response with status code: {response.status_code}"
                )
        except Exception as e:
            if "Proxy responded with" in str(e):
                raise IndeedException("bad proxy")
            raise IndeedException(str(e))

        soup = BeautifulSoup(response.content, "html.parser")
        with open("text2.html", "w", encoding="utf-8") as f:
            f.write(str(soup))
        if "did not match any jobs" in str(soup):
            raise ParsingException("Search did not match any jobs")
        if "did not match any jobs" in response.text:
            raise IndeedException("Parsing exception: Search did not match any jobs")

        jobs = IndeedScraper.parse_jobs(
            soup
@@ -93,10 +93,10 @@ class IndeedScraper(Scraper):
            .get("mosaicProviderJobCardsModel", {})
            .get("results")
        ):
            raise Exception("No jobs found.")
            raise IndeedException("No jobs found.")

        def process_job(job) -> Optional[JobPost]:
            job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}'
        def process_job(job: dict) -> JobPost | None:
            job_url = f'{self.url}/m/jobs/viewjob?jk={job["jobkey"]}'
            job_url_client = f'{self.url}/viewjob?jk={job["jobkey"]}'
            if job_url in self.seen_urls:
                return None
@@ -114,8 +114,8 @@ class IndeedScraper(Scraper):
            if interval in CompensationInterval.__members__:
                compensation = Compensation(
                    interval=CompensationInterval[interval],
                    min_amount=int(extracted_salary.get("max")),
                    max_amount=int(extracted_salary.get("min")),
                    min_amount=int(extracted_salary.get("min")),
                    max_amount=int(extracted_salary.get("max")),
                    currency=currency,
                )

@@ -124,10 +124,11 @@ class IndeedScraper(Scraper):
            date_posted = datetime.fromtimestamp(timestamp_seconds)
            date_posted = date_posted.strftime("%Y-%m-%d")

            description = self.get_description(job_url, session)
            description = self.get_description(job_url) if scraper_input.full_description else None

            with io.StringIO(job["snippet"]) as f:
                soup = BeautifulSoup(f, "html.parser")
                li_elements = soup.find_all("li")
                soup_io = BeautifulSoup(f, "html.parser")
                li_elements = soup_io.find_all("li")
            if description is None and li_elements:
                description = " ".join(li.text for li in li_elements)

@@ -135,6 +136,7 @@ class IndeedScraper(Scraper):
                title=job["normTitle"],
                description=description,
                company_name=job["company"],
                company_url=self.url + job["companyOverviewLink"] if "companyOverviewLink" in job else None,
                location=Location(
                    city=job.get("jobLocationCity"),
                    state=job.get("jobLocationState"),
@@ -144,13 +146,19 @@ class IndeedScraper(Scraper):
                compensation=compensation,
                date_posted=date_posted,
                job_url=job_url_client,
                emails=extract_emails_from_text(description) if description else None,
                num_urgent_words=count_urgent_words(description)
                if description
                else None,
                is_remote=self.is_remote_job(job),
            )
            return job_post

        with ThreadPoolExecutor(max_workers=1) as executor:
        workers = 10 if scraper_input.full_description else 10  # possibly lessen 10 when fetching desc based on feedback
        jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
        with ThreadPoolExecutor(max_workers=workers) as executor:
            job_results: list[Future] = [
                executor.submit(process_job, job)
                for job in jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
                executor.submit(process_job, job) for job in jobs
            ]

            job_list = [result.result() for result in job_results if result.result()]
@@ -163,111 +171,95 @@ class IndeedScraper(Scraper):
        :param scraper_input:
        :return: job_response
        """
        session = tls_client.Session(
            client_identifier="chrome112", random_tls_extension_order=True
        )

        pages_to_process = (
            math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
        )

        try:
            #: get first page to initialize session
            job_list, total_results = self.scrape_page(scraper_input, 0, session)
        #: get first page to initialize session
        job_list, total_results = self.scrape_page(scraper_input, 0)

            with ThreadPoolExecutor(max_workers=1) as executor:
                futures: list[Future] = [
                    executor.submit(self.scrape_page, scraper_input, page, session)
                    for page in range(1, pages_to_process + 1)
                ]
        with ThreadPoolExecutor(max_workers=10) as executor:
            futures: list[Future] = [
                executor.submit(self.scrape_page, scraper_input, page)
                for page in range(1, pages_to_process + 1)
            ]

                for future in futures:
                    jobs, _ = future.result()
            for future in futures:
                jobs, _ = future.result()

                    job_list += jobs
        except StatusException as e:
            return JobResponse(
                success=False,
                error=f"Indeed returned status code {e.status_code}",
            )

        except ParsingException as e:
            return JobResponse(
                success=False,
                error=f"Indeed failed to parse response: {e}",
            )
        except Exception as e:
            print(f"LinkedIn failed to scrape: {e}\n{traceback.format_exc()}")
            return JobResponse(
                success=False,
                error=f"Indeed failed to scrape: {e}",
            )
                job_list += jobs

        if len(job_list) > scraper_input.results_wanted:
            job_list = job_list[: scraper_input.results_wanted]

        job_response = JobResponse(
            success=True,
            jobs=job_list,
            total_results=total_results,
        )
        return job_response

    def get_description(self, job_page_url: str, session: tls_client.Session) -> str:
    def get_description(self, job_page_url: str) -> str | None:
        """
        Retrieves job description by going to the job page url
        :param job_page_url:
        :param session:
        :return: description
        """
        parsed_url = urllib.parse.urlparse(job_page_url)
        params = urllib.parse.parse_qs(parsed_url.query)
        jk_value = params.get("jk", [None])[0]
        formatted_url = f"{self.url}/viewjob?jk={jk_value}&spa=1"
        formatted_url = f"{self.url}/m/viewjob?jk={jk_value}&spa=1"
        session = create_session(self.proxy)

        try:
            response = session.get(
                formatted_url, allow_redirects=True, timeout_seconds=5
                formatted_url,
                headers=self.get_headers(),
                allow_redirects=True,
                timeout_seconds=5,
            )
        except requests.exceptions.Timeout:
            print("The request timed out.")
        except Exception as e:
            return None

        if response.status_code not in range(200, 400):
            print("status code not in range")
            return None

        raw_description = response.json()["body"]["jobInfoWrapperModel"][
            "jobInfoModel"
        ]["sanitizedJobDescription"]
        with io.StringIO(raw_description) as f:
            soup = BeautifulSoup(f, "html.parser")
        text_content = " ".join(soup.get_text().split()).strip()
        return text_content
        try:
            soup = BeautifulSoup(response.text, 'html.parser')
            script_tags = soup.find_all('script')

            job_description = ''
            for tag in script_tags:
                if 'window._initialData' in tag.text:
                    json_str = tag.text
                    json_str = json_str.split('window._initialData=')[1]
                    json_str = json_str.rsplit(';', 1)[0]
                    data = json.loads(json_str)
                    job_description = data["jobInfoWrapperModel"]["jobInfoModel"]["sanitizedJobDescription"]
                    break
        except (KeyError, TypeError, IndexError):
            return None

        soup = BeautifulSoup(job_description, "html.parser")
        return modify_and_get_description(soup)
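
    # The description is not in the server-rendered HTML; it sits in the page's
    # embedded `window._initialData=...;` JSON blob, which is why the code above
    # slices the script text between the assignment and the final ';' before
    # json.loads. Standalone sketch of the same technique (names assumed):
    #   json_str = script_text.split('window._initialData=')[1].rsplit(';', 1)[0]
    #   data = json.loads(json_str)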

    @staticmethod
    def get_job_type(job: dict) -> Optional[JobType]:
    def get_job_type(job: dict) -> list[JobType] | None:
        """
        Parses the job to get JobTypeIndeed
        Parses the job to get list of job types
        :param job:
        :return:
        """
        job_types: list[JobType] = []
        for taxonomy in job["taxonomyAttributes"]:
            if taxonomy["label"] == "job-types":
                if len(taxonomy["attributes"]) > 0:
                    label = taxonomy["attributes"][0].get("label")
                for i in range(len(taxonomy["attributes"])):
                    label = taxonomy["attributes"][i].get("label")
                    if label:
                        job_type_str = label.replace("-", "").replace(" ", "").lower()
                        # print(f"Debug: job_type_str = {job_type_str}")
                        return IndeedScraper.get_enum_from_value(job_type_str)
        return None

    @staticmethod
    def get_enum_from_value(value_str):
        for job_type in JobType:
            if value_str in job_type.value:
                return job_type
        return None
                        job_type = get_enum_from_job_type(job_type_str)
                        if job_type:
                            job_types.append(job_type)
        return job_types

    @staticmethod
    def parse_jobs(soup: BeautifulSoup) -> dict:
@@ -277,7 +269,7 @@ class IndeedScraper(Scraper):
        :return: jobs
        """

        def find_mosaic_script() -> Optional[Tag]:
        def find_mosaic_script() -> Tag | None:
            """
            Finds jobcards script tag
            :return: script_tag
@@ -304,10 +296,10 @@ class IndeedScraper(Scraper):
                jobs = json.loads(m.group(1).strip())
                return jobs
            else:
                raise ParsingException("Could not find mosaic provider job cards data")
                raise IndeedException("Could not find mosaic provider job cards data")
        else:
            raise ParsingException(
                "Could not find a script tag containing mosaic provider data"
            raise IndeedException(
                "Could not find any results for the search"
            )

    @staticmethod
@@ -327,3 +319,53 @@ class IndeedScraper(Scraper):
        data = json.loads(json_str)
        total_num_jobs = int(data["searchTitleBarModel"]["totalNumResults"])
        return total_num_jobs

    @staticmethod
    def get_headers():
        return {
            'Host': 'www.indeed.com',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'sec-fetch-site': 'same-origin',
            'sec-fetch-dest': 'document',
            'accept-language': 'en-US,en;q=0.9',
            'sec-fetch-mode': 'navigate',
            'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 192.0',
            'referer': 'https://www.indeed.com/m/jobs?q=software%20intern&l=Dallas%2C%20TX&from=serpso&rq=1&rsIdx=3',
        }

    @staticmethod
    def is_remote_job(job: dict) -> bool:
        """
        :param job:
        :return: bool
        """
        for taxonomy in job.get("taxonomyAttributes", []):
            if taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0:
                return True
        return False

    @staticmethod
    def add_params(scraper_input: ScraperInput, page: int) -> dict[str, str | Any]:
        params = {
            "q": scraper_input.search_term,
            "l": scraper_input.location,
            "filter": 0,
            "start": scraper_input.offset + page * 10,
            "sort": "date"
        }
        if scraper_input.distance:
            params["radius"] = scraper_input.distance

        sc_values = []
        if scraper_input.is_remote:
            sc_values.append("attr(DSQF7)")
        if scraper_input.job_type:
            sc_values.append("jt({})".format(scraper_input.job_type.value))

        if sc_values:
            params["sc"] = "0kf:" + "".join(sc_values) + ";"

        if scraper_input.easy_apply:
            params['iafilter'] = 1

        return params
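
    # Example result (assumed inputs: search_term="python", location="Austin, TX",
    # is_remote=True, a full-time job type, offset=0, page=2):
    #   {"q": "python", "l": "Austin, TX", "filter": 0, "start": 20, "sort": "date",
    #    "sc": "0kf:attr(DSQF7)jt(fulltime);"}
    # "start" pages in steps of 10; "sc" packs Indeed's search-criteria filters,
    # where attr(DSQF7) marks remote and jt(...) the job type (exact jt value
    # depends on how JobType.value renders).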

@@ -1,29 +1,52 @@
from typing import Optional, Tuple
"""
jobspy.scrapers.linkedin
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape LinkedIn.
"""
import time
import random
from typing import Optional
from datetime import datetime

import requests
from bs4 import BeautifulSoup
from requests.exceptions import ProxyError
from threading import Lock
from bs4.element import Tag
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urlunparse

from .. import Scraper, ScraperInput, Site
from ..exceptions import LinkedInException
from ..utils import create_session
from ...jobs import (
    JobPost,
    Location,
    JobResponse,
    JobType,
    Compensation,
    CompensationInterval,
    Country,
    Compensation
)
from ..utils import (
    count_urgent_words,
    extract_emails_from_text,
    get_enum_from_job_type,
    currency_parser,
    modify_and_get_description
)


class LinkedInScraper(Scraper):
    def __init__(self):
    DELAY = 3

    def __init__(self, proxy: Optional[str] = None):
        """
        Initializes LinkedInScraper with the LinkedIn job search url
        """
        site = Site(Site.LINKEDIN)
        self.country = "worldwide"
        self.url = "https://www.linkedin.com"
        super().__init__(site)
        super().__init__(site, proxy=proxy)

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
@@ -31,12 +54,12 @@ class LinkedInScraper(Scraper):
        :param scraper_input:
        :return: job_response
        """
        self.country = "worldwide"
        job_list: list[JobPost] = []
        seen_urls = set()
        page, processed_jobs, job_count = 0, 0, 0
        url_lock = Lock()
        page = scraper_input.offset // 25 + 25 if scraper_input.offset else 0

        def job_type_code(job_type):
        def job_type_code(job_type_enum):
            mapping = {
                JobType.FULL_TIME: "F",
                JobType.PART_TIME: "P",
@@ -45,121 +68,155 @@ class LinkedInScraper(Scraper):
                JobType.TEMPORARY: "T",
            }

            return mapping.get(job_type, "")
            return mapping.get(job_type_enum, "")

        with requests.Session() as session:
            while len(job_list) < scraper_input.results_wanted:
                params = {
                    "keywords": scraper_input.search_term,
                    "location": scraper_input.location,
                    "distance": scraper_input.distance,
                    "f_WT": 2 if scraper_input.is_remote else None,
                    "f_JT": job_type_code(scraper_input.job_type)
                    if scraper_input.job_type
                    else None,
                    "pageNum": page,
                    "f_AL": "true" if scraper_input.easy_apply else None,
                }
        while len(job_list) < scraper_input.results_wanted and page < 1000:
            session = create_session(is_tls=False, has_retry=True, delay=5)
            params = {
                "keywords": scraper_input.search_term,
                "location": scraper_input.location,
                "distance": scraper_input.distance,
                "f_WT": 2 if scraper_input.is_remote else None,
                "f_JT": job_type_code(scraper_input.job_type)
                if scraper_input.job_type
                else None,
                "pageNum": 0,
                "start": page + scraper_input.offset,
                "f_AL": "true" if scraper_input.easy_apply else None,
            }

                params = {k: v for k, v in params.items() if v is not None}
            params = {k: v for k, v in params.items() if v is not None}
            try:
                response = session.get(
                    f"{self.url}/jobs/search", params=params, allow_redirects=True
                    f"{self.url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
                    params=params,
                    allow_redirects=True,
                    proxies=self.proxy,
                    headers=self.headers(),
                    timeout=10,
                )
                response.raise_for_status()

                if response.status_code != 200:
                    return JobResponse(
                        success=False,
                        error=f"Response returned {response.status_code}",
                    )
            except requests.HTTPError as e:
                raise LinkedInException(f"bad response status code: {e.response.status_code}")
            except ProxyError as e:
                raise LinkedInException("bad proxy")
            except Exception as e:
                raise LinkedInException(str(e))

                soup = BeautifulSoup(response.text, "html.parser")
            soup = BeautifulSoup(response.text, "html.parser")
            job_cards = soup.find_all("div", class_="base-search-card")
            if len(job_cards) == 0:
                return JobResponse(jobs=job_list)

                if page == 0:
                    job_count_text = soup.find(
                        "span", class_="results-context-header__job-count"
                    ).text
                    job_count = int("".join(filter(str.isdigit, job_count_text)))

                for job_card in soup.find_all(
                    "div",
                    class_="base-card relative w-full hover:no-underline focus:no-underline base-card--link base-search-card base-search-card--link job-search-card",
                ):
                    processed_jobs += 1
                    data_entity_urn = job_card.get("data-entity-urn", "")
                    job_id = (
                        data_entity_urn.split(":")[-1] if data_entity_urn else "N/A"
                    )
            for job_card in job_cards:
                job_url = None
                href_tag = job_card.find("a", class_="base-card__full-link")
                if href_tag and "href" in href_tag.attrs:
                    href = href_tag.attrs["href"].split("?")[0]
                    job_id = href.split("-")[-1]
                    job_url = f"{self.url}/jobs/view/{job_id}"

                    with url_lock:
                        if job_url in seen_urls:
                            continue
                        seen_urls.add(job_url)
                    job_info = job_card.find("div", class_="base-search-card__info")
                    if job_info is None:
                        continue
                    title_tag = job_info.find("h3", class_="base-search-card__title")
                    title = title_tag.text.strip() if title_tag else "N/A"

                    company_tag = job_info.find("a", class_="hidden-nested-link")
                    company = company_tag.text.strip() if company_tag else "N/A"
                # Call process_job directly without threading
                try:
                    job_post = self.process_job(job_card, job_url, scraper_input.full_description)
                    if job_post:
                        job_list.append(job_post)
                except Exception as e:
                    raise LinkedInException("Exception occurred while processing jobs")

                    metadata_card = job_info.find(
                        "div", class_="base-search-card__metadata"
                    )
                    location: Location = self.get_location(metadata_card)

                    datetime_tag = metadata_card.find(
                        "time", class_="job-search-card__listdate"
                    )
                    description, job_type = LinkedInScraper.get_description(job_url)
                    if datetime_tag:
                        datetime_str = datetime_tag["datetime"]
                        date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
                    else:
                        date_posted = None

                    job_post = JobPost(
                        title=title,
                        description=description,
                        company_name=company,
                        location=location,
                        date_posted=date_posted,
                        job_url=job_url,
                        job_type=job_type,
                        compensation=Compensation(
                            interval=CompensationInterval.YEARLY, currency=None
                        ),
                    )
                    job_list.append(job_post)
                    if (
                        len(job_list) >= scraper_input.results_wanted
                        or processed_jobs >= job_count
                    ):
                        break
                if (
                    len(job_list) >= scraper_input.results_wanted
                    or processed_jobs >= job_count
                ):
                    break

                page += 1
            page += 25
            time.sleep(random.uniform(LinkedInScraper.DELAY, LinkedInScraper.DELAY + 2))

        job_list = job_list[: scraper_input.results_wanted]
        job_response = JobResponse(
            success=True,
            jobs=job_list,
            total_results=job_count,
        )
        return job_response
        return JobResponse(jobs=job_list)

    @staticmethod
    def get_description(job_page_url: str) -> Optional[str]:
    def process_job(self, job_card: Tag, job_url: str, full_descr: bool) -> Optional[JobPost]:
        salary_tag = job_card.find('span', class_='job-search-card__salary-info')

        compensation = None
        if salary_tag:
            salary_text = salary_tag.get_text(separator=' ').strip()
            salary_values = [currency_parser(value) for value in salary_text.split('-')]
            salary_min = salary_values[0]
            salary_max = salary_values[1]
            currency = salary_text[0] if salary_text[0] != '$' else 'USD'

            compensation = Compensation(
                min_amount=int(salary_min),
                max_amount=int(salary_max),
                currency=currency,
            )

        title_tag = job_card.find("span", class_="sr-only")
        title = title_tag.get_text(strip=True) if title_tag else "N/A"

        company_tag = job_card.find("h4", class_="base-search-card__subtitle")
        company_a_tag = company_tag.find("a") if company_tag else None
        company_url = (
            urlunparse(urlparse(company_a_tag.get("href"))._replace(query=""))
            if company_a_tag and company_a_tag.has_attr("href")
            else ""
        )
        company = company_a_tag.get_text(strip=True) if company_a_tag else "N/A"

        metadata_card = job_card.find("div", class_="base-search-card__metadata")
        location = self.get_location(metadata_card)

        datetime_tag = (
            metadata_card.find("time", class_="job-search-card__listdate")
            if metadata_card
            else None
        )
        date_posted = description = job_type = None
        if datetime_tag and "datetime" in datetime_tag.attrs:
            datetime_str = datetime_tag["datetime"]
            try:
                date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
            except Exception as e:
                date_posted = None
        benefits_tag = job_card.find("span", class_="result-benefits__text")
        benefits = " ".join(benefits_tag.get_text().split()) if benefits_tag else None
        if full_descr:
            description, job_type = self.get_job_description(job_url)

        return JobPost(
            title=title,
            company_name=company,
            company_url=company_url,
            location=location,
            date_posted=date_posted,
            job_url=job_url,
            compensation=compensation,
            benefits=benefits,
            job_type=job_type,
            description=description,
            emails=extract_emails_from_text(description) if description else None,
            num_urgent_words=count_urgent_words(description) if description else None,
        )

    def get_job_description(
        self, job_page_url: str
    ) -> tuple[None, None] | tuple[str | None, tuple[str | None, JobType | None]]:
        """
        Retrieves job description by going to the job page url
        :param job_page_url:
        :return: description or None
        """
        response = requests.get(job_page_url, allow_redirects=True)
        if response.status_code not in range(200, 400):
        try:
            session = create_session(is_tls=False, has_retry=True)
            response = session.get(job_page_url, timeout=5, proxies=self.proxy)
            response.raise_for_status()
        except requests.HTTPError as e:
            return None, None
        except Exception as e:
            return None, None
        if response.url == "https://www.linkedin.com/signup":
            return None, None

        soup = BeautifulSoup(response.text, "html.parser")
@@ -167,19 +224,19 @@ class LinkedInScraper(Scraper):
            "div", class_=lambda x: x and "show-more-less-html__markup" in x
        )

        text_content = None
        description = None
        if div_content:
            text_content = " ".join(div_content.get_text().split()).strip()
            description = modify_and_get_description(div_content)

        def get_job_type(
            soup: BeautifulSoup,
        ) -> Tuple[Optional[str], Optional[JobType]]:
            soup_job_type: BeautifulSoup,
        ) -> list[JobType] | None:
            """
            Gets the job type from job page
            :param soup:
            :param soup_job_type:
            :return: JobType
            """
            h3_tag = soup.find(
            h3_tag = soup_job_type.find(
                "h3",
                class_="description__job-criteria-subheader",
                string=lambda text: "Employment type" in text,
@@ -196,16 +253,9 @@ class LinkedInScraper(Scraper):
            employment_type = employment_type.lower()
            employment_type = employment_type.replace("-", "")

            return LinkedInScraper.get_enum_from_value(employment_type)
            return [get_enum_from_job_type(employment_type)] if employment_type else []

        return text_content, get_job_type(soup)

    @staticmethod
    def get_enum_from_value(value_str):
        for job_type in JobType:
            if value_str in job_type.value:
                return job_type
        return None
        return description, get_job_type(soup)

    def get_location(self, metadata_card: Optional[Tag]) -> Location:
        """
@@ -213,7 +263,7 @@ class LinkedInScraper(Scraper):
        :param metadata_card
        :return: location
        """
        location = Location(country=self.country)
        location = Location(country=Country.from_string(self.country))
        if metadata_card is not None:
            location_tag = metadata_card.find(
                "span", class_="job-search-card__location"
@@ -225,7 +275,32 @@ class LinkedInScraper(Scraper):
                location = Location(
                    city=city,
                    state=state,
                    country=self.country,
                    country=Country.from_string(self.country),
                )
            elif len(parts) == 3:
                city, state, country = parts
                location = Location(
                    city=city,
                    state=state,
                    country=Country.from_string(country),
                )

        return location
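
    # Example (illustrative strings): "Austin, Texas, United States" splits into
    # three parts, giving city="Austin", state="Texas",
    # country=Country.from_string("United States"); a two-part value such as
    # "Austin, Texas" keeps the scraper-wide default country ("worldwide").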

    @staticmethod
    def headers() -> dict:
        return {
            'authority': 'www.linkedin.com',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'accept-language': 'en-US,en;q=0.9',
            'cache-control': 'max-age=0',
            'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
            # 'sec-ch-ua-mobile': '?0',
            # 'sec-ch-ua-platform': '"macOS"',
            # 'sec-fetch-dest': 'document',
            # 'sec-fetch-mode': 'navigate',
            # 'sec-fetch-site': 'none',
            # 'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        }
96 src/jobspy/scrapers/utils.py Normal file
@@ -0,0 +1,96 @@
import re
import numpy as np

import tls_client
import requests
from requests.adapters import HTTPAdapter, Retry

from ..jobs import JobType


def modify_and_get_description(soup):
    for li in soup.find_all('li'):
        li.string = "- " + li.get_text()

    description = soup.get_text(separator='\n').strip()
    description = re.sub(r'\n+', '\n', description)
    return description
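
# Example (roughly): the fragment "<p>Perks</p><ul><li>401k</li><li>PTO</li></ul>"
# comes back as "Perks\n- 401k\n- PTO" -- list items gain a "- " prefix and runs
# of newlines collapse to single breaks.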


def count_urgent_words(description: str) -> int:
    """
    Count the number of urgent words or phrases in a job description.
    """
    urgent_patterns = re.compile(
        r"\burgen(t|cy)|\bimmediate(ly)?\b|start asap|\bhiring (now|immediate(ly)?)\b",
        re.IGNORECASE,
    )
    matches = re.findall(urgent_patterns, description)
    count = len(matches)

    return count
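
# Example: count_urgent_words("Hiring now! Immediate start for an urgent need") -> 3
# ("hiring now", "immediate", and "urgent" each match once).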


def extract_emails_from_text(text: str) -> list[str] | None:
    if not text:
        return None
    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
    return email_regex.findall(text)


def create_session(proxy: dict | None = None, is_tls: bool = True, has_retry: bool = False, delay: int = 1) -> requests.Session:
    """
    Creates a requests session with optional tls, proxy, and retry settings.

    :return: A session object
    """
    if is_tls:
        session = tls_client.Session(
            client_identifier="chrome112",
            random_tls_extension_order=True,
        )
        session.proxies = proxy
    else:
        session = requests.Session()
        session.allow_redirects = True
        if proxy:
            session.proxies.update(proxy)
        if has_retry:
            retries = Retry(total=3,
                            connect=3,
                            status=3,
                            status_forcelist=[500, 502, 503, 504, 429],
                            backoff_factor=delay)
            adapter = HTTPAdapter(max_retries=retries)

            session.mount('http://', adapter)
            session.mount('https://', adapter)

    return session
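
# Usage sketch (the proxy mapping is an assumed example, in requests' format):
#   proxy = {"http": "http://user:pass@10.0.0.1:8080",
#            "https": "http://user:pass@10.0.0.1:8080"}
#   session = create_session(proxy, is_tls=False, has_retry=True, delay=2)
# With is_tls=True the session mimics a Chrome 112 TLS fingerprint via tls_client;
# with is_tls=False it is a plain requests.Session, optionally with
# exponential-backoff retries on 429 and 5xx responses.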


def get_enum_from_job_type(job_type_str: str) -> JobType | None:
    """
    Given a string, returns the corresponding JobType enum member if a match is found.
    """
    res = None
    for job_type in JobType:
        if job_type_str in job_type.value:
            res = job_type
    return res


def currency_parser(cur_str):
    # Remove any non-numerical characters
    # except for ',' '.' or '-' (e.g. EUR)
    cur_str = re.sub("[^-0-9.,]", '', cur_str)
    # Remove any 000s separators (either , or .)
    cur_str = re.sub("[.,]", '', cur_str[:-3]) + cur_str[-3:]

    if '.' in list(cur_str[-3:]):
        num = float(cur_str)
    elif ',' in list(cur_str[-3:]):
        num = float(cur_str.replace(',', '.'))
    else:
        num = float(cur_str)

    return np.round(num, 2)
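
# Worked examples:
#   currency_parser("$12,345.67") -> 12345.67  (',' treated as thousands separator)
#   currency_parser("12.345,67")  -> 12345.67  (European style: ',' is the decimal mark)
#   currency_parser("€50000")     -> 50000.0
# The heuristic: strip separators from everything but the last three characters,
# then let a '.' or ',' surviving there decide the decimal convention.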

@@ -1,254 +1,125 @@
"""
jobspy.scrapers.ziprecruiter
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape ZipRecruiter.
"""
import math
import json
import time
import re
import traceback
from datetime import datetime
from typing import Optional, Tuple
from urllib.parse import urlparse, parse_qs
from datetime import datetime, date
from typing import Optional, Tuple, Any

import tls_client
from bs4 import BeautifulSoup
from bs4.element import Tag
from concurrent.futures import ThreadPoolExecutor, Future
from concurrent.futures import ThreadPoolExecutor

from .. import Scraper, ScraperInput, Site, StatusException
from ...jobs import (
    JobPost,
    Compensation,
    CompensationInterval,
    Location,
    JobResponse,
    JobType,
    Country,
)
from .. import Scraper, ScraperInput, Site
from ..exceptions import ZipRecruiterException
from ...jobs import JobPost, Compensation, Location, JobResponse, JobType, Country
from ..utils import count_urgent_words, extract_emails_from_text, create_session, modify_and_get_description


class ZipRecruiterScraper(Scraper):
    def __init__(self):
    def __init__(self, proxy: Optional[str] = None):
        """
        Initializes LinkedInScraper with the ZipRecruiter job search url
        Initializes ZipRecruiterScraper with the ZipRecruiter job search url
        """
        site = Site(Site.ZIP_RECRUITER)
        self.url = "https://www.ziprecruiter.com"
        super().__init__(site)
        self.session = create_session(proxy)
        self.get_cookies()
        super().__init__(site, proxy=proxy)

        self.jobs_per_page = 20
        self.seen_urls = set()
        self.session = tls_client.Session(
            client_identifier="chrome112", random_tls_extension_order=True
        )

    def scrape_page(
        self, scraper_input: ScraperInput, page: int
    ) -> tuple[list[JobPost], int | None]:
    def find_jobs_in_page(
        self, scraper_input: ScraperInput, continue_token: str | None = None
    ) -> Tuple[list[JobPost], Optional[str]]:
        """
        Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
        :param scraper_input:
        :param page:
        :param session:
        :return: jobs found on page, total number of jobs found for search
        :param continue_token:
        :return: jobs found on page
        """
        params = self.add_params(scraper_input)
        if continue_token:
            params["continue_from"] = continue_token
        try:
            response = self.session.get(
                f"https://api.ziprecruiter.com/jobs-app/jobs",
                headers=self.headers(),
                params=params
            )
            if response.status_code != 200:
                raise ZipRecruiterException(
                    f"bad response status code: {response.status_code}"
                )
        except Exception as e:
            if "Proxy responded with non 200 code" in str(e):
                raise ZipRecruiterException("bad proxy")
            raise ZipRecruiterException(str(e))

        job_list = []
        time.sleep(5)
        response_data = response.json()
        jobs_list = response_data.get("jobs", [])
        next_continue_token = response_data.get("continue", None)

        job_type_value = None
        if scraper_input.job_type:
            if scraper_input.job_type.value == "fulltime":
                job_type_value = "full_time"
            elif scraper_input.job_type.value == "parttime":
                job_type_value = "part_time"
            else:
                job_type_value = scraper_input.job_type.value
        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
            job_results = [executor.submit(self.process_job, job) for job in jobs_list]

        params = {
            "search": scraper_input.search_term,
            "location": scraper_input.location,
            "page": page,
            "form": "jobs-landing",
        }

        if scraper_input.is_remote:
            params["refine_by_location_type"] = "only_remote"

        if scraper_input.distance:
            params["radius"] = scraper_input.distance

        if job_type_value:
            params[
                "refine_by_employment"
            ] = f"employment_type:employment_type:{job_type_value}"

        response = self.session.get(
            self.url + "/jobs-search",
            headers=ZipRecruiterScraper.headers(),
            params=params,
            allow_redirects=True,
        )

        # print(response.status_code)
        if response.status_code != 200:
            raise StatusException(response.status_code)

        html_string = response.text
        soup = BeautifulSoup(html_string, "html.parser")

        script_tag = soup.find("script", {"id": "js_variables"})
        data = json.loads(script_tag.string)

        if page == 1:
            job_count = int(data["totalJobCount"].replace(",", ""))
        else:
            job_count = None

        with ThreadPoolExecutor(max_workers=10) as executor:
            if "jobList" in data and data["jobList"]:
                jobs_js = data["jobList"]
                job_results = [
                    executor.submit(self.process_job_js, job) for job in jobs_js
                ]
            else:
                jobs_html = soup.find_all("div", {"class": "job_content"})
                job_results = [
                    executor.submit(self.process_job_html, job) for job in jobs_html
                ]

            job_list = [result.result() for result in job_results if result.result()]

        return job_list, job_count
        job_list = list(filter(None, (result.result() for result in job_results)))
        return job_list, next_continue_token
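
    # Pagination sketch (assumed driver code, mirroring scrape() below): the API's
    # "continue" token is opaque and must be round-tripped verbatim:
    #   jobs, token = scraper.find_jobs_in_page(scraper_input)         # first page
    #   more, token = scraper.find_jobs_in_page(scraper_input, token)  # next page
    # A missing token means there are no further pages.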
|
||||
|
||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
||||
"""
|
||||
Scrapes ZipRecruiter for jobs with scraper_input criteria
|
||||
:param scraper_input:
|
||||
:return: job_response
|
||||
Scrapes ZipRecruiter for jobs with scraper_input criteria.
|
||||
:param scraper_input: Information about job search criteria.
|
||||
:return: JobResponse containing a list of jobs.
|
||||
"""
|
||||
job_list: list[JobPost] = []
|
||||
continue_token = None
|
||||
|
||||
pages_to_process = max(
|
||||
3, math.ceil(scraper_input.results_wanted / self.jobs_per_page)
|
||||
)
|
||||
max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)
|
||||
|
||||
try:
|
||||
#: get first page to initialize session
|
||||
job_list, total_results = self.scrape_page(scraper_input, 1)
|
||||
for page in range(1, max_pages + 1):
|
||||
if len(job_list) >= scraper_input.results_wanted:
|
||||
break
|
||||
|
||||
with ThreadPoolExecutor(max_workers=10) as executor:
|
||||
futures: list[Future] = [
|
||||
executor.submit(self.scrape_page, scraper_input, page)
|
||||
for page in range(2, pages_to_process + 1)
|
||||
]
|
||||
|
||||
for future in futures:
|
||||
jobs, _ = future.result()
|
||||
|
||||
job_list += jobs
|
||||
|
||||
except StatusException as e:
|
||||
return JobResponse(
|
||||
success=False,
|
||||
error=f"ZipRecruiter returned status code {e.status_code}",
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"ZipRecruiter failed to scrape: {e}\n{traceback.format_exc()}")
|
||||
return JobResponse(
|
||||
success=False,
|
||||
error=f"ZipRecruiter failed to scrape: {e}",
|
||||
jobs_on_page, continue_token = self.find_jobs_in_page(
|
||||
scraper_input, continue_token
|
||||
)
|
||||
if jobs_on_page:
|
||||
job_list.extend(jobs_on_page)
|
||||
|
||||
#: note: this does not handle if the results are more or less than the results_wanted
|
||||
if not continue_token:
|
||||
break
|
||||
|
||||
if len(job_list) > scraper_input.results_wanted:
|
||||
job_list = job_list[: scraper_input.results_wanted]
|
||||
return JobResponse(jobs=job_list[: scraper_input.results_wanted])
|
||||
|
||||
job_response = JobResponse(
|
||||
success=True,
|
||||
jobs=job_list,
|
||||
total_results=total_results,
|
||||
)
|
||||
return job_response
|
||||
|
||||
def process_job_html(self, job: Tag) -> Optional[JobPost]:
|
||||
"""
|
||||
Parses a job from the job content tag
|
||||
:param job: BeautifulSoup Tag for one job post
|
||||
:return JobPost
|
||||
"""
|
||||
job_url = job.find("a", {"class": "job_link"})["href"]
|
||||
def process_job(self, job: dict) -> JobPost | None:
|
||||
"""Processes an individual job dict from the response"""
|
||||
title = job.get("name")
|
||||
job_url = f"https://www.ziprecruiter.com/jobs//j?lvk={job['listing_key']}"
|
||||
if job_url in self.seen_urls:
|
||||
return None
|
||||
return
|
||||
self.seen_urls.add(job_url)
|
||||
|
||||
title = job.find("h2", {"class": "title"}).text
|
||||
company = job.find("a", {"class": "company_name"}).text.strip()
|
||||
job_description_html = job.get("job_description", "").strip()
|
||||
description_soup = BeautifulSoup(job_description_html, "html.parser")
|
||||
description = modify_and_get_description(description_soup)
|
||||
|
||||
description, updated_job_url = self.get_description(job_url)
|
||||
if updated_job_url is not None:
|
||||
job_url = updated_job_url
|
||||
if description is None:
|
||||
description = job.find("p", {"class": "job_snippet"}).text.strip()
|
||||
company = job["hiring_company"].get("name") if "hiring_company" in job else None
|
||||
country_value = "usa" if job.get("job_country") == "US" else "canada"
|
||||
country_enum = Country.from_string(country_value)
|
||||
|
||||
job_type_element = job.find("li", {"class": "perk_item perk_type"})
|
||||
job_type = None
|
||||
if job_type_element:
|
||||
job_type_text = (
|
||||
job_type_element.text.strip().lower().replace("-", "").replace(" ", "")
|
||||
)
|
||||
job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
|
||||
|
||||
date_posted = ZipRecruiterScraper.get_date_posted(job)
|
||||
|
||||
job_post = JobPost(
|
||||
title=title,
|
||||
description=description,
|
||||
company_name=company,
|
||||
location=ZipRecruiterScraper.get_location(job),
|
||||
job_type=job_type,
|
||||
compensation=ZipRecruiterScraper.get_compensation(job),
|
||||
date_posted=date_posted,
|
||||
job_url=job_url,
|
||||
)
|
||||
return job_post
|
||||
|
||||
    def process_job_js(self, job: dict) -> JobPost:
        title = job.get("Title")
        description = BeautifulSoup(
            job.get("Snippet", "").strip(), "html.parser"
        ).get_text()

        company = job.get("OrgName")
        location = Location(
            city=job.get("City"), state=job.get("State"), country=Country.US_CANADA
            city=job.get("job_city"), state=job.get("job_state"), country=country_enum
        )
        try:
            job_type = ZipRecruiterScraper.get_job_type_enum(
                job.get("EmploymentType", "").replace("-", "_").lower()
            )
        except ValueError:
            # print(f"Skipping job due to unrecognized job type: {job.get('EmploymentType')}")
            return None

        formatted_salary = job.get("FormattedSalaryShort", "")
        salary_parts = formatted_salary.split(" ")

        min_salary_str = salary_parts[0][1:].replace(",", "")
        if "." in min_salary_str:
            min_amount = int(float(min_salary_str) * 1000)
        else:
            min_amount = int(min_salary_str.replace("K", "000"))

        if len(salary_parts) >= 3 and salary_parts[2].startswith("$"):
            max_salary_str = salary_parts[2][1:].replace(",", "")
            if "." in max_salary_str:
                max_amount = int(float(max_salary_str) * 1000)
            else:
                max_amount = int(max_salary_str.replace("K", "000"))
        else:
            max_amount = 0

        compensation = Compensation(
            interval=CompensationInterval.YEARLY,
            min_amount=min_amount,
            max_amount=max_amount,
            currency="USD/CAD",
        job_type = ZipRecruiterScraper.get_job_type_enum(
            job.get("employment_type", "").replace("_", "").lower()
        )

        save_job_url = job.get("SaveJobURL", "")
        posted_time_match = re.search(
            r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url
@@ -259,160 +130,72 @@ class ZipRecruiterScraper(Scraper):
            date_posted = date_posted_obj.date()
        else:
            date_posted = date.today()
        job_url = job.get("JobURL")

        return JobPost(
            title=title,
            description=description,
            company_name=company,
            location=location,
            job_type=job_type,
            compensation=compensation,
            compensation=Compensation(
                interval="yearly"
                if job.get("compensation_interval") == "annual"
                else job.get("compensation_interval"),
                min_amount=int(job["compensation_min"])
                if "compensation_min" in job
                else None,
                max_amount=int(job["compensation_max"])
                if "compensation_max" in job
                else None,
                currency=job.get("compensation_currency"),
            ),
            date_posted=date_posted,
            job_url=job_url,
            description=description,
            emails=extract_emails_from_text(description) if description else None,
            num_urgent_words=count_urgent_words(description) if description else None,
        )
        return job_post
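The FormattedSalaryShort arithmetic above expects strings shaped like "$50K to $70K": drop the leading "$", then expand a "K" suffix to thousands (the decimal branch multiplies by 1000 for values like "32.5"). A standalone rendition of that parse; the input format is inferred from the code rather than from ZipRecruiter documentation, and the K-handling is normalized here so decimals and the suffix compose correctly:

def parse_amount(token: str) -> int:
    """Parse one side of a range such as '$70K', '$32.5K', or '$41,000'."""
    s = token.lstrip("$").replace(",", "")
    if s.endswith("K"):
        return int(float(s[:-1]) * 1000)
    return int(float(s))


def parse_range(formatted_salary: str) -> tuple[int, int]:
    parts = formatted_salary.split(" ")
    min_amount = parse_amount(parts[0])
    if len(parts) >= 3 and parts[2].startswith("$"):
        return min_amount, parse_amount(parts[2])
    return min_amount, 0  # the code above falls back to 0 for the max


print(parse_range("$50K to $70K"))    # (50000, 70000)
print(parse_range("$32.5K to $40K"))  # (32500, 40000)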
    def get_cookies(self):
        url = "https://api.ziprecruiter.com/jobs-app/event"
        data = "event_type=session&logged_in=false&number_of_retry=1&property=model%3AiPhone&property=os%3AiOS&property=locale%3Aen_us&property=app_build_number%3A4734&property=app_version%3A91.0&property=manufacturer%3AApple&property=timestamp%3A2024-01-12T12%3A04%3A42-06%3A00&property=screen_height%3A852&property=os_version%3A16.6.1&property=source%3Ainstall&property=screen_width%3A393&property=device_model%3AiPhone%2014%20Pro&property=brand%3AApple"
        self.session.post(url, data=data, headers=ZipRecruiterScraper.headers())

    @staticmethod
    def get_enum_from_value(value_str):
        for job_type in JobType:
            if value_str in job_type.value:
                return job_type
        return None
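The get_cookies payload is URL-encoded form data that poses as a session event from ZipRecruiter's iOS app. Decoding a slice of it with the standard library shows what is actually being reported (this is an inspection aid only, not part of the scraper):

from urllib.parse import parse_qsl

data = "event_type=session&logged_in=false&property=model%3AiPhone&property=brand%3AApple"
for key, value in parse_qsl(data):
    print(f"{key} = {value}")
# event_type = session
# logged_in = false
# property = model:iPhone
# property = brand:Apple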
    @staticmethod
    def get_job_type_enum(job_type_str: str) -> Optional[JobType]:
    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
        for job_type in JobType:
            if job_type_str in job_type.value:
                return job_type
        return None
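Both versions of get_job_type_enum do a membership scan over JobType values, which are collections of string aliases; the newer signature wraps the hit in a one-element list (see the stray return [job_type] further down). A runnable sketch with a reduced stand-in enum (jobspy's real JobType has more members and aliases):

from enum import Enum


class JobType(Enum):  # reduced stand-in for jobspy's JobType
    FULL_TIME = ("fulltime", "full_time")
    PART_TIME = ("parttime", "part_time")


def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
    for job_type in JobType:
        if job_type_str in job_type.value:
            return [job_type]
    return None


print(get_job_type_enum("full_time"))  # [<JobType.FULL_TIME: ('fulltime', 'full_time')>]
print(get_job_type_enum("contract"))   # None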
    def get_description(self, job_page_url: str) -> Tuple[Optional[str], Optional[str]]:
        """
        Retrieves job description by going to the job page url
        :param job_page_url:
        :return: description or None, response url
        """
        try:
            response = self.session.get(
                job_page_url,
                headers=ZipRecruiterScraper.headers(),
                allow_redirects=True,
                timeout_seconds=5,
            )
        except requests.exceptions.Timeout:
            print("The request timed out.")
            return None, None  # keep the (description, url) tuple shape callers unpack

        html_string = response.content
        soup_job = BeautifulSoup(html_string, "html.parser")

        job_description_div = soup_job.find("div", {"class": "job_description"})
        if job_description_div:
            return job_description_div.text.strip(), response.url
        return None, response.url
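get_description is a plain fetch-and-parse: request the job page, take the text of the div with class job_description, and return the final URL as well since the request may redirect. An equivalent sketch on top of vanilla requests (the scraper itself goes through its own session object, which is also where the non-standard timeout_seconds kwarg above comes from):

import requests
from bs4 import BeautifulSoup


def get_description(job_page_url: str) -> tuple[str | None, str]:
    response = requests.get(job_page_url, allow_redirects=True, timeout=5)
    soup = BeautifulSoup(response.content, "html.parser")
    div = soup.find("div", {"class": "job_description"})
    if div:
        return div.text.strip(), response.url
    return None, response.url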
    @staticmethod
    def get_interval(interval_str: str):
        """
        Maps the interval alias to its appropriate CompensationInterval.
        :param interval_str
        :return: CompensationInterval
        """
        interval_alias = {"annually": CompensationInterval.YEARLY}
        interval_str = interval_str.lower()

        if interval_str in interval_alias:
            return interval_alias[interval_str]

        return CompensationInterval(interval_str)
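Note that get_interval only aliases "annually"; every other string has to match a CompensationInterval value directly, or the enum constructor raises ValueError. Illustrated with a stand-in enum carrying two of the real values:

from enum import Enum


class CompensationInterval(Enum):  # stand-in with two of the real values
    YEARLY = "yearly"
    HOURLY = "hourly"


def get_interval(interval_str: str) -> CompensationInterval:
    interval_alias = {"annually": CompensationInterval.YEARLY}
    interval_str = interval_str.lower()
    if interval_str in interval_alias:
        return interval_alias[interval_str]
    return CompensationInterval(interval_str)  # ValueError on anything unknown


print(get_interval("Annually"))  # CompensationInterval.YEARLY
print(get_interval("hourly"))    # CompensationInterval.HOURLY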
    @staticmethod
    def get_date_posted(job: BeautifulSoup) -> Optional[datetime.date]:
        """
        Extracts the date a job was posted
        :param job
        :return: date the job was posted or None
        """
        button = job.find(
            "button", {"class": "action_input save_job zrs_btn_secondary_200"}
        )
        if not button:
            return None

        url_time = button.get("data-href", "")
        url_components = urlparse(url_time)
        params = parse_qs(url_components.query)
        posted_time_str = params.get("posted_time", [None])[0]

        if posted_time_str:
            posted_date = datetime.strptime(
                posted_time_str, "%Y-%m-%dT%H:%M:%SZ"
            ).date()
            return posted_date

            return [job_type]
        return None
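The date extraction rides on the save-job button's data-href, whose query string carries a posted_time timestamp. The same urlparse / parse_qs / strptime chain in isolation (the URL here is made up for the demo):

from datetime import datetime
from urllib.parse import parse_qs, urlparse

url_time = "/jobs/save?posted_time=2023-09-01T12:30:00Z"  # hypothetical data-href
params = parse_qs(urlparse(url_time).query)
posted_time_str = params.get("posted_time", [None])[0]

if posted_time_str:
    posted_date = datetime.strptime(posted_time_str, "%Y-%m-%dT%H:%M:%SZ").date()
    print(posted_date)  # 2023-09-01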
    @staticmethod
    def get_compensation(job: BeautifulSoup) -> Optional[Compensation]:
        """
        Parses the compensation tag from the job BeautifulSoup object
        :param job
        :return: Compensation object or None
        """
        pay_element = job.find("li", {"class": "perk_item perk_pay"})
        if pay_element is None:
            return None
        pay = pay_element.find("div", {"class": "value"}).find("span").text.strip()

        def create_compensation_object(pay_string: str) -> Compensation:
            """
            Creates a Compensation object from a pay_string
            :param pay_string
            :return: compensation
            """
            interval = ZipRecruiterScraper.get_interval(pay_string.split()[-1])

            amounts = []
            for amount in pay_string.split("to"):
                amount = amount.replace(",", "").strip("$ ").split(" ")[0]
                if "K" in amount:
                    amount = amount.replace("K", "")
                    amount = int(float(amount)) * 1000
                else:
                    amount = int(float(amount))
                amounts.append(amount)

            compensation = Compensation(
                interval=interval,
                min_amount=min(amounts),
                max_amount=max(amounts),
                currency="USD/CAD",
            )

            return compensation

        return create_compensation_object(pay)
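Concretely, create_compensation_object splits the pay string on the literal substring "to" and normalizes each side, so "$50K to $65K Annually" becomes min 50000 / max 65000 with a yearly interval. Just the amount-parsing loop, lifted out so it can be run on its own (note the quirk that any "to" inside the string acts as the delimiter):

def parse_amounts(pay_string: str) -> list[int]:
    amounts = []
    for amount in pay_string.split("to"):
        amount = amount.replace(",", "").strip("$ ").split(" ")[0]
        if "K" in amount:
            amount = int(float(amount.replace("K", ""))) * 1000
        else:
            amount = int(float(amount))
        amounts.append(amount)
    return amounts


print(parse_amounts("$50K to $65K Annually"))  # [50000, 65000]
print(parse_amounts("$18 to $22 Hourly"))      # [18, 22]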
    @staticmethod
    def get_location(job: BeautifulSoup) -> Location:
        """
        Extracts the job location from the BeautifulSoup object
        :param job:
        :return: location
        """
        location_link = job.find("a", {"class": "company_location"})
        if location_link is not None:
            location_string = location_link.text.strip()
            parts = location_string.split(", ")
            if len(parts) == 2:
                city, state = parts
    def add_params(scraper_input) -> dict[str, str | Any]:
        params = {
            "search": scraper_input.search_term,
            "location": scraper_input.location,
        }
        job_type_value = None
        if scraper_input.job_type:
            if scraper_input.job_type.value == "fulltime":
                job_type_value = "full_time"
            elif scraper_input.job_type.value == "parttime":
                job_type_value = "part_time"
            else:
                city, state = None, None
        else:
            city, state = None, None
        return Location(city=city, state=state, country=Country.US_CANADA)
                job_type_value = scraper_input.job_type.value
        if scraper_input.easy_apply:
            params['zipapply'] = 1

        if job_type_value:
            params[
                "refine_by_employment"
            ] = f"employment_type:employment_type:{job_type_value}"

        if scraper_input.is_remote:
            params["refine_by_location_type"] = "only_remote"

        if scraper_input.distance:
            params["radius"] = scraper_input.distance

        return params
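For a sense of what add_params produces, here is a flattened rendition with scalars in place of the ScraperInput object; the field names mirror the method above, while build_params itself is an illustrative stand-in:

from typing import Any


def build_params(search_term: str, location: str, *, job_type_value: str | None = None,
                 easy_apply: bool = False, is_remote: bool = False,
                 distance: int | None = None) -> dict[str, Any]:
    params: dict[str, Any] = {"search": search_term, "location": location}
    if easy_apply:
        params["zipapply"] = 1
    if job_type_value:
        params["refine_by_employment"] = (
            f"employment_type:employment_type:{job_type_value}"
        )
    if is_remote:
        params["refine_by_location_type"] = "only_remote"
    if distance:
        params["radius"] = distance
    return params


print(build_params("software engineer", "Austin, TX",
                   job_type_value="full_time", is_remote=True, distance=25))
# {'search': 'software engineer', 'location': 'Austin, TX',
#  'refine_by_employment': 'employment_type:employment_type:full_time',
#  'refine_by_location_type': 'only_remote', 'radius': 25}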
    @staticmethod
    def headers() -> dict:
@@ -421,5 +204,12 @@ class ZipRecruiterScraper(Scraper):
        :return: dict - Dictionary containing headers
        """
        return {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
            "Host": "api.ziprecruiter.com",
            "accept": "*/*",
            "x-zr-zva-override": "100000000;vid:ZT1huzm_EQlDTVEc",
            "x-pushnotificationid": "0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0",
            "x-deviceid": "D77B3A92-E589-46A4-8A39-6EF6F1D86006",
            "user-agent": "Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)",
            "authorization": "Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==",
            "accept-language": "en-US,en;q=0.9",
        }
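These headers impersonate ZipRecruiter's iOS app rather than a browser, hence the Host, device identifiers, and static Basic authorization value replacing the old desktop User-Agent. A hedged sketch of attaching them to a plain requests session; the endpoint path is a guess extrapolated from the jobs-app naming in get_cookies, and the scraper itself may use a different HTTP client:

import requests

session = requests.Session()
session.headers.update(ZipRecruiterScraper.headers())

response = session.get(
    "https://api.ziprecruiter.com/jobs-app/jobs",  # assumed endpoint, not confirmed here
    params={"search": "software engineer", "location": "usa"},
)
print(response.status_code)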
14
src/tests/test_all.py
Normal file
@@ -0,0 +1,14 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_all():
    result = scrape_jobs(
        site_name=["linkedin", "indeed", "zip_recruiter", "glassdoor"],
        search_term="software engineer",
        results_wanted=5,
    )

    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
11
src/tests/test_glassdoor.py
Normal file
@@ -0,0 +1,11 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_glassdoor():
    result = scrape_jobs(
        site_name="glassdoor", search_term="software engineer", country_indeed="USA"
    )
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
@@ -1,9 +1,11 @@
from ..jobspy import scrape_jobs
import pandas as pd


def test_indeed():
    result = scrape_jobs(
        site_name="indeed",
        search_term="software engineer",
        site_name="indeed", search_term="software engineer", country_indeed="usa"
    )
    assert result is not None
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
@@ -1,4 +1,5 @@
from jobspy import scrape_jobs
from ..jobspy import scrape_jobs
import pandas as pd


def test_linkedin():
@@ -6,4 +7,6 @@ def test_linkedin():
        site_name="linkedin",
        search_term="software engineer",
    )
    assert result is not None
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
@@ -1,4 +1,5 @@
from jobspy import scrape_jobs
from ..jobspy import scrape_jobs
import pandas as pd


def test_ziprecruiter():
@@ -7,4 +8,6 @@ def test_ziprecruiter():
        search_term="software engineer",
    )

    assert result is not None
    assert (
        isinstance(result, pd.DataFrame) and not result.empty
    ), "Result should be a non-empty DataFrame"
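Since all of these tests now import through the relative ..jobspy package, they are meant to be collected by pytest from the repository root, e.g. python -m pytest src/tests (assuming src and src/tests are importable as packages). Bear in mind they hit the live job boards, so failures can reflect site changes or rate limiting rather than code regressions.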