Mirror of https://github.com/Bunsly/JobSpy.git (synced 2026-03-04 19:44:30 -08:00)

Compare commits (36 commits)
| SHA1       | SHA1       | SHA1       | SHA1       |
|------------|------------|------------|------------|
| 2e59ab03e3 | 008ca61e12 | 8fc4c3bf90 | bff39a2625 |
| c676050dc0 | 37976f7ec2 | 9fb2fdd80f | af07c1ecbd |
| 286b9e1256 | 162dd40b0f | 558e352939 | efad1a1b7d |
| eaa481c2f4 | b914aa6449 | 6adbfb8b29 | a3b9dd50ff |
| d3ba3a4878 | f524789d74 | f3890d4830 | 60c9728691 |
| f79d975e5f | d6368f909b | 6fcf7f666e | 4406f9350f |
| ca5155f234 | 822a55783e | 59f739018a | a37e7f235e |
| 690739e858 | 43eb2fe0e8 | e50227bba6 | 45c2d76e15 |
| fd883178be | 70e2218c67 | d6947ecdd7 | 5191658562 |
.github/workflows/publish-to-pypi.yml (vendored, 42 lines changed)

@@ -7,27 +7,27 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: "3.10"
+      - uses: actions/checkout@v3
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"

-      - name: Install poetry
-        run: >-
-          python3 -m
-          pip install
-          poetry
-          --user
+      - name: Install poetry
+        run: >-
+          python3 -m
+          pip install
+          poetry
+          --user

-      - name: Build distribution 📦
-        run: >-
-          python3 -m
-          poetry
-          build
+      - name: Build distribution 📦
+        run: >-
+          python3 -m
+          poetry
+          build

-      - name: Publish distribution 📦 to PyPI
-        if: startsWith(github.ref, 'refs/tags')
-        uses: pypa/gh-action-pypi-publish@release/v1
-        with:
-          password: ${{ secrets.PYPI_API_TOKEN }}
+      - name: Publish distribution 📦 to PyPI
+        if: startsWith(github.ref, 'refs/tags')
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          password: ${{ secrets.PYPI_API_TOKEN }}
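The `run: >-` steps above use YAML's folded block scalar, so each multi-line command reaches the shell as a single line. A minimal sketch of the folding, using PyYAML only to illustrate the scalar (not part of the workflow itself):

```python
import yaml  # PyYAML; used here only to show how ">-" folds lines

step = yaml.safe_load(
    "run: >-\n"
    "  python3 -m\n"
    "  pip install\n"
    "  poetry\n"
    "  --user\n"
)
# Folded scalar: newlines become spaces; the "-" chomps the trailing newline
print(step["run"])  # -> "python3 -m pip install poetry --user"
```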
.gitignore (vendored, 10 lines changed)

@@ -1,10 +1,10 @@
-/.idea
-**/.DS_Store
+/venv/
+/ven/
+/.idea
 **/__pycache__/
 **/.pytest_cache/
-/.ipynb_checkpoints/
-**/output/
+**/.DS_Store
 *.pyc
 .env
-dist
+/.ipynb_checkpoints/
+dist
JobSpy_Demo.ipynb (1304 lines changed)

File diff suppressed because it is too large
README.md (203 lines changed)

@@ -1,18 +1,33 @@
 <img src="https://github.com/cullenwatson/JobSpy/assets/78247585/ae185b7e-e444-4712-8bb9-fa97f53e896b" width="400">

 **JobSpy** is a simple, yet comprehensive, job scraping library.
-## Features
+
+**Not technical?** Try out the web scraping tool on our site at [usejobspy.com](https://usejobspy.com).
+
+*Looking to build a data-focused software product?* **[Book a call](https://calendly.com/zachary-products/15min)** *to
+work with us.*
+\
+Check out another project we wrote: ***[HomeHarvest](https://github.com/ZacharyHampton/HomeHarvest)** – a Python package
+for real estate scraping*
+
+## Features

 - Scrapes job postings from **LinkedIn**, **Indeed** & **ZipRecruiter** simultaneously
 - Aggregates the job postings in a Pandas DataFrame
+- Proxy support (HTTP/S, SOCKS)

 [Video Guide for JobSpy](https://www.youtube.com/watch?v=RuP1HrAZnxs&pp=ygUgam9icyBzY3JhcGVyIGJvdCBsaW5rZWRpbiBpbmRlZWQ%3D) -
 Updated for release v1.1.3

 ![image]()

 ### Installation
-`pip install python-jobspy`
-
-_Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_
+
+```
+pip install --upgrade python-jobspy
+```
+
+_Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_

 ### Usage

@@ -25,29 +40,36 @@ jobs: pd.DataFrame = scrape_jobs(
     search_term="software engineer",
     location="Dallas, TX",
     results_wanted=10,

-    # country: only needed for indeed
-    country='USA'
+    country_indeed='USA'  # only needed for indeed
+
+    # use if you want to use a proxy
+    # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
+    # offset=25  # use if you want to start at a specific offset
 )

-if jobs.empty:
-    print("No jobs found.")
-else:
-    #1 print
-    pd.set_option('display.max_columns', None)
-    pd.set_option('display.max_rows', None)
-    pd.set_option('display.width', None)
-    pd.set_option('display.max_colwidth', 50)  # set to 0 to see full job url / desc
-    print(jobs)
+# formatting for pandas
+pd.set_option('display.max_columns', None)
+pd.set_option('display.max_rows', None)
+pd.set_option('display.width', None)
+pd.set_option('display.max_colwidth', 50)  # set to 0 to see full job url / desc

-    #2 display in Jupyter Notebook
-    #display(jobs)
+# 1 output to console
+print(jobs)

-    #3 output to .csv
-    #jobs.to_csv('jobs.csv', index=False)
+# 2 display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
+# display(jobs)
+
+# 3 output to .csv
+# jobs.to_csv('jobs.csv', index=False)
+
+# 4 output to .xlsx
+# jobs.to_xlsx('jobs.xlsx', index=False)
 ```

 ### Output

 ```
 SITE           TITLE                         COMPANY_NAME      CITY          STATE  JOB_TYPE  INTERVAL  MIN_AMOUNT  MAX_AMOUNT  JOB_URL                                            DESCRIPTION
 indeed         Software Engineer             AMERICAN SYSTEMS  Arlington     VA     None      yearly    200000      150000      https://www.indeed.com/viewjob?jk=5e409e577046...  THIS POSITION COMES WITH A 10K SIGNING BONUS!...
@@ -57,7 +79,9 @@ linkedin       Full-Stack Software Engineer  Rain              New York
 zip_recruiter  Software Engineer - New Grad  ZipRecruiter      Santa Monica  CA     fulltime  yearly    130000      150000      https://www.ziprecruiter.com/jobs/ziprecruiter...  We offer a hybrid work environment. Most US-ba...
 zip_recruiter  Software Developer            TEKsystems        Phoenix       AZ     fulltime  hourly    65          75          https://www.ziprecruiter.com/jobs/teksystems-0...  Top Skills' Details• 6 years of Java developme...
 ```

 ### Parameters for `scrape_jobs()`

 ```plaintext
 Required
 ├── site_type (List[enum]): linkedin, zip_recruiter, indeed
@@ -66,130 +90,103 @@ Optional
 ├── location (int)
 ├── distance (int): in miles
 ├── job_type (enum): fulltime, parttime, internship, contract
+├── proxy (str): in format 'http://user:pass@host:port' or [https, socks]
 ├── is_remote (bool)
 ├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
-├── easy_apply (bool): filters for jobs on LinkedIn that have the 'Easy Apply' option
-├── country (enum): uses the corresponding subdomain on Indeed (e.g. Canada on Indeed is ca.indeed.com
+├── easy_apply (bool): filters for jobs that are hosted on LinkedIn
+├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
+├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
 ```

 ### JobPost Schema

 ```plaintext
 JobPost
 ├── title (str)
-├── company_name (str)
+├── company (str)
 ├── job_url (str)
 ├── location (object)
 │   ├── country (str)
 │   ├── city (str)
 │   ├── state (str)
 ├── description (str)
-├── job_type (enum)
+├── job_type (enum): fulltime, parttime, internship, contract
 ├── compensation (object)
-│   ├── interval (CompensationInterval): yearly, monthly, weekly, daily, hourly
+│   ├── interval (enum): yearly, monthly, weekly, daily, hourly
 │   ├── min_amount (int)
 │   ├── max_amount (int)
-│   └── currency (str)
-└── date_posted (datetime)
+│   └── currency (enum)
+└── date_posted (date)
 ```

+### Exceptions
+
+The following exceptions may be raised when using JobSpy:
+
+* `LinkedInException`
+* `IndeedException`
+* `ZipRecruiterException`
+
 ## Supported Countries for Job Searching

 ### **LinkedIn**

-LinkedIn searches globally. Use the `location` parameter
+LinkedIn searches globally & uses only the `location` parameter.

 ### **ZipRecruiter**

-ZipRecruiter searches for jobs in US/Canada. Use the `location` parameter
+ZipRecruiter searches for jobs in **US/Canada** & uses only the `location` parameter.

 ### **Indeed**

-For Indeed, you `location` along with `country` param
-
-You can specify the following countries when searching on Indeed (use the exact name):
-
-- Argentina
-- Australia
-- Austria
-- Bahrain
-- Belgium
-- Brazil
-- Canada
-- Chile
-- China
-- Colombia
-- Costa Rica
-- Czech Republic
-- Denmark
-- Ecuador
-- Egypt
-- Finland
-- France
-- Germany
-- Greece
-- Hong Kong
-- Hungary
-- India
-- Indonesia
-- Ireland
-- Israel
-- Italy
-- Japan
-- Kuwait
-- Luxembourg
-- Malaysia
-- Mexico
-- Morocco
-- Netherlands
-- New Zealand
-- Nigeria
-- Norway
-- Oman
-- Pakistan
-- Panama
-- Peru
-- Philippines
-- Poland
-- Portugal
-- Qatar
-- Romania
-- Saudi Arabia
-- Singapore
-- South Africa
-- South Korea
-- Spain
-- Sweden
-- Switzerland
-- Taiwan
-- Thailand
-- Turkey
-- Ukraine
-- United Arab Emirates
-- UK
-- USA
-- Uruguay
-- Venezuela
-- Vietnam
+Indeed supports most countries, but the `country_indeed` parameter is required. Additionally, use the `location`
+parameter to narrow down the location, e.g. city & state if necessary.
+
+You can specify the following countries when searching on Indeed (use the exact name):
+
+|                      |              |            |                |
+|----------------------|--------------|------------|----------------|
+| Argentina            | Australia    | Austria    | Bahrain        |
+| Belgium              | Brazil       | Canada     | Chile          |
+| China                | Colombia     | Costa Rica | Czech Republic |
+| Denmark              | Ecuador      | Egypt      | Finland        |
+| France               | Germany      | Greece     | Hong Kong      |
+| Hungary              | India        | Indonesia  | Ireland        |
+| Israel               | Italy        | Japan      | Kuwait         |
+| Luxembourg           | Malaysia     | Mexico     | Morocco        |
+| Netherlands          | New Zealand  | Nigeria    | Norway         |
+| Oman                 | Pakistan     | Panama     | Peru           |
+| Philippines          | Poland       | Portugal   | Qatar          |
+| Romania              | Saudi Arabia | Singapore  | South Africa   |
+| South Korea          | Spain        | Sweden     | Switzerland    |
+| Taiwan               | Thailand     | Turkey     | Ukraine        |
+| United Arab Emirates | UK           | USA        | Uruguay        |
+| Venezuela            | Vietnam      |            |                |

 ## Frequently Asked Questions

 ---

 **Q: Encountering issues with your queries?**
-**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems persist, [submit an issue](#).
+**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems
+persist, [submit an issue](https://github.com/cullenwatson/JobSpy/issues).

 ---

 **Q: Received a response code 429?**
-**A:** This indicates that you have been blocked by the job board site for sending too many requests. Currently, **ZipRecruiter** is particularly aggressive with blocking. We recommend:
+**A:** This indicates that you have been blocked by the job board site for sending too many requests. Currently,
+**LinkedIn** is particularly aggressive with blocking. We recommend:

 - Waiting a few seconds between requests.
-- Trying a VPN to change your IP address.
-
-**Note:** Proxy support is in development and coming soon!
+- Trying a VPN or proxy to change your IP address.

 ---

 **Q: Experiencing a "Segmentation fault: 11" on macOS Catalina?**
 **A:** This is due to `tls_client` dependency not supporting your architecture. Solutions and workarounds include:

 - Upgrade to a newer version of MacOS
 - Reach out to the maintainers of [tls_client](https://github.com/bogdanfinn/tls-client) for fixes
examples/JobSpy_Demo.ipynb (new file, 167 lines)

@@ -0,0 +1,167 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00a94b47-f47b-420f-ba7e-714ef219c006",
   "metadata": {},
   "outputs": [],
   "source": [
    "from jobspy import scrape_jobs\n",
    "import pandas as pd\n",
    "from IPython.display import display, HTML"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9f773e6c-d9fc-42cc-b0ef-63b739e78435",
   "metadata": {},
   "outputs": [],
   "source": [
    "pd.set_option('display.max_columns', None)\n",
    "pd.set_option('display.max_rows', None)\n",
    "pd.set_option('display.width', None)\n",
    "pd.set_option('display.max_colwidth', 50)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1253c1f8-9437-492e-9dd3-e7fe51099420",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 1 (no hyperlinks, USA)\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"linkedin\"],\n",
    "    location='san francisco',\n",
    "    search_term=\"engineer\",\n",
    "    results_wanted=5,\n",
    "\n",
    "    # use if you want to use a proxy\n",
    "    # proxy=\"socks5://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
    "    proxy=\"http://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
    "    #proxy=\"https://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
    ")\n",
    "display(jobs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6a581b2d-f7da-4fac-868d-9efe143ee20a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 2 - remote USA & hyperlinks\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"linkedin\", \"zip_recruiter\", \"indeed\"],\n",
    "    # location='san francisco',\n",
    "    search_term=\"software engineer\",\n",
    "    country_indeed=\"USA\",\n",
    "    hyperlinks=True,\n",
    "    is_remote=True,\n",
    "    results_wanted=5, \n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe8289bc-5b64-4202-9a64-7c117c83fd9a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# use if hyperlinks=True\n",
    "html = jobs.to_html(escape=False)\n",
    "# change max-width: 200px to show more or less of the content\n",
    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
    "display(HTML(truncate_width))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "951c2fe1-52ff-407d-8bb1-068049b36777",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 3 - with hyperlinks, international - linkedin (no zip_recruiter)\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"linkedin\"],\n",
    "    location='berlin',\n",
    "    search_term=\"engineer\",\n",
    "    hyperlinks=True,\n",
    "    results_wanted=5,\n",
    "    easy_apply=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1e37a521-caef-441c-8fc2-2eb5b2e7da62",
   "metadata": {},
   "outputs": [],
   "source": [
    "# use if hyperlinks=True\n",
    "html = jobs.to_html(escape=False)\n",
    "# change max-width: 200px to show more or less of the content\n",
    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
    "display(HTML(truncate_width))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0650e608-0b58-4bf5-ae86-68348035b16a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# example 4 - international indeed (no zip_recruiter)\n",
    "jobs = scrape_jobs(\n",
    "    site_name=[\"indeed\"],\n",
    "    search_term=\"engineer\",\n",
    "    country_indeed = \"China\",\n",
    "    hyperlinks=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "40913ac8-3f8a-4d7e-ac47-afb88316432b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# use if hyperlinks=True\n",
    "html = jobs.to_html(escape=False)\n",
    "# change max-width: 200px to show more or less of the content\n",
    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
    "display(HTML(truncate_width))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
examples/JobSpy_Demo.py (new file, 31 lines)

@@ -0,0 +1,31 @@
from jobspy import scrape_jobs
import pandas as pd

jobs: pd.DataFrame = scrape_jobs(
    site_name=["indeed", "linkedin", "zip_recruiter"],
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=50,  # be wary: the higher it is, the more likely you'll get blocked (a rotating proxy should work tho)
    country_indeed='USA',
    offset=25  # start jobs from an offset (use if a search failed and you want to continue)
    # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
)

# formatting for pandas
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', 50)  # set to 0 to see full job url / desc

# 1: output to console
print(jobs)

# 2: output to .csv
jobs.to_csv('./jobs.csv', index=False)
print('outputted to jobs.csv')

# 3: output to .xlsx
# jobs.to_xlsx('jobs.xlsx', index=False)

# 4: display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
# display(jobs)
poetry.lock (generated, 1774 lines changed)

File diff suppressed because it is too large

pyproject.toml

@@ -1,8 +1,9 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.0"
+version = "1.1.11"
 description = "Job scraper for LinkedIn, Indeed & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@zacharysproducts.com>", "Cullen Watson <cullen@cullen.ai>"]
 homepage = "https://github.com/cullenwatson/JobSpy"
 readme = "README.md"

 packages = [
src/jobspy/__init__.py

@@ -1,12 +1,18 @@
 import pandas as pd
-from typing import List, Tuple
+import concurrent.futures
+from concurrent.futures import ThreadPoolExecutor
+from typing import List, Tuple, Optional

 from .jobs import JobType, Location
 from .scrapers.indeed import IndeedScraper
 from .scrapers.ziprecruiter import ZipRecruiterScraper
 from .scrapers.linkedin import LinkedInScraper
 from .scrapers import ScraperInput, Site, JobResponse, Country
+
+from .scrapers.exceptions import (
+    LinkedInException,
+    IndeedException,
+    ZipRecruiterException,
+)

 SCRAPER_MAPPING = {
     Site.LINKEDIN: LinkedInScraper,
@@ -20,27 +26,42 @@ def _map_str_to_site(site_name: str) -> Site:


 def scrape_jobs(
-    site_name: str | Site | List[Site],
-    search_term: str,
-    location: str = "",
-    distance: int = None,
-    is_remote: bool = False,
-    job_type: JobType = None,
-    easy_apply: bool = False,  # linkedin
-    results_wanted: int = 15,
-    country: str = "usa",
+    site_name: str | List[str] | Site | List[Site],
+    search_term: str,
+    location: str = "",
+    distance: int = None,
+    is_remote: bool = False,
+    job_type: str = None,
+    easy_apply: bool = False,  # linkedin
+    results_wanted: int = 15,
+    country_indeed: str = "usa",
+    hyperlinks: bool = False,
+    proxy: Optional[str] = None,
+    offset: Optional[int] = 0
 ) -> pd.DataFrame:
     """
-    Asynchronously scrapes job data from multiple job sites.
+    Simultaneously scrapes job data from multiple job sites.
     :return: results_wanted: pandas dataframe containing job data
     """

+    def get_enum_from_value(value_str):
+        for job_type in JobType:
+            if value_str in job_type.value:
+                return job_type
+        raise Exception(f"Invalid job type: {value_str}")
+
+    job_type = get_enum_from_value(job_type) if job_type else None
+
     if type(site_name) == str:
-        site_name = _map_str_to_site(site_name)
+        site_type = [_map_str_to_site(site_name)]
+    else:  #: if type(site_name) == list
+        site_type = [
+            _map_str_to_site(site) if type(site) == str else site_name
+            for site in site_name
+        ]

-    country_enum = Country.from_string(country)
+    country_enum = Country.from_string(country_indeed)

-    site_type = [site_name] if type(site_name) == Site else site_name
     scraper_input = ScraperInput(
         site_type=site_type,
         country=country_enum,
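The new branch accepts a bare string, a single `Site`, or a mixed list of both. A minimal, self-contained sketch of that input normalization (names follow the diff; `_map_str_to_site` is stood in by a plain enum lookup, and the loop passes each *element* `site`, which is presumably what the comprehension in the diff intends where it writes `site_name`):

```python
from enum import Enum
from typing import List


class Site(Enum):
    LINKEDIN = "linkedin"
    INDEED = "indeed"
    ZIP_RECRUITER = "zip_recruiter"


def _map_str_to_site(site_name: str) -> Site:
    # stand-in for the real helper: match a lowercase string to the enum value
    return Site(site_name.lower())


def normalize_site_name(site_name: str | List[str] | Site | List[Site]) -> list[Site]:
    # single string -> one-element list, mirroring the diff's first branch
    if isinstance(site_name, str):
        return [_map_str_to_site(site_name)]
    if isinstance(site_name, Site):
        return [site_name]
    # list of strings and/or Site members: convert each element individually
    return [
        _map_str_to_site(site) if isinstance(site, str) else site
        for site in site_name
    ]


print(normalize_site_name("indeed"))                   # [<Site.INDEED: 'indeed'>]
print(normalize_site_name(["linkedin", Site.INDEED]))  # both members resolved
```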
@@ -51,71 +72,102 @@ def scrape_jobs(
         job_type=job_type,
         easy_apply=easy_apply,
         results_wanted=results_wanted,
+        offset=offset
     )

     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
         scraper_class = SCRAPER_MAPPING[site]
-        scraper = scraper_class()
-        scraped_data: JobResponse = scraper.scrape(scraper_input)
+        scraper = scraper_class(proxy=proxy)
+
+        try:
+            scraped_data: JobResponse = scraper.scrape(scraper_input)
+        except (LinkedInException, IndeedException, ZipRecruiterException) as lie:
+            raise lie
+        except Exception as e:
+            # unhandled exceptions
+            if site == Site.LINKEDIN:
+                raise LinkedInException()
+            if site == Site.INDEED:
+                raise IndeedException()
+            if site == Site.ZIP_RECRUITER:
+                raise ZipRecruiterException()
+            else:
+                raise e
         return site.value, scraped_data

-    results = {}
-    for site in scraper_input.site_type:
-        site_value, scraped_data = scrape_site(site)
-        results[site_value] = scraped_data
+    site_to_jobs_dict = {}
+
+    def worker(site):
+        site_value, scraped_data = scrape_site(site)
+        return site_value, scraped_data

-    dfs = []
-    for site, job_response in results.items():
+    with ThreadPoolExecutor() as executor:
+        future_to_site = {
+            executor.submit(worker, site): site for site in scraper_input.site_type
+        }
+
+        for future in concurrent.futures.as_completed(future_to_site):
+            site_value, scraped_data = future.result()
+            site_to_jobs_dict[site_value] = scraped_data
+
+    jobs_dfs: List[pd.DataFrame] = []
+
+    for site, job_response in site_to_jobs_dict.items():
         for job in job_response.jobs:
-            data = job.dict()
-            data["site"] = site
-            data["company"] = data["company_name"]
-            if data["job_type"]:
+            job_data = job.dict()
+            job_data[
+                "job_url_hyper"
+            ] = f'<a href="{job_data["job_url"]}">{job_data["job_url"]}</a>'
+            job_data["site"] = site
+            job_data["company"] = job_data["company_name"]
+            if job_data["job_type"]:
                 # Take the first value from the job type tuple
-                data["job_type"] = data["job_type"].value[0]
+                job_data["job_type"] = job_data["job_type"].value[0]
             else:
-                data["job_type"] = None
+                job_data["job_type"] = None

-            data["location"] = Location(**data["location"]).display_location()
+            job_data["location"] = Location(**job_data["location"]).display_location()

-            compensation_obj = data.get("compensation")
+            compensation_obj = job_data.get("compensation")
             if compensation_obj and isinstance(compensation_obj, dict):
-                data["interval"] = (
+                job_data["interval"] = (
                     compensation_obj.get("interval").value
                     if compensation_obj.get("interval")
                     else None
                 )
-                data["min_amount"] = compensation_obj.get("min_amount")
-                data["max_amount"] = compensation_obj.get("max_amount")
-                data["currency"] = compensation_obj.get("currency", "USD")
+                job_data["min_amount"] = compensation_obj.get("min_amount")
+                job_data["max_amount"] = compensation_obj.get("max_amount")
+                job_data["currency"] = compensation_obj.get("currency", "USD")
             else:
-                data["interval"] = None
-                data["min_amount"] = None
-                data["max_amount"] = None
-                data["currency"] = None
+                job_data["interval"] = None
+                job_data["min_amount"] = None
+                job_data["max_amount"] = None
+                job_data["currency"] = None

-            job_df = pd.DataFrame([data])
-            dfs.append(job_df)
+            job_df = pd.DataFrame([job_data])
+            jobs_dfs.append(job_df)

-    if dfs:
-        df = pd.concat(dfs, ignore_index=True)
-        desired_order = [
+    if jobs_dfs:
+        jobs_df = pd.concat(jobs_dfs, ignore_index=True)
+        desired_order: List[str] = [
+            "job_url_hyper" if hyperlinks else "job_url",
             "site",
             "title",
             "company",
             "location",
             "job_type",
             "date_posted",
             "interval",
+            "benefits",
             "min_amount",
             "max_amount",
             "currency",
-            "job_url",
+            "emails",
             "description",
         ]
-        df = df[desired_order]
+        jobs_formatted_df = jobs_df[desired_order]
     else:
-        df = pd.DataFrame()
+        jobs_formatted_df = pd.DataFrame()

-    return df
+    return jobs_formatted_df
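The rewritten `scrape_jobs` fans one worker per site out over a thread pool and folds the per-job dicts into a single DataFrame. A stripped-down, runnable sketch of that gather pattern with `as_completed` (generic names, not JobSpy's scrapers):

```python
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor

import pandas as pd


def fake_scrape(site: str) -> list[dict]:
    # stand-in for scraper.scrape(); returns one dict per job
    return [{"site": site, "title": f"{site} engineer"}]


site_to_jobs: dict[str, list[dict]] = {}
with ThreadPoolExecutor() as executor:
    # one future per site, exactly like the future_to_site mapping above
    future_to_site = {
        executor.submit(fake_scrape, site): site
        for site in ("indeed", "linkedin", "zip_recruiter")
    }
    # results are collected as each site finishes, not in submission order
    for future in concurrent.futures.as_completed(future_to_site):
        site_to_jobs[future_to_site[future]] = future.result()

# each job becomes a one-row frame, then everything is concatenated once
jobs_dfs = [pd.DataFrame([job]) for jobs in site_to_jobs.values() for job in jobs]
print(pd.concat(jobs_dfs, ignore_index=True))
```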
src/jobspy/jobs.py

@@ -170,7 +170,7 @@ class CompensationInterval(Enum):


 class Compensation(BaseModel):
-    interval: CompensationInterval
+    interval: Optional[CompensationInterval] = None
     min_amount: int = None
     max_amount: int = None
     currency: Optional[str] = "USD"

@@ -186,25 +186,9 @@ class JobPost(BaseModel):
     job_type: Optional[JobType] = None
     compensation: Optional[Compensation] = None
     date_posted: Optional[date] = None
+    benefits: Optional[str] = None
+    emails: Optional[list[str]] = None


 class JobResponse(BaseModel):
-    success: bool
-    error: str = None
-
-    total_results: Optional[int] = None
-
     jobs: list[JobPost] = []
-
-    returned_results: int = None
-
-    @validator("returned_results", pre=True, always=True)
-    def set_returned_results(cls, v, values):
-        jobs_list = values.get("jobs")
-
-        if v is None:
-            if jobs_list is not None:
-                return len(jobs_list)
-            else:
-                return 0
-        return v
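With every `JobResponse` field but `jobs` gone and `Compensation.interval` now optional, partially populated models validate cleanly. A small sketch in pydantic v1 style (which the removed `@validator` suggests the project used at this point):

```python
from datetime import date
from enum import Enum
from typing import Optional

from pydantic import BaseModel  # pydantic v1-style models, as in the diff


class CompensationInterval(Enum):
    YEARLY = "yearly"
    HOURLY = "hourly"


class Compensation(BaseModel):
    interval: Optional[CompensationInterval] = None
    min_amount: int = None
    max_amount: int = None
    currency: Optional[str] = "USD"


# interval may now be omitted entirely; before the change this raised a
# validation error because the field was required
print(Compensation(min_amount=65, max_amount=75))
# interval=None min_amount=65 max_amount=75 currency='USD'
```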
src/jobspy/scrapers/__init__.py

@@ -2,11 +2,6 @@ from ..jobs import Enum, BaseModel, JobType, JobResponse, Country
 from typing import List, Optional, Any


-class StatusException(Exception):
-    def __init__(self, status_code: int):
-        self.status_code = status_code
-
-
 class Site(Enum):
     LINKEDIN = "linkedin"
     INDEED = "indeed"

@@ -23,21 +18,15 @@ class ScraperInput(BaseModel):
     is_remote: bool = False
     job_type: Optional[JobType] = None
     easy_apply: bool = None  # linkedin
+    offset: int = 0

     results_wanted: int = 15


-class CommonResponse(BaseModel):
-    status: Optional[str]
-    error: Optional[str]
-    linkedin: Optional[Any] = None
-    indeed: Optional[Any] = None
-    zip_recruiter: Optional[Any] = None
-
-
 class Scraper:
-    def __init__(self, site: Site):
+    def __init__(self, site: Site, proxy: Optional[List[str]] = None):
         self.site = site
+        self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)

     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         ...
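The one-liner in `Scraper.__init__` immediately applies a lambda that turns a single proxy URL into the `{"http": ..., "https": ...}` mapping that `requests`-style clients accept, or `None` when no proxy is given. A plainer equivalent (the URL is the placeholder format from the README, not a real endpoint):

```python
from typing import Optional


def proxy_dict(proxy: Optional[str]) -> Optional[dict[str, str]]:
    # same effect as: (lambda p: {"http": p, "https": p} if p else None)(proxy)
    if not proxy:
        return None
    return {"http": proxy, "https": proxy}


print(proxy_dict("http://user:pass@host:port"))
# {'http': 'http://user:pass@host:port', 'https': 'http://user:pass@host:port'}
print(proxy_dict(None))  # None
```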
src/jobspy/scrapers/exceptions.py (new file, 18 lines)

@@ -0,0 +1,18 @@
"""
jobspy.scrapers.exceptions
~~~~~~~~~~~~~~~~~~~

This module contains the set of Scrapers' exceptions.
"""


class LinkedInException(Exception):
    """Failed to scrape LinkedIn"""


class IndeedException(Exception):
    """Failed to scrape Indeed"""


class ZipRecruiterException(Exception):
    """Failed to scrape ZipRecruiter"""
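These exceptions replace the old `success`/`error` fields on `JobResponse`, so failures now surface at the call site instead of in the return value. A hedged sketch of handling them (the import path follows the module docstring above; adjust if the package exposes them elsewhere):

```python
from jobspy import scrape_jobs
from jobspy.scrapers.exceptions import (
    IndeedException,
    LinkedInException,
    ZipRecruiterException,
)

try:
    jobs = scrape_jobs(
        site_name=["indeed"],
        search_term="software engineer",
        country_indeed="USA",
    )
except (LinkedInException, IndeedException, ZipRecruiterException) as e:
    # each scraper raises its own exception type on a bad response or bad proxy
    print(f"scrape failed: {e}")
```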
src/jobspy/scrapers/indeed/__init__.py

@@ -1,8 +1,13 @@
+"""
+jobspy.scrapers.indeed
+~~~~~~~~~~~~~~~~~~~
+
+This module contains routines to scrape Indeed.
+"""
 import re
 import math
 import io
 import json
-import traceback
 from datetime import datetime
 from typing import Optional

@@ -12,6 +17,7 @@ from bs4 import BeautifulSoup
 from bs4.element import Tag
 from concurrent.futures import ThreadPoolExecutor, Future

+from ..exceptions import IndeedException
 from ...jobs import (
     JobPost,
     Compensation,
@@ -20,26 +26,30 @@ from ...jobs import (
     JobResponse,
     JobType,
 )
-from .. import Scraper, ScraperInput, Site, Country, StatusException
+from .. import Scraper, ScraperInput, Site


-class ParsingException(Exception):
-    pass
+def extract_emails_from_text(text: str) -> Optional[list[str]]:
+    if not text:
+        return None
+    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
+    return email_regex.findall(text)


 class IndeedScraper(Scraper):
-    def __init__(self):
+    def __init__(self, proxy: Optional[str] = None):
         """
         Initializes IndeedScraper with the Indeed job search url
         """
         self.url = None
         self.country = None
         site = Site(Site.INDEED)
-        super().__init__(site)
+        super().__init__(site, proxy=proxy)

         self.jobs_per_page = 15
         self.seen_urls = set()

     def scrape_page(
-        self, scraper_input: ScraperInput, page: int, session: tls_client.Session
+            self, scraper_input: ScraperInput, page: int, session: tls_client.Session
     ) -> tuple[list[JobPost], int]:
         """
         Scrapes a page of Indeed for jobs with scraper_input criteria
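The `extract_emails_from_text` helper, duplicated verbatim in all three scrapers, pulls anything address-shaped out of a job description. A quick check of its behavior:

```python
import re
from typing import Optional


def extract_emails_from_text(text: str) -> Optional[list[str]]:
    if not text:
        return None
    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
    return email_regex.findall(text)


print(extract_emails_from_text("Apply at careers@example.com or hr@example.co.uk"))
# ['careers@example.com', 'hr@example.co.uk']
print(extract_emails_from_text(""))  # None (empty or missing description)
```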
@@ -52,13 +62,13 @@ class IndeedScraper(Scraper):
         domain = self.country.domain_value
         self.url = f"https://{domain}.indeed.com"

-        job_list = []
+        job_list: list[JobPost] = []

         params = {
             "q": scraper_input.search_term,
             "l": scraper_input.location,
             "filter": 0,
-            "start": 0 + page * 10,
+            "start": scraper_input.offset + page * 10,
         }
         if scraper_input.distance:
             params["radius"] = scraper_input.distance

@@ -71,17 +81,26 @@ class IndeedScraper(Scraper):

         if sc_values:
             params["sc"] = "0kf:" + "".join(sc_values) + ";"
-        response = session.get(self.url + "/jobs", params=params, allow_redirects=True)
-        # print(response.status_code)
-
-        if response.status_code not in range(200, 400):
-            raise StatusException(response.status_code)
+        try:
+            response = session.get(
+                f"{self.url}/jobs",
+                params=params,
+                allow_redirects=True,
+                proxy=self.proxy,
+                timeout_seconds=10,
+            )
+            if response.status_code not in range(200, 400):
+                raise IndeedException(
+                    f"bad response with status code: {response.status_code}"
+                )
+        except Exception as e:
+            if "Proxy responded with" in str(e):
+                raise IndeedException("bad proxy")
+            raise IndeedException(str(e))

         soup = BeautifulSoup(response.content, "html.parser")
-        with open("text2.html", "w", encoding="utf-8") as f:
-            f.write(str(soup))
-        if "did not match any jobs" in str(soup):
-            raise ParsingException("Search did not match any jobs")
+        if "did not match any jobs" in response.text:
+            raise IndeedException("Parsing exception: Search did not match any jobs")

         jobs = IndeedScraper.parse_jobs(
             soup
@@ -89,11 +108,11 @@ class IndeedScraper(Scraper):
         total_num_jobs = IndeedScraper.total_jobs(soup)

         if (
-            not jobs.get("metaData", {})
-            .get("mosaicProviderJobCardsModel", {})
-            .get("results")
+                not jobs.get("metaData", {})
+                .get("mosaicProviderJobCardsModel", {})
+                .get("results")
         ):
-            raise Exception("No jobs found.")
+            raise IndeedException("No jobs found.")

         def process_job(job) -> Optional[JobPost]:
             job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}'
@@ -125,9 +144,10 @@ class IndeedScraper(Scraper):
             date_posted = date_posted.strftime("%Y-%m-%d")

             description = self.get_description(job_url, session)
+            emails = extract_emails_from_text(description)
             with io.StringIO(job["snippet"]) as f:
-                soup = BeautifulSoup(f, "html.parser")
-                li_elements = soup.find_all("li")
+                soup_io = BeautifulSoup(f, "html.parser")
+                li_elements = soup_io.find_all("li")
                 if description is None and li_elements:
                     description = " ".join(li.text for li in li_elements)

@@ -140,6 +160,7 @@ class IndeedScraper(Scraper):
                 state=job.get("jobLocationState"),
                 country=self.country,
             ),
+            emails=extract_emails_from_text(description),
             job_type=job_type,
             compensation=compensation,
             date_posted=date_posted,
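The changed `"start"` parameter combines the caller's `offset` with the page index in steps of 10, which is what lets a search resume partway through. A worked check of the arithmetic:

```python
def indeed_start(offset: int, page: int) -> int:
    # mirrors params["start"] = scraper_input.offset + page * 10
    return offset + page * 10


# offset=25 resumes at the 25th result; pages 0, 1, 2 then fetch:
print([indeed_start(25, page) for page in range(3)])  # [25, 35, 45]
# with no offset the pages start at 0, 10, 20:
print([indeed_start(0, page) for page in range(3)])   # [0, 10, 20]
```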
@@ -168,52 +189,33 @@ class IndeedScraper(Scraper):
         )

         pages_to_process = (
-            math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
+                math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
         )

-        try:
-            #: get first page to initialize session
-            job_list, total_results = self.scrape_page(scraper_input, 0, session)
+        #: get first page to initialize session
+        job_list, total_results = self.scrape_page(scraper_input, 0, session)

-            with ThreadPoolExecutor(max_workers=1) as executor:
-                futures: list[Future] = [
-                    executor.submit(self.scrape_page, scraper_input, page, session)
-                    for page in range(1, pages_to_process + 1)
-                ]
+        with ThreadPoolExecutor(max_workers=1) as executor:
+            futures: list[Future] = [
+                executor.submit(self.scrape_page, scraper_input, page, session)
+                for page in range(1, pages_to_process + 1)
+            ]

-                for future in futures:
-                    jobs, _ = future.result()
+            for future in futures:
+                jobs, _ = future.result()

-                    job_list += jobs
-        except StatusException as e:
-            return JobResponse(
-                success=False,
-                error=f"Indeed returned status code {e.status_code}",
-            )
-        except ParsingException as e:
-            return JobResponse(
-                success=False,
-                error=f"Indeed failed to parse response: {e}",
-            )
-        except Exception as e:
-            print(f"LinkedIn failed to scrape: {e}\n{traceback.format_exc()}")
-            return JobResponse(
-                success=False,
-                error=f"Indeed failed to scrape: {e}",
-            )
+                job_list += jobs

         if len(job_list) > scraper_input.results_wanted:
             job_list = job_list[: scraper_input.results_wanted]

         job_response = JobResponse(
-            success=True,
             jobs=job_list,
             total_results=total_results,
         )
         return job_response

-    def get_description(self, job_page_url: str, session: tls_client.Session) -> str:
+    def get_description(self, job_page_url: str, session: tls_client.Session) -> Optional[str]:
         """
         Retrieves job description by going to the job page url
         :param job_page_url:
@@ -227,14 +229,12 @@ class IndeedScraper(Scraper):

         try:
             response = session.get(
-                formatted_url, allow_redirects=True, timeout_seconds=5
+                formatted_url, allow_redirects=True, timeout_seconds=5, proxy=self.proxy
             )
-        except requests.exceptions.Timeout:
-            print("The request timed out.")
+        except Exception as e:
             return None

         if response.status_code not in range(200, 400):
-            print("status code not in range")
             return None

         raw_description = response.json()["body"]["jobInfoWrapperModel"][
@@ -258,14 +258,17 @@ class IndeedScraper(Scraper):
                 label = taxonomy["attributes"][0].get("label")
                 if label:
                     job_type_str = label.replace("-", "").replace(" ", "").lower()
-                    # print(f"Debug: job_type_str = {job_type_str}")
-                    return IndeedScraper.get_enum_from_value(job_type_str)
+                    return IndeedScraper.get_enum_from_job_type(job_type_str)
         return None

     @staticmethod
-    def get_enum_from_value(value_str):
-        for job_type in JobType:
-            if value_str in job_type.value:
-                return job_type
+    def get_enum_from_job_type(job_type_str):
+        """
+        Given a string, returns the corresponding JobType enum member if a match is found.
+        """
+        for job_type in JobType:
+            if job_type_str in job_type.value:
+                return job_type
         return None

@@ -286,9 +289,9 @@ class IndeedScraper(Scraper):

         for tag in script_tags:
             if (
-                tag.string
-                and "mosaic.providerData" in tag.string
-                and "mosaic-provider-jobcards" in tag.string
+                    tag.string
+                    and "mosaic.providerData" in tag.string
+                    and "mosaic-provider-jobcards" in tag.string
             ):
                 return tag
         return None
@@ -304,9 +307,9 @@ class IndeedScraper(Scraper):
                 jobs = json.loads(m.group(1).strip())
                 return jobs
             else:
-                raise ParsingException("Could not find mosaic provider job cards data")
+                raise IndeedException("Could not find mosaic provider job cards data")
         else:
-            raise ParsingException(
+            raise IndeedException(
                 "Could not find a script tag containing mosaic provider data"
             )
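`get_enum_from_job_type` works because each `JobType` member's value is a collection of spelling variants, so a normalized label like "fulltime" can be matched with `in`. A self-contained sketch of the idea (the variant tuples here are an illustrative subset, not the library's exact ones):

```python
from enum import Enum
from typing import Optional


class JobType(Enum):
    # each value is a tuple of spelling variants; illustrative subset
    FULL_TIME = ("fulltime", "fulltid")
    PART_TIME = ("parttime", "deltid")
    INTERNSHIP = ("internship", "internshala")


def get_enum_from_job_type(job_type_str: str) -> Optional[JobType]:
    for job_type in JobType:
        if job_type_str in job_type.value:  # membership test against the variants
            return job_type
    return None


label = "Full-time".replace("-", "").replace(" ", "").lower()  # -> "fulltime"
print(get_enum_from_job_type(label))  # JobType.FULL_TIME
print(get_enum_from_job_type("gig"))  # None (no variant matches)
```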
src/jobspy/scrapers/linkedin/__init__.py

@@ -1,29 +1,50 @@
-from typing import Optional, Tuple
+"""
+jobspy.scrapers.linkedin
+~~~~~~~~~~~~~~~~~~~
+
+This module contains routines to scrape LinkedIn.
+"""
+from typing import Optional
 from datetime import datetime

 import requests
+import time
+import re
+from requests.exceptions import ProxyError
+from concurrent.futures import ThreadPoolExecutor, as_completed
 from bs4 import BeautifulSoup
 from bs4.element import Tag
+from threading import Lock

 from .. import Scraper, ScraperInput, Site
+from ..exceptions import LinkedInException
 from ...jobs import (
     JobPost,
     Location,
     JobResponse,
     JobType,
-    Compensation,
-    CompensationInterval,
 )


+def extract_emails_from_text(text: str) -> Optional[list[str]]:
+    if not text:
+        return None
+    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
+    return email_regex.findall(text)
+
+
 class LinkedInScraper(Scraper):
-    def __init__(self):
+    MAX_RETRIES = 3
+    DELAY = 10
+
+    def __init__(self, proxy: Optional[str] = None):
         """
         Initializes LinkedInScraper with the LinkedIn job search url
         """
         site = Site(Site.LINKEDIN)
         self.country = "worldwide"
         self.url = "https://www.linkedin.com"
-        super().__init__(site)
+        super().__init__(site, proxy=proxy)

     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
@@ -31,12 +52,12 @@ class LinkedInScraper(Scraper):
         :param scraper_input:
         :return: job_response
         """
-        self.country = "worldwide"
         job_list: list[JobPost] = []
         seen_urls = set()
-        page, processed_jobs, job_count = 0, 0, 0
+        url_lock = Lock()
+        page = scraper_input.offset // 25 + 25 if scraper_input.offset else 0

-        def job_type_code(job_type):
+        def job_type_code(job_type_enum):
             mapping = {
                 JobType.FULL_TIME: "F",
                 JobType.PART_TIME: "P",
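LinkedIn's `f_JT` query parameter takes single-letter job-type codes, so the scraper maps `JobType` members onto them before building the search URL. A small sketch of the lookup (the enum values and the internship/contract codes shown are illustrative; the extraction only preserves the F, P, and T entries):

```python
from enum import Enum


class JobType(Enum):
    FULL_TIME = "fulltime"
    PART_TIME = "parttime"
    INTERNSHIP = "internship"
    CONTRACT = "contract"
    TEMPORARY = "temporary"


def job_type_code(job_type_enum: JobType) -> str:
    # mirrors the mapping used to build LinkedIn's f_JT url parameter
    mapping = {
        JobType.FULL_TIME: "F",
        JobType.PART_TIME: "P",
        JobType.INTERNSHIP: "I",
        JobType.CONTRACT: "C",
        JobType.TEMPORARY: "T",
    }
    return mapping.get(job_type_enum, "")  # empty string when no filter applies


print(job_type_code(JobType.FULL_TIME))  # "F"
print(job_type_code(JobType.TEMPORARY))  # "T"
```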
@@ -45,121 +66,134 @@ class LinkedInScraper(Scraper):
                 JobType.TEMPORARY: "T",
             }

-            return mapping.get(job_type, "")
+            return mapping.get(job_type_enum, "")

-        with requests.Session() as session:
-            while len(job_list) < scraper_input.results_wanted:
-                params = {
-                    "keywords": scraper_input.search_term,
-                    "location": scraper_input.location,
-                    "distance": scraper_input.distance,
-                    "f_WT": 2 if scraper_input.is_remote else None,
-                    "f_JT": job_type_code(scraper_input.job_type)
-                    if scraper_input.job_type
-                    else None,
-                    "pageNum": page,
-                    "f_AL": "true" if scraper_input.easy_apply else None,
-                }
+        while len(job_list) < scraper_input.results_wanted and page < 1000:
+            params = {
+                "keywords": scraper_input.search_term,
+                "location": scraper_input.location,
+                "distance": scraper_input.distance,
+                "f_WT": 2 if scraper_input.is_remote else None,
+                "f_JT": job_type_code(scraper_input.job_type)
+                if scraper_input.job_type
+                else None,
+                "pageNum": 0,
+                page: page + scraper_input.offset,
+                "f_AL": "true" if scraper_input.easy_apply else None,
+            }

-                params = {k: v for k, v in params.items() if v is not None}
-                response = session.get(
-                    f"{self.url}/jobs/search", params=params, allow_redirects=True
-                )
-
-                if response.status_code != 200:
-                    return JobResponse(
-                        success=False,
-                        error=f"Response returned {response.status_code}",
-                    )
-
-                soup = BeautifulSoup(response.text, "html.parser")
-
-                if page == 0:
-                    job_count_text = soup.find(
-                        "span", class_="results-context-header__job-count"
-                    ).text
-                    job_count = int("".join(filter(str.isdigit, job_count_text)))
-
-                for job_card in soup.find_all(
-                    "div",
-                    class_="base-card relative w-full hover:no-underline focus:no-underline base-card--link base-search-card base-search-card--link job-search-card",
-                ):
-                    processed_jobs += 1
-                    data_entity_urn = job_card.get("data-entity-urn", "")
-                    job_id = (
-                        data_entity_urn.split(":")[-1] if data_entity_urn else "N/A"
-                    )
-                    job_url = f"{self.url}/jobs/view/{job_id}"
-                    if job_url in seen_urls:
-                        continue
-                    seen_urls.add(job_url)
-                    job_info = job_card.find("div", class_="base-search-card__info")
-                    if job_info is None:
-                        continue
-                    title_tag = job_info.find("h3", class_="base-search-card__title")
-                    title = title_tag.text.strip() if title_tag else "N/A"
-
-                    company_tag = job_info.find("a", class_="hidden-nested-link")
-                    company = company_tag.text.strip() if company_tag else "N/A"
-
-                    metadata_card = job_info.find(
-                        "div", class_="base-search-card__metadata"
-                    )
-                    location: Location = self.get_location(metadata_card)
-
-                    datetime_tag = metadata_card.find(
-                        "time", class_="job-search-card__listdate"
-                    )
-                    description, job_type = LinkedInScraper.get_description(job_url)
-                    if datetime_tag:
-                        datetime_str = datetime_tag["datetime"]
-                        date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
-                    else:
-                        date_posted = None
-
-                    job_post = JobPost(
-                        title=title,
-                        description=description,
-                        company_name=company,
-                        location=location,
-                        date_posted=date_posted,
-                        job_url=job_url,
-                        job_type=job_type,
-                        compensation=Compensation(
-                            interval=CompensationInterval.YEARLY, currency=None
-                        ),
-                    )
-                    job_list.append(job_post)
-                    if (
-                        len(job_list) >= scraper_input.results_wanted
-                        or processed_jobs >= job_count
-                    ):
-                        break
-                if (
-                    len(job_list) >= scraper_input.results_wanted
-                    or processed_jobs >= job_count
-                ):
-                    break
-                page += 1
+            params = {k: v for k, v in params.items() if v is not None}
+            retries = 0
+            while retries < self.MAX_RETRIES:
+                try:
+                    response = requests.get(
+                        f"{self.url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
+                        params=params,
+                        allow_redirects=True,
+                        proxies=self.proxy,
+                        timeout=10,
+                    )
+                    response.raise_for_status()
+                    break
+                except requests.HTTPError as e:
+                    if hasattr(e, 'response') and e.response is not None:
+                        if e.response.status_code == 429:
+                            time.sleep(self.DELAY)
+                            retries += 1
+                            continue
+                        else:
+                            raise LinkedInException(f"bad response status code: {e.response.status_code}")
+                    else:
+                        raise
+                except ProxyError as e:
+                    raise LinkedInException("bad proxy")
+                except Exception as e:
+                    raise LinkedInException(str(e))
+            else:
+                # Raise an exception if the maximum number of retries is reached
+                raise LinkedInException("Max retries reached, failed to get a valid response")
+
+            soup = BeautifulSoup(response.text, "html.parser")
+
+            with ThreadPoolExecutor(max_workers=5) as executor:
+                futures = []
+                for job_card in soup.find_all("div", class_="base-search-card"):
+                    job_url = None
+                    href_tag = job_card.find("a", class_="base-card__full-link")
+                    if href_tag and "href" in href_tag.attrs:
+                        href = href_tag.attrs["href"].split("?")[0]
+                        job_id = href.split("-")[-1]
+                        job_url = f"{self.url}/jobs/view/{job_id}"
+
+                    with url_lock:
+                        if job_url in seen_urls:
+                            continue
+                        seen_urls.add(job_url)
+
+                    futures.append(executor.submit(self.process_job, job_card, job_url))
+
+                for future in as_completed(futures):
+                    try:
+                        job_post = future.result()
+                        if job_post:
+                            job_list.append(job_post)
+                    except Exception as e:
+                        raise LinkedInException("Exception occurred while processing jobs")
+            page += 25

-        job_list = job_list[: scraper_input.results_wanted]
-        job_response = JobResponse(
-            success=True,
-            jobs=job_list,
-            total_results=job_count,
-        )
-        return job_response
+        job_list = job_list[: scraper_input.results_wanted]
+        return JobResponse(jobs=job_list)
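The retry loop above sleeps `DELAY` seconds and retries only on HTTP 429, failing fast on everything else, with a `while`/`else` that fires when the retries run out. A stripped-down, runnable sketch of the same control flow (the function name and error message are illustrative):

```python
import time

import requests

MAX_RETRIES = 3
DELAY = 10


def get_with_retry(url: str, **kwargs) -> requests.Response:
    retries = 0
    while retries < MAX_RETRIES:
        try:
            response = requests.get(url, timeout=10, **kwargs)
            response.raise_for_status()
            return response
        except requests.HTTPError as e:
            if e.response is not None and e.response.status_code == 429:
                # rate limited: wait, then retry
                time.sleep(DELAY)
                retries += 1
                continue
            raise  # any other HTTP error is not retried
    # loop exhausted without a successful response
    raise RuntimeError("Max retries reached, failed to get a valid response")
```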
-    @staticmethod
-    def get_description(job_page_url: str) -> Optional[str]:
+    def process_job(self, job_card: Tag, job_url: str) -> Optional[JobPost]:
+        title_tag = job_card.find("span", class_="sr-only")
+        title = title_tag.get_text(strip=True) if title_tag else "N/A"
+
+        company_tag = job_card.find("h4", class_="base-search-card__subtitle")
+        company_a_tag = company_tag.find("a") if company_tag else None
+        company = company_a_tag.get_text(strip=True) if company_a_tag else "N/A"
+
+        metadata_card = job_card.find("div", class_="base-search-card__metadata")
+        location = self.get_location(metadata_card)
+
+        datetime_tag = metadata_card.find("time", class_="job-search-card__listdate") if metadata_card else None
+        date_posted = None
+        if datetime_tag and "datetime" in datetime_tag.attrs:
+            datetime_str = datetime_tag["datetime"]
+            try:
+                date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
+            except Exception as e:
+                date_posted = None
+        benefits_tag = job_card.find("span", class_="result-benefits__text")
+        benefits = " ".join(benefits_tag.get_text().split()) if benefits_tag else None
+
+        description, job_type = self.get_job_description(job_url)
+
+        return JobPost(
+            title=title,
+            description=description,
+            company_name=company,
+            location=location,
+            date_posted=date_posted,
+            job_url=job_url,
+            job_type=job_type,
+            benefits=benefits,
+            emails=extract_emails_from_text(description)
+        )
+
+    def get_job_description(self, job_page_url: str) -> tuple[None, None] | tuple[
+        str | None, tuple[str | None, JobType | None]]:
         """
         Retrieves job description by going to the job page url
         :param job_page_url:
         :return: description or None
         """
-        response = requests.get(job_page_url, allow_redirects=True)
-        if response.status_code not in range(200, 400):
+        try:
+            response = requests.get(job_page_url, timeout=5, proxies=self.proxy)
+            response.raise_for_status()
+        except Exception as e:
             return None, None

         soup = BeautifulSoup(response.text, "html.parser")
@@ -167,19 +201,19 @@ class LinkedInScraper(Scraper):
             "div", class_=lambda x: x and "show-more-less-html__markup" in x
         )

-        text_content = None
+        description = None
         if div_content:
-            text_content = " ".join(div_content.get_text().split()).strip()
+            description = " ".join(div_content.get_text().split()).strip()

         def get_job_type(
-            soup: BeautifulSoup,
-        ) -> Tuple[Optional[str], Optional[JobType]]:
+                soup_job_type: BeautifulSoup,
+        ) -> JobType | None:
             """
             Gets the job type from job page
-            :param soup:
+            :param soup_job_type:
             :return: JobType
             """
-            h3_tag = soup.find(
+            h3_tag = soup_job_type.find(
                 "h3",
                 class_="description__job-criteria-subheader",
                 string=lambda text: "Employment type" in text,
@@ -198,7 +232,7 @@ class LinkedInScraper(Scraper):

             return LinkedInScraper.get_enum_from_value(employment_type)

-        return text_content, get_job_type(soup)
+        return description, get_job_type(soup)

     @staticmethod
     def get_enum_from_value(value_str):
@@ -229,3 +263,9 @@ class LinkedInScraper(Scraper):
         )

         return location
+
+
+def extract_emails_from_text(text: str) -> Optional[list[str]]:
+    if not text:
+        return None
+    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
+    return email_regex.findall(text)
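The description lookup matches on a *partial* class name via a callable `class_` filter, which keeps working when LinkedIn appends extra modifier classes to the div. A self-contained check of that BeautifulSoup idiom:

```python
from bs4 import BeautifulSoup

html = """
<div class="show-more-less-html__markup show-more-less-html__markup--clamp-after-5">
  We are hiring a software engineer...
</div>
"""

soup = BeautifulSoup(html, "html.parser")
# a callable class_ filter is tested against each class value; the "x and"
# guard handles tags with no class attribute, and the substring match
# survives extra modifier classes
div_content = soup.find(
    "div", class_=lambda x: x and "show-more-less-html__markup" in x
)
print(" ".join(div_content.get_text().split()).strip())
# -> "We are hiring a software engineer..."
```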
src/jobspy/scrapers/ziprecruiter/__init__.py

@@ -1,17 +1,24 @@
+"""
+jobspy.scrapers.ziprecruiter
+~~~~~~~~~~~~~~~~~~~
+
+This module contains routines to scrape ZipRecruiter.
+"""
 import math
 import json
 import re
-import traceback
-from datetime import datetime
-from typing import Optional, Tuple
-from urllib.parse import urlparse, parse_qs
+from datetime import datetime, date
+from typing import Optional, Tuple, Any
+from urllib.parse import urlparse, parse_qs, urlunparse

 import tls_client
 import requests
 from bs4 import BeautifulSoup
 from bs4.element import Tag
 from concurrent.futures import ThreadPoolExecutor, Future

-from .. import Scraper, ScraperInput, Site, StatusException
+from .. import Scraper, ScraperInput, Site
+from ..exceptions import ZipRecruiterException
 from ...jobs import (
     JobPost,
     Compensation,
@@ -22,15 +29,21 @@ from ...jobs import (
     Country,
 )

+
+def extract_emails_from_text(text: str) -> Optional[list[str]]:
+    if not text:
+        return None
+    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
+    return email_regex.findall(text)
+
+
 class ZipRecruiterScraper(Scraper):
-    def __init__(self):
+    def __init__(self, proxy: Optional[str] = None):
         """
         Initializes ZipRecruiterScraper with the ZipRecruiter job search url
         """
         site = Site(Site.ZIP_RECRUITER)
         self.url = "https://www.ziprecruiter.com"
-        super().__init__(site)
+        super().__init__(site, proxy=proxy)

         self.jobs_per_page = 20
         self.seen_urls = set()
@@ -38,83 +51,69 @@ class ZipRecruiterScraper(Scraper):
             client_identifier="chrome112", random_tls_extension_order=True
         )

-    def scrape_page(
-        self, scraper_input: ScraperInput, page: int
-    ) -> tuple[list[JobPost], int | None]:
+    def find_jobs_in_page(
+        self, scraper_input: ScraperInput, page: int
+    ) -> list[JobPost]:
         """
         Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
         :param scraper_input:
         :param page:
-        :param session:
-        :return: jobs found on page, total number of jobs found for search
+        :return: jobs found on page
         """
-        job_list = []
-
-        job_type_value = None
-        if scraper_input.job_type:
-            if scraper_input.job_type.value == "fulltime":
-                job_type_value = "full_time"
-            elif scraper_input.job_type.value == "parttime":
-                job_type_value = "part_time"
-            else:
-                job_type_value = scraper_input.job_type.value
-
-        params = {
-            "search": scraper_input.search_term,
-            "location": scraper_input.location,
-            "page": page,
-            "form": "jobs-landing",
-        }
-
-        if scraper_input.is_remote:
-            params["refine_by_location_type"] = "only_remote"
-
-        if scraper_input.distance:
-            params["radius"] = scraper_input.distance
-
-        if job_type_value:
-            params[
-                "refine_by_employment"
-            ] = f"employment_type:employment_type:{job_type_value}"
-
-        response = self.session.get(
-            self.url + "/jobs-search",
-            headers=ZipRecruiterScraper.headers(),
-            params=params,
-            allow_redirects=True,
-        )
-
-        # print(response.status_code)
-        if response.status_code != 200:
-            raise StatusException(response.status_code)
-
-        html_string = response.text
-        soup = BeautifulSoup(html_string, "html.parser")
-
-        script_tag = soup.find("script", {"id": "js_variables"})
-        data = json.loads(script_tag.string)
-
-        if page == 1:
-            job_count = int(data["totalJobCount"].replace(",", ""))
-        else:
-            job_count = None
+        job_list: list[JobPost] = []
+        try:
+            response = self.session.get(
+                f"{self.url}/jobs-search",
+                headers=ZipRecruiterScraper.headers(),
+                params=ZipRecruiterScraper.add_params(scraper_input, page),
+                allow_redirects=True,
+                proxy=self.proxy,
+                timeout_seconds=10,
+            )
+            if response.status_code != 200:
+                raise ZipRecruiterException(
+                    f"bad response status code: {response.status_code}"
+                )
+        except Exception as e:
+            if "Proxy responded with non 200 code" in str(e):
+                raise ZipRecruiterException("bad proxy")
+            raise ZipRecruiterException(str(e))
+
+        soup = BeautifulSoup(response.text, "html.parser")
+        js_tag = soup.find("script", {"id": "js_variables"})
+
+        if js_tag:
+            page_json = json.loads(js_tag.string)
+            jobs_list = page_json.get("jobList")
+            if jobs_list:
+                page_variant = "javascript"
+                # print('type javascript', len(jobs_list))
+            else:
+                page_variant = "html_2"
+                jobs_list = soup.find_all("div", {"class": "job_content"})
+                # print('type 2 html', len(jobs_list))
+        else:
+            page_variant = "html_1"
+            jobs_list = soup.find_all("li", {"class": "job-listing"})
+            # print('type 1 html', len(jobs_list))

         with ThreadPoolExecutor(max_workers=10) as executor:
-            if "jobList" in data and data["jobList"]:
-                jobs_js = data["jobList"]
+            if page_variant == "javascript":
                 job_results = [
-                    executor.submit(self.process_job_js, job) for job in jobs_js
+                    executor.submit(self.process_job_javascript, job)
+                    for job in jobs_list
                 ]
-            else:
-                jobs_html = soup.find_all("div", {"class": "job_content"})
+            elif page_variant == "html_1":
                 job_results = [
-                    executor.submit(self.process_job_html, job) for job in jobs_html
+                    executor.submit(self.process_job_html_1, job) for job in jobs_list
                 ]
+            elif page_variant == "html_2":
+                job_results = [
+                    executor.submit(self.process_job_html_2, job) for job in jobs_list
+                ]

         job_list = [result.result() for result in job_results if result.result()]

-        return job_list, job_count
+        return job_list

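find_jobs_in_page now signals failure by raising ZipRecruiterException instead of returning a status tuple, so any caller outside scrape() needs a try/except. A hedged caller-side sketch (the import path for the exception class is an assumption):

    # Assumed import path for the exception class.
    from jobspy.scrapers.exceptions import ZipRecruiterException

    try:
        jobs = scraper.find_jobs_in_page(scraper_input, page=1)
    except ZipRecruiterException as e:
        print(f"ZipRecruiter page fetch failed: {e}")
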
     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
@@ -122,57 +121,35 @@ class ZipRecruiterScraper(Scraper):
         :param scraper_input:
         :return: job_response
         """
+        start_page = (scraper_input.offset // self.jobs_per_page) + 1 if scraper_input.offset else 1
+        #: get first page to initialize session
+        job_list: list[JobPost] = self.find_jobs_in_page(scraper_input, start_page)
         pages_to_process = max(
             3, math.ceil(scraper_input.results_wanted / self.jobs_per_page)
         )

-        try:
-            #: get first page to initialize session
-            job_list, total_results = self.scrape_page(scraper_input, 1)
-
-            with ThreadPoolExecutor(max_workers=10) as executor:
-                futures: list[Future] = [
-                    executor.submit(self.scrape_page, scraper_input, page)
-                    for page in range(2, pages_to_process + 1)
-                ]
-
-                for future in futures:
-                    jobs, _ = future.result()
-
-                    job_list += jobs
-
-        except StatusException as e:
-            return JobResponse(
-                success=False,
-                error=f"ZipRecruiter returned status code {e.status_code}",
-            )
-        except Exception as e:
-            print(f"ZipRecruiter failed to scrape: {e}\n{traceback.format_exc()}")
-            return JobResponse(
-                success=False,
-                error=f"ZipRecruiter failed to scrape: {e}",
-            )
-
-        #: note: this does not handle if the results are more or less than the results_wanted
-        if len(job_list) > scraper_input.results_wanted:
-            job_list = job_list[: scraper_input.results_wanted]
-
-        job_response = JobResponse(
-            success=True,
-            jobs=job_list,
-            total_results=total_results,
-        )
-        return job_response
+        with ThreadPoolExecutor(max_workers=10) as executor:
+            futures: list[Future] = [
+                executor.submit(self.find_jobs_in_page, scraper_input, page)
+                for page in range(start_page + 1, start_page + pages_to_process + 2)
+            ]
+
+            for future in futures:
+                jobs = future.result()
+
+                job_list += jobs
+
+        job_list = job_list[: scraper_input.results_wanted]
+        return JobResponse(jobs=job_list)

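The new offset handling maps a result offset onto a starting page using the jobs_per_page of 20 set in __init__. A worked example of the arithmetic:

    # offset=45 with 20 jobs per page -> (45 // 20) + 1 == 3, so scraping starts on page 3.
    # A falsy offset (0 or None) falls back to page 1.
    start_page = (45 // 20) + 1
    assert start_page == 3
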
-    def process_job_html(self, job: Tag) -> Optional[JobPost]:
+    def process_job_html_1(self, job: Tag) -> Optional[JobPost]:
         """
         Parses a job from the job content tag
         :param job: BeautifulSoup Tag for one job post
         :return JobPost
         TODO: this method isn't finished due to not encountering this type of HTML often
         """
-        job_url = job.find("a", {"class": "job_link"})["href"]
+        job_url = self.cleanurl(job.find("a", {"class": "job_link"})["href"])
         if job_url in self.seen_urls:
             return None

@@ -180,8 +157,7 @@ class ZipRecruiterScraper(Scraper):
         company = job.find("a", {"class": "company_name"}).text.strip()

         description, updated_job_url = self.get_description(job_url)
-        if updated_job_url is not None:
-            job_url = updated_job_url
+        # job_url = updated_job_url if updated_job_url else job_url
         if description is None:
             description = job.find("p", {"class": "job_snippet"}).text.strip()

@@ -189,7 +165,7 @@ class ZipRecruiterScraper(Scraper):
         job_type = None
         if job_type_element:
             job_type_text = (
-                job_type_element.text.strip().lower().replace("-", "").replace(" ", "")
+                job_type_element.text.strip().lower().replace("_", "").replace(" ", "")
             )
             job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)

@@ -204,26 +180,68 @@ class ZipRecruiterScraper(Scraper):
             compensation=ZipRecruiterScraper.get_compensation(job),
             date_posted=date_posted,
             job_url=job_url,
+            emails=extract_emails_from_text(description),
         )
         return job_post

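One subtle change above: the job-type text is now normalized by stripping underscores rather than hyphens before the enum lookup. A worked example of the new transform:

    # "Full_Time" -> "fulltime", which the JobType membership check can match.
    text = "Full_Time".strip().lower().replace("_", "").replace(" ", "")
    assert text == "fulltime"
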
-    def process_job_js(self, job: dict) -> JobPost:
+    def process_job_html_2(self, job: Tag) -> Optional[JobPost]:
+        """
+        Parses a job from the job content tag for a second variant of HTML that ZR uses
+        :param job: BeautifulSoup Tag for one job post
+        :return JobPost
+        """
+        job_url = self.cleanurl(job.find("a", class_="job_link")["href"])
+        title = job.find("h2", class_="title").text
+        company = job.find("a", class_="company_name").text.strip()
+
+        description, updated_job_url = self.get_description(job_url)
+        # job_url = updated_job_url if updated_job_url else job_url
+        if description is None:
+            description = job.find("p", class_="job_snippet").get_text().strip()
+
+        job_type_text = job.find("li", class_="perk_item perk_type")
+        job_type = None
+        if job_type_text:
+            job_type_text = (
+                job_type_text.get_text()
+                .strip()
+                .lower()
+                .replace("-", "")
+                .replace(" ", "")
+            )
+            job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
+        date_posted = ZipRecruiterScraper.get_date_posted(job)
+
+        job_post = JobPost(
+            title=title,
+            description=description,
+            company_name=company,
+            location=ZipRecruiterScraper.get_location(job),
+            job_type=job_type,
+            compensation=ZipRecruiterScraper.get_compensation(job),
+            date_posted=date_posted,
+            job_url=job_url,
+        )
+        return job_post

+    def process_job_javascript(self, job: dict) -> JobPost:
         title = job.get("Title")
-        description = BeautifulSoup(
-            job.get("Snippet", "").strip(), "html.parser"
-        ).get_text()
+        job_url = self.cleanurl(job.get("JobURL"))
+
+        description, updated_job_url = self.get_description(job_url)
+        # job_url = updated_job_url if updated_job_url else job_url
+        if description is None:
+            description = BeautifulSoup(
+                job.get("Snippet", "").strip(), "html.parser"
+            ).get_text()

         company = job.get("OrgName")
         location = Location(
             city=job.get("City"), state=job.get("State"), country=Country.US_CANADA
         )
-        try:
-            job_type = ZipRecruiterScraper.get_job_type_enum(
-                job.get("EmploymentType", "").replace("-", "_").lower()
-            )
-        except ValueError:
-            # print(f"Skipping job due to unrecognized job type: {job.get('EmploymentType')}")
-            return None
+        job_type = ZipRecruiterScraper.get_job_type_enum(
+            job.get("EmploymentType", "").replace("-", "").lower()
+        )

         formatted_salary = job.get("FormattedSalaryShort", "")
         salary_parts = formatted_salary.split(" ")
@@ -259,7 +277,6 @@ class ZipRecruiterScraper(Scraper):
             date_posted = date_posted_obj.date()
         else:
             date_posted = date.today()
-        job_url = job.get("JobURL")

         return JobPost(
             title=title,
@@ -273,17 +290,11 @@ class ZipRecruiterScraper(Scraper):
         )
-        return job_post

-    @staticmethod
-    def get_enum_from_value(value_str):
-        for job_type in JobType:
-            if value_str in job_type.value:
-                return job_type
-        return None
-
+    @staticmethod
+    def get_job_type_enum(job_type_str: str) -> Optional[JobType]:
+        for job_type in JobType:
+            if job_type_str in job_type.value:
+                return job_type
+        return None

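get_job_type_enum does a substring membership test against each JobType's value and returns the first match, or None. A hedged usage sketch (the specific enum member is an assumption about the JobType model):

    # Assuming JobType defines a full-time member whose value contains "fulltime".
    job_type = ZipRecruiterScraper.get_job_type_enum("fulltime")
    # -> the matching JobType member, or None when nothing matches
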
@@ -295,15 +306,17 @@ class ZipRecruiterScraper(Scraper):
         :return: description or None, response url
         """
         try:
-            response = self.session.get(
+            response = requests.get(
                 job_page_url,
                 headers=ZipRecruiterScraper.headers(),
                 allow_redirects=True,
-                timeout_seconds=5,
+                timeout=5,
+                proxies=self.proxy,
             )
-        except requests.exceptions.Timeout:
-            print("The request timed out.")
-            return None
-        if response.status_code not in range(200, 400):
-            return None, None
+        except Exception as e:
+            return None, None

         html_string = response.content
         soup_job = BeautifulSoup(html_string, "html.parser")
@@ -313,6 +326,36 @@ class ZipRecruiterScraper(Scraper):
             return job_description_div.text.strip(), response.url
         return None, response.url

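One caveat worth flagging in this hunk: requests' proxies argument expects a scheme-to-URL mapping, so this only works cleanly if self.proxy is already a dict; a bare proxy string would need wrapping. A sketch of the shape requests expects, with a placeholder address:

    # Placeholder proxy URL; the same endpoint is typically reused for both schemes.
    proxies = {
        "http": "http://203.0.113.1:8080",
        "https": "http://203.0.113.1:8080",
    }
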
+    @staticmethod
+    def add_params(scraper_input, page) -> dict[str, str | Any]:
+        params = {
+            "search": scraper_input.search_term,
+            "location": scraper_input.location,
+            "page": page,
+            "form": "jobs-landing",
+        }
+        job_type_value = None
+        if scraper_input.job_type:
+            if scraper_input.job_type.value == "fulltime":
+                job_type_value = "full_time"
+            elif scraper_input.job_type.value == "parttime":
+                job_type_value = "part_time"
+            else:
+                job_type_value = scraper_input.job_type.value
+
+        if job_type_value:
+            params[
+                "refine_by_employment"
+            ] = f"employment_type:employment_type:{job_type_value}"
+
+        if scraper_input.is_remote:
+            params["refine_by_location_type"] = "only_remote"
+
+        if scraper_input.distance:
+            params["radius"] = scraper_input.distance
+
+        return params

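To make the factored-out add_params concrete, here is roughly the dict it would build for a hypothetical remote, full-time search (all values illustrative):

    # For search_term="software engineer", location="Austin, TX", page=1,
    # job_type=fulltime, is_remote=True, distance=25:
    params = {
        "search": "software engineer",
        "location": "Austin, TX",
        "page": 1,
        "form": "jobs-landing",
        "refine_by_employment": "employment_type:employment_type:full_time",
        "refine_by_location_type": "only_remote",
        "radius": 25,
    }
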
     @staticmethod
     def get_interval(interval_str: str):
         """
@@ -329,7 +372,7 @@ class ZipRecruiterScraper(Scraper):
         return CompensationInterval(interval_str)

     @staticmethod
-    def get_date_posted(job: BeautifulSoup) -> Optional[datetime.date]:
+    def get_date_posted(job: Tag) -> Optional[datetime.date]:
         """
         Extracts the date a job was posted
         :param job
@@ -355,7 +398,7 @@ class ZipRecruiterScraper(Scraper):
         return None

     @staticmethod
-    def get_compensation(job: BeautifulSoup) -> Optional[Compensation]:
+    def get_compensation(job: Tag) -> Optional[Compensation]:
         """
         Parses the compensation tag from the job BeautifulSoup object
         :param job
@@ -396,7 +439,7 @@ class ZipRecruiterScraper(Scraper):
         return create_compensation_object(pay)

     @staticmethod
-    def get_location(job: BeautifulSoup) -> Location:
+    def get_location(job: Tag) -> Location:
         """
         Extracts the job location from the BeautifulSoup object
         :param job:
@@ -423,3 +466,9 @@ class ZipRecruiterScraper(Scraper):
         return {
             "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
         }
+
+    @staticmethod
+    def cleanurl(url):
+        parsed_url = urlparse(url)
+
+        return urlunparse((parsed_url.scheme, parsed_url.netloc, parsed_url.path, parsed_url.params, '', ''))

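The new cleanurl helper keeps scheme, host, path, and params while dropping the query string and fragment, which makes the seen_urls dedupe stable across tracking parameters. A worked example (the URL is made up):

    # urlparse/urlunparse here come from urllib.parse.
    url = "https://www.ziprecruiter.com/jobs/example-job?utm_source=feed#apply"
    # cleanurl(url) -> "https://www.ziprecruiter.com/jobs/example-job"
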
12
src/tests/test_all.py
Normal file
@@ -0,0 +1,12 @@
+from ..jobspy import scrape_jobs
+import pandas as pd
+
+
+def test_all():
+    result = scrape_jobs(
+        site_name=["linkedin", "indeed", "zip_recruiter"],
+        search_term="software engineer",
+        results_wanted=5,
+    )
+
+    assert isinstance(result, pd.DataFrame) and not result.empty, "Result should be a non-empty DataFrame"

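The new test drives all three scrapers through the public scrape_jobs API and asserts on the aggregated DataFrame. A hedged follow-on sketch of inspecting that result (the column names are assumed from the JobPost fields, not verified here):

    # Assumed columns; JobPost fields such as title and company_name suggest them.
    result = scrape_jobs(site_name=["indeed"], search_term="software engineer", results_wanted=5)
    print(result[["title", "company_name", "job_url"]].head())
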
src/tests/test_indeed.py
@@ -1,4 +1,5 @@
 from ..jobspy import scrape_jobs
+import pandas as pd


 def test_indeed():
@@ -6,4 +7,4 @@ def test_indeed():
         site_name="indeed",
         search_term="software engineer",
     )
-    assert result is not None
+    assert isinstance(result, pd.DataFrame) and not result.empty, "Result should be a non-empty DataFrame"

src/tests/test_linkedin.py
@@ -1,4 +1,5 @@
-from jobspy import scrape_jobs
+from ..jobspy import scrape_jobs
+import pandas as pd


 def test_linkedin():
@@ -6,4 +7,4 @@ def test_linkedin():
         site_name="linkedin",
         search_term="software engineer",
     )
-    assert result is not None
+    assert isinstance(result, pd.DataFrame) and not result.empty, "Result should be a non-empty DataFrame"

src/tests/test_ziprecruiter.py
@@ -1,4 +1,5 @@
-from jobspy import scrape_jobs
+from ..jobspy import scrape_jobs
+import pandas as pd


 def test_ziprecruiter():
@@ -7,4 +8,4 @@ def test_ziprecruiter():
         search_term="software engineer",
     )

-    assert result is not None
+    assert isinstance(result, pd.DataFrame) and not result.empty, "Result should be a non-empty DataFrame"
