Mirror of https://github.com/Bunsly/JobSpy.git (synced 2026-03-05 03:54:31 -08:00)

Compare commits (42 commits)
| SHA1 |
|---|
| e3fc222eb5 |
| b303b3f841 |
| 1a0c75f323 |
| e2f6885d61 |
| 8d65d1b652 |
| 216d3fd39f |
| d3bfdc0a6e |
| ba5ed803ca |
| ff1eb0f7b0 |
| f2cc74b7f2 |
| 5e71866630 |
| 4e67c6e5a3 |
| caf655525a |
| 90fa4a4c4f |
| e5353e604d |
| 628f4dee9c |
| 2e59ab03e3 |
| 008ca61e12 |
| 8fc4c3bf90 |
| bff39a2625 |
| c676050dc0 |
| 37976f7ec2 |
| 9fb2fdd80f |
| af07c1ecbd |
| 286b9e1256 |
| 162dd40b0f |
| 558e352939 |
| efad1a1b7d |
| eaa481c2f4 |
| b914aa6449 |
| 6adbfb8b29 |
| a3b9dd50ff |
| d3ba3a4878 |
| f524789d74 |
| f3890d4830 |
| 60c9728691 |
| f79d975e5f |
| d6368f909b |
| 6fcf7f666e |
| 4406f9350f |
| ca5155f234 |
| 822a55783e |
.github/workflows/publish-to-pypi.yml (vendored, 42 changed lines; both sides of this hunk are identical in the mirror, so it is shown as context)

@@ -7,27 +7,27 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
           python-version: "3.10"
 
       - name: Install poetry
         run: >-
           python3 -m
           pip install
           poetry
           --user
 
       - name: Build distribution 📦
         run: >-
           python3 -m
           poetry
           build
 
       - name: Publish distribution 📦 to PyPI
         if: startsWith(github.ref, 'refs/tags')
         uses: pypa/gh-action-pypi-publish@release/v1
         with:
           password: ${{ secrets.PYPI_API_TOKEN }}
.gitignore (vendored, 10 changed lines)

@@ -1,10 +1,10 @@
-/.idea
-**/.DS_Store
 /venv/
-/ven/
+/.idea
 **/__pycache__/
 **/.pytest_cache/
+/.ipynb_checkpoints/
+**/output/
+**/.DS_Store
 *.pyc
 .env
 dist
-/.ipynb_checkpoints/
README.md (140 changed lines)

@@ -1,63 +1,53 @@
 <img src="https://github.com/cullenwatson/JobSpy/assets/78247585/ae185b7e-e444-4712-8bb9-fa97f53e896b" width="400">
 
 **JobSpy** is a simple, yet comprehensive, job scraping library.
-## Features
 
+**Not technical?** Try out the web scraping tool on our site at [usejobspy.com](https://usejobspy.com).
+
+*Looking to build a data-focused software product?* **[Book a call](https://calendly.com/bunsly/15min)** *to
+work with us.*
+\
+Check out another project we wrote: ***[HomeHarvest](https://github.com/Bunsly/HomeHarvest)** – a Python package
+for real estate scraping*
+
+## Features
+
 - Scrapes job postings from **LinkedIn**, **Indeed** & **ZipRecruiter** simultaneously
 - Aggregates the job postings in a Pandas DataFrame
-
-[Video Guide for JobSpy](https://www.youtube.com/watch?v=-yS3mgI5H-4)
+- Proxy support (HTTP/S, SOCKS)
 
 
+[Video Guide for JobSpy](https://www.youtube.com/watch?v=RuP1HrAZnxs&pp=ygUgam9icyBzY3JhcGVyIGJvdCBsaW5rZWRpbiBpbmRlZWQ%3D) -
+Updated for release v1.1.3
 
 ![jobspy_gif](https://github.com/cullenwatson/JobSpy/assets/78247585/ec7ef355-05f6-4fd3-8161-a817e31c5c57)
 
 ### Installation
 
 ```
 pip install python-jobspy
 ```
 
 _Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_
 
 ### Usage
 
 ```python
 from jobspy import scrape_jobs
-import pandas as pd
 
-jobs: pd.DataFrame = scrape_jobs(
+jobs = scrape_jobs(
     site_name=["indeed", "linkedin", "zip_recruiter"],
     search_term="software engineer",
     location="Dallas, TX",
     results_wanted=10,
-
-    country_indeed='USA' # only needed for indeed
-
-    # use if you want to use a proxy
-    # proxy="socks5://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
-    # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
-    # proxy="https://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
+    country_indeed='USA' # only needed for indeed
 )
-pd.set_option('display.max_columns', None)
-pd.set_option('display.max_rows', None)
-pd.set_option('display.width', None)
-pd.set_option('display.max_colwidth', 50) # set to 0 to see full job url / desc
-
-#1 output
-print(jobs)
-print(errors)
-
-#2 display in Jupyter Notebook
-#display(jobs)
-#display(errors)
-
-#3 output to .csv
-#result.jobs.to_csv('result.jobs.csv', index=False)
+print(f"Found {len(jobs)} jobs")
+print(jobs.head())
+jobs.to_csv("jobs.csv", index=False) # / to_xlsx
 ```
 
 ### Output
 
 ```
 SITE TITLE COMPANY_NAME CITY STATE JOB_TYPE INTERVAL MIN_AMOUNT MAX_AMOUNT JOB_URL DESCRIPTION
 indeed Software Engineer AMERICAN SYSTEMS Arlington VA None yearly 200000 150000 https://www.indeed.com/viewjob?jk=5e409e577046... THIS POSITION COMES WITH A 10K SIGNING BONUS!...
@@ -67,7 +57,9 @@ linkedin Full-Stack Software Engineer Rain New York
 zip_recruiter Software Engineer - New Grad ZipRecruiter Santa Monica CA fulltime yearly 130000 150000 https://www.ziprecruiter.com/jobs/ziprecruiter... We offer a hybrid work environment. Most US-ba...
 zip_recruiter Software Developer TEKsystems Phoenix AZ fulltime hourly 65 75 https://www.ziprecruiter.com/jobs/teksystems-0... Top Skills' Details• 6 years of Java developme...
 ```
 
 ### Parameters for `scrape_jobs()`
 
 ```plaintext
 Required
 ├── site_type (List[enum]): linkedin, zip_recruiter, indeed
@@ -76,14 +68,16 @@ Optional
 ├── location (int)
 ├── distance (int): in miles
 ├── job_type (enum): fulltime, parttime, internship, contract
+├── proxy (str): in format 'http://user:pass@host:port' or [https, socks]
 ├── is_remote (bool)
 ├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
 ├── easy_apply (bool): filters for jobs that are hosted on LinkedIn
-├── country_indeed (enum): filters the country on Indeed
+├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
+├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
 ```
 
 
 ### JobPost Schema
 
 ```plaintext
 JobPost
 ├── title (str)
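To make the two newly documented parameters concrete, here is a minimal sketch of a call that uses `offset` together with `proxy`; the proxy URL is a placeholder, not a working endpoint:

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed"],
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=20,
    country_indeed="USA",
    offset=25,  # skip the first 25 results, per the parameter list above
    # proxy="http://user:pass@host:port",  # placeholder; https:// and socks5:// also accepted
)
```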
@@ -94,69 +88,85 @@ JobPost
 │ ├── city (str)
 │ ├── state (str)
 ├── description (str)
-├── job_type (enum): fulltime, parttime, internship, contract
+├── job_type (str): fulltime, parttime, internship, contract
 ├── compensation (object)
-│ ├── interval (enum): yearly, monthly, weekly, daily, hourly
+│ ├── interval (str): yearly, monthly, weekly, daily, hourly
 │ ├── min_amount (int)
 │ ├── max_amount (int)
 │ └── currency (enum)
 └── date_posted (date)
+└── emails (str)
+└── num_urgent_words (int)
+└── is_remote (bool)
 ```
 
+### Exceptions
+
+The following exceptions may be raised when using JobSpy:
+
+* `LinkedInException`
+* `IndeedException`
+* `ZipRecruiterException`
+
 ## Supported Countries for Job Searching
 
 ### **LinkedIn**
 
-LinkedIn searches globally & uses only the `location` parameter
+LinkedIn searches globally & uses only the `location` parameter.
 
 ### **ZipRecruiter**
 
-ZipRecruiter searches for jobs in US/Canada & uses only the `location` parameter
+ZipRecruiter searches for jobs in **US/Canada** & uses only the `location` parameter.
 
 ### **Indeed**
-For Indeed, the `country_indeed` parameter is required. Additionally, use the `location` parameter and include the city or state if necessary.
 
-You can specify the following countries when searching on Indeed (use the exact name):
+Indeed supports most countries, but the `country_indeed` parameter is required. Additionally, use the `location`
+parameter to narrow down the location, e.g. city & state if necessary.
+
+You can specify the following countries when searching on Indeed (use the exact name):
 
 |                      |              |            |                |
-|------|------|------|------|
+|----------------------|--------------|------------|----------------|
 | Argentina            | Australia    | Austria    | Bahrain        |
 | Belgium              | Brazil       | Canada     | Chile          |
 | China                | Colombia     | Costa Rica | Czech Republic |
 | Denmark              | Ecuador      | Egypt      | Finland        |
 | France               | Germany      | Greece     | Hong Kong      |
 | Hungary              | India        | Indonesia  | Ireland        |
 | Israel               | Italy        | Japan      | Kuwait         |
 | Luxembourg           | Malaysia     | Mexico     | Morocco        |
 | Netherlands          | New Zealand  | Nigeria    | Norway         |
 | Oman                 | Pakistan     | Panama     | Peru           |
 | Philippines          | Poland       | Portugal   | Qatar          |
 | Romania              | Saudi Arabia | Singapore  | South Africa   |
 | South Korea          | Spain        | Sweden     | Switzerland    |
 | Taiwan               | Thailand     | Turkey     | Ukraine        |
 | United Arab Emirates | UK           | USA        | Uruguay        |
 | Venezuela            | Vietnam      |            |                |
 
 ## Frequently Asked Questions
 
 ---
 
 **Q: Encountering issues with your queries?**
-**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems persist, [submit an issue](#).
+**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems
+persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
 
 ---
 
 **Q: Received a response code 429?**
-**A:** This indicates that you have been blocked by the job board site for sending too many requests. Currently, **ZipRecruiter** is particularly aggressive with blocking. We recommend:
+**A:** This indicates that you have been blocked by the job board site for sending too many requests. All of the job board sites are aggressive with blocking. We recommend:
 
 - Waiting a few seconds between requests.
-- Trying a VPN to change your IP address.
-
-**Note:** Proxy support is in development and coming soon!
+- Trying a VPN or proxy to change your IP address.
 
 ---
 
+**Q: Experiencing a "Segmentation fault: 11" on macOS Catalina?**
+**A:** This is due to the `tls_client` dependency not supporting your architecture. Solutions and workarounds include:
+
+- Upgrade to a newer version of MacOS
+- Reach out to the maintainers of [tls_client](https://github.com/bogdanfinn/tls-client) for fixes
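The 429 advice above ("waiting a few seconds between requests") translates into a simple pattern; a sketch, assuming sequential queries from one IP:

```python
import time

from jobspy import scrape_jobs

# Space out repeated queries to reduce 429 (rate-limit) responses.
for term in ["python developer", "data engineer"]:
    jobs = scrape_jobs(
        site_name=["indeed"],
        search_term=term,
        country_indeed="USA",
        results_wanted=10,
    )
    print(f"{term}: {len(jobs)} jobs")
    time.sleep(5)  # a few seconds between requests, as the FAQ recommends
```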
@@ -9,7 +9,7 @@
     "source": [
     "from jobspy import scrape_jobs\n",
     "import pandas as pd\n",
-    "from IPython.display import display, HTML\n"
+    "from IPython.display import display, HTML"
     ]
    },
    {
@@ -34,18 +34,16 @@
     "source": [
     "# example 1 (no hyperlinks, USA)\n",
     "jobs = scrape_jobs(\n",
-    " site_name=[\"linkedin\", \"zip_recruiter\"],\n",
+    " site_name=[\"linkedin\"],\n",
     " location='san francisco',\n",
     " search_term=\"engineer\",\n",
     " results_wanted=5,\n",
     "\n",
     " # use if you want to use a proxy\n",
-    " # proxy=\"socks5://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001\",\n",
-    " # proxy=\"http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001\",\n",
-    " # proxy=\"https://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001\",\n",
-    "\n",
+    " # proxy=\"socks5://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
+    " proxy=\"http://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
+    " #proxy=\"https://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
     ")\n",
-    "\n",
     "display(jobs)"
     ]
    },
@@ -97,9 +95,6 @@
     " hyperlinks=True,\n",
     " results_wanted=5,\n",
     " easy_apply=True\n",
-    "\n",
-    "\n",
-    "\n",
     ")"
     ]
    },
@@ -125,11 +120,10 @@
     "outputs": [],
     "source": [
     "# example 4 - international indeed (no zip_recruiter)\n",
-    "result = scrape_jobs(\n",
+    "jobs = scrape_jobs(\n",
     " site_name=[\"indeed\"],\n",
-    " location='berlin',\n",
     " search_term=\"engineer\",\n",
-    " country_indeed = \"Germany\",\n",
+    " country_indeed = \"China\",\n",
     " hyperlinks=True\n",
     ")"
     ]
@@ -165,7 +159,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.11"
+   "version": "3.11.5"
    }
   },
   "nbformat": 4,
examples/JobSpy_Demo.py (new file, 31 lines)

@@ -0,0 +1,31 @@
+from jobspy import scrape_jobs
+import pandas as pd
+
+jobs: pd.DataFrame = scrape_jobs(
+    site_name=["indeed", "linkedin", "zip_recruiter"],
+    search_term="software engineer",
+    location="Dallas, TX",
+    results_wanted=50,  # be wary: the higher it is, the more likely you'll get blocked (a rotating proxy should work though)
+    country_indeed="USA",
+    offset=25,  # start jobs from an offset (use if search failed and want to continue)
+    # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
+)
+
+# formatting for pandas
+pd.set_option("display.max_columns", None)
+pd.set_option("display.max_rows", None)
+pd.set_option("display.width", None)
+pd.set_option("display.max_colwidth", 50)  # set to 0 to see full job url / desc
+
+# 1: output to console
+print(jobs)
+
+# 2: output to .csv
+jobs.to_csv("./jobs.csv", index=False)
+print("outputted to jobs.csv")
+
+# 3: output to .xlsx
+# jobs.to_xlsx('jobs.xlsx', index=False)
+
+# 4: display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
+# display(jobs)
poetry.lock (generated, 69 changed lines)

@@ -1053,6 +1053,16 @@ files = [
     {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
     {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
     {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
@@ -1243,36 +1253,39 @@ test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"
 
 [[package]]
 name = "numpy"
-version = "1.25.2"
+version = "1.24.2"
 description = "Fundamental package for array computing in Python"
 optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.8"
 files = [
-    {file = "numpy-1.25.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:db3ccc4e37a6873045580d413fe79b68e47a681af8db2e046f1dacfa11f86eb3"},
-    {file = "numpy-1.25.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:90319e4f002795ccfc9050110bbbaa16c944b1c37c0baeea43c5fb881693ae1f"},
-    {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfe4a913e29b418d096e696ddd422d8a5d13ffba4ea91f9f60440a3b759b0187"},
-    {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f08f2e037bba04e707eebf4bc934f1972a315c883a9e0ebfa8a7756eabf9e357"},
-    {file = "numpy-1.25.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bec1e7213c7cb00d67093247f8c4db156fd03075f49876957dca4711306d39c9"},
-    {file = "numpy-1.25.2-cp310-cp310-win32.whl", hash = "sha256:7dc869c0c75988e1c693d0e2d5b26034644399dd929bc049db55395b1379e044"},
-    {file = "numpy-1.25.2-cp310-cp310-win_amd64.whl", hash = "sha256:834b386f2b8210dca38c71a6e0f4fd6922f7d3fcff935dbe3a570945acb1b545"},
-    {file = "numpy-1.25.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5462d19336db4560041517dbb7759c21d181a67cb01b36ca109b2ae37d32418"},
-    {file = "numpy-1.25.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c5652ea24d33585ea39eb6a6a15dac87a1206a692719ff45d53c5282e66d4a8f"},
-    {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d60fbae8e0019865fc4784745814cff1c421df5afee233db6d88ab4f14655a2"},
-    {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e7f0f7f6d0eee8364b9a6304c2845b9c491ac706048c7e8cf47b83123b8dbf"},
-    {file = "numpy-1.25.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bb33d5a1cf360304754913a350edda36d5b8c5331a8237268c48f91253c3a364"},
-    {file = "numpy-1.25.2-cp311-cp311-win32.whl", hash = "sha256:5883c06bb92f2e6c8181df7b39971a5fb436288db58b5a1c3967702d4278691d"},
-    {file = "numpy-1.25.2-cp311-cp311-win_amd64.whl", hash = "sha256:5c97325a0ba6f9d041feb9390924614b60b99209a71a69c876f71052521d42a4"},
-    {file = "numpy-1.25.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b79e513d7aac42ae918db3ad1341a015488530d0bb2a6abcbdd10a3a829ccfd3"},
-    {file = "numpy-1.25.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb942bfb6f84df5ce05dbf4b46673ffed0d3da59f13635ea9b926af3deb76926"},
-    {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e0746410e73384e70d286f93abf2520035250aad8c5714240b0492a7302fdca"},
-    {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7806500e4f5bdd04095e849265e55de20d8cc4b661b038957354327f6d9b295"},
-    {file = "numpy-1.25.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8b77775f4b7df768967a7c8b3567e309f617dd5e99aeb886fa14dc1a0791141f"},
-    {file = "numpy-1.25.2-cp39-cp39-win32.whl", hash = "sha256:2792d23d62ec51e50ce4d4b7d73de8f67a2fd3ea710dcbc8563a51a03fb07b01"},
-    {file = "numpy-1.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:76b4115d42a7dfc5d485d358728cdd8719be33cc5ec6ec08632a5d6fca2ed380"},
-    {file = "numpy-1.25.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1a1329e26f46230bf77b02cc19e900db9b52f398d6722ca853349a782d4cff55"},
-    {file = "numpy-1.25.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3abc71e8b6edba80a01a52e66d83c5d14433cbcd26a40c329ec7ed09f37901"},
-    {file = "numpy-1.25.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1b9735c27cea5d995496f46a8b1cd7b408b3f34b6d50459d9ac8fe3a20cc17bf"},
-    {file = "numpy-1.25.2.tar.gz", hash = "sha256:fd608e19c8d7c55021dffd43bfe5492fab8cc105cc8986f813f8c3c048b38760"},
+    {file = "numpy-1.24.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eef70b4fc1e872ebddc38cddacc87c19a3709c0e3e5d20bf3954c147b1dd941d"},
+    {file = "numpy-1.24.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d2859428712785e8a8b7d2b3ef0a1d1565892367b32f915c4a4df44d0e64f5"},
+    {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6524630f71631be2dabe0c541e7675db82651eb998496bbe16bc4f77f0772253"},
+    {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a51725a815a6188c662fb66fb32077709a9ca38053f0274640293a14fdd22978"},
+    {file = "numpy-1.24.2-cp310-cp310-win32.whl", hash = "sha256:2620e8592136e073bd12ee4536149380695fbe9ebeae845b81237f986479ffc9"},
+    {file = "numpy-1.24.2-cp310-cp310-win_amd64.whl", hash = "sha256:97cf27e51fa078078c649a51d7ade3c92d9e709ba2bfb97493007103c741f1d0"},
+    {file = "numpy-1.24.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7de8fdde0003f4294655aa5d5f0a89c26b9f22c0a58790c38fae1ed392d44a5a"},
+    {file = "numpy-1.24.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4173bde9fa2a005c2c6e2ea8ac1618e2ed2c1c6ec8a7657237854d42094123a0"},
+    {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cecaed30dc14123020f77b03601559fff3e6cd0c048f8b5289f4eeabb0eb281"},
+    {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a23f8440561a633204a67fb44617ce2a299beecf3295f0d13c495518908e910"},
+    {file = "numpy-1.24.2-cp311-cp311-win32.whl", hash = "sha256:e428c4fbfa085f947b536706a2fc349245d7baa8334f0c5723c56a10595f9b95"},
+    {file = "numpy-1.24.2-cp311-cp311-win_amd64.whl", hash = "sha256:557d42778a6869c2162deb40ad82612645e21d79e11c1dc62c6e82a2220ffb04"},
+    {file = "numpy-1.24.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d0a2db9d20117bf523dde15858398e7c0858aadca7c0f088ac0d6edd360e9ad2"},
+    {file = "numpy-1.24.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c72a6b2f4af1adfe193f7beb91ddf708ff867a3f977ef2ec53c0ffb8283ab9f5"},
+    {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29e6bd0ec49a44d7690ecb623a8eac5ab8a923bce0bea6293953992edf3a76a"},
+    {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eabd64ddb96a1239791da78fa5f4e1693ae2dadc82a76bc76a14cbb2b966e96"},
+    {file = "numpy-1.24.2-cp38-cp38-win32.whl", hash = "sha256:e3ab5d32784e843fc0dd3ab6dcafc67ef806e6b6828dc6af2f689be0eb4d781d"},
+    {file = "numpy-1.24.2-cp38-cp38-win_amd64.whl", hash = "sha256:76807b4063f0002c8532cfeac47a3068a69561e9c8715efdad3c642eb27c0756"},
+    {file = "numpy-1.24.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4199e7cfc307a778f72d293372736223e39ec9ac096ff0a2e64853b866a8e18a"},
+    {file = "numpy-1.24.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:adbdce121896fd3a17a77ab0b0b5eedf05a9834a18699db6829a64e1dfccca7f"},
+    {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889b2cc88b837d86eda1b17008ebeb679d82875022200c6e8e4ce6cf549b7acb"},
+    {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64bb98ac59b3ea3bf74b02f13836eb2e24e48e0ab0145bbda646295769bd780"},
+    {file = "numpy-1.24.2-cp39-cp39-win32.whl", hash = "sha256:63e45511ee4d9d976637d11e6c9864eae50e12dc9598f531c035265991910468"},
+    {file = "numpy-1.24.2-cp39-cp39-win_amd64.whl", hash = "sha256:a77d3e1163a7770164404607b7ba3967fb49b24782a6ef85d9b5f54126cc39e5"},
+    {file = "numpy-1.24.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92011118955724465fb6853def593cf397b4a1367495e0b59a7e69d40c4eb71d"},
+    {file = "numpy-1.24.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9006288bcf4895917d02583cf3411f98631275bc67cce355a7f39f8c14338fa"},
+    {file = "numpy-1.24.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:150947adbdfeceec4e5926d956a06865c1c690f2fd902efede4ca6fe2e657c3f"},
+    {file = "numpy-1.24.2.tar.gz", hash = "sha256:003a9f530e880cb2cd177cba1af7220b9aa42def9c4afc2a2fc3ee6be7eb2b22"},
 ]
 
 [[package]]
@@ -2432,4 +2445,4 @@ files = [
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "0c50057af9ebbbe5c124c81758b41f05c05636739c3d1747e1bac74e75a046cb"
+content-hash = "f966f3979873eec2c3b13460067f5aa414c69aa8ab5cd3239c1cfa564fcb5deb"
@@ -1,8 +1,9 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.3"
+version = "1.1.22"
 description = "Job scraper for LinkedIn, Indeed & ZipRecruiter"
-authors = ["Zachary Hampton <zachary@zacharysproducts.com>", "Cullen Watson <cullen@cullen.ai>"]
+authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
+homepage = "https://github.com/Bunsly/JobSpy"
 readme = "README.md"
 
 packages = [
@@ -15,6 +16,7 @@ requests = "^2.31.0"
 tls-client = "^0.2.1"
 beautifulsoup4 = "^4.12.2"
 pandas = "^2.1.0"
+NUMPY = "1.24.2"
 pydantic = "^2.3.0"
@@ -1,8 +1,7 @@
 import pandas as pd
 import concurrent.futures
 from concurrent.futures import ThreadPoolExecutor
-from typing import List, Tuple, NamedTuple, Dict, Optional
-import traceback
+from typing import Tuple, Optional
 
 from .jobs import JobType, Location
 from .scrapers.indeed import IndeedScraper
@@ -27,23 +26,32 @@ def _map_str_to_site(site_name: str) -> Site:
 
 
 def scrape_jobs(
-    site_name: str | List[str] | Site | List[Site],
+    site_name: str | list[str] | Site | list[Site],
     search_term: str,
     location: str = "",
     distance: int = None,
     is_remote: bool = False,
-    job_type: JobType = None,
+    job_type: str = None,
     easy_apply: bool = False,  # linkedin
     results_wanted: int = 15,
     country_indeed: str = "usa",
     hyperlinks: bool = False,
     proxy: Optional[str] = None,
+    offset: Optional[int] = 0,
 ) -> pd.DataFrame:
     """
     Simultaneously scrapes job data from multiple job sites.
     :return: results_wanted: pandas dataframe containing job data
     """
 
+    def get_enum_from_value(value_str):
+        for job_type in JobType:
+            if value_str in job_type.value:
+                return job_type
+        raise Exception(f"Invalid job type: {value_str}")
+
+    job_type = get_enum_from_value(job_type) if job_type else None
+
     if type(site_name) == str:
         site_type = [_map_str_to_site(site_name)]
     else:  #: if type(site_name) == list
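The `get_enum_from_value` helper added in this hunk resolves a user-supplied string against each `JobType`'s value tuple (the tuples carry localized spellings, per the `JobType` hunk further down). A standalone sketch of the same lookup, assuming `jobspy.jobs` exports `JobType`:

```python
from jobspy.jobs import JobType

def get_enum_from_value(value_str):
    # Each JobType's value is a tuple of accepted spellings,
    # so membership is checked against the whole tuple.
    for job_type in JobType:
        if value_str in job_type.value:
            return job_type
    raise Exception(f"Invalid job type: {value_str}")

print(get_enum_from_value("fulltime"))  # JobType.FULL_TIME
print(get_enum_from_value("teilzeit"))  # JobType.PART_TIME (German spelling)
```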
@@ -64,6 +72,7 @@ def scrape_jobs(
         job_type=job_type,
         easy_apply=easy_apply,
         results_wanted=results_wanted,
+        offset=offset,
     )
 
     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
@@ -75,13 +84,12 @@
         except (LinkedInException, IndeedException, ZipRecruiterException) as lie:
             raise lie
         except Exception as e:
-            # unhandled exceptions
             if site == Site.LINKEDIN:
-                raise LinkedInException()
+                raise LinkedInException(str(e))
             if site == Site.INDEED:
-                raise IndeedException()
+                raise IndeedException(str(e))
             if site == Site.ZIP_RECRUITER:
-                raise ZipRecruiterException()
+                raise ZipRecruiterException(str(e))
             else:
                 raise e
         return site.value, scraped_data
@@ -89,8 +97,8 @@
     site_to_jobs_dict = {}
 
     def worker(site):
-        site_value, scraped_data = scrape_site(site)
-        return site_value, scraped_data
+        site_val, scraped_info = scrape_site(site)
+        return site_val, scraped_info
 
     with ThreadPoolExecutor() as executor:
         future_to_site = {
@@ -101,7 +109,7 @@
             site_value, scraped_data = future.result()
             site_to_jobs_dict[site_value] = scraped_data
 
-    jobs_dfs: List[pd.DataFrame] = []
+    jobs_dfs: list[pd.DataFrame] = []
 
     for site, job_response in site_to_jobs_dict.items():
         for job in job_response.jobs:
@@ -111,12 +119,14 @@
             ] = f'<a href="{job_data["job_url"]}">{job_data["job_url"]}</a>'
             job_data["site"] = site
             job_data["company"] = job_data["company_name"]
-            if job_data["job_type"]:
-                # Take the first value from the job type tuple
-                job_data["job_type"] = job_data["job_type"].value[0]
-            else:
-                job_data["job_type"] = None
+            job_data["job_type"] = (
+                ", ".join(job_type.value[0] for job_type in job_data["job_type"])
+                if job_data["job_type"]
+                else None
+            )
+            job_data["emails"] = (
+                ", ".join(job_data["emails"]) if job_data["emails"] else None
+            )
             job_data["location"] = Location(**job_data["location"]).display_location()
 
             compensation_obj = job_data.get("compensation")
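Since `job_type` is now a list of enums rather than a single enum, the new expression flattens it into a display string; a small sketch of the same transformation in isolation:

```python
from jobspy.jobs import JobType

job_types = [JobType.FULL_TIME, JobType.CONTRACT]
# value[0] is the canonical spelling in each JobType's value tuple
flat = ", ".join(jt.value[0] for jt in job_types) if job_types else None
print(flat)  # -> "fulltime, contract"
```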
@@ -140,18 +150,22 @@
 
     if jobs_dfs:
         jobs_df = pd.concat(jobs_dfs, ignore_index=True)
-        desired_order: List[str] = [
+        desired_order: list[str] = [
+            "job_url_hyper" if hyperlinks else "job_url",
             "site",
             "title",
             "company",
             "location",
-            "date_posted",
             "job_type",
+            "date_posted",
             "interval",
             "min_amount",
             "max_amount",
             "currency",
-            "job_url_hyper" if hyperlinks else "job_url",
+            "is_remote",
+            "num_urgent_words",
+            "benefits",
+            "emails",
             "description",
         ]
         jobs_formatted_df = jobs_df[desired_order]
@@ -37,10 +37,16 @@ class JobType(Enum):
         "повназайнятість",
         "toànthờigian",
     )
-    PART_TIME = ("parttime", "teilzeit")
+    PART_TIME = ("parttime", "teilzeit", "částečnýúvazek", "deltid")
     CONTRACT = ("contract", "contractor")
     TEMPORARY = ("temporary",)
-    INTERNSHIP = ("internship", "prácticas", "ojt(onthejobtraining)", "praktikum")
+    INTERNSHIP = (
+        "internship",
+        "prácticas",
+        "ojt(onthejobtraining)",
+        "praktikum",
+        "praktik",
+    )
 
     PER_DIEM = ("perdiem",)
     NIGHTS = ("nights",)
@@ -170,9 +176,9 @@ class CompensationInterval(Enum):
 
 
 class Compensation(BaseModel):
-    interval: CompensationInterval
-    min_amount: int = None
-    max_amount: int = None
+    interval: Optional[CompensationInterval] = None
+    min_amount: int | None = None
+    max_amount: int | None = None
     currency: Optional[str] = "USD"
 
 
@@ -182,10 +188,15 @@ class JobPost(BaseModel):
     job_url: str
     location: Optional[Location]
 
-    description: Optional[str] = None
-    job_type: Optional[JobType] = None
-    compensation: Optional[Compensation] = None
-    date_posted: Optional[date] = None
+    description: str | None = None
+    job_type: list[JobType] | None = None
+    compensation: Compensation | None = None
+    date_posted: date | None = None
+    benefits: str | None = None
+    emails: list[str] | None = None
+    num_urgent_words: int | None = None
+    is_remote: bool | None = None
+    # company_industry: str | None = None
 
 
 class JobResponse(BaseModel):
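With these model changes, `compensation.interval` and the four new fields are all optional; a hedged sketch of constructing a `JobPost` by hand, with field names taken from the hunks above except `company_name`, which is assumed from the rest of the model:

```python
from datetime import date

from jobspy.jobs import Compensation, JobPost, JobType

post = JobPost(
    title="Software Engineer",
    company_name="ACME",  # assumed required field; not shown in these hunks
    job_url="https://example.com/job/1",
    location=None,  # Optional[Location] per the hunk above
    job_type=[JobType.FULL_TIME],  # now list[JobType] | None
    compensation=Compensation(min_amount=100000, max_amount=150000),  # interval now optional
    date_posted=date.today(),
    emails=["hr@example.com"],  # new optional field
    is_remote=False,  # new optional field
)
print(post.model_dump())  # pydantic v2, per the pyproject constraint
```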
@@ -18,6 +18,7 @@ class ScraperInput(BaseModel):
     is_remote: bool = False
     job_type: Optional[JobType] = None
     easy_apply: bool = None  # linkedin
+    offset: int = 0
 
     results_wanted: int = 15
 
@@ -7,12 +7,15 @@ This module contains the set of Scrapers' exceptions.
 
 
 class LinkedInException(Exception):
-    """Failed to scrape LinkedIn"""
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with LinkedIn")
 
 
 class IndeedException(Exception):
-    """Failed to scrape Indeed"""
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with Indeed")
 
 
 class ZipRecruiterException(Exception):
-    """Failed to scrape ZipRecruiter"""
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with ZipRecruiter")
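Because the exception classes now accept a message, and `scrape_jobs` re-raises unhandled errors wrapped in the matching site exception (per the earlier hunk), callers can branch per site; a sketch, assuming the classes are importable from `jobspy.scrapers.exceptions`:

```python
from jobspy import scrape_jobs
from jobspy.scrapers.exceptions import (  # assumed import path
    IndeedException,
    LinkedInException,
    ZipRecruiterException,
)

try:
    jobs = scrape_jobs(site_name=["linkedin"], search_term="engineer")
except LinkedInException as e:
    print(f"LinkedIn failed: {e}")  # defaults to "An error occurred with LinkedIn"
except (IndeedException, ZipRecruiterException) as e:
    print(f"Another site failed: {e}")
```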
@@ -8,17 +8,20 @@ import re
 import math
 import io
 import json
-import traceback
 from datetime import datetime
-from typing import Optional
 
-import tls_client
 import urllib.parse
 from bs4 import BeautifulSoup
 from bs4.element import Tag
 from concurrent.futures import ThreadPoolExecutor, Future
 
 from ..exceptions import IndeedException
+from ..utils import (
+    count_urgent_words,
+    extract_emails_from_text,
+    create_session,
+    get_enum_from_job_type,
+)
 from ...jobs import (
     JobPost,
     Compensation,
@@ -27,14 +30,16 @@ from ...jobs import (
     JobResponse,
     JobType,
 )
-from .. import Scraper, ScraperInput, Site, Country
+from .. import Scraper, ScraperInput, Site
 
 
 class IndeedScraper(Scraper):
-    def __init__(self, proxy: Optional[str] = None):
+    def __init__(self, proxy: str | None = None):
         """
         Initializes IndeedScraper with the Indeed job search url
         """
+        self.url = None
+        self.country = None
         site = Site(Site.INDEED)
         super().__init__(site, proxy=proxy)
 
@@ -42,26 +47,23 @@ class IndeedScraper(Scraper):
         self.seen_urls = set()
 
     def scrape_page(
-        self, scraper_input: ScraperInput, page: int, session: tls_client.Session
+        self, scraper_input: ScraperInput, page: int
     ) -> tuple[list[JobPost], int]:
         """
         Scrapes a page of Indeed for jobs with scraper_input criteria
         :param scraper_input:
         :param page:
-        :param session:
         :return: jobs found on page, total number of jobs found for search
         """
         self.country = scraper_input.country
         domain = self.country.domain_value
         self.url = f"https://{domain}.indeed.com"
 
-        job_list: list[JobPost] = []
-
         params = {
             "q": scraper_input.search_term,
             "l": scraper_input.location,
             "filter": 0,
-            "start": 0 + page * 10,
+            "start": scraper_input.offset + page * 10,
         }
         if scraper_input.distance:
             params["radius"] = scraper_input.distance
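The only functional change in this hunk is the `start` parameter: Indeed paginates in steps of 10, and the user-supplied `offset` now shifts every page. A one-liner to sanity-check the arithmetic:

```python
def start_param(offset: int, page: int) -> int:
    # Replicates: params["start"] = scraper_input.offset + page * 10
    return offset + page * 10

# With offset=25, pages 0..2 request results starting at 25, 35, 45.
assert [start_param(25, page) for page in range(3)] == [25, 35, 45]
```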
@@ -75,11 +77,12 @@
         if sc_values:
             params["sc"] = "0kf:" + "".join(sc_values) + ";"
         try:
+            session = create_session(self.proxy, is_tls=True)
             response = session.get(
-                self.url + "/jobs",
+                f"{self.url}/jobs",
+                headers=self.get_headers(),
                 params=params,
                 allow_redirects=True,
-                proxy=self.proxy,
                 timeout_seconds=10,
             )
         if response.status_code not in range(200, 400):
@@ -107,7 +110,7 @@
         ):
             raise IndeedException("No jobs found.")
 
-        def process_job(job) -> Optional[JobPost]:
+        def process_job(job) -> JobPost | None:
             job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}'
             job_url_client = f'{self.url}/viewjob?jk={job["jobkey"]}'
             if job_url in self.seen_urls:
@@ -126,8 +129,8 @@
                 if interval in CompensationInterval.__members__:
                     compensation = Compensation(
                         interval=CompensationInterval[interval],
-                        min_amount=int(extracted_salary.get("max")),
-                        max_amount=int(extracted_salary.get("min")),
+                        min_amount=int(extracted_salary.get("min")),
+                        max_amount=int(extracted_salary.get("max")),
                         currency=currency,
                     )
 
@@ -136,10 +139,10 @@
                 date_posted = datetime.fromtimestamp(timestamp_seconds)
                 date_posted = date_posted.strftime("%Y-%m-%d")
 
-            description = self.get_description(job_url, session)
+            description = self.get_description(job_url)
             with io.StringIO(job["snippet"]) as f:
-                soup = BeautifulSoup(f, "html.parser")
-                li_elements = soup.find_all("li")
+                soup_io = BeautifulSoup(f, "html.parser")
+                li_elements = soup_io.find_all("li")
             if description is None and li_elements:
                 description = " ".join(li.text for li in li_elements)
 
@@ -156,13 +159,18 @@
                 compensation=compensation,
                 date_posted=date_posted,
                 job_url=job_url_client,
+                emails=extract_emails_from_text(description) if description else None,
+                num_urgent_words=count_urgent_words(description)
+                if description
+                else None,
+                is_remote=self.is_remote_job(job),
             )
             return job_post
 
+        jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
         with ThreadPoolExecutor(max_workers=1) as executor:
             job_results: list[Future] = [
-                executor.submit(process_job, job)
-                for job in jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
+                executor.submit(process_job, job) for job in jobs
             ]
 
         job_list = [result.result() for result in job_results if result.result()]
@@ -175,20 +183,16 @@
         :param scraper_input:
         :return: job_response
         """
-        session = tls_client.Session(
-            client_identifier="chrome112", random_tls_extension_order=True
-        )
 
         pages_to_process = (
             math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
         )
 
         #: get first page to initialize session
-        job_list, total_results = self.scrape_page(scraper_input, 0, session)
+        job_list, total_results = self.scrape_page(scraper_input, 0)
 
         with ThreadPoolExecutor(max_workers=1) as executor:
             futures: list[Future] = [
-                executor.submit(self.scrape_page, scraper_input, page, session)
+                executor.submit(self.scrape_page, scraper_input, page)
                 for page in range(1, pages_to_process + 1)
             ]
 
@@ -206,21 +210,24 @@
         )
         return job_response
 
-    def get_description(self, job_page_url: str, session: tls_client.Session) -> str:
+    def get_description(self, job_page_url: str) -> str | None:
         """
         Retrieves job description by going to the job page url
         :param job_page_url:
-        :param session:
         :return: description
         """
         parsed_url = urllib.parse.urlparse(job_page_url)
         params = urllib.parse.parse_qs(parsed_url.query)
         jk_value = params.get("jk", [None])[0]
         formatted_url = f"{self.url}/viewjob?jk={jk_value}&spa=1"
+        session = create_session(self.proxy)
 
         try:
             response = session.get(
-                formatted_url, allow_redirects=True, timeout_seconds=5, proxy=self.proxy
+                formatted_url,
+                headers=self.get_headers(),
+                allow_redirects=True,
+                timeout_seconds=5,
             )
         except Exception as e:
             return None
@@ -228,36 +235,56 @@
         if response.status_code not in range(200, 400):
             return None
 
-        raw_description = response.json()["body"]["jobInfoWrapperModel"][
-            "jobInfoModel"
-        ]["sanitizedJobDescription"]
-        with io.StringIO(raw_description) as f:
-            soup = BeautifulSoup(f, "html.parser")
-            text_content = " ".join(soup.get_text().split()).strip()
-            return text_content
+        soup = BeautifulSoup(response.text, "html.parser")
+        script_tag = soup.find(
+            "script", text=lambda x: x and "window._initialData" in x
+        )
+
+        if not script_tag:
+            return None
+
+        script_code = script_tag.string
+        match = re.search(r"window\._initialData\s*=\s*({.*?})\s*;", script_code, re.S)
+
+        if not match:
+            return None
+
+        json_string = match.group(1)
+        data = json.loads(json_string)
+        try:
+            job_description = data["jobInfoWrapperModel"]["jobInfoModel"][
+                "sanitizedJobDescription"
+            ]
+        except (KeyError, TypeError, IndexError):
+            return None
+
+        soup = BeautifulSoup(
+            job_description, "html.parser"
+        )
+        text_content = " ".join(
+            soup.get_text(separator=" ").split()
+        ).strip()
+
+        return text_content
 
     @staticmethod
-    def get_job_type(job: dict) -> Optional[JobType]:
+    def get_job_type(job: dict) -> list[JobType] | None:
         """
-        Parses the job to get JobTypeIndeed
+        Parses the job to get list of job types
         :param job:
         :return:
         """
+        job_types: list[JobType] = []
         for taxonomy in job["taxonomyAttributes"]:
             if taxonomy["label"] == "job-types":
-                if len(taxonomy["attributes"]) > 0:
-                    label = taxonomy["attributes"][0].get("label")
+                for i in range(len(taxonomy["attributes"])):
+                    label = taxonomy["attributes"][i].get("label")
                     if label:
                         job_type_str = label.replace("-", "").replace(" ", "").lower()
-                        return IndeedScraper.get_enum_from_value(job_type_str)
-        return None
-
-    @staticmethod
-    def get_enum_from_value(value_str):
-        for job_type in JobType:
-            if value_str in job_type.value:
-                return job_type
-        return None
+                        job_type = get_enum_from_job_type(job_type_str)
+                        if job_type:
+                            job_types.append(job_type)
+        return job_types
 
     @staticmethod
     def parse_jobs(soup: BeautifulSoup) -> dict:
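The rewritten `get_description` no longer hits a JSON endpoint; it pulls the `window._initialData` blob embedded in the job page's HTML. A self-contained sketch of the same technique against a synthetic page (the real Indeed markup differs, and `string=` is the modern bs4 spelling of the deprecated `text=` argument used in the diff):

```python
import json
import re

from bs4 import BeautifulSoup

# Synthetic stand-in for an Indeed job page.
html = """<html><body><script>
window._initialData = {"jobInfoWrapperModel": {"jobInfoModel":
{"sanitizedJobDescription": "<p>Build <b>APIs</b> in Python.</p>"}}};
</script></body></html>"""

soup = BeautifulSoup(html, "html.parser")
script = soup.find("script", string=lambda s: s and "window._initialData" in s)
# Lazy {.*?} stops at the closing brace that is followed by a semicolon.
match = re.search(r"window\._initialData\s*=\s*({.*?})\s*;", script.string, re.S)
data = json.loads(match.group(1))
desc_html = data["jobInfoWrapperModel"]["jobInfoModel"]["sanitizedJobDescription"]
text = " ".join(BeautifulSoup(desc_html, "html.parser").get_text(separator=" ").split())
print(text)  # -> Build APIs in Python.
```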
@@ -267,7 +294,7 @@
         :return: jobs
         """
 
-        def find_mosaic_script() -> Optional[Tag]:
+        def find_mosaic_script() -> Tag | None:
             """
             Finds jobcards script tag
             :return: script_tag
@@ -317,3 +344,30 @@ class IndeedScraper(Scraper):
|
|||||||
data = json.loads(json_str)
|
data = json.loads(json_str)
|
||||||
total_num_jobs = int(data["searchTitleBarModel"]["totalNumResults"])
|
total_num_jobs = int(data["searchTitleBarModel"]["totalNumResults"])
|
||||||
return total_num_jobs
|
return total_num_jobs
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_headers():
|
||||||
|
return {
|
||||||
|
"authority": "www.indeed.com",
|
||||||
|
"accept": "*/*",
|
||||||
|
"accept-language": "en-US,en;q=0.9",
|
||||||
|
"referer": "https://www.indeed.com/viewjob?jk=fe6182337d72c7b1&tk=1hcbfcmd0k62t802&from=serp&vjs=3&advn=8132938064490989&adid=408692607&ad=-6NYlbfkN0A3Osc99MJFDKjquSk4WOGT28ALb_ad4QMtrHreCb9ICg6MiSVy9oDAp3evvOrI7Q-O9qOtQTg1EPbthP9xWtBN2cOuVeHQijxHjHpJC65TjDtftH3AXeINjBvAyDrE8DrRaAXl8LD3Fs1e_xuDHQIssdZ2Mlzcav8m5jHrA0fA64ZaqJV77myldaNlM7-qyQpy4AsJQfvg9iR2MY7qeC5_FnjIgjKIy_lNi9OPMOjGRWXA94CuvC7zC6WeiJmBQCHISl8IOBxf7EdJZlYdtzgae3593TFxbkd6LUwbijAfjax39aAuuCXy3s9C4YgcEP3TwEFGQoTpYu9Pmle-Ae1tHGPgsjxwXkgMm7Cz5mBBdJioglRCj9pssn-1u1blHZM4uL1nK9p1Y6HoFgPUU9xvKQTHjKGdH8d4y4ETyCMoNF4hAIyUaysCKdJKitC8PXoYaWhDqFtSMR4Jys8UPqUV&xkcb=SoDD-_M3JLQfWnQTDh0LbzkdCdPP&xpse=SoBa6_I3JLW9FlWZlB0PbzkdCdPP&sjdu=i6xVERweJM_pVUvgf-MzuaunBTY7G71J5eEX6t4DrDs5EMPQdODrX7Nn-WIPMezoqr5wA_l7Of-3CtoiUawcHw",
|
||||||
|
"sec-ch-ua": '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
|
||||||
|
"sec-ch-ua-mobile": "?0",
|
||||||
|
"sec-ch-ua-platform": '"Windows"',
|
||||||
|
"sec-fetch-dest": "empty",
|
||||||
|
"sec-fetch-mode": "cors",
|
||||||
|
"sec-fetch-site": "same-origin",
|
||||||
|
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
|
||||||
|
}
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def is_remote_job(job: dict) -> bool:
|
||||||
|
"""
|
||||||
|
:param job:
|
||||||
|
:return: bool
|
||||||
|
"""
|
||||||
|
for taxonomy in job.get("taxonomyAttributes", []):
|
||||||
|
if taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
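The added `get_headers()` returns Chrome-like headers for the description request. A hypothetical call site (the import path is assumed from the package layout, and `<job_key>` is a placeholder, not a value from the diff):

```python
import requests

from jobspy.scrapers.indeed import IndeedScraper  # assumed import path

# Illustrative only: fetch a job page with the scraper's browser-like headers.
response = requests.get(
    "https://www.indeed.com/viewjob?jk=<job_key>",
    headers=IndeedScraper.get_headers(),
    timeout=10,
)
```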
**jobspy.scrapers.linkedin**:

```diff
@@ -4,33 +4,38 @@ jobspy.scrapers.linkedin

 This module contains routines to scrape LinkedIn.
 """
-from typing import Optional, Tuple
+from typing import Optional
 from datetime import datetime
-import traceback

 import requests
-from requests.exceptions import Timeout, ProxyError
+import time
+from requests.exceptions import ProxyError
+from concurrent.futures import ThreadPoolExecutor, as_completed
 from bs4 import BeautifulSoup
 from bs4.element import Tag
+from threading import Lock

 from .. import Scraper, ScraperInput, Site
+from ..utils import count_urgent_words, extract_emails_from_text, get_enum_from_job_type
 from ..exceptions import LinkedInException
 from ...jobs import (
     JobPost,
     Location,
     JobResponse,
     JobType,
-    Compensation,
-    CompensationInterval,
 )


 class LinkedInScraper(Scraper):
+    MAX_RETRIES = 3
+    DELAY = 10
+
     def __init__(self, proxy: Optional[str] = None):
         """
         Initializes LinkedInScraper with the LinkedIn job search url
         """
         site = Site(Site.LINKEDIN)
+        self.country = "worldwide"
         self.url = "https://www.linkedin.com"
         super().__init__(site, proxy=proxy)
```
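The new `MAX_RETRIES`/`DELAY` constants and the `Lock` import set up two patterns used further down: bounded retries on HTTP 429, and thread-safe de-duplication of job URLs once cards are processed concurrently. A minimal sketch of the locking pattern (all names here are illustrative):

```python
from threading import Lock

seen_urls: set[str] = set()
url_lock = Lock()


def is_new(url: str) -> bool:
    """Atomically record a URL; returns False if another thread already claimed it."""
    with url_lock:
        if url in seen_urls:
            return False
        seen_urls.add(url)
        return True
```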
```diff
@@ -40,12 +45,12 @@ class LinkedInScraper(Scraper):
         :param scraper_input:
         :return: job_response
         """
-        self.country = "worldwide"
         job_list: list[JobPost] = []
         seen_urls = set()
-        page, processed_jobs, job_count = 0, 0, 0
+        url_lock = Lock()
+        page = scraper_input.offset // 25 + 25 if scraper_input.offset else 0

-        def job_type_code(job_type):
+        def job_type_code(job_type_enum):
             mapping = {
                 JobType.FULL_TIME: "F",
                 JobType.PART_TIME: "P",
@@ -54,117 +59,138 @@ class LinkedInScraper(Scraper):
                 JobType.TEMPORARY: "T",
             }

-            return mapping.get(job_type, "")
+            return mapping.get(job_type_enum, "")

-        with requests.Session() as session:
-            while len(job_list) < scraper_input.results_wanted:
-                params = {
-                    "keywords": scraper_input.search_term,
-                    "location": scraper_input.location,
-                    "distance": scraper_input.distance,
-                    "f_WT": 2 if scraper_input.is_remote else None,
-                    "f_JT": job_type_code(scraper_input.job_type)
-                    if scraper_input.job_type
-                    else None,
-                    "pageNum": page,
-                    "f_AL": "true" if scraper_input.easy_apply else None,
-                }
+        while len(job_list) < scraper_input.results_wanted and page < 1000:
+            params = {
+                "keywords": scraper_input.search_term,
+                "location": scraper_input.location,
+                "distance": scraper_input.distance,
+                "f_WT": 2 if scraper_input.is_remote else None,
+                "f_JT": job_type_code(scraper_input.job_type)
+                if scraper_input.job_type
+                else None,
+                "pageNum": 0,
+                page: page + scraper_input.offset,
+                "f_AL": "true" if scraper_input.easy_apply else None,
+            }

-                params = {k: v for k, v in params.items() if v is not None}
+            params = {k: v for k, v in params.items() if v is not None}

+            retries = 0
+            while retries < self.MAX_RETRIES:
                 try:
-                    response = session.get(
-                        f"{self.url}/jobs/search",
+                    response = requests.get(
+                        f"{self.url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
                         params=params,
                         allow_redirects=True,
                         proxies=self.proxy,
                         timeout=10,
                     )
                     response.raise_for_status()
+                    break
                 except requests.HTTPError as e:
-                    raise LinkedInException(
-                        f"bad response status code: {response.status_code}"
-                    )
+                    if hasattr(e, "response") and e.response is not None:
+                        if e.response.status_code == 429:
+                            time.sleep(self.DELAY)
+                            retries += 1
+                            continue
+                        else:
+                            raise LinkedInException(
+                                f"bad response status code: {e.response.status_code}"
+                            )
+                    else:
+                        raise
                 except ProxyError as e:
                     raise LinkedInException("bad proxy")
-                except (ProxyError, Exception) as e:
+                except Exception as e:
                     raise LinkedInException(str(e))
+            else:
+                # Raise an exception if the maximum number of retries is reached
+                raise LinkedInException(
+                    "Max retries reached, failed to get a valid response"
+                )

             soup = BeautifulSoup(response.text, "html.parser")

-                if page == 0:
-                    job_count_text = soup.find(
-                        "span", class_="results-context-header__job-count"
-                    ).text
-                    job_count = int("".join(filter(str.isdigit, job_count_text)))
-
-                for job_card in soup.find_all(
-                    "div",
-                    class_="base-card relative w-full hover:no-underline focus:no-underline base-card--link base-search-card base-search-card--link job-search-card",
-                ):
-                    processed_jobs += 1
-                    data_entity_urn = job_card.get("data-entity-urn", "")
-                    job_id = (
-                        data_entity_urn.split(":")[-1] if data_entity_urn else "N/A"
-                    )
-                    job_url = f"{self.url}/jobs/view/{job_id}"
-                    if job_url in seen_urls:
-                        continue
-                    seen_urls.add(job_url)
-                    job_info = job_card.find("div", class_="base-search-card__info")
-                    if job_info is None:
-                        continue
-                    title_tag = job_info.find("h3", class_="base-search-card__title")
-                    title = title_tag.text.strip() if title_tag else "N/A"
-
-                    company_tag = job_info.find("a", class_="hidden-nested-link")
-                    company = company_tag.text.strip() if company_tag else "N/A"
-
-                    metadata_card = job_info.find(
-                        "div", class_="base-search-card__metadata"
-                    )
-                    location: Location = self.get_location(metadata_card)
-
-                    datetime_tag = metadata_card.find(
-                        "time", class_="job-search-card__listdate"
-                    )
-                    description, job_type = self.get_description(job_url)
-                    if datetime_tag:
-                        datetime_str = datetime_tag["datetime"]
-                        try:
-                            date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
-                        except Exception as e:
-                            date_posted = None
-                    else:
-                        date_posted = None
-
-                    job_post = JobPost(
-                        title=title,
-                        description=description,
-                        company_name=company,
-                        location=location,
-                        date_posted=date_posted,
-                        job_url=job_url,
-                        job_type=job_type,
-                        compensation=Compensation(
-                            interval=CompensationInterval.YEARLY, currency=None
-                        ),
-                    )
-                    job_list.append(job_post)
-                    if processed_jobs >= job_count:
-                        break
-                    if len(job_list) >= scraper_input.results_wanted:
-                        break
-                if processed_jobs >= job_count:
-                    break
-                if len(job_list) >= scraper_input.results_wanted:
-                    break
-
-                page += 1
+            with ThreadPoolExecutor(max_workers=5) as executor:
+                futures = []
+                for job_card in soup.find_all("div", class_="base-search-card"):
+                    job_url = None
+                    href_tag = job_card.find("a", class_="base-card__full-link")
+                    if href_tag and "href" in href_tag.attrs:
+                        href = href_tag.attrs["href"].split("?")[0]
+                        job_id = href.split("-")[-1]
+                        job_url = f"{self.url}/jobs/view/{job_id}"
+
+                    with url_lock:
+                        if job_url in seen_urls:
+                            continue
+                        seen_urls.add(job_url)
+
+                    futures.append(executor.submit(self.process_job, job_card, job_url))
+
+                for future in as_completed(futures):
+                    try:
+                        job_post = future.result()
+                        if job_post:
+                            job_list.append(job_post)
+                    except Exception as e:
+                        raise LinkedInException(
+                            "Exception occurred while processing jobs"
+                        )
+            page += 25

         job_list = job_list[: scraper_input.results_wanted]
         return JobResponse(jobs=job_list)

-    def get_description(self, job_page_url: str) -> Optional[str]:
+    def process_job(self, job_card: Tag, job_url: str) -> Optional[JobPost]:
+        title_tag = job_card.find("span", class_="sr-only")
+        title = title_tag.get_text(strip=True) if title_tag else "N/A"
+
+        company_tag = job_card.find("h4", class_="base-search-card__subtitle")
+        company_a_tag = company_tag.find("a") if company_tag else None
+        company = company_a_tag.get_text(strip=True) if company_a_tag else "N/A"
+
+        metadata_card = job_card.find("div", class_="base-search-card__metadata")
+        location = self.get_location(metadata_card)
+
+        datetime_tag = (
+            metadata_card.find("time", class_="job-search-card__listdate")
+            if metadata_card
+            else None
+        )
+        date_posted = None
+        if datetime_tag and "datetime" in datetime_tag.attrs:
+            datetime_str = datetime_tag["datetime"]
+            try:
+                date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
+            except Exception as e:
+                date_posted = None
+        benefits_tag = job_card.find("span", class_="result-benefits__text")
+        benefits = " ".join(benefits_tag.get_text().split()) if benefits_tag else None
+
+        description, job_type = self.get_job_description(job_url)
+
+        return JobPost(
+            title=title,
+            description=description,
+            company_name=company,
+            location=location,
+            date_posted=date_posted,
+            job_url=job_url,
+            # job_type=[JobType.FULL_TIME],
+            job_type=job_type,
+            benefits=benefits,
+            emails=extract_emails_from_text(description) if description else None,
+            num_urgent_words=count_urgent_words(description) if description else None,
+        )
+
+    def get_job_description(
+        self, job_page_url: str
+    ) -> tuple[None, None] | tuple[str | None, tuple[str | None, JobType | None]]:
         """
         Retrieves job description by going to the job page url
         :param job_page_url:
@@ -181,19 +207,19 @@ class LinkedInScraper(Scraper):
             "div", class_=lambda x: x and "show-more-less-html__markup" in x
         )

-        text_content = None
+        description = None
         if div_content:
-            text_content = " ".join(div_content.get_text().split()).strip()
+            description = " ".join(div_content.get_text().split()).strip()

         def get_job_type(
-            soup: BeautifulSoup,
-        ) -> Tuple[Optional[str], Optional[JobType]]:
+            soup_job_type: BeautifulSoup,
+        ) -> list[JobType] | None:
             """
             Gets the job type from job page
-            :param soup:
+            :param soup_job_type:
             :return: JobType
             """
-            h3_tag = soup.find(
+            h3_tag = soup_job_type.find(
                 "h3",
                 class_="description__job-criteria-subheader",
                 string=lambda text: "Employment type" in text,
@@ -210,16 +236,9 @@ class LinkedInScraper(Scraper):
             employment_type = employment_type.lower()
             employment_type = employment_type.replace("-", "")

-            return LinkedInScraper.get_enum_from_value(employment_type)
+            return [get_enum_from_job_type(employment_type)]

-        return text_content, get_job_type(soup)
-
-    @staticmethod
-    def get_enum_from_value(value_str):
-        for job_type in JobType:
-            if value_str in job_type.value:
-                return job_type
-        return None
+        return description, get_job_type(soup)

     def get_location(self, metadata_card: Optional[Tag]) -> Location:
         """
```
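Net effect of the rewritten loop: offset pagination in steps of 25 against LinkedIn's public guest endpoint, with a fixed-delay retry whenever a 429 comes back. A condensed sketch of that control flow (the endpoint, retry count, and delay are taken from the diff; everything else is illustrative):

```python
import time

import requests

GUEST_SEARCH = "https://www.linkedin.com/jobs-guest/jobs/api/seeMoreJobPostings/search"
MAX_RETRIES, DELAY = 3, 10  # values from the diff


def fetch_page(params: dict) -> requests.Response:
    """GET one results page, sleeping and retrying when LinkedIn rate-limits us."""
    for _ in range(MAX_RETRIES):
        response = requests.get(GUEST_SEARCH, params=params, timeout=10)
        if response.status_code == 429:  # rate-limited: back off and retry
            time.sleep(DELAY)
            continue
        response.raise_for_status()
        return response
    raise RuntimeError("Max retries reached, failed to get a valid response")
```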
**`src/jobspy/scrapers/utils.py`** (new file, 63 lines):

```diff
@@ -0,0 +1,63 @@
+import re
+
+import requests
+import tls_client
+from ..jobs import JobType
+
+
+def count_urgent_words(description: str) -> int:
+    """
+    Count the number of urgent words or phrases in a job description.
+    """
+    urgent_patterns = re.compile(
+        r"\burgen(t|cy)|\bimmediate(ly)?\b|start asap|\bhiring (now|immediate(ly)?)\b",
+        re.IGNORECASE,
+    )
+    matches = re.findall(urgent_patterns, description)
+    count = len(matches)
+
+    return count
+
+
+def extract_emails_from_text(text: str) -> list[str] | None:
+    if not text:
+        return None
+    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
+    return email_regex.findall(text)
+
+
+def create_session(proxy: dict | None = None, is_tls: bool = True):
+    """
+    Creates a tls client session
+
+    :return: A session object with or without proxies.
+    """
+    if is_tls:
+        session = tls_client.Session(
+            client_identifier="chrome112",
+            random_tls_extension_order=True,
+        )
+        session.proxies = proxy
+        # TODO multiple proxies
+        # if self.proxies:
+        #     session.proxies = {
+        #         "http": random.choice(self.proxies),
+        #         "https": random.choice(self.proxies),
+        #     }
+    else:
+        session = requests.Session()
+        session.allow_redirects = True
+        session.proxies.update(proxy)
+
+    return session
+
+
+def get_enum_from_job_type(job_type_str: str) -> JobType | None:
+    """
+    Given a string, returns the corresponding JobType enum member if a match is found.
+    """
+    res = None
+    for job_type in JobType:
+        if job_type_str in job_type.value:
+            res = job_type
+    return res
```
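A quick, hypothetical usage of the pure helpers in this new file (assumes the package is installed so `jobspy.scrapers.utils` is importable; the `JobType` result depends on that enum's value aliases):

```python
from jobspy.scrapers.utils import (
    count_urgent_words,
    extract_emails_from_text,
    get_enum_from_job_type,
)

description = "Hiring now! Start ASAP. Contact recruiting@example.com."

print(count_urgent_words(description))        # 2: "Hiring now" and "Start ASAP"
print(extract_emails_from_text(description))  # ['recruiting@example.com']
print(get_enum_from_job_type("fulltime"))     # JobType.FULL_TIME, assuming "fulltime"
                                              # is among that member's value aliases
```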
**jobspy.scrapers.ziprecruiter**:

```diff
@@ -5,30 +5,18 @@ jobspy.scrapers.ziprecruiter
 This module contains routines to scrape ZipRecruiter.
 """
 import math
-import json
+import time
 import re
-import traceback
-from datetime import datetime
-from typing import Optional, Tuple
-from urllib.parse import urlparse, parse_qs
-
-import tls_client
-import requests
+from datetime import datetime, date
+from typing import Optional, Tuple, Any
+
 from bs4 import BeautifulSoup
-from bs4.element import Tag
-from concurrent.futures import ThreadPoolExecutor, Future
+from concurrent.futures import ThreadPoolExecutor

 from .. import Scraper, ScraperInput, Site
 from ..exceptions import ZipRecruiterException
-from ...jobs import (
-    JobPost,
-    Compensation,
-    CompensationInterval,
-    Location,
-    JobResponse,
-    JobType,
-    Country,
-)
+from ..utils import count_urgent_words, extract_emails_from_text, create_session
+from ...jobs import JobPost, Compensation, Location, JobResponse, JobType


 class ZipRecruiterScraper(Scraper):
@@ -42,29 +30,24 @@ class ZipRecruiterScraper(Scraper):

         self.jobs_per_page = 20
         self.seen_urls = set()
-        self.session = tls_client.Session(
-            client_identifier="chrome112", random_tls_extension_order=True
-        )

-    def find_jobs_in_page(
-        self, scraper_input: ScraperInput, page: int
-    ) -> tuple[list[JobPost], int | None]:
+    def find_jobs_in_page(self, scraper_input: ScraperInput, continue_token: str | None = None) -> Tuple[list[JobPost], Optional[str]]:
         """
         Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
         :param scraper_input:
-        :param page:
-        :param session:
-        :return: jobs found on page, total number of jobs found for search
+        :param continue_token:
+        :return: jobs found on page
         """
-        job_list: list[JobPost] = []
+        params = self.add_params(scraper_input)
+        if continue_token:
+            params['continue'] = continue_token
         try:
-            response = self.session.get(
-                self.url + "/jobs-search",
-                headers=ZipRecruiterScraper.headers(),
-                params=ZipRecruiterScraper.add_params(scraper_input, page),
-                allow_redirects=True,
-                proxy=self.proxy,
-                timeout_seconds=10,
+            session = create_session(self.proxy, is_tls=False)
+            response = session.get(
+                f"https://api.ziprecruiter.com/jobs-app/jobs",
+                headers=self.headers(),
+                params=self.add_params(scraper_input),
+                timeout=10,
             )
             if response.status_code != 200:
                 raise ZipRecruiterException(
@@ -74,194 +57,66 @@ class ZipRecruiterScraper(Scraper):
             if "Proxy responded with non 200 code" in str(e):
                 raise ZipRecruiterException("bad proxy")
             raise ZipRecruiterException(str(e))
-        else:
-            soup = BeautifulSoup(response.text, "html.parser")
-            js_tag = soup.find("script", {"id": "js_variables"})

-            if js_tag:
-                page_json = json.loads(js_tag.string)
-                jobs_list = page_json.get("jobList")
-                if jobs_list:
-                    page_variant = "javascript"
-                    # print('type javascript', len(jobs_list))
-                else:
-                    page_variant = "html_2"
-                    jobs_list = soup.find_all("div", {"class": "job_content"})
-                    # print('type 2 html', len(jobs_list))
-            else:
-                page_variant = "html_1"
-                jobs_list = soup.find_all("li", {"class": "job-listing"})
-                # print('type 1 html', len(jobs_list))
-        # with open("zip_method_8.html", "w") as f:
-        #     f.write(soup.prettify())
+        time.sleep(5)
+        response_data = response.json()
+        jobs_list = response_data.get("jobs", [])
+        next_continue_token = response_data.get('continue', None)

-        with ThreadPoolExecutor(max_workers=10) as executor:
-            if page_variant == "javascript":
-                job_results = [
-                    executor.submit(self.process_job_javascript, job)
-                    for job in jobs_list
-                ]
-            elif page_variant == "html_1":
-                job_results = [
-                    executor.submit(self.process_job_html_1, job) for job in jobs_list
-                ]
-            elif page_variant == "html_2":
-                job_results = [
-                    executor.submit(self.process_job_html_2, job) for job in jobs_list
-                ]
+        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
+            job_results = [
+                executor.submit(self.process_job, job)
+                for job in jobs_list
+            ]

         job_list = [result.result() for result in job_results if result.result()]
-        return job_list
+        return job_list, next_continue_token

     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
-        Scrapes ZipRecruiter for jobs with scraper_input criteria
-        :param scraper_input:
-        :return: job_response
+        Scrapes ZipRecruiter for jobs with scraper_input criteria.
+        :param scraper_input: Information about job search criteria.
+        :return: JobResponse containing a list of jobs.
         """
-        #: get first page to initialize session
-        job_list: list[JobPost] = self.find_jobs_in_page(scraper_input, 1)
-        pages_to_process = max(
-            3, math.ceil(scraper_input.results_wanted / self.jobs_per_page)
-        )
+        job_list: list[JobPost] = []
+        continue_token = None

-        with ThreadPoolExecutor(max_workers=10) as executor:
-            futures: list[Future] = [
-                executor.submit(self.find_jobs_in_page, scraper_input, page)
-                for page in range(2, pages_to_process + 1)
-            ]
+        max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)

-            for future in futures:
-                jobs = future.result()
-
-                job_list += jobs
+        for page in range(1, max_pages + 1):
+            if len(job_list) >= scraper_input.results_wanted:
+                break
+
+            jobs_on_page, continue_token = self.find_jobs_in_page(scraper_input, continue_token)
+            if jobs_on_page:
+                job_list.extend(jobs_on_page)
+
+            if not continue_token:
+                break
+
+        if len(job_list) > scraper_input.results_wanted:
+            job_list = job_list[:scraper_input.results_wanted]

-        job_list = job_list[: scraper_input.results_wanted]
         return JobResponse(jobs=job_list)

-    def process_job_html_1(self, job: Tag) -> Optional[JobPost]:
-        """
-        Parses a job from the job content tag
-        :param job: BeautifulSoup Tag for one job post
-        :return JobPost
-        """
-        job_url = job.find("a", {"class": "job_link"})["href"]
-        if job_url in self.seen_urls:
-            return None
-
-        title = job.find("h2", {"class": "title"}).text
-        company = job.find("a", {"class": "company_name"}).text.strip()
-
-        description, updated_job_url = self.get_description(job_url)
-        job_url = updated_job_url if updated_job_url else job_url
-        if description is None:
-            description = job.find("p", {"class": "job_snippet"}).text.strip()
-
-        job_type_element = job.find("li", {"class": "perk_item perk_type"})
-        job_type = None
-        if job_type_element:
-            job_type_text = (
-                job_type_element.text.strip().lower().replace("_", "").replace(" ", "")
-            )
-            job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
-
-        date_posted = ZipRecruiterScraper.get_date_posted(job)
-
-        job_post = JobPost(
-            title=title,
-            description=description,
-            company_name=company,
-            location=ZipRecruiterScraper.get_location(job),
-            job_type=job_type,
-            compensation=ZipRecruiterScraper.get_compensation(job),
-            date_posted=date_posted,
-            job_url=job_url,
-        )
-        return job_post
-
-    def process_job_html_2(self, job: Tag) -> Optional[JobPost]:
-        """
-        Parses a job from the job content tag for a second variat of HTML that ZR uses
-        :param job: BeautifulSoup Tag for one job post
-        :return JobPost
-        """
-        job_url = job.find("a", class_="job_link")["href"]
-        title = job.find("h2", class_="title").text
-        company = job.find("a", class_="company_name").text.strip()
-
-        description, updated_job_url = self.get_description(job_url)
-        job_url = updated_job_url if updated_job_url else job_url
-        if description is None:
-            description = job.find("p", class_="job_snippet").get_text().strip()
-
-        job_type_text = job.find("li", class_="perk_item perk_type")
-        job_type = None
-        if job_type_text:
-            job_type_text = (
-                job_type_text.get_text()
-                .strip()
-                .lower()
-                .replace("-", "")
-                .replace(" ", "")
-            )
-            job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
-        date_posted = ZipRecruiterScraper.get_date_posted(job)
-
-        job_post = JobPost(
-            title=title,
-            description=description,
-            company_name=company,
-            location=ZipRecruiterScraper.get_location(job),
-            job_type=job_type,
-            compensation=ZipRecruiterScraper.get_compensation(job),
-            date_posted=date_posted,
-            job_url=job_url,
-        )
-        return job_post
-
-    def process_job_javascript(self, job: dict) -> JobPost:
-        title = job.get("Title")
-        job_url = job.get("JobURL")
-
-        description, updated_job_url = self.get_description(job_url)
-        job_url = updated_job_url if updated_job_url else job_url
-        if description is None:
-            description = BeautifulSoup(
-                job.get("Snippet", "").strip(), "html.parser"
-            ).get_text()
-
-        company = job.get("OrgName")
-        location = Location(
-            city=job.get("City"), state=job.get("State"), country=Country.US_CANADA
-        )
-        job_type = ZipRecruiterScraper.get_job_type_enum(
-            job.get("EmploymentType", "").replace("-", "").lower()
-        )
-
-        formatted_salary = job.get("FormattedSalaryShort", "")
-        salary_parts = formatted_salary.split(" ")
-
-        min_salary_str = salary_parts[0][1:].replace(",", "")
-        if "." in min_salary_str:
-            min_amount = int(float(min_salary_str) * 1000)
-        else:
-            min_amount = int(min_salary_str.replace("K", "000"))
-
-        if len(salary_parts) >= 3 and salary_parts[2].startswith("$"):
-            max_salary_str = salary_parts[2][1:].replace(",", "")
-            if "." in max_salary_str:
-                max_amount = int(float(max_salary_str) * 1000)
-            else:
-                max_amount = int(max_salary_str.replace("K", "000"))
-        else:
-            max_amount = 0
-
-        compensation = Compensation(
-            interval=CompensationInterval.YEARLY,
-            min_amount=min_amount,
-            max_amount=max_amount,
-            currency="USD/CAD",
-        )
+    @staticmethod
+    def process_job(job: dict) -> JobPost:
+        """ Processes an individual job dict from the response """
+        title = job.get("name")
+        job_url = job.get("job_url")
+
+        description = BeautifulSoup(
+            job.get("job_description", "").strip(), "html.parser"
+        ).get_text()
+
+        company = job['hiring_company'].get("name") if "hiring_company" in job else None
+        location = Location(
+            city=job.get("job_city"), state=job.get("job_state"), country='usa' if job.get("job_country") == 'US' else 'canada'
+        )
+        job_type = ZipRecruiterScraper.get_job_type_enum(
+            job.get("employment_type", "").replace("_", "").lower()
+        )
+
         save_job_url = job.get("SaveJobURL", "")
         posted_time_match = re.search(
             r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url
@@ -272,62 +127,37 @@ class ZipRecruiterScraper(Scraper):
             date_posted = date_posted_obj.date()
         else:
             date_posted = date.today()
-        job_url = job.get("JobURL")

         return JobPost(
             title=title,
-            description=description,
             company_name=company,
             location=location,
             job_type=job_type,
-            compensation=compensation,
+            compensation=Compensation(
+                interval="yearly" if job.get("compensation_interval") == "annual" else job.get("compensation_interval"),
+                min_amount=int(job["compensation_min"]) if "compensation_min" in job else None,
+                max_amount=int(job["compensation_max"]) if "compensation_max" in job else None,
+                currency=job.get("compensation_currency"),
+            ),
             date_posted=date_posted,
             job_url=job_url,
+            description=description,
+            emails=extract_emails_from_text(description) if description else None,
+            num_urgent_words=count_urgent_words(description) if description else None,
         )
-        return job_post

     @staticmethod
-    def get_job_type_enum(job_type_str: str) -> Optional[JobType]:
+    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
         for job_type in JobType:
             if job_type_str in job_type.value:
-                a = True
-                return job_type
+                return [job_type]
         return None

-    def get_description(self, job_page_url: str) -> Tuple[Optional[str], Optional[str]]:
-        """
-        Retrieves job description by going to the job page url
-        :param job_page_url:
-        :param session:
-        :return: description or None, response url
-        """
-        try:
-            response = requests.get(
-                job_page_url,
-                headers=ZipRecruiterScraper.headers(),
-                allow_redirects=True,
-                timeout=5,
-                proxies=self.proxy,
-            )
-            if response.status_code not in range(200, 400):
-                return None, None
-        except Exception as e:
-            return None, None
-
-        html_string = response.content
-        soup_job = BeautifulSoup(html_string, "html.parser")
-
-        job_description_div = soup_job.find("div", {"class": "job_description"})
-        if job_description_div:
-            return job_description_div.text.strip(), response.url
-        return None, response.url
-
     @staticmethod
-    def add_params(scraper_input, page) -> Optional[str]:
+    def add_params(scraper_input) -> dict[str, str | Any]:
         params = {
             "search": scraper_input.search_term,
             "location": scraper_input.location,
-            "page": page,
             "form": "jobs-landing",
         }
         job_type_value = None
@@ -352,107 +182,6 @@ class ZipRecruiterScraper(Scraper):

         return params

-    @staticmethod
-    def get_interval(interval_str: str):
-        """
-        Maps the interval alias to its appropriate CompensationInterval.
-        :param interval_str
-        :return: CompensationInterval
-        """
-        interval_alias = {"annually": CompensationInterval.YEARLY}
-        interval_str = interval_str.lower()
-
-        if interval_str in interval_alias:
-            return interval_alias[interval_str]
-
-        return CompensationInterval(interval_str)
-
-    @staticmethod
-    def get_date_posted(job: BeautifulSoup) -> Optional[datetime.date]:
-        """
-        Extracts the date a job was posted
-        :param job
-        :return: date the job was posted or None
-        """
-        button = job.find(
-            "button", {"class": "action_input save_job zrs_btn_secondary_200"}
-        )
-        if not button:
-            return None
-
-        url_time = button.get("data-href", "")
-        url_components = urlparse(url_time)
-        params = parse_qs(url_components.query)
-        posted_time_str = params.get("posted_time", [None])[0]
-
-        if posted_time_str:
-            posted_date = datetime.strptime(
-                posted_time_str, "%Y-%m-%dT%H:%M:%SZ"
-            ).date()
-            return posted_date
-
-        return None
-
-    @staticmethod
-    def get_compensation(job: BeautifulSoup) -> Optional[Compensation]:
-        """
-        Parses the compensation tag from the job BeautifulSoup object
-        :param job
-        :return: Compensation object or None
-        """
-        pay_element = job.find("li", {"class": "perk_item perk_pay"})
-        if pay_element is None:
-            return None
-        pay = pay_element.find("div", {"class": "value"}).find("span").text.strip()
-
-        def create_compensation_object(pay_string: str) -> Compensation:
-            """
-            Creates a Compensation object from a pay_string
-            :param pay_string
-            :return: compensation
-            """
-            interval = ZipRecruiterScraper.get_interval(pay_string.split()[-1])
-
-            amounts = []
-            for amount in pay_string.split("to"):
-                amount = amount.replace(",", "").strip("$ ").split(" ")[0]
-                if "K" in amount:
-                    amount = amount.replace("K", "")
-                    amount = int(float(amount)) * 1000
-                else:
-                    amount = int(float(amount))
-                amounts.append(amount)
-
-            compensation = Compensation(
-                interval=interval,
-                min_amount=min(amounts),
-                max_amount=max(amounts),
-                currency="USD/CAD",
-            )
-
-            return compensation
-
-        return create_compensation_object(pay)
-
-    @staticmethod
-    def get_location(job: BeautifulSoup) -> Location:
-        """
-        Extracts the job location from BeatifulSoup object
-        :param job:
-        :return: location
-        """
-        location_link = job.find("a", {"class": "company_location"})
-        if location_link is not None:
-            location_string = location_link.text.strip()
-            parts = location_string.split(", ")
-            if len(parts) == 2:
-                city, state = parts
-            else:
-                city, state = None, None
-        else:
-            city, state = None, None
-        return Location(city=city, state=state, country=Country.US_CANADA)
-
     @staticmethod
     def headers() -> dict:
         """
@@ -460,5 +189,13 @@ class ZipRecruiterScraper(Scraper):
         :return: dict - Dictionary containing headers
         """
         return {
-            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
+            'Host': 'api.ziprecruiter.com',
+            'Cookie': 'ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38; SplitSV=2016-10-19%3AU2FsdGVkX19f9%2Bx70knxc%2FeR3xXR8lWoTcYfq5QjmLU%3D%0A; __cf_bm=qXim3DtLPbOL83GIp.ddQEOFVFTc1OBGPckiHYxcz3o-1698521532-0-AfUOCkgCZyVbiW1ziUwyefCfzNrJJTTKPYnif1FZGQkT60dMowmSU/Y/lP+WiygkFPW/KbYJmyc+MQSkkad5YygYaARflaRj51abnD+SyF9V; zglobalid=68d49bd5-0326-428e-aba8-8a04b64bc67c.af2d99ff7c03.653d61bb; ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38',
+            'accept': '*/*',
+            'x-zr-zva-override': '100000000;vid:ZT1huzm_EQlDTVEc',
+            'x-pushnotificationid': '0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0',
+            'x-deviceid': 'D77B3A92-E589-46A4-8A39-6EF6F1D86006',
+            'user-agent': 'Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)',
+            'authorization': 'Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==',
+            'accept-language': 'en-US,en;q=0.9'
         }
```
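The ZipRecruiter rewrite swaps HTML parsing for the mobile JSON API and replaces page numbers with a `continue` cursor returned by each response. A minimal sketch of that cursor loop (the endpoint and parameter names are from the diff; the required auth headers shown above are omitted here for brevity):

```python
import requests


def fetch_all(params: dict, results_wanted: int) -> list[dict]:
    """Follow ZipRecruiter's `continue` cursor until enough jobs are collected."""
    jobs: list[dict] = []
    continue_token = None
    while len(jobs) < results_wanted:
        if continue_token:
            params["continue"] = continue_token
        data = requests.get(
            "https://api.ziprecruiter.com/jobs-app/jobs", params=params, timeout=10
        ).json()
        jobs.extend(data.get("jobs", []))
        continue_token = data.get("continue")
        if not continue_token:  # no cursor means the last page was reached
            break
    return jobs[:results_wanted]
```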
**Tests** — each suite now imports pandas and asserts that `scrape_jobs` returns a non-empty `DataFrame` instead of checking `result.errors`.

`test_all`:

```diff
@@ -1,4 +1,5 @@
 from ..jobspy import scrape_jobs
+import pandas as pd


 def test_all():
@@ -7,4 +8,7 @@ def test_all():
         search_term="software engineer",
         results_wanted=5,
     )
-    assert result is not None and result.errors.empty is True
+
+    assert (
+        isinstance(result, pd.DataFrame) and not result.empty
+    ), "Result should be a non-empty DataFrame"
```

`test_indeed`:

```diff
@@ -1,4 +1,5 @@
 from ..jobspy import scrape_jobs
+import pandas as pd


 def test_indeed():
@@ -6,4 +7,6 @@ def test_indeed():
         site_name="indeed",
         search_term="software engineer",
     )
-    assert result is not None and result.errors.empty is True
+    assert (
+        isinstance(result, pd.DataFrame) and not result.empty
+    ), "Result should be a non-empty DataFrame"
```

`test_linkedin`:

```diff
@@ -1,4 +1,5 @@
 from ..jobspy import scrape_jobs
+import pandas as pd


 def test_linkedin():
@@ -6,4 +7,6 @@ def test_linkedin():
         site_name="linkedin",
         search_term="software engineer",
     )
-    assert result is not None and result.errors.empty is True
+    assert (
+        isinstance(result, pd.DataFrame) and not result.empty
+    ), "Result should be a non-empty DataFrame"
```

`test_ziprecruiter`:

```diff
@@ -1,4 +1,5 @@
 from ..jobspy import scrape_jobs
+import pandas as pd


 def test_ziprecruiter():
@@ -7,4 +8,6 @@ def test_ziprecruiter():
         search_term="software engineer",
     )

-    assert result is not None and result.errors.empty is True
+    assert (
+        isinstance(result, pd.DataFrame) and not result.empty
+    ), "Result should be a non-empty DataFrame"
```
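For a hands-on check equivalent to what these tests assert, a hypothetical snippet (assumes the `jobspy` package is importable; the `scrape_jobs` arguments are the ones used in the tests):

```python
import pandas as pd

from jobspy import scrape_jobs

result = scrape_jobs(
    site_name="indeed",
    search_term="software engineer",
    results_wanted=5,
)
assert isinstance(result, pd.DataFrame) and not result.empty
print(result.head())
```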