Mirror of https://github.com/Bunsly/JobSpy.git (synced 2026-03-05 12:04:33 -08:00)
Compare commits
38 Commits
8570c0651e
8678b0bbe4
60d4d911c9
2a0cba8c7e
de70189fa2
b55c0eb86d
88c95c4ad5
d8d33d602f
6330c14879
48631ea271
edffe18e65
0988230a24
d000a81eb3
ccb0c17660
df339610fa
c501006bd8
89a3ee231c
6439f71433
7f6271b2e0
5cb7ffe5fd
cd29f79796
65d2e5e707
08d63a87a2
1ffdb1756f
1185693422
dcd7144318
bf73c061bd
8dd08ed9fd
5d3df732e6
86f858e06d
1089d1f0a5
3e93454738
0d150d519f
cc3497f929
5986f75346
4b7bdb9313
80213f28d2
ada38532c3
22  .github/workflows/python-test.yml (vendored, new file)
@@ -0,0 +1,22 @@
+name: Python Tests
+
+on:
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.8'
+      - name: Install dependencies
+        run: |
+          pip install poetry
+          poetry install
+      - name: Run tests
+        run: poetry run pytest src/tests/
164  README.md
@@ -11,10 +11,7 @@ work with us.*
 
 - Scrapes job postings from **LinkedIn**, **Indeed**, **Glassdoor**, & **ZipRecruiter** simultaneously
 - Aggregates the job postings in a Pandas DataFrame
-- Proxy support
+- Proxies support
 
-[Video Guide for JobSpy](https://www.youtube.com/watch?v=RuP1HrAZnxs&pp=ygUgam9icyBzY3JhcGVyIGJvdCBsaW5rZWRpbiBpbmRlZWQ%3D) -
-Updated for release v1.1.3
-
 
 
@@ -38,17 +35,21 @@ jobs = scrape_jobs(
     location="Dallas, TX",
     results_wanted=20,
     hours_old=72, # (only Linkedin/Indeed is hour specific, others round up to days old)
-    country_indeed='USA'  # only needed for indeed / glassdoor
+    country_indeed='USA',  # only needed for indeed / glassdoor
+
+    # linkedin_fetch_description=True # get full description , direct job url , company industry and job level (seniority level) for linkedin (slower)
+    # proxies=["208.195.175.46:65095", "208.195.175.45:65095", "localhost"],
+
 )
 print(f"Found {len(jobs)} jobs")
 print(jobs.head())
-jobs.to_csv("jobs.csv", quoting=csv.QUOTE_NONNUMERIC, escapechar="\\", index=False) # to_xlsx
+jobs.to_csv("jobs.csv", quoting=csv.QUOTE_NONNUMERIC, escapechar="\\", index=False) # to_excel
 ```
 
 ### Output
 
 ```
-SITE      TITLE                             COMPANY_NAME      CITY          STATE  JOB_TYPE  INTERVAL  MIN_AMOUNT  MAX_AMOUNT  JOB_URL                                            DESCRIPTION
+SITE      TITLE                             COMPANY           CITY          STATE  JOB_TYPE  INTERVAL  MIN_AMOUNT  MAX_AMOUNT  JOB_URL                                            DESCRIPTION
 indeed    Software Engineer                 AMERICAN SYSTEMS  Arlington     VA     None      yearly    200000      150000      https://www.indeed.com/viewjob?jk=5e409e577046... THIS POSITION COMES WITH A 10K SIGNING BONUS!...
 indeed    Senior Software Engineer          TherapyNotes.com  Philadelphia  PA     fulltime  yearly    135000      110000      https://www.indeed.com/viewjob?jk=da39574a40cb... About Us TherapyNotes is the national leader i...
 linkedin  Software Engineer - Early Career  Lockheed Martin   Sunnyvale     CA     fulltime  yearly    None        None        https://www.linkedin.com/jobs/view/3693012711     Description:By bringing together people that u...
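
The round-robin `proxies` list shown above can be passed straight to `scrape_jobs`; a minimal sketch, assuming the proxy addresses (placeholders here) are reachable:

```python
import csv
from jobspy import scrape_jobs

# The two proxy addresses are placeholders; each board's scraper
# rotates through the supplied list on successive requests.
jobs = scrape_jobs(
    site_name=["indeed", "linkedin"],
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=20,
    proxies=["user:pass@203.0.113.1:8080", "localhost"],
)
jobs.to_csv("jobs.csv", quoting=csv.QUOTE_NONNUMERIC, escapechar="\\", index=False)
```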
@@ -60,66 +61,121 @@ zip_recruiter Software Developer TEKsystems Phoenix
 ### Parameters for `scrape_jobs()`
 
 ```plaintext
-Required
-├── site_type (List[enum]): linkedin, zip_recruiter, indeed, glassdoor
-└── search_term (str)
 Optional
+├── site_name (list|str):
+|    linkedin, zip_recruiter, indeed, glassdoor
+|    (default is all four)
+│
+├── search_term (str)
+│
 ├── location (str)
-├── distance (int): in miles, default 50
-├── job_type (enum): fulltime, parttime, internship, contract
-├── proxy (str): in format 'http://user:pass@host:port'
+│
+├── distance (int):
+|    in miles, default 50
+│
+├── job_type (str):
+|    fulltime, parttime, internship, contract
+│
+├── proxies (list):
+|    in format ['user:pass@host:port', 'localhost']
+|    each job board scraper will round robin through the proxies
+│
 ├── is_remote (bool)
-├── linkedin_fetch_description (bool): fetches full description for LinkedIn (slower)
-├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
-├── easy_apply (bool): filters for jobs that are hosted on the job board site (not supported on Indeed)
-├── linkedin_company_ids (list[int): searches for linkedin jobs with specific company ids
-├── description_format (enum): markdown, html (format type of the job descriptions)
-├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
-├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
-├── hours_old (int): filters jobs by the number of hours since the job was posted (ZipRecruiter and Glassdoor round up to next day. If you use this on Indeed, it will not filter by job_type or is_remote)
+│
+├── results_wanted (int):
+|    number of job results to retrieve for each site specified in 'site_name'
+│
+├── easy_apply (bool):
+|    filters for jobs that are hosted on the job board site
+│
+├── description_format (str):
+|    markdown, html (Format type of the job descriptions. Default is markdown.)
+│
+├── offset (int):
+|    starts the search from an offset (e.g. 25 will start the search from the 25th result)
+│
+├── hours_old (int):
+|    filters jobs by the number of hours since the job was posted
+|    (ZipRecruiter and Glassdoor round up to next day.)
+│
+├── verbose (int) {0, 1, 2}:
+|    Controls the verbosity of the runtime printouts
+|    (0 prints only errors, 1 is errors+warnings, 2 is all logs. Default is 2.)
+
+├── linkedin_fetch_description (bool):
+|    fetches full description and direct job url for LinkedIn (Increases requests by O(n))
+│
+├── linkedin_company_ids (list[int]):
+|    searches for linkedin jobs with specific company ids
+
+├── country_indeed (str):
+|    filters the country on Indeed & Glassdoor (see below for correct spelling)
+
+├── enforce_annual_salary (bool):
+|    converts wages to annual salary
 ```
 
+```
+├── Indeed limitations:
+|    Only one from this list can be used in a search:
+|    - hours_old
+|    - job_type & is_remote
+|    - easy_apply
+│
+└── LinkedIn limitations:
+|    Only one from this list can be used in a search:
+|    - hours_old
+|    - easy_apply
+```
+
 ### JobPost Schema
 
 ```plaintext
 JobPost
-├── title (str)
-├── company (str)
-├── company_url (str)
-├── job_url (str)
-├── location (object)
-│ ├── country (str)
-│ ├── city (str)
-│ ├── state (str)
-├── description (str)
-├── job_type (str): fulltime, parttime, internship, contract
-├── compensation (object)
-│ ├── interval (str): yearly, monthly, weekly, daily, hourly
-│ ├── min_amount (int)
-│ ├── max_amount (int)
-│ └── currency (enum)
-└── date_posted (date)
-└── emails (str)
-└── is_remote (bool)
+├── title
+├── company
+├── company_url
+├── job_url
+├── location
+│ ├── country
+│ ├── city
+│ ├── state
+├── description
+├── job_type: fulltime, parttime, internship, contract
+├── job_function
+│ ├── interval: yearly, monthly, weekly, daily, hourly
+│ ├── min_amount
+│ ├── max_amount
+│ ├── currency
+│ └── salary_source: direct_data, description (parsed from posting)
+├── date_posted
+├── emails
+└── is_remote
+
+Linkedin specific
+└── job_level
+
+Linkedin & Indeed specific
+└── company_industry
 
 Indeed specific
-├── company_country (str)
-└── company_addresses (str)
-└── company_industry (str)
-└── company_employees_label (str)
-└── company_revenue_label (str)
-└── company_description (str)
-└── ceo_name (str)
-└── ceo_photo_url (str)
-└── logo_photo_url (str)
-└── banner_photo_url (str)
+├── company_country
+├── company_addresses
+├── company_employees_label
+├── company_revenue_label
+├── company_description
+├── ceo_name
+├── ceo_photo_url
+├── logo_photo_url
+└── banner_photo_url
 ```
 
 ## Supported Countries for Job Searching
 
 ### **LinkedIn**
 
-LinkedIn searches globally & uses only the `location` parameter. You can only fetch 1000 jobs max from the LinkedIn endpoint we are using
+LinkedIn searches globally & uses only the `location` parameter.
 
 ### **ZipRecruiter**
 
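
A short sketch exercising the newly documented knobs; the search values are illustrative, while the parameter names come from the list above:

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed", "glassdoor"],
    search_term="data engineer",          # illustrative query
    location="Austin, TX",
    results_wanted=10,
    country_indeed="USA",                  # used by Indeed & Glassdoor
    enforce_annual_salary=True,            # hourly/daily/weekly/monthly wages become yearly
    verbose=1,                             # errors + warnings only
)
print(jobs[["title", "salary_source", "interval", "min_amount", "max_amount"]].head())
```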
@@ -154,8 +210,8 @@ You can specify the following countries when searching on Indeed (use the exact
 
 ## Notes
 * Indeed is the best scraper currently with no rate limiting.
-* Glassdoor/Ziprecruiter can only fetch 900/1000 jobs from the endpoints we are using on a given search.
-* LinkedIn is the most restrictive and usually rate limits around the 10th page.
+* All the job board endpoints are capped at around 1000 jobs on a given search.
+* LinkedIn is the most restrictive and usually rate limits around the 10th page with one ip. Proxies are a must basically.
 
 ## Frequently Asked Questions
 
@@ -170,7 +226,7 @@ persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
 **Q: Received a response code 429?**
 **A:** This indicates that you have been blocked by the job board site for sending too many requests. All of the job board sites are aggressive with blocking. We recommend:
 
-- Waiting some time between scrapes (site-dependent).
-- Trying a VPN or proxy to change your IP address.
+- Wait some time between scrapes (site-dependent).
+- Try using the proxies param to change your IP address.
 
 ---
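
A sketch of the first recommendation, assuming a simple fixed cooldown (the 60-second value is arbitrary and the search terms are illustrative):

```python
import time
from jobspy import scrape_jobs

for term in ["python developer", "data analyst"]:
    jobs = scrape_jobs(site_name=["indeed"], search_term=term, country_indeed="USA")
    print(f"{term}: {len(jobs)} jobs")
    time.sleep(60)  # site-dependent pause between scrapes to avoid 429s
```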
@@ -1,30 +0,0 @@
-from jobspy import scrape_jobs
-import pandas as pd
-
-jobs: pd.DataFrame = scrape_jobs(
-    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
-    search_term="software engineer",
-    location="Dallas, TX",
-    results_wanted=25,  # be wary the higher it is, the more likey you'll get blocked (rotating proxy can help tho)
-    country_indeed="USA",
-    # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
-)
-
-# formatting for pandas
-pd.set_option("display.max_columns", None)
-pd.set_option("display.max_rows", None)
-pd.set_option("display.width", None)
-pd.set_option("display.max_colwidth", 50)  # set to 0 to see full job url / desc
-
-# 1: output to console
-print(jobs)
-
-# 2: output to .csv
-jobs.to_csv("./jobs.csv", index=False)
-print("outputted to jobs.csv")
-
-# 3: output to .xlsx
-# jobs.to_xlsx('jobs.xlsx', index=False)
-
-# 4: display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
-# display(jobs)
@@ -1,167 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "00a94b47-f47b-420f-ba7e-714ef219c006",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from jobspy import scrape_jobs\n",
-    "import pandas as pd\n",
-    "from IPython.display import display, HTML"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "9f773e6c-d9fc-42cc-b0ef-63b739e78435",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "pd.set_option('display.max_columns', None)\n",
-    "pd.set_option('display.max_rows', None)\n",
-    "pd.set_option('display.width', None)\n",
-    "pd.set_option('display.max_colwidth', 50)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "1253c1f8-9437-492e-9dd3-e7fe51099420",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# example 1 (no hyperlinks, USA)\n",
-    "jobs = scrape_jobs(\n",
-    "    site_name=[\"linkedin\"],\n",
-    "    location='san francisco',\n",
-    "    search_term=\"engineer\",\n",
-    "    results_wanted=5,\n",
-    "\n",
-    "    # use if you want to use a proxy\n",
-    "    # proxy=\"socks5://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
-    "    proxy=\"http://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
-    "    #proxy=\"https://jobspy:5a4vpWtj4EeJ2hoYzk@us.smartproxy.com:10001\",\n",
-    ")\n",
-    "display(jobs)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "6a581b2d-f7da-4fac-868d-9efe143ee20a",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# example 2 - remote USA & hyperlinks\n",
-    "jobs = scrape_jobs(\n",
-    "    site_name=[\"linkedin\", \"zip_recruiter\", \"indeed\"],\n",
-    "    # location='san francisco',\n",
-    "    search_term=\"software engineer\",\n",
-    "    country_indeed=\"USA\",\n",
-    "    hyperlinks=True,\n",
-    "    is_remote=True,\n",
-    "    results_wanted=5, \n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "fe8289bc-5b64-4202-9a64-7c117c83fd9a",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# use if hyperlinks=True\n",
-    "html = jobs.to_html(escape=False)\n",
-    "# change max-width: 200px to show more or less of the content\n",
-    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
-    "display(HTML(truncate_width))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "951c2fe1-52ff-407d-8bb1-068049b36777",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# example 3 - with hyperlinks, international - linkedin (no zip_recruiter)\n",
-    "jobs = scrape_jobs(\n",
-    "    site_name=[\"linkedin\"],\n",
-    "    location='berlin',\n",
-    "    search_term=\"engineer\",\n",
-    "    hyperlinks=True,\n",
-    "    results_wanted=5,\n",
-    "    easy_apply=True\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "1e37a521-caef-441c-8fc2-2eb5b2e7da62",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# use if hyperlinks=True\n",
-    "html = jobs.to_html(escape=False)\n",
-    "# change max-width: 200px to show more or less of the content\n",
-    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
-    "display(HTML(truncate_width))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "0650e608-0b58-4bf5-ae86-68348035b16a",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# example 4 - international indeed (no zip_recruiter)\n",
-    "jobs = scrape_jobs(\n",
-    "    site_name=[\"indeed\"],\n",
-    "    search_term=\"engineer\",\n",
-    "    country_indeed = \"China\",\n",
-    "    hyperlinks=True\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "40913ac8-3f8a-4d7e-ac47-afb88316432b",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# use if hyperlinks=True\n",
-    "html = jobs.to_html(escape=False)\n",
-    "# change max-width: 200px to show more or less of the content\n",
-    "truncate_width = f'<style>.dataframe td {{ max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }}</style>{html}'\n",
-    "display(HTML(truncate_width))"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.11.5"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
@@ -1,77 +0,0 @@
-from jobspy import scrape_jobs
-import pandas as pd
-import os
-import time
-
-# creates csv a new filename if the jobs.csv already exists.
-csv_filename = "jobs.csv"
-counter = 1
-while os.path.exists(csv_filename):
-    csv_filename = f"jobs_{counter}.csv"
-    counter += 1
-
-# results wanted and offset
-results_wanted = 1000
-offset = 0
-
-all_jobs = []
-
-# max retries
-max_retries = 3
-
-# nuumber of results at each iteration
-results_in_each_iteration = 30
-
-while len(all_jobs) < results_wanted:
-    retry_count = 0
-    while retry_count < max_retries:
-        print("Doing from", offset, "to", offset + results_in_each_iteration, "jobs")
-        try:
-            jobs = scrape_jobs(
-                site_name=["indeed"],
-                search_term="software engineer",
-                # New York, NY
-                # Dallas, TX
-
-                # Los Angeles, CA
-                location="Los Angeles, CA",
-                results_wanted=min(results_in_each_iteration, results_wanted - len(all_jobs)),
-                country_indeed="USA",
-                offset=offset,
-                # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
-            )
-
-            # Add the scraped jobs to the list
-            all_jobs.extend(jobs.to_dict('records'))
-
-            # Increment the offset for the next page of results
-            offset += results_in_each_iteration
-
-            # Add a delay to avoid rate limiting (you can adjust the delay time as needed)
-            print(f"Scraped {len(all_jobs)} jobs")
-            print("Sleeping secs", 100 * (retry_count + 1))
-            time.sleep(100 * (retry_count + 1))  # Sleep for 2 seconds between requests
-
-            break  # Break out of the retry loop if successful
-        except Exception as e:
-            print(f"Error: {e}")
-            retry_count += 1
-            print("Sleeping secs before retry", 100 * (retry_count + 1))
-            time.sleep(100 * (retry_count + 1))
-            if retry_count >= max_retries:
-                print("Max retries reached. Exiting.")
-                break
-
-# DataFrame from the collected job data
-jobs_df = pd.DataFrame(all_jobs)
-
-# Formatting
-pd.set_option("display.max_columns", None)
-pd.set_option("display.max_rows", None)
-pd.set_option("display.width", None)
-pd.set_option("display.max_colwidth", 50)
-
-print(jobs_df)
-
-jobs_df.to_csv(csv_filename, index=False)
-print(f"Outputted to {csv_filename}")
2234  poetry.lock (generated)
File diff suppressed because it is too large
2  poetry.toml (new file)
@@ -0,0 +1,2 @@
+[virtualenvs]
+in-project = true
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.49"
+version = "1.1.61"
 description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"
@@ -15,17 +15,18 @@ python = "^3.10"
 requests = "^2.31.0"
 beautifulsoup4 = "^4.12.2"
 pandas = "^2.1.0"
-NUMPY = "1.24.2"
+NUMPY = "1.26.3"
 pydantic = "^2.3.0"
 tls-client = "^1.0.1"
 markdownify = "^0.11.6"
+regex = "^2024.4.28"
 
 
 [tool.poetry.group.dev.dependencies]
 pytest = "^7.4.1"
 jupyter = "^1.0.0"
-black = "^24.2.0"
-pre-commit = "^3.6.2"
+black = "*"
+pre-commit = "*"
 
 [build-system]
 requires = ["poetry-core"]
@@ -5,12 +5,12 @@ from typing import Tuple
 from concurrent.futures import ThreadPoolExecutor, as_completed
 
 from .jobs import JobType, Location
-from .scrapers.utils import logger
+from .scrapers.utils import logger, set_logger_level, extract_salary
 from .scrapers.indeed import IndeedScraper
 from .scrapers.ziprecruiter import ZipRecruiterScraper
 from .scrapers.glassdoor import GlassdoorScraper
 from .scrapers.linkedin import LinkedInScraper
-from .scrapers import ScraperInput, Site, JobResponse, Country
+from .scrapers import SalarySource, ScraperInput, Site, JobResponse, Country
 from .scrapers.exceptions import (
     LinkedInException,
     IndeedException,
@@ -30,17 +30,19 @@ def scrape_jobs(
     results_wanted: int = 15,
     country_indeed: str = "usa",
     hyperlinks: bool = False,
-    proxy: str | None = None,
+    proxies: list[str] | str | None = None,
     description_format: str = "markdown",
     linkedin_fetch_description: bool | None = False,
     linkedin_company_ids: list[int] | None = None,
     offset: int | None = 0,
     hours_old: int = None,
+    enforce_annual_salary: bool = False,
+    verbose: int = 2,
     **kwargs,
 ) -> pd.DataFrame:
     """
     Simultaneously scrapes job data from multiple job sites.
-    :return: results_wanted: pandas dataframe containing job data
+    :return: pandas dataframe containing job data
     """
     SCRAPER_MAPPING = {
         Site.LINKEDIN: LinkedInScraper,
@@ -48,6 +50,7 @@ def scrape_jobs(
         Site.ZIP_RECRUITER: ZipRecruiterScraper,
         Site.GLASSDOOR: GlassdoorScraper,
     }
+    set_logger_level(verbose)
 
     def map_str_to_site(site_name: str) -> Site:
         return Site[site_name.upper()]
@@ -94,7 +97,7 @@ def scrape_jobs(
 
     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
         scraper_class = SCRAPER_MAPPING[site]
-        scraper = scraper_class(proxy=proxy)
+        scraper = scraper_class(proxies=proxies)
         scraped_data: JobResponse = scraper.scrape(scraper_input)
         cap_name = site.value.capitalize()
         site_name = "ZipRecruiter" if cap_name == "Zip_recruiter" else cap_name
@@ -116,6 +119,21 @@ def scrape_jobs(
             site_value, scraped_data = future.result()
             site_to_jobs_dict[site_value] = scraped_data
 
+    def convert_to_annual(job_data: dict):
+        if job_data["interval"] == "hourly":
+            job_data["min_amount"] *= 2080
+            job_data["max_amount"] *= 2080
+        if job_data["interval"] == "monthly":
+            job_data["min_amount"] *= 12
+            job_data["max_amount"] *= 12
+        if job_data["interval"] == "weekly":
+            job_data["min_amount"] *= 52
+            job_data["max_amount"] *= 52
+        if job_data["interval"] == "daily":
+            job_data["min_amount"] *= 260
+            job_data["max_amount"] *= 260
+        job_data["interval"] = "yearly"
+
     jobs_dfs: list[pd.DataFrame] = []
 
     for site, job_response in site_to_jobs_dict.items():
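
The multipliers above encode a standard work year: 2080 hours (40 x 52), 52 weeks, 12 months, and 260 weekdays. A standalone restatement for quick verification:

```python
# 2080 = 40 hours x 52 weeks; 260 = 5 weekdays x 52 weeks.
ANNUAL_FACTORS = {"hourly": 2080, "weekly": 52, "monthly": 12, "daily": 260}

def annualize(interval: str, amount: float) -> float:
    return amount * ANNUAL_FACTORS.get(interval, 1)

assert annualize("hourly", 50) == 104_000
assert annualize("monthly", 10_000) == 120_000
```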
@@ -148,12 +166,33 @@ def scrape_jobs(
                 job_data["min_amount"] = compensation_obj.get("min_amount")
                 job_data["max_amount"] = compensation_obj.get("max_amount")
                 job_data["currency"] = compensation_obj.get("currency", "USD")
+                job_data["salary_source"] = SalarySource.DIRECT_DATA.value
+                if enforce_annual_salary and (
+                    job_data["interval"]
+                    and job_data["interval"] != "yearly"
+                    and job_data["min_amount"]
+                    and job_data["max_amount"]
+                ):
+                    convert_to_annual(job_data)
+
             else:
-                job_data["interval"] = None
-                job_data["min_amount"] = None
-                job_data["max_amount"] = None
-                job_data["currency"] = None
+                if country_enum == Country.USA:
+                    (
+                        job_data["interval"],
+                        job_data["min_amount"],
+                        job_data["max_amount"],
+                        job_data["currency"],
+                    ) = extract_salary(
+                        job_data["description"],
+                        enforce_annual_salary=enforce_annual_salary,
+                    )
+                    job_data["salary_source"] = SalarySource.DESCRIPTION.value
+
+            job_data["salary_source"] = (
+                job_data["salary_source"]
+                if "min_amount" in job_data and job_data["min_amount"]
+                else None
+            )
             job_df = pd.DataFrame([job_data])
             jobs_dfs.append(job_df)
 
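
Downstream, the new `salary_source` column distinguishes salaries the board reported directly from ones parsed out of the description text; a hypothetical post-processing filter, assuming `jobs` is the DataFrame returned by `scrape_jobs`:

```python
# Hypothetical filter over a scrape_jobs() result; the column values
# mirror the SalarySource enum ("direct_data" / "description").
direct = jobs[jobs["salary_source"] == "direct_data"]
parsed = jobs[jobs["salary_source"] == "description"]
print(f"{len(direct)} direct, {len(parsed)} parsed from description")
```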
@@ -166,6 +205,7 @@ def scrape_jobs(
 
     # Desired column order
     desired_order = [
+        "id",
         "site",
         "job_url_hyper" if hyperlinks else "job_url",
         "job_url_direct",
@@ -174,17 +214,21 @@ def scrape_jobs(
         "location",
         "job_type",
         "date_posted",
+        "salary_source",
         "interval",
         "min_amount",
         "max_amount",
         "currency",
         "is_remote",
+        "job_level",
+        "job_function",
+        "company_industry",
+        "listing_type",
         "emails",
         "description",
         "company_url",
         "company_url_direct",
         "company_addresses",
-        "company_industry",
         "company_num_employees",
         "company_revenue",
         "company_description",
@@ -226,6 +226,7 @@ class DescriptionFormat(Enum):
 
 
 class JobPost(BaseModel):
+    id: str | None = None
     title: str
     company_name: str | None
     job_url: str
@@ -241,10 +242,16 @@ class JobPost(BaseModel):
     date_posted: date | None = None
     emails: list[str] | None = None
     is_remote: bool | None = None
+    listing_type: str | None = None
+
+    # linkedin specific
+    job_level: str | None = None
+
+    # linkedin and indeed specific
+    company_industry: str | None = None
 
     # indeed specific
     company_addresses: str | None = None
-    company_industry: str | None = None
     company_num_employees: str | None = None
     company_revenue: str | None = None
     company_description: str | None = None
@@ -253,6 +260,9 @@ class JobPost(BaseModel):
     logo_photo_url: str | None = None
     banner_photo_url: str | None = None
 
+    # linkedin only atm
+    job_function: str | None = None
+
 
 class JobResponse(BaseModel):
     jobs: list[JobPost] = []
@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+from abc import ABC, abstractmethod
+
 from ..jobs import (
     Enum,
     BaseModel,
@@ -16,6 +18,9 @@ class Site(Enum):
     ZIP_RECRUITER = "zip_recruiter"
     GLASSDOOR = "glassdoor"
 
+class SalarySource(Enum):
+    DIRECT_DATA = "direct_data"
+    DESCRIPTION = "description"
 
 class ScraperInput(BaseModel):
     site_type: list[Site]
@@ -36,9 +41,10 @@ class ScraperInput(BaseModel):
     hours_old: int | None = None
 
 
-class Scraper:
-    def __init__(self, site: Site, proxy: list[str] | None = None):
+class Scraper(ABC):
+    def __init__(self, site: Site, proxies: list[str] | None = None):
+        self.proxies = proxies
         self.site = site
-        self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)
 
+    @abstractmethod
     def scrape(self, scraper_input: ScraperInput) -> JobResponse: ...
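
With `Scraper` now an ABC, each board scraper must override `scrape`; a minimal sketch of the contract (the `DummyScraper` class is hypothetical and reuses an existing `Site` value):

```python
class DummyScraper(Scraper):  # hypothetical subclass for illustration
    def __init__(self, proxies: list[str] | None = None):
        super().__init__(Site.INDEED, proxies=proxies)  # proxies now live on the base class

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        # without this override, instantiating the class raises TypeError
        return JobResponse(jobs=[])
```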
@@ -34,12 +34,12 @@ from ...jobs import (
 
 
 class GlassdoorScraper(Scraper):
-    def __init__(self, proxy: Optional[str] = None):
+    def __init__(self, proxies: list[str] | str | None = None):
         """
         Initializes GlassdoorScraper with the Glassdoor job search url
         """
         site = Site(Site.GLASSDOOR)
-        super().__init__(site, proxy=proxy)
+        super().__init__(site, proxies=proxies)
 
         self.base_url = None
         self.country = None
@@ -59,7 +59,7 @@ class GlassdoorScraper(Scraper):
         self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)
         self.base_url = self.scraper_input.country.get_glassdoor_url()
 
-        self.session = create_session(self.proxy, is_tls=True, has_retry=True)
+        self.session = create_session(proxies=self.proxies, is_tls=True, has_retry=True)
         token = self._get_csrf_token()
         self.headers["gd-csrf-token"] = token if token else self.fallback_token
 
@@ -69,7 +69,7 @@ class GlassdoorScraper(Scraper):
         if location_type is None:
             logger.error("Glassdoor: location not parsed")
             return JobResponse(jobs=[])
-        all_jobs: list[JobPost] = []
+        job_list: list[JobPost] = []
         cursor = None
 
         range_start = 1 + (scraper_input.offset // self.jobs_per_page)
@@ -81,14 +81,14 @@ class GlassdoorScraper(Scraper):
                 jobs, cursor = self._fetch_jobs_page(
                     scraper_input, location_id, location_type, page, cursor
                 )
-                all_jobs.extend(jobs)
-                if not jobs or len(all_jobs) >= scraper_input.results_wanted:
-                    all_jobs = all_jobs[: scraper_input.results_wanted]
+                job_list.extend(jobs)
+                if not jobs or len(job_list) >= scraper_input.results_wanted:
+                    job_list = job_list[: scraper_input.results_wanted]
                     break
             except Exception as e:
                 logger.error(f"Glassdoor: {str(e)}")
                 break
-        return JobResponse(jobs=all_jobs)
+        return JobResponse(jobs=job_list)
 
     def _fetch_jobs_page(
         self,
@@ -189,7 +189,17 @@ class GlassdoorScraper(Scraper):
         except:
             description = None
         company_url = f"{self.base_url}Overview/W-EI_IE{company_id}.htm"
+        company_logo = (
+            job_data["jobview"].get("overview", {}).get("squareLogoUrl", None)
+        )
+        listing_type = (
+            job_data["jobview"]
+            .get("header", {})
+            .get("adOrderSponsorshipLevel", "")
+            .lower()
+        )
         return JobPost(
+            id=str(job_id),
             title=title,
             company_url=company_url if company_id else None,
             company_name=company_name,
@@ -200,6 +210,8 @@ class GlassdoorScraper(Scraper):
             is_remote=is_remote,
             description=description,
             emails=extract_emails_from_text(description) if description else None,
+            logo_photo_url=company_logo,
+            listing_type=listing_type,
         )
 
     def _fetch_job_description(self, job_id):
@@ -244,7 +256,6 @@ class GlassdoorScraper(Scraper):
         if not location or is_remote:
             return "11047", "STATE"  # remote options
         url = f"{self.base_url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
-        session = create_session(self.proxy, has_retry=True)
         res = self.session.get(url, headers=self.headers)
         if res.status_code != 200:
             if res.status_code == 429:
@@ -12,14 +12,13 @@ from typing import Tuple
 from datetime import datetime
 from concurrent.futures import ThreadPoolExecutor, Future
 
-import requests
-
 from .. import Scraper, ScraperInput, Site
 from ..utils import (
     extract_emails_from_text,
     get_enum_from_job_type,
     markdown_converter,
     logger,
+    create_session,
 )
 from ...jobs import (
     JobPost,
@@ -33,10 +32,13 @@ from ...jobs import (
 
 
 class IndeedScraper(Scraper):
-    def __init__(self, proxy: str | None = None):
+    def __init__(self, proxies: list[str] | str | None = None):
         """
         Initializes IndeedScraper with the Indeed API url
         """
+        super().__init__(Site.INDEED, proxies=proxies)
+
+        self.session = create_session(proxies=self.proxies, is_tls=False)
         self.scraper_input = None
         self.jobs_per_page = 100
         self.num_workers = 10
@@ -45,8 +47,6 @@ class IndeedScraper(Scraper):
         self.api_country_code = None
         self.base_url = None
         self.api_url = "https://apis.indeed.com/graphql"
-        site = Site(Site.INDEED)
-        super().__init__(site, proxy=proxy)
 
     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
@@ -90,18 +90,18 @@ class IndeedScraper(Scraper):
         jobs = []
         new_cursor = None
         filters = self._build_filters()
-        location = (
-            self.scraper_input.location
-            or self.scraper_input.country.value[0].split(",")[-1]
-        )
+        search_term = (
+            self.scraper_input.search_term.replace('"', '\\"')
+            if self.scraper_input.search_term
+            else ""
+        )
         query = self.job_search_query.format(
-            what=(
-                f'what: "{self.scraper_input.search_term}"'
-                if self.scraper_input.search_term
-                else ""
-            ),
-            location=location,
-            radius=self.scraper_input.distance,
+            what=(f'what: "{search_term}"' if search_term else ""),
+            location=(
+                f'location: {{where: "{self.scraper_input.location}", radius: {self.scraper_input.distance}, radiusUnit: MILES}}'
+                if self.scraper_input.location
+                else ""
+            ),
             dateOnIndeed=self.scraper_input.hours_old,
             cursor=f'cursor: "{cursor}"' if cursor else "",
             filters=filters,
@@ -111,16 +111,15 @@ class IndeedScraper(Scraper):
         }
         api_headers = self.api_headers.copy()
         api_headers["indeed-co"] = self.api_country_code
-        response = requests.post(
+        response = self.session.post(
             self.api_url,
             headers=api_headers,
             json=payload,
-            proxies=self.proxy,
             timeout=10,
         )
         if response.status_code != 200:
             logger.info(
-                f"Indeed responded with status code: {response.status_code} (submit GitHub issue if this appears to be a beg)"
+                f"Indeed responded with status code: {response.status_code} (submit GitHub issue if this appears to be a bug)"
             )
             return jobs, new_cursor
         data = response.json()
@@ -151,6 +150,15 @@ class IndeedScraper(Scraper):
             """.format(
                 start=self.scraper_input.hours_old
             )
+        elif self.scraper_input.easy_apply:
+            filters_str = """
+            filters: {
+              keyword: {
+                field: "indeedApplyScope",
+                keys: ["DESKTOP"]
+              }
+            }
+            """
         elif self.scraper_input.job_type or self.scraper_input.is_remote:
             job_type_key_mapping = {
                 JobType.FULL_TIME: "CF3CP",
@@ -168,7 +176,7 @@ class IndeedScraper(Scraper):
                 keys.append("DSQF7")
 
             if keys:
-                keys_str = '", "'.join(keys)  # Prepare your keys string
+                keys_str = '", "'.join(keys)
                 filters_str = f"""
                 filters: {{
                   composite: {{
@@ -204,6 +212,7 @@ class IndeedScraper(Scraper):
         employer_details = employer.get("employerDetails", {}) if employer else {}
         rel_url = job["employer"]["relativeCompanyPageUrl"] if job["employer"] else None
         return JobPost(
+            id=str(job["key"]),
             title=job["title"],
             description=description,
             company_name=job["employer"].get("name") if job.get("employer") else None,
@@ -217,7 +226,7 @@ class IndeedScraper(Scraper):
                 country=job.get("location", {}).get("countryCode"),
             ),
             job_type=job_type,
-            compensation=self._get_compensation(job),
+            compensation=self._get_compensation(job["compensation"]),
             date_posted=date_posted,
             job_url=job_url,
             job_url_direct=(
@@ -235,6 +244,7 @@ class IndeedScraper(Scraper):
                 .replace("Iv1", "")
                 .replace("_", " ")
                 .title()
+                .strip()
                 if employer_details.get("industry")
                 else None
             ),
@@ -271,14 +281,19 @@ class IndeedScraper(Scraper):
         return job_types
 
     @staticmethod
-    def _get_compensation(job: dict) -> Compensation | None:
+    def _get_compensation(compensation: dict) -> Compensation | None:
         """
         Parses the job to get compensation
         :param job:
-        :param job:
         :return: compensation object
         """
-        comp = job["compensation"]["baseSalary"]
+        if not compensation["baseSalary"] and not compensation["estimated"]:
+            return None
+        comp = (
+            compensation["baseSalary"]
+            if compensation["baseSalary"]
+            else compensation["estimated"]["baseSalary"]
+        )
         if not comp:
             return None
         interval = IndeedScraper._get_compensation_interval(comp["unitOfWork"])
@@ -288,9 +303,13 @@ class IndeedScraper(Scraper):
         max_range = comp["range"].get("max")
         return Compensation(
             interval=interval,
-            min_amount=round(min_range, 2) if min_range is not None else None,
-            max_amount=round(max_range, 2) if max_range is not None else None,
-            currency=job["compensation"]["currencyCode"],
+            min_amount=int(min_range) if min_range is not None else None,
+            max_amount=int(max_range) if max_range is not None else None,
+            currency=(
+                compensation["estimated"]["currencyCode"]
+                if compensation["estimated"]
+                else compensation["currencyCode"]
+            ),
         )
 
     @staticmethod
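
The reworked `_get_compensation` falls back to Indeed's estimated salary when no posted salary exists; a toy walk over a payload shaped like the GraphQL compensation fields requested further down (values invented):

```python
# Invented payload in the shape of Indeed's compensation GraphQL fields.
payload = {
    "baseSalary": None,
    "estimated": {
        "currencyCode": "USD",
        "baseSalary": {"unitOfWork": "YEAR", "range": {"min": 90000.0, "max": 120000.0}},
    },
}
# Same fallback order as the hunk above: posted salary first, then the estimate.
comp = payload["baseSalary"] or payload["estimated"]["baseSalary"]
currency = payload["estimated"]["currencyCode"] if payload["estimated"] else payload["currencyCode"]
print(int(comp["range"]["min"]), int(comp["range"]["max"]), currency)  # 90000 120000 USD
```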
@@ -343,8 +362,7 @@ class IndeedScraper(Scraper):
         query GetJobData {{
             jobSearch(
                 {what}
-                location: {{ where: "{location}", radius: {radius}, radiusUnit: MILES }}
-                includeSponsoredResults: NONE
+                {location}
                 limit: 100
                 sort: DATE
                 {cursor}
@@ -356,6 +374,9 @@ class IndeedScraper(Scraper):
                 results {{
                     trackingKey
                     job {{
+                        source {{
+                            name
+                        }}
                         key
                         title
                         datePublished
@@ -376,6 +397,18 @@ class IndeedScraper(Scraper):
                         }}
                     }}
                     compensation {{
+                        estimated {{
+                            currencyCode
+                            baseSalary {{
+                                unitOfWork
+                                range {{
+                                    ... on Range {{
+                                        min
+                                        max
+                                    }}
+                                }}
+                            }}
+                        }}
                         baseSalary {{
                             unitOfWork
                             range {{
@@ -9,17 +9,17 @@ from __future__ import annotations
 
 import time
 import random
+import regex as re
 from typing import Optional
 from datetime import datetime
 
-from threading import Lock
 from bs4.element import Tag
 from bs4 import BeautifulSoup
-from urllib.parse import urlparse, urlunparse
+from urllib.parse import urlparse, urlunparse, unquote
 
 from .. import Scraper, ScraperInput, Site
 from ..exceptions import LinkedInException
-from ..utils import create_session
+from ..utils import create_session, remove_attributes
 from ...jobs import (
     JobPost,
     Location,
@@ -44,13 +44,22 @@ class LinkedInScraper(Scraper):
     band_delay = 4
     jobs_per_page = 25
 
-    def __init__(self, proxy: Optional[str] = None):
+    def __init__(self, proxies: list[str] | str | None = None):
         """
         Initializes LinkedInScraper with the LinkedIn job search url
         """
-        super().__init__(Site(Site.LINKEDIN), proxy=proxy)
+        super().__init__(Site.LINKEDIN, proxies=proxies)
+        self.session = create_session(
+            proxies=self.proxies,
+            is_tls=False,
+            has_retry=True,
+            delay=5,
+            clear_cookies=True,
+        )
+        self.session.headers.update(self.headers)
         self.scraper_input = None
         self.country = "worldwide"
+        self.job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')
 
     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
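
The new `job_url_direct_regex` grabs the target of LinkedIn's external-apply redirect; a quick check of the pattern against an invented snippet (stdlib `re` handles this fixed-width lookbehind just as well as the `regex` package imported above):

```python
import re
from urllib.parse import unquote

job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')
# Invented sample of the redirect markup; the lookbehind captures
# everything after `?url=` up to the closing quote.
snippet = '<a href="https://www.linkedin.com/redirect?url=https%3A%2F%2Fexample.com%2Fapply%2F123">'
match = job_url_direct_regex.search(snippet)
print(unquote(match.group()))  # https://example.com/apply/123
```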
@@ -60,9 +69,9 @@ class LinkedInScraper(Scraper):
         """
         self.scraper_input = scraper_input
         job_list: list[JobPost] = []
-        seen_urls = set()
-        url_lock = Lock()
-        page = scraper_input.offset // 25 + 25 if scraper_input.offset else 0
+        seen_ids = set()
+        page = scraper_input.offset // 10 * 10 if scraper_input.offset else 0
+        request_count = 0
         seconds_old = (
             scraper_input.hours_old * 3600 if scraper_input.hours_old else None
         )
@@ -70,8 +79,8 @@ class LinkedInScraper(Scraper):
             lambda: len(job_list) < scraper_input.results_wanted and page < 1000
         )
         while continue_search():
-            logger.info(f"LinkedIn search page: {page // 25 + 1}")
-            session = create_session(is_tls=False, has_retry=True, delay=5)
+            request_count += 1
+            logger.info(f"LinkedIn search page: {request_count}")
             params = {
                 "keywords": scraper_input.search_term,
                 "location": scraper_input.location,
@@ -83,7 +92,7 @@ class LinkedInScraper(Scraper):
                     else None
                 ),
                 "pageNum": 0,
-                "start": page + scraper_input.offset,
+                "start": page,
                 "f_AL": "true" if scraper_input.easy_apply else None,
                 "f_C": (
                     ",".join(map(str, scraper_input.linkedin_company_ids))
@@ -96,12 +105,9 @@ class LinkedInScraper(Scraper):
 
             params = {k: v for k, v in params.items() if v is not None}
             try:
-                response = session.get(
+                response = self.session.get(
                     f"{self.base_url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
                     params=params,
-                    allow_redirects=True,
-                    proxies=self.proxy,
-                    headers=self.headers,
                     timeout=10,
                 )
                 if response.status_code not in range(200, 400):
@@ -127,36 +133,34 @@ class LinkedInScraper(Scraper):
|
|||||||
return JobResponse(jobs=job_list)
|
return JobResponse(jobs=job_list)
|
||||||
|
|
||||||
for job_card in job_cards:
|
for job_card in job_cards:
|
||||||
job_url = None
|
|
||||||
href_tag = job_card.find("a", class_="base-card__full-link")
|
href_tag = job_card.find("a", class_="base-card__full-link")
|
||||||
if href_tag and "href" in href_tag.attrs:
|
if href_tag and "href" in href_tag.attrs:
|
||||||
href = href_tag.attrs["href"].split("?")[0]
|
href = href_tag.attrs["href"].split("?")[0]
|
||||||
job_id = href.split("-")[-1]
|
job_id = href.split("-")[-1]
|
||||||
job_url = f"{self.base_url}/jobs/view/{job_id}"
|
|
||||||
|
|
||||||
with url_lock:
|
if job_id in seen_ids:
|
||||||
if job_url in seen_urls:
|
|
||||||
continue
|
continue
|
||||||
seen_urls.add(job_url)
|
seen_ids.add(job_id)
|
||||||
try:
|
|
||||||
fetch_desc = scraper_input.linkedin_fetch_description
|
try:
|
||||||
job_post = self._process_job(job_card, job_url, fetch_desc)
|
fetch_desc = scraper_input.linkedin_fetch_description
|
||||||
if job_post:
|
job_post = self._process_job(job_card, job_id, fetch_desc)
|
||||||
job_list.append(job_post)
|
if job_post:
|
||||||
if not continue_search():
|
job_list.append(job_post)
|
||||||
break
|
if not continue_search():
|
||||||
except Exception as e:
|
break
|
||||||
raise LinkedInException(str(e))
|
except Exception as e:
|
||||||
|
raise LinkedInException(str(e))
|
||||||
|
|
||||||
if continue_search():
|
if continue_search():
|
||||||
time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
|
time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
|
||||||
page += self.jobs_per_page
|
page += len(job_list)
|
||||||
|
|
||||||
job_list = job_list[: scraper_input.results_wanted]
|
job_list = job_list[: scraper_input.results_wanted]
|
||||||
return JobResponse(jobs=job_list)
|
return JobResponse(jobs=job_list)
|
||||||
|
|
||||||
def _process_job(
|
def _process_job(
|
||||||
self, job_card: Tag, job_url: str, full_descr: bool
|
self, job_card: Tag, job_id: str, full_descr: bool
|
||||||
) -> Optional[JobPost]:
|
) -> Optional[JobPost]:
|
||||||
salary_tag = job_card.find("span", class_="job-search-card__salary-info")
|
salary_tag = job_card.find("span", class_="job-search-card__salary-info")
|
||||||
|
|
||||||
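The de-duplication above drops the URL set and its lock in favor of a plain set of job IDs parsed from each card's link. A minimal sketch of that parsing, using a made-up href in LinkedIn's job-card format:

```python
# Made-up href illustrating the format the scraper parses; only the trailing
# numeric segment (the job id) matters once the query string is stripped.
href = "https://www.linkedin.com/jobs/view/senior-engineer-at-acme-3871234567?refId=abc"
job_id = href.split("?")[0].split("-")[-1]
assert job_id == "3871234567"

# Same membership check the loop performs before processing a card.
seen_ids: set[str] = set()
if job_id not in seen_ids:
    seen_ids.add(job_id)
```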
```diff
@@ -194,48 +198,51 @@ class LinkedInScraper(Scraper):
             if metadata_card
             else None
         )
-        date_posted = description = job_type = None
+        date_posted = None
         if datetime_tag and "datetime" in datetime_tag.attrs:
             datetime_str = datetime_tag["datetime"]
             try:
                 date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
             except:
                 date_posted = None
-        benefits_tag = job_card.find("span", class_="result-benefits__text")
+        job_details = {}
         if full_descr:
-            description, job_type = self._get_job_description(job_url)
+            job_details = self._get_job_details(job_id)
 
         return JobPost(
+            id=job_id,
             title=title,
             company_name=company,
             company_url=company_url,
             location=location,
             date_posted=date_posted,
-            job_url=job_url,
+            job_url=f"{self.base_url}/jobs/view/{job_id}",
             compensation=compensation,
-            job_type=job_type,
-            description=description,
-            emails=extract_emails_from_text(description) if description else None,
+            job_type=job_details.get("job_type"),
+            job_level=job_details.get("job_level", "").lower(),
+            company_industry=job_details.get("company_industry"),
+            description=job_details.get("description"),
+            job_url_direct=job_details.get("job_url_direct"),
+            emails=extract_emails_from_text(job_details.get("description")),
+            logo_photo_url=job_details.get("logo_photo_url"),
+            job_function=job_details.get("job_function"),
         )
 
-    def _get_job_description(
-        self, job_page_url: str
-    ) -> tuple[None, None] | tuple[str | None, tuple[str | None, JobType | None]]:
+    def _get_job_details(self, job_id: str) -> dict:
         """
-        Retrieves job description by going to the job page url
+        Retrieves job description and other job details by going to the job page url
         :param job_page_url:
-        :return: description or None
+        :return: dict
         """
         try:
-            session = create_session(is_tls=False, has_retry=True)
-            response = session.get(
-                job_page_url, headers=self.headers, timeout=5, proxies=self.proxy
+            response = self.session.get(
+                f"{self.base_url}/jobs-guest/jobs/api/jobPosting/{job_id}", timeout=5
             )
             response.raise_for_status()
         except:
-            return None, None
-        if response.url == "https://www.linkedin.com/signup":
-            return None, None
+            return {}
+        if "linkedin.com/signup" in response.url:
+            return {}
 
         soup = BeautifulSoup(response.text, "html.parser")
         div_content = soup.find(
```
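For orientation, `_process_job` now reads everything from the dict returned by `_get_job_details`. The keys below are the ones the diff populates; the values are invented placeholders, not real output:

```python
# Illustrative shape only; every key appears in the diff above, but the
# values here are placeholders.
job_details = {
    "description": "<div><p>Role summary...</p></div>",  # cleaned HTML (or markdown)
    "job_level": "Mid-Senior level",        # from the "Seniority level" criteria
    "company_industry": "Software Development",
    "job_type": None,                       # parsed employment type, when present
    "job_url_direct": "https://example.com/careers/123",  # external apply link
    "logo_photo_url": "https://example.com/logo.png",
    "job_function": "Engineering",
}
```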
```diff
@@ -243,17 +250,33 @@ class LinkedInScraper(Scraper):
         )
         description = None
         if div_content is not None:
-
-            def remove_attributes(tag):
-                for attr in list(tag.attrs):
-                    del tag[attr]
-                return tag
-
             div_content = remove_attributes(div_content)
             description = div_content.prettify(formatter="html")
             if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
                 description = markdown_converter(description)
-        return description, self._parse_job_type(soup)
+
+        h3_tag = soup.find(
+            "h3", text=lambda text: text and "Job function" in text.strip()
+        )
+
+        job_function = None
+        if h3_tag:
+            job_function_span = h3_tag.find_next(
+                "span", class_="description__job-criteria-text"
+            )
+            if job_function_span:
+                job_function = job_function_span.text.strip()
+
+        return {
+            "description": description,
+            "job_level": self._parse_job_level(soup),
+            "company_industry": self._parse_company_industry(soup),
+            "job_type": self._parse_job_type(soup),
+            "job_url_direct": self._parse_job_url_direct(soup),
+            "logo_photo_url": soup.find("img", {"class": "artdeco-entity-image"}).get(
+                "data-delayed-url"
+            ),
+            "job_function": job_function,
+        }
 
     def _get_location(self, metadata_card: Optional[Tag]) -> Location:
         """
@@ -306,6 +329,69 @@ class LinkedInScraper(Scraper):
 
         return [get_enum_from_job_type(employment_type)] if employment_type else []
 
+    @staticmethod
+    def _parse_job_level(soup_job_level: BeautifulSoup) -> str | None:
+        """
+        Gets the job level from job page
+        :param soup_job_level:
+        :return: str
+        """
+        h3_tag = soup_job_level.find(
+            "h3",
+            class_="description__job-criteria-subheader",
+            string=lambda text: "Seniority level" in text,
+        )
+        job_level = None
+        if h3_tag:
+            job_level_span = h3_tag.find_next_sibling(
+                "span",
+                class_="description__job-criteria-text description__job-criteria-text--criteria",
+            )
+            if job_level_span:
+                job_level = job_level_span.get_text(strip=True)
+
+        return job_level
+
+    @staticmethod
+    def _parse_company_industry(soup_industry: BeautifulSoup) -> str | None:
+        """
+        Gets the company industry from job page
+        :param soup_industry:
+        :return: str
+        """
+        h3_tag = soup_industry.find(
+            "h3",
+            class_="description__job-criteria-subheader",
+            string=lambda text: "Industries" in text,
+        )
+        industry = None
+        if h3_tag:
+            industry_span = h3_tag.find_next_sibling(
+                "span",
+                class_="description__job-criteria-text description__job-criteria-text--criteria",
+            )
+            if industry_span:
+                industry = industry_span.get_text(strip=True)
+
+        return industry
+
+    def _parse_job_url_direct(self, soup: BeautifulSoup) -> str | None:
+        """
+        Gets the job url direct from job page
+        :param soup:
+        :return: str
+        """
+        job_url_direct = None
+        job_url_direct_content = soup.find("code", id="applyUrl")
+        if job_url_direct_content:
+            job_url_direct_match = self.job_url_direct_regex.search(
+                job_url_direct_content.decode_contents().strip()
+            )
+            if job_url_direct_match:
+                job_url_direct = unquote(job_url_direct_match.group())
+
+        return job_url_direct
+
     @staticmethod
     def job_type_code(job_type_enum: JobType) -> str:
         return {
```
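The three parsers added above share one pattern: find the criteria `<h3>` by its label text, then read the adjacent `<span>`. A self-contained sketch against hand-written HTML that mimics (but is not copied from) LinkedIn's job-criteria markup:

```python
from bs4 import BeautifulSoup

# Hand-written HTML approximating LinkedIn's criteria list, for illustration.
html = """
<li>
  <h3 class="description__job-criteria-subheader">Seniority level</h3>
  <span class="description__job-criteria-text description__job-criteria-text--criteria">
    Mid-Senior level
  </span>
</li>
"""
soup = BeautifulSoup(html, "html.parser")
h3_tag = soup.find(
    "h3",
    class_="description__job-criteria-subheader",
    string=lambda text: "Seniority level" in text,
)
job_level = None
if h3_tag:
    # The value lives in the sibling span; strip surrounding whitespace.
    span = h3_tag.find_next_sibling(
        "span",
        class_="description__job-criteria-text description__job-criteria-text--criteria",
    )
    if span:
        job_level = span.get_text(strip=True)
print(job_level)  # Mid-Senior level
```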
```diff
@@ -2,13 +2,15 @@ from __future__ import annotations
 
 import re
 import logging
+from itertools import cycle
 
 import requests
 import tls_client
 import numpy as np
 from markdownify import markdownify as md
 from requests.adapters import HTTPAdapter, Retry
 
-from ..jobs import JobType
+from ..jobs import CompensationInterval, JobType
 
 logger = logging.getLogger("JobSpy")
 logger.propagate = False
@@ -21,6 +23,122 @@ if not logger.handlers:
     logger.addHandler(console_handler)
 
 
+class RotatingProxySession:
+    def __init__(self, proxies=None):
+        if isinstance(proxies, str):
+            self.proxy_cycle = cycle([self.format_proxy(proxies)])
+        elif isinstance(proxies, list):
+            self.proxy_cycle = (
+                cycle([self.format_proxy(proxy) for proxy in proxies])
+                if proxies
+                else None
+            )
+        else:
+            self.proxy_cycle = None
+
+    @staticmethod
+    def format_proxy(proxy):
+        """Utility method to format a proxy string into a dictionary."""
+        if proxy.startswith("http://") or proxy.startswith("https://"):
+            return {"http": proxy, "https": proxy}
+        return {"http": f"http://{proxy}", "https": f"http://{proxy}"}
+
+
+class RequestsRotating(RotatingProxySession, requests.Session):
+
+    def __init__(self, proxies=None, has_retry=False, delay=1, clear_cookies=False):
+        RotatingProxySession.__init__(self, proxies=proxies)
+        requests.Session.__init__(self)
+        self.clear_cookies = clear_cookies
+        self.allow_redirects = True
+        self.setup_session(has_retry, delay)
+
+    def setup_session(self, has_retry, delay):
+        if has_retry:
+            retries = Retry(
+                total=3,
+                connect=3,
+                status=3,
+                status_forcelist=[500, 502, 503, 504, 429],
+                backoff_factor=delay,
+            )
+            adapter = HTTPAdapter(max_retries=retries)
+            self.mount("http://", adapter)
+            self.mount("https://", adapter)
+
+    def request(self, method, url, **kwargs):
+        if self.clear_cookies:
+            self.cookies.clear()
+
+        if self.proxy_cycle:
+            next_proxy = next(self.proxy_cycle)
+            if next_proxy["http"] != "http://localhost":
+                self.proxies = next_proxy
+            else:
+                self.proxies = {}
+        return requests.Session.request(self, method, url, **kwargs)
+
+
+class TLSRotating(RotatingProxySession, tls_client.Session):
+
+    def __init__(self, proxies=None):
+        RotatingProxySession.__init__(self, proxies=proxies)
+        tls_client.Session.__init__(self, random_tls_extension_order=True)
+
+    def execute_request(self, *args, **kwargs):
+        if self.proxy_cycle:
+            next_proxy = next(self.proxy_cycle)
+            if next_proxy["http"] != "http://localhost":
+                self.proxies = next_proxy
+            else:
+                self.proxies = {}
+        response = tls_client.Session.execute_request(self, *args, **kwargs)
+        response.ok = response.status_code in range(200, 400)
+        return response
+
+
+def create_session(
+    *,
+    proxies: dict | str | None = None,
+    is_tls: bool = True,
+    has_retry: bool = False,
+    delay: int = 1,
+    clear_cookies: bool = False,
+) -> requests.Session:
+    """
+    Creates a requests session with optional tls, proxy, and retry settings.
+    :return: A session object
+    """
+    if is_tls:
+        session = TLSRotating(proxies=proxies)
+    else:
+        session = RequestsRotating(
+            proxies=proxies,
+            has_retry=has_retry,
+            delay=delay,
+            clear_cookies=clear_cookies,
+        )
+
+    return session
+
+
+def set_logger_level(verbose: int = 2):
+    """
+    Adjusts the logger's level. This function allows the logging level to be changed at runtime.
+
+    Parameters:
+    - verbose: int {0, 1, 2} (default=2, all logs)
+    """
+    if verbose is None:
+        return
+    level_name = {2: "INFO", 1: "WARNING", 0: "ERROR"}.get(verbose, "INFO")
+    level = getattr(logging, level_name.upper(), None)
+    if level is not None:
+        logger.setLevel(level)
+    else:
+        raise ValueError(f"Invalid log level: {level_name}")
+
+
 def markdown_converter(description_html: str):
     if description_html is None:
         return None
```
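The session classes above thread an `itertools.cycle` of normalized proxies through every request, with the literal `localhost` acting as a "no proxy" slot. The rotation can be sketched without any network I/O (`format_proxy` is reproduced from the diff; the addresses are placeholders):

```python
from itertools import cycle

def format_proxy(proxy):
    """Same normalization as RotatingProxySession.format_proxy above."""
    if proxy.startswith("http://") or proxy.startswith("https://"):
        return {"http": proxy, "https": proxy}
    return {"http": f"http://{proxy}", "https": f"http://{proxy}"}

# Placeholder addresses; "localhost" means "send this request without a proxy".
proxy_cycle = cycle([format_proxy(p) for p in ["10.0.0.1:8080", "localhost"]])

for _ in range(4):
    nxt = next(proxy_cycle)
    proxies = nxt if nxt["http"] != "http://localhost" else {}
    print(proxies)  # alternates between the proxy dict and {}
```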
```diff
@@ -35,39 +153,6 @@ def extract_emails_from_text(text: str) -> list[str] | None:
     return email_regex.findall(text)
 
 
-def create_session(
-    proxy: dict | None = None,
-    is_tls: bool = True,
-    has_retry: bool = False,
-    delay: int = 1,
-) -> requests.Session:
-    """
-    Creates a requests session with optional tls, proxy, and retry settings.
-    :return: A session object
-    """
-    if is_tls:
-        session = tls_client.Session(random_tls_extension_order=True)
-        session.proxies = proxy
-    else:
-        session = requests.Session()
-        session.allow_redirects = True
-        if proxy:
-            session.proxies.update(proxy)
-        if has_retry:
-            retries = Retry(
-                total=3,
-                connect=3,
-                status=3,
-                status_forcelist=[500, 502, 503, 504, 429],
-                backoff_factor=delay,
-            )
-            adapter = HTTPAdapter(max_retries=retries)
-
-            session.mount("http://", adapter)
-            session.mount("https://", adapter)
-    return session
-
-
 def get_enum_from_job_type(job_type_str: str) -> JobType | None:
     """
     Given a string, returns the corresponding JobType enum member if a match is found.
@@ -94,3 +179,72 @@ def currency_parser(cur_str):
     num = float(cur_str)
 
     return np.round(num, 2)
+
+
+def remove_attributes(tag):
+    for attr in list(tag.attrs):
+        del tag[attr]
+    return tag
+
+
+def extract_salary(
+    salary_str,
+    lower_limit=1000,
+    upper_limit=700000,
+    hourly_threshold=350,
+    monthly_threshold=30000,
+    enforce_annual_salary=False,
+):
+    if not salary_str:
+        return None, None, None, None
+
+    min_max_pattern = r"\$(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)\s*[-—–]\s*(?:\$)?(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)"
+
+    def to_int(s):
+        return int(float(s.replace(",", "")))
+
+    def convert_hourly_to_annual(hourly_wage):
+        return hourly_wage * 2080
+
+    def convert_monthly_to_annual(monthly_wage):
+        return monthly_wage * 12
+
+    match = re.search(min_max_pattern, salary_str)
+
+    if match:
+        min_salary = to_int(match.group(1))
+        max_salary = to_int(match.group(3))
+        # Handle 'k' suffix for min and max salaries independently
+        if "k" in match.group(2).lower() or "k" in match.group(4).lower():
+            min_salary *= 1000
+            max_salary *= 1000
+
+        # Convert to annual if less than the hourly threshold
+        if min_salary < hourly_threshold:
+            interval = CompensationInterval.HOURLY.value
+            annual_min_salary = convert_hourly_to_annual(min_salary)
+            if max_salary < hourly_threshold:
+                annual_max_salary = convert_hourly_to_annual(max_salary)
+
+        elif min_salary < monthly_threshold:
+            interval = CompensationInterval.MONTHLY.value
+            annual_min_salary = convert_monthly_to_annual(min_salary)
+            if max_salary < monthly_threshold:
+                annual_max_salary = convert_monthly_to_annual(max_salary)
+
+        else:
+            interval = CompensationInterval.YEARLY.value
+            annual_min_salary = min_salary
+            annual_max_salary = max_salary
+
+        # Ensure salary range is within specified limits
+        if (
+            lower_limit <= annual_min_salary <= upper_limit
+            and lower_limit <= annual_max_salary <= upper_limit
+            and annual_min_salary < annual_max_salary
+        ):
+            if enforce_annual_salary:
+                return interval, annual_min_salary, annual_max_salary, "USD"
+            else:
+                return interval, min_salary, max_salary, "USD"
+    return None, None, None, None
```
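A quick trace of the new `extract_salary` helper under its default thresholds. The interval strings assume `CompensationInterval.HOURLY.value == "hourly"` and `CompensationInterval.YEARLY.value == "yearly"`, which this diff does not show:

```python
# Hedged traces; see the assumption about CompensationInterval values above.
extract_salary("$30 - $40 per hour")
# -> ("hourly", 30, 40, "USD")
#    (range check passes via 30 * 2080 = 62400 and 40 * 2080 = 83200)

extract_salary("$90k - $120k")
# -> ("yearly", 90000, 120000, "USD")

extract_salary("competitive pay")
# -> (None, None, None, None)   # no "$min - $max" pattern to match
```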
```diff
@@ -7,19 +7,24 @@ This module contains routines to scrape ZipRecruiter.
 
 from __future__ import annotations
 
+import json
 import math
+import re
 import time
 from datetime import datetime
 from typing import Optional, Tuple, Any
 
 from concurrent.futures import ThreadPoolExecutor
 
+from bs4 import BeautifulSoup
+
 from .. import Scraper, ScraperInput, Site
 from ..utils import (
     logger,
     extract_emails_from_text,
     create_session,
     markdown_converter,
+    remove_attributes,
 )
 from ...jobs import (
     JobPost,
@@ -36,14 +41,15 @@ class ZipRecruiterScraper(Scraper):
     base_url = "https://www.ziprecruiter.com"
     api_url = "https://api.ziprecruiter.com"
 
-    def __init__(self, proxy: Optional[str] = None):
+    def __init__(self, proxies: list[str] | str | None = None):
         """
         Initializes ZipRecruiterScraper with the ZipRecruiter job search url
         """
+        super().__init__(Site.ZIP_RECRUITER, proxies=proxies)
+
         self.scraper_input = None
-        self.session = create_session(proxy)
+        self.session = create_session(proxies=proxies)
         self._get_cookies()
-        super().__init__(Site.ZIP_RECRUITER, proxy=proxy)
 
         self.delay = 5
         self.jobs_per_page = 20
@@ -129,6 +135,7 @@ class ZipRecruiterScraper(Scraper):
         self.seen_urls.add(job_url)
 
         description = job.get("job_description", "").strip()
+        listing_type = job.get("buyer_type", "")
         description = (
             markdown_converter(description)
             if self.scraper_input.description_format == DescriptionFormat.MARKDOWN
@@ -150,7 +157,10 @@ class ZipRecruiterScraper(Scraper):
         comp_min = int(job["compensation_min"]) if "compensation_min" in job else None
         comp_max = int(job["compensation_max"]) if "compensation_max" in job else None
         comp_currency = job.get("compensation_currency")
+        description_full, job_url_direct = self._get_descr(job_url)
 
         return JobPost(
+            id=str(job["listing_key"]),
             title=title,
             company_name=company,
             location=location,
@@ -163,10 +173,43 @@ class ZipRecruiterScraper(Scraper):
             ),
             date_posted=date_posted,
             job_url=job_url,
-            description=description,
+            description=description_full if description_full else description,
             emails=extract_emails_from_text(description) if description else None,
+            job_url_direct=job_url_direct,
+            listing_type=listing_type,
         )
 
+    def _get_descr(self, job_url):
+        res = self.session.get(job_url, headers=self.headers, allow_redirects=True)
+        description_full = job_url_direct = None
+        if res.ok:
+            soup = BeautifulSoup(res.text, "html.parser")
+            job_descr_div = soup.find("div", class_="job_description")
+            company_descr_section = soup.find("section", class_="company_description")
+            job_description_clean = (
+                remove_attributes(job_descr_div).prettify(formatter="html")
+                if job_descr_div
+                else ""
+            )
+            company_description_clean = (
+                remove_attributes(company_descr_section).prettify(formatter="html")
+                if company_descr_section
+                else ""
+            )
+            description_full = job_description_clean + company_description_clean
+            script_tag = soup.find("script", type="application/json")
+            if script_tag:
+                job_json = json.loads(script_tag.string)
+                job_url_val = job_json["model"]["saveJobURL"]
+                m = re.search(r"job_url=(.+)", job_url_val)
+                if m:
+                    job_url_direct = m.group(1)
+
+        if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
+            description_full = markdown_converter(description_full)
+
+        return description_full, job_url_direct
+
     def _get_cookies(self):
         data = "event_type=session&logged_in=false&number_of_retry=1&property=model%3AiPhone&property=os%3AiOS&property=locale%3Aen_us&property=app_build_number%3A4734&property=app_version%3A91.0&property=manufacturer%3AApple&property=timestamp%3A2024-01-12T12%3A04%3A42-06%3A00&property=screen_height%3A852&property=os_version%3A16.6.1&property=source%3Ainstall&property=screen_width%3A393&property=device_model%3AiPhone%2014%20Pro&property=brand%3AApple"
         url = f"{self.api_url}/jobs-app/event"
```
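`_get_descr` recovers the direct application link from the page's embedded JSON: `model.saveJobURL` is a save-job endpoint whose `job_url` query parameter carries the external URL. A sketch with an invented value:

```python
import re

# Invented saveJobURL value; the regex in _get_descr captures everything
# after "job_url=".
job_url_val = "/candidate/save-job?job_url=https://example.com/careers/apply/123"
m = re.search(r"job_url=(.+)", job_url_val)
job_url_direct = m.group(1) if m else None
print(job_url_direct)  # https://example.com/careers/apply/123
```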
```diff
@@ -5,10 +5,10 @@ import pandas as pd
 def test_all():
     result = scrape_jobs(
         site_name=["linkedin", "indeed", "zip_recruiter", "glassdoor"],
-        search_term="software engineer",
+        search_term="engineer",
         results_wanted=5,
     )
 
     assert (
-        isinstance(result, pd.DataFrame) and not result.empty
+        isinstance(result, pd.DataFrame) and len(result) == 20
     ), "Result should be a non-empty DataFrame"
```

```diff
@@ -2,10 +2,12 @@ from ..jobspy import scrape_jobs
 import pandas as pd
 
 
-def test_indeed():
+def test_glassdoor():
     result = scrape_jobs(
-        site_name="glassdoor", search_term="software engineer", country_indeed="USA"
+        site_name="glassdoor",
+        search_term="engineer",
+        results_wanted=5,
     )
     assert (
-        isinstance(result, pd.DataFrame) and not result.empty
+        isinstance(result, pd.DataFrame) and len(result) == 5
     ), "Result should be a non-empty DataFrame"
```

```diff
@@ -4,8 +4,10 @@ import pandas as pd
 
 def test_indeed():
     result = scrape_jobs(
-        site_name="indeed", search_term="software engineer", country_indeed="usa"
+        site_name="indeed",
+        search_term="engineer",
+        results_wanted=5,
    )
     assert (
-        isinstance(result, pd.DataFrame) and not result.empty
+        isinstance(result, pd.DataFrame) and len(result) == 5
     ), "Result should be a non-empty DataFrame"
```

```diff
@@ -3,10 +3,7 @@ import pandas as pd
 
 
 def test_linkedin():
-    result = scrape_jobs(
-        site_name="linkedin",
-        search_term="software engineer",
-    )
+    result = scrape_jobs(site_name="linkedin", search_term="engineer", results_wanted=5)
     assert (
-        isinstance(result, pd.DataFrame) and not result.empty
+        isinstance(result, pd.DataFrame) and len(result) == 5
     ), "Result should be a non-empty DataFrame"
```

```diff
@@ -4,10 +4,9 @@ import pandas as pd
 
 def test_ziprecruiter():
     result = scrape_jobs(
-        site_name="zip_recruiter",
-        search_term="software engineer",
+        site_name="zip_recruiter", search_term="software engineer", results_wanted=5
     )
 
     assert (
-        isinstance(result, pd.DataFrame) and not result.empty
+        isinstance(result, pd.DataFrame) and len(result) == 5
     ), "Result should be a non-empty DataFrame"
```