mirror of https://github.com/Bunsly/JobSpy.git (synced 2026-03-05 03:54:31 -08:00)

Compare commits — 150 commits:
|            |            |            |            |            |
|------------|------------|------------|------------|------------|
| 6372e41bd9 | 6c869decb8 | 9f4083380d | 9207ab56f6 | 757a94853e |
| 6bc191d5c7 | 0cc34287f7 | 923979093b | 286f0e4487 | f7b29d43a2 |
| 6f1490458c | 6bb7d81ba8 | 0e046432d1 | 209e0e65b6 | 8570c0651e |
| 8678b0bbe4 | 60d4d911c9 | 2a0cba8c7e | de70189fa2 | b55c0eb86d |
| 88c95c4ad5 | d8d33d602f | 6330c14879 | 48631ea271 | edffe18e65 |
| 0988230a24 | d000a81eb3 | ccb0c17660 | df339610fa | c501006bd8 |
| 89a3ee231c | 6439f71433 | 7f6271b2e0 | 5cb7ffe5fd | cd29f79796 |
| 65d2e5e707 | 08d63a87a2 | 1ffdb1756f | 1185693422 | dcd7144318 |
| bf73c061bd | 8dd08ed9fd | 5d3df732e6 | 86f858e06d | 1089d1f0a5 |
| 3e93454738 | 0d150d519f | cc3497f929 | 5986f75346 | 4b7bdb9313 |
| 80213f28d2 | ada38532c3 | 3b0017964c | 94d8f555fd | e8b4b376b8 |
| 54ac1bad16 | 0a669e9ba8 | a4f6851c32 | db01bc6bbb | f8a4eccc6b |
| ba3a16b228 | aeb1a50d2c | 91b137ef86 | 2563c5ca08 | 32282305c8 |
| ccbea51f3c | 6ec7c24f7f | 02caf1b38d | 8e2ab277da | ce3bd84ee5 |
| 1ccf2290fe | ec2eefc58a | 13c7694474 | bbe46fe3f4 | b97c73ffd6 |
| 5b3627b244 | 2ec3b04777 | 89a5264391 | a7ad616567 | 53bc33a43a |
| 22870438c7 | aeb93b99f5 | a5916edcdd | 33d442bf1e | 6587e464fa |
| eed7fca300 | dfb8c18c51 | 81f70ff8a5 | cc9e7866b7 | a2c8fe046e |
| 2b7fea40a5 | d37f86e1b9 | 0302ab14f5 | 3f2b582445 | 93223b6a38 |
| e3fc222eb5 | b303b3f841 | 1a0c75f323 | e2f6885d61 | 8d65d1b652 |
| 216d3fd39f | d3bfdc0a6e | ba5ed803ca | ff1eb0f7b0 | f2cc74b7f2 |
| 5e71866630 | 4e67c6e5a3 | caf655525a | 90fa4a4c4f | e5353e604d |
| 628f4dee9c | 2e59ab03e3 | 008ca61e12 | 8fc4c3bf90 | bff39a2625 |
| c676050dc0 | 37976f7ec2 | 9fb2fdd80f | af07c1ecbd | 286b9e1256 |
| 162dd40b0f | 558e352939 | efad1a1b7d | eaa481c2f4 | b914aa6449 |
| 6adbfb8b29 | a3b9dd50ff | d3ba3a4878 | f524789d74 | f3890d4830 |
| 60c9728691 | f79d975e5f | d6368f909b | 6fcf7f666e | 4406f9350f |
| ca5155f234 | 822a55783e | 59f739018a | a37e7f235e | 690739e858 |
| 43eb2fe0e8 | e50227bba6 | 45c2d76e15 | fd883178be | 70e2218c67 |
| d6947ecdd7 | 5191658562 | 1c264b8c58 | 1598d4ff63 | bf2460684b |
**.github/workflows/publish-to-pypi.yml** (vendored, 42 lines)

```diff
@@ -7,27 +7,27 @@ jobs:
     runs-on: ubuntu-latest

     steps:
       - uses: actions/checkout@v3
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
           python-version: "3.10"

       - name: Install poetry
         run: >-
           python3 -m
           pip install
           poetry
           --user

       - name: Build distribution 📦
         run: >-
           python3 -m
           poetry
           build

       - name: Publish distribution 📦 to PyPI
         if: startsWith(github.ref, 'refs/tags')
         uses: pypa/gh-action-pypi-publish@release/v1
         with:
           password: ${{ secrets.PYPI_API_TOKEN }}
```
**.github/workflows/python-test.yml** (vendored, new file, 22 lines)

```diff
@@ -0,0 +1,22 @@
+name: Python Tests
+
+on:
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.8'
+      - name: Install dependencies
+        run: |
+          pip install poetry
+          poetry install
+      - name: Run tests
+        run: poetry run pytest tests/test_all.py
```
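The new workflow's final step runs `poetry run pytest tests/test_all.py`. That test file is not part of this diff; as a rough illustration of the shape such a smoke test could take (hypothetical test name and assertions, live network access assumed):

```python
# Hypothetical sketch of a smoke test in the spirit of tests/test_all.py;
# the repository's actual tests are not shown in this compare view.
import pandas as pd

from jobspy import scrape_jobs


def test_indeed_smoke():
    # Live scrape: assumes network access and that Indeed is reachable.
    result = scrape_jobs(
        site_name="indeed", search_term="software engineer", results_wanted=5
    )
    assert isinstance(result, pd.DataFrame)
    assert not result.empty
```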
**.gitignore** (vendored, 10 lines)

```diff
@@ -1,10 +1,10 @@
-/.idea
-**/.DS_Store
 /venv/
-/ven/
+/.idea
 **/__pycache__/
 **/.pytest_cache/
+/.ipynb_checkpoints/
+**/output/
+**/.DS_Store
 *.pyc
 .env
 dist
-/.ipynb_checkpoints/
```
**.pre-commit-config.yaml** (new file, 7 lines)

```diff
@@ -0,0 +1,7 @@
+repos:
+  - repo: https://github.com/psf/black
+    rev: 24.2.0
+    hooks:
+      - id: black
+        language_version: python
+        args: [--line-length=88, --quiet]
```
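The hook pins black 24.2.0 at a line length of 88. To see what that style does to a snippet, black's programmatic entry points can be called directly (a quick sketch; `black.format_str` and `black.Mode` are part of black's Python API, and the sample source string is arbitrary):

```python
# Quick look at the configured style via black's API (line length 88,
# matching the hook's args).
import black

src = "x = {  'a':37,'b':42,}\n"
print(black.format_str(src, mode=black.Mode(line_length=88)))
# -> x = {"a": 37, "b": 42}
```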
@@ -1,689 +0,0 @@ (deleted demo Jupyter notebook, 689 lines)

The removed notebook held the code cells below plus one stored output cell: a 30-row sample DataFrame of `indeed`, `linkedin`, and `zip_recruiter` results (columns: site, title, company_name, city, state, job_type, interval, min_amount, max_amount, job_url, description), not reproduced here.

```python
from jobspy import scrape_jobs
import pandas as pd

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', 50)

scrape_jobs(
    site_name=["indeed", "linkedin", "zip_recruiter"],
    search_term="software engineer",
    results_wanted=10
)
```
**README.md** (247 lines)

````diff
@@ -1,50 +1,55 @@
-# <img src="https://github.com/cullenwatson/JobSpy/assets/78247585/2f61a059-9647-4a9c-bfb9-e3a9448bdc6a" style="vertical-align: sub; margin-right: 5px;"> JobSpy
+<img src="https://github.com/cullenwatson/JobSpy/assets/78247585/ae185b7e-e444-4712-8bb9-fa97f53e896b" width="400">

 **JobSpy** is a simple, yet comprehensive, job scraping library.

+**Not technical?** Try out the web scraping tool on our site at [usejobspy.com](https://usejobspy.com).
+
+*Looking to build a data-focused software product?* **[Book a call](https://bunsly.com/)** *to
+work with us.*
+
 ## Features

-- Scrapes job postings from **LinkedIn**, **Indeed** & **ZipRecruiter** simultaneously
+- Scrapes job postings from **LinkedIn**, **Indeed**, **Glassdoor**, & **ZipRecruiter** simultaneously
 - Aggregates the job postings in a Pandas DataFrame
+- Proxies support

 ![jobspy](https://github.com/cullenwatson/JobSpy/assets/78247585/ec7ef355-05f6-4fd3-8161-a817e31c5c57)

 ### Installation
-`pip install python-jobspy`

+```
+pip install -U python-jobspy
+```
+
 _Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_

 ### Usage

 ```python
+import csv
 from jobspy import scrape_jobs
-import pandas as pd

-jobs: pd.DataFrame = scrape_jobs(
-    site_name=["indeed", "linkedin", "zip_recruiter"],
+jobs = scrape_jobs(
+    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
     search_term="software engineer",
-    results_wanted=10
+    location="Dallas, TX",
+    results_wanted=20,
+    hours_old=72, # (only Linkedin/Indeed is hour specific, others round up to days old)
+    country_indeed='USA',  # only needed for indeed / glassdoor
+
+    # linkedin_fetch_description=True # get more info such as full description, direct job url for linkedin (slower)
+    # proxies=["208.195.175.46:65095", "208.195.175.45:65095", "localhost"],
+
 )
-
-if jobs.empty:
-    print("No jobs found.")
-else:
-    #1 print
-    pd.set_option('display.max_columns', None)
-    pd.set_option('display.max_rows', None)
-    pd.set_option('display.width', None)
-    pd.set_option('display.max_colwidth', 50)  # set to 0 to see full job url / desc
-    print(jobs)
-
-    #2 display in Jupyter Notebook
-    #display(jobs)
-
-    #3 output to .csv
-    #jobs.to_csv('jobs.csv', index=False)
+print(f"Found {len(jobs)} jobs")
+print(jobs.head())
+jobs.to_csv("jobs.csv", quoting=csv.QUOTE_NONNUMERIC, escapechar="\\", index=False) # to_excel
 ```

 ### Output

 ```
-SITE           TITLE                             COMPANY_NAME      CITY          STATE  JOB_TYPE  INTERVAL  MIN_AMOUNT  MAX_AMOUNT  JOB_URL                                            DESCRIPTION
+SITE           TITLE                             COMPANY           CITY          STATE  JOB_TYPE  INTERVAL  MIN_AMOUNT  MAX_AMOUNT  JOB_URL                                            DESCRIPTION
 indeed         Software Engineer                 AMERICAN SYSTEMS  Arlington     VA     None      yearly    200000      150000      https://www.indeed.com/viewjob?jk=5e409e577046...  THIS POSITION COMES WITH A 10K SIGNING BONUS!...
 indeed         Senior Software Engineer          TherapyNotes.com  Philadelphia  PA     fulltime  yearly    135000      110000      https://www.indeed.com/viewjob?jk=da39574a40cb...  About Us TherapyNotes is the national leader i...
 linkedin       Software Engineer - Early Career  Lockheed Martin   Sunnyvale     CA     fulltime  yearly    None        None        https://www.linkedin.com/jobs/view/3693012711      Description:By bringing together people that u...
````
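The rewritten usage example ends by writing `jobs.csv` with non-numeric quoting and a backslash escape character. A small follow-on sketch (not from the README) that loads the file back and keeps only postings with salary data:

```python
# Follow-on to the usage example above: read back jobs.csv and filter to
# rows where a minimum salary was scraped. Assumes the file was just written.
import pandas as pd

jobs = pd.read_csv("jobs.csv")
salaried = jobs[jobs["min_amount"].notna()]
print(salaried[["site", "title", "company", "min_amount", "max_amount"]].head())
```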
````diff
@@ -52,58 +57,180 @@ linkedin       Full-Stack Software Engineer      Rain              New York
 zip_recruiter  Software Engineer - New Grad      ZipRecruiter      Santa Monica  CA     fulltime  yearly    130000      150000      https://www.ziprecruiter.com/jobs/ziprecruiter...  We offer a hybrid work environment. Most US-ba...
 zip_recruiter  Software Developer                TEKsystems        Phoenix       AZ     fulltime  hourly    65          75          https://www.ziprecruiter.com/jobs/teksystems-0...  Top Skills' Details• 6 years of Java developme...
 ```

 ### Parameters for `scrape_jobs()`

 ```plaintext
-Required
-├── site_type (List[enum]): linkedin, zip_recruiter, indeed
-└── search_term (str)
-
 Optional
-├── location (int)
-├── distance (int): in miles
-├── job_type (enum): fulltime, parttime, internship, contract
+├── site_name (list|str):
+|  linkedin, zip_recruiter, indeed, glassdoor
+|  (default is all four)
+│
+├── search_term (str)
+│
+├── location (str)
+│
+├── distance (int):
+|  in miles, default 50
+│
+├── job_type (str):
+|  fulltime, parttime, internship, contract
+│
+├── proxies (list):
+|  in format ['user:pass@host:port', 'localhost']
+|  each job board scraper will round robin through the proxies
+|
+├── ca_cert (str)
+|  path to CA Certificate file for proxies
+│
 ├── is_remote (bool)
-├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
-├── easy_apply (bool): filters for jobs on LinkedIn that have the 'Easy Apply' option
+│
+├── results_wanted (int):
+|  number of job results to retrieve for each site specified in 'site_name'
+│
+├── easy_apply (bool):
+|  filters for jobs that are hosted on the job board site
+│
+├── description_format (str):
+|  markdown, html (Format type of the job descriptions. Default is markdown.)
+│
+├── offset (int):
+|  starts the search from an offset (e.g. 25 will start the search from the 25th result)
+│
+├── hours_old (int):
+|  filters jobs by the number of hours since the job was posted
+|  (ZipRecruiter and Glassdoor round up to next day.)
+│
+├── verbose (int) {0, 1, 2}:
+|  Controls the verbosity of the runtime printouts
+|  (0 prints only errors, 1 is errors+warnings, 2 is all logs. Default is 2.)
+|
+├── linkedin_fetch_description (bool):
+|  fetches full description and direct job url for LinkedIn (Increases requests by O(n))
+│
+├── linkedin_company_ids (list[int]):
+|  searches for linkedin jobs with specific company ids
+|
+├── country_indeed (str):
+|  filters the country on Indeed & Glassdoor (see below for correct spelling)
+|
+├── enforce_annual_salary (bool):
+|  converts wages to annual salary
 ```

+```
+├── Indeed limitations:
+|  Only one from this list can be used in a search:
+|  - hours_old
+|  - job_type & is_remote
+|  - easy_apply
+│
+└── LinkedIn limitations:
+|  Only one from this list can be used in a search:
+|  - hours_old
+|  - easy_apply
+```
````
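The limitation lists above mean each Indeed search may use only one of the mutually exclusive filter groups. A sketch of two separate, valid calls, as opposed to one invalid combined call:

```python
# Valid: one exclusive filter group per Indeed search.
from jobspy import scrape_jobs

recent = scrape_jobs(site_name="indeed", search_term="python", hours_old=24)
remote_ft = scrape_jobs(
    site_name="indeed", search_term="python", job_type="fulltime", is_remote=True
)
# Invalid: combining hours_old with job_type/is_remote (or easy_apply)
# in a single Indeed search is unsupported per the note above.
```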
````diff

 ### JobPost Schema

 ```plaintext
 JobPost
-├── title (str)
-├── company_name (str)
-├── job_url (str)
-├── location (object)
-│   ├── country (str)
-│   ├── city (str)
-│   ├── state (str)
-├── description (str)
-├── job_type (enum)
-├── compensation (object)
-│   ├── interval (CompensationInterval): yearly, monthly, weekly, daily, hourly
-│   ├── min_amount (float)
-│   ├── max_amount (float)
-│   └── currency (str)
-└── date_posted (datetime)
+├── title
+├── company
+├── company_url
+├── job_url
+├── location
+│   ├── country
+│   ├── city
+│   ├── state
+├── description
+├── job_type: fulltime, parttime, internship, contract
+├── job_function
+│   ├── interval: yearly, monthly, weekly, daily, hourly
+│   ├── min_amount
+│   ├── max_amount
+│   ├── currency
+│   └── salary_source: direct_data, description (parsed from posting)
+├── date_posted
+├── emails
+└── is_remote
+
+Linkedin specific
+└── job_level
+
+Linkedin & Indeed specific
+└── company_industry
+
+Indeed specific
+├── company_country
+├── company_addresses
+├── company_employees_label
+├── company_revenue_label
+├── company_description
+└── logo_photo_url
 ```
````
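The schema's fields surface as columns of the returned DataFrame; `emails`, for instance, is joined into a comma-separated string (see the `", ".join(...)` in the `__init__.py` diff further down). A sketch pulling unique contact emails out of a result set:

```python
# Sketch: collect unique contact emails from a scrape result. Assumes
# emails arrive as a comma-separated string column, per the join below.
from jobspy import scrape_jobs

jobs = scrape_jobs(site_name="indeed", search_term="data engineer", results_wanted=20)
emails = jobs["emails"].dropna().str.split(", ").explode().unique()
print(emails)
```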
````diff

+## Supported Countries for Job Searching
+
+### **LinkedIn**
+
+LinkedIn searches globally & uses only the `location` parameter.
+
+### **ZipRecruiter**
+
+ZipRecruiter searches for jobs in **US/Canada** & uses only the `location` parameter.
+
+### **Indeed / Glassdoor**
+
+Indeed & Glassdoor supports most countries, but the `country_indeed` parameter is required. Additionally, use the `location`
+parameter to narrow down the location, e.g. city & state if necessary.
+
+You can specify the following countries when searching on Indeed (use the exact name, * indicates support for Glassdoor):
+
+|                      |              |            |                |
+|----------------------|--------------|------------|----------------|
+| Argentina            | Australia*   | Austria*   | Bahrain        |
+| Belgium*             | Brazil*      | Canada*    | Chile          |
+| China                | Colombia     | Costa Rica | Czech Republic |
+| Denmark              | Ecuador      | Egypt      | Finland        |
+| France*              | Germany*     | Greece     | Hong Kong*     |
+| Hungary              | India*       | Indonesia  | Ireland*       |
+| Israel               | Italy*       | Japan      | Kuwait         |
+| Luxembourg           | Malaysia     | Mexico*    | Morocco        |
+| Netherlands*         | New Zealand* | Nigeria    | Norway         |
+| Oman                 | Pakistan     | Panama     | Peru           |
+| Philippines          | Poland       | Portugal   | Qatar          |
+| Romania              | Saudi Arabia | Singapore* | South Africa   |
+| South Korea          | Spain*       | Sweden     | Switzerland*   |
+| Taiwan               | Thailand     | Turkey     | Ukraine        |
+| United Arab Emirates | UK*          | USA*       | Uruguay        |
+| Venezuela            | Vietnam*     |            |                |
+
+## Notes
+
+* Indeed is the best scraper currently with no rate limiting.
+* All the job board endpoints are capped at around 1000 jobs on a given search.
+* LinkedIn is the most restrictive and usually rate limits around the 10th page with one ip. Proxies are a must basically.
+
````
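Per the section above, a non-US Indeed/Glassdoor search sets `country_indeed` to one of the table's names and narrows with `location`. A sketch for a UK search (UK is starred, so Glassdoor is supported too):

```python
# UK search per the country table above: country_indeed picks the country,
# location narrows to a city.
from jobspy import scrape_jobs

uk_jobs = scrape_jobs(
    site_name=["indeed", "glassdoor"],
    search_term="backend engineer",
    location="London",
    country_indeed="UK",
    results_wanted=20,
)
```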
````diff
 ## Frequently Asked Questions

 ---

-**Q: Encountering issues with your queries?**
-**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems persist, [submit an issue](#).
+**Q: Why is Indeed giving unrelated roles?**
+**A:** Indeed is searching each one of your terms e.g. software intern, it searches software OR intern. Try search_term='"software intern"' in quotes for stricter searching

 ---

 **Q: Received a response code 429?**
-**A:** This indicates that you have been blocked by the job board site for sending too many requests. Currently, **ZipRecruiter** is particularly aggressive with blocking. We recommend:
+**A:** This indicates that you have been blocked by the job board site for sending too many requests. All of the job board sites are aggressive with blocking. We recommend:

-- Waiting a few seconds between requests.
-- Trying a VPN to change your IP address.
-
-**Note:** Proxy support is in development and coming soon!
+- Wait some time between scrapes (site-dependent).
+- Try using the proxies param to change your IP address.

 ---
+
+**Q: Encountering issues with your queries?**
+**A:** Try reducing the number of `results_wanted` and/or broadening the filters. If problems
+persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
+
+---
````
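Illustrating the first FAQ answer above, embedded quotes inside the search term make Indeed match the exact phrase rather than OR-ing the words:

```python
# From the FAQ: quote the phrase inside the string for strict matching.
from jobspy import scrape_jobs

loose = scrape_jobs(site_name="indeed", search_term="software intern")     # software OR intern
strict = scrape_jobs(site_name="indeed", search_term='"software intern"')  # exact phrase
```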
**poetry.lock** (generated, 2665 lines) — file diff suppressed because it is too large.
**poetry.toml** (new file, 2 lines)

```diff
@@ -0,0 +1,2 @@
+[virtualenvs]
+in-project = true
```
**pyproject.toml**

```diff
@@ -1,9 +1,11 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.0.3"
+version = "1.1.71"
-description = "Job scraper for LinkedIn, Indeed & ZipRecruiter"
+description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
-authors = ["Zachary Hampton <zachary@zacharysproducts.com>", "Cullen Watson <cullen@cullen.ai>"]
+authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
+homepage = "https://github.com/Bunsly/JobSpy"
 readme = "README.md"
+keywords = ['jobs-scraper', 'linkedin', 'indeed', 'glassdoor', 'ziprecruiter']
+
 packages = [
     { include = "jobspy", from = "src" }
@@ -12,16 +14,24 @@ packages = [
 [tool.poetry.dependencies]
 python = "^3.10"
 requests = "^2.31.0"
-tls-client = "^0.2.1"
 beautifulsoup4 = "^4.12.2"
 pandas = "^2.1.0"
+NUMPY = "1.26.3"
 pydantic = "^2.3.0"
+tls-client = "^1.0.1"
+markdownify = "^0.13.1"
+regex = "^2024.4.28"
+

 [tool.poetry.group.dev.dependencies]
 pytest = "^7.4.1"
 jupyter = "^1.0.0"
+black = "*"
+pre-commit = "*"

 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
+
+[tool.black]
+line-length = 88
```
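The version bump (1.0.3 → 1.1.71) is visible at runtime through the standard library, e.g. (a sketch; `importlib.metadata` ships with the required Python ≥ 3.10):

```python
# Check the installed distribution version after upgrading.
from importlib.metadata import version

print(version("python-jobspy"))  # e.g. "1.1.71"
```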
**src/jobspy/__init__.py**

```diff
@@ -1,128 +1,250 @@
-import pandas as pd
-from typing import List, Tuple
+from __future__ import annotations

-from .jobs import JobType
+import pandas as pd
+from typing import Tuple
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+from .jobs import JobType, Location
+from .scrapers.utils import set_logger_level, extract_salary, create_logger
 from .scrapers.indeed import IndeedScraper
 from .scrapers.ziprecruiter import ZipRecruiterScraper
+from .scrapers.glassdoor import GlassdoorScraper
 from .scrapers.linkedin import LinkedInScraper
-from .scrapers import (
-    ScraperInput,
-    Site,
-    JobResponse,
-)
+from .scrapers import SalarySource, ScraperInput, Site, JobResponse, Country
+from .scrapers.exceptions import (
+    LinkedInException,
+    IndeedException,
+    ZipRecruiterException,
+    GlassdoorException,
+)

-
-SCRAPER_MAPPING = {
-    Site.LINKEDIN: LinkedInScraper,
-    Site.INDEED: IndeedScraper,
-    Site.ZIP_RECRUITER: ZipRecruiterScraper,
-}
-
-
-def _map_str_to_site(site_name: str) -> Site:
-    return Site[site_name.upper()]
-

 def scrape_jobs(
-    site_name: str | Site | List[Site],
-    search_term: str,
-    location: str = "",
-    distance: int = None,
+    site_name: str | list[str] | Site | list[Site] | None = None,
+    search_term: str | None = None,
+    location: str | None = None,
+    distance: int | None = 50,
     is_remote: bool = False,
-    job_type: JobType = None,
-    easy_apply: bool = False,  # linkedin
+    job_type: str | None = None,
+    easy_apply: bool | None = None,
     results_wanted: int = 15,
+    country_indeed: str = "usa",
+    hyperlinks: bool = False,
+    proxies: list[str] | str | None = None,
+    ca_cert: str | None = None,
+    description_format: str = "markdown",
+    linkedin_fetch_description: bool | None = False,
+    linkedin_company_ids: list[int] | None = None,
+    offset: int | None = 0,
+    hours_old: int = None,
+    enforce_annual_salary: bool = False,
+    verbose: int = 2,
+    **kwargs,
 ) -> pd.DataFrame:
     """
-    Asynchronously scrapes job data from multiple job sites.
-    :return: results_wanted: pandas dataframe containing job data
+    Simultaneously scrapes job data from multiple job sites.
+    :return: pandas dataframe containing job data
     """
```
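With every parameter now optional and `site_name` defaulting to all four boards (see `get_site_type()` below), the minimal call shrinks to a single keyword. A sketch, assuming network access:

```python
# Minimal call under the new signature: all four sites, 15 results each
# by default.
from jobspy import scrape_jobs

df = scrape_jobs(search_term="software engineer")
print(len(df))
```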
```diff
+    SCRAPER_MAPPING = {
+        Site.LINKEDIN: LinkedInScraper,
+        Site.INDEED: IndeedScraper,
+        Site.ZIP_RECRUITER: ZipRecruiterScraper,
+        Site.GLASSDOOR: GlassdoorScraper,
+    }
+    set_logger_level(verbose)

-    if type(site_name) == str:
-        site_name = _map_str_to_site(site_name)
+    def map_str_to_site(site_name: str) -> Site:
+        return Site[site_name.upper()]
+
+    def get_enum_from_value(value_str):
+        for job_type in JobType:
+            if value_str in job_type.value:
+                return job_type
+        raise Exception(f"Invalid job type: {value_str}")
+
+    job_type = get_enum_from_value(job_type) if job_type else None
+
+    def get_site_type():
+        site_types = list(Site)
+        if isinstance(site_name, str):
+            site_types = [map_str_to_site(site_name)]
+        elif isinstance(site_name, Site):
+            site_types = [site_name]
+        elif isinstance(site_name, list):
+            site_types = [
+                map_str_to_site(site) if isinstance(site, str) else site
+                for site in site_name
+            ]
+        return site_types
+
+    country_enum = Country.from_string(country_indeed)

-    site_type = [site_name] if type(site_name) == Site else site_name
```
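`get_enum_from_value` relies on each `JobType` member's value being a collection of accepted spellings, so a plain `in` test maps any alias to its member. A standalone sketch of the pattern (the alias tuples here are illustrative, not JobSpy's actual ones):

```python
# Alias-to-enum matching, as used by get_enum_from_value above. The alias
# tuples are made up for illustration.
from enum import Enum


class JobType(Enum):
    FULL_TIME = ("fulltime", "full-time", "full time")
    PART_TIME = ("parttime", "part-time")


def get_enum_from_value(value_str: str) -> JobType:
    for job_type in JobType:
        if value_str in job_type.value:
            return job_type
    raise Exception(f"Invalid job type: {value_str}")


assert get_enum_from_value("full-time") is JobType.FULL_TIME
```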
```diff
     scraper_input = ScraperInput(
-        site_type=site_type,
+        site_type=get_site_type(),
+        country=country_enum,
         search_term=search_term,
         location=location,
         distance=distance,
         is_remote=is_remote,
         job_type=job_type,
         easy_apply=easy_apply,
+        description_format=description_format,
+        linkedin_fetch_description=linkedin_fetch_description,
         results_wanted=results_wanted,
+        linkedin_company_ids=linkedin_company_ids,
+        offset=offset,
+        hours_old=hours_old,
     )

     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
         scraper_class = SCRAPER_MAPPING[site]
-        scraper = scraper_class()
+        scraper = scraper_class(proxies=proxies, ca_cert=ca_cert)
         scraped_data: JobResponse = scraper.scrape(scraper_input)
+        cap_name = site.value.capitalize()
+        site_name = "ZipRecruiter" if cap_name == "Zip_recruiter" else cap_name
+        create_logger(site_name).info(f"finished scraping")
         return site.value, scraped_data

```
```diff
-    results = {}
-    for site in scraper_input.site_type:
-        site_value, scraped_data = scrape_site(site)
-        results[site_value] = scraped_data
+    site_to_jobs_dict = {}

-    dfs = []
+    def worker(site):
+        site_val, scraped_info = scrape_site(site)
+        return site_val, scraped_info

-    for site, job_response in results.items():
+    with ThreadPoolExecutor() as executor:
+        future_to_site = {
+            executor.submit(worker, site): site for site in scraper_input.site_type
+        }
+        for future in as_completed(future_to_site):
+            site_value, scraped_data = future.result()
+            site_to_jobs_dict[site_value] = scraped_data
+
+    def convert_to_annual(job_data: dict):
+        if job_data["interval"] == "hourly":
+            job_data["min_amount"] *= 2080
+            job_data["max_amount"] *= 2080
+        if job_data["interval"] == "monthly":
+            job_data["min_amount"] *= 12
+            job_data["max_amount"] *= 12
+        if job_data["interval"] == "weekly":
+            job_data["min_amount"] *= 52
+            job_data["max_amount"] *= 52
+        if job_data["interval"] == "daily":
+            job_data["min_amount"] *= 260
+            job_data["max_amount"] *= 260
+        job_data["interval"] = "yearly"
+
+    jobs_dfs: list[pd.DataFrame] = []
+
```
|
for site, job_response in site_to_jobs_dict.items():
|
||||||
for job in job_response.jobs:
|
for job in job_response.jobs:
|
||||||
data = job.dict()
|
job_data = job.dict()
|
||||||
data["site"] = site
|
job_url = job_data["job_url"]
|
||||||
|
job_data["job_url_hyper"] = f'<a href="{job_url}">{job_url}</a>'
|
||||||
|
job_data["site"] = site
|
||||||
|
job_data["company"] = job_data["company_name"]
|
||||||
|
job_data["job_type"] = (
|
||||||
|
", ".join(job_type.value[0] for job_type in job_data["job_type"])
|
||||||
|
if job_data["job_type"]
|
||||||
|
else None
|
||||||
|
)
|
||||||
|
job_data["emails"] = (
|
||||||
|
", ".join(job_data["emails"]) if job_data["emails"] else None
|
||||||
|
)
|
||||||
|
if job_data["location"]:
|
||||||
|
job_data["location"] = Location(
|
||||||
|
**job_data["location"]
|
||||||
|
).display_location()
|
||||||
|
|
||||||
# Formatting JobType
|
compensation_obj = job_data.get("compensation")
|
||||||
data["job_type"] = data["job_type"].value if data["job_type"] else None
|
|
||||||
|
|
||||||
# Formatting Location
|
|
||||||
location_obj = data.get("location")
|
|
||||||
if location_obj and isinstance(location_obj, dict):
|
|
||||||
data["city"] = location_obj.get("city", "")
|
|
||||||
data["state"] = location_obj.get("state", "")
|
|
||||||
data["country"] = location_obj.get("country", "USA")
|
|
||||||
else:
|
|
||||||
data["city"] = None
|
|
||||||
data["state"] = None
|
|
||||||
data["country"] = None
|
|
||||||
|
|
||||||
# Formatting Compensation
|
|
||||||
compensation_obj = data.get("compensation")
|
|
||||||
if compensation_obj and isinstance(compensation_obj, dict):
|
if compensation_obj and isinstance(compensation_obj, dict):
|
||||||
data["interval"] = (
|
job_data["interval"] = (
|
||||||
compensation_obj.get("interval").value
|
compensation_obj.get("interval").value
|
||||||
if compensation_obj.get("interval")
|
if compensation_obj.get("interval")
|
||||||
else None
|
else None
|
||||||
)
|
)
|
||||||
data["min_amount"] = compensation_obj.get("min_amount")
|
job_data["min_amount"] = compensation_obj.get("min_amount")
|
||||||
data["max_amount"] = compensation_obj.get("max_amount")
|
job_data["max_amount"] = compensation_obj.get("max_amount")
|
||||||
data["currency"] = compensation_obj.get("currency", "USD")
|
job_data["currency"] = compensation_obj.get("currency", "USD")
|
||||||
|
job_data["salary_source"] = SalarySource.DIRECT_DATA.value
|
||||||
|
if enforce_annual_salary and (
|
||||||
|
job_data["interval"]
|
||||||
|
and job_data["interval"] != "yearly"
|
||||||
|
and job_data["min_amount"]
|
||||||
|
and job_data["max_amount"]
|
||||||
|
):
|
||||||
|
convert_to_annual(job_data)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
data["interval"] = None
|
if country_enum == Country.USA:
|
||||||
data["min_amount"] = None
|
(
|
||||||
data["max_amount"] = None
|
job_data["interval"],
|
||||||
data["currency"] = None
|
job_data["min_amount"],
|
||||||
|
job_data["max_amount"],
|
||||||
|
job_data["currency"],
|
||||||
|
) = extract_salary(
|
||||||
|
job_data["description"],
|
||||||
|
enforce_annual_salary=enforce_annual_salary,
|
||||||
|
)
|
||||||
|
job_data["salary_source"] = SalarySource.DESCRIPTION.value
|
||||||
|
|
||||||
job_df = pd.DataFrame([data])
|
job_data["salary_source"] = (
|
||||||
dfs.append(job_df)
|
job_data["salary_source"]
|
||||||
|
if "min_amount" in job_data and job_data["min_amount"]
|
||||||
|
else None
|
||||||
|
)
|
||||||
|
job_df = pd.DataFrame([job_data])
|
||||||
|
jobs_dfs.append(job_df)
|
||||||
|
|
||||||
if dfs:
|
if jobs_dfs:
|
||||||
df = pd.concat(dfs, ignore_index=True)
|
# Step 1: Filter out all-NA columns from each DataFrame before concatenation
|
||||||
|
filtered_dfs = [df.dropna(axis=1, how="all") for df in jobs_dfs]
|
||||||
|
|
||||||
|
# Step 2: Concatenate the filtered DataFrames
|
||||||
|
jobs_df = pd.concat(filtered_dfs, ignore_index=True)
|
||||||
|
|
||||||
|
# Desired column order
|
||||||
desired_order = [
|
desired_order = [
|
||||||
|
"id",
|
||||||
"site",
|
"site",
|
||||||
|
"job_url_hyper" if hyperlinks else "job_url",
|
||||||
|
"job_url_direct",
|
||||||
"title",
|
"title",
|
||||||
"company_name",
|
"company",
|
||||||
"city",
|
"location",
|
||||||
"state",
|
|
||||||
"job_type",
|
"job_type",
|
||||||
|
"date_posted",
|
||||||
|
"salary_source",
|
||||||
"interval",
|
"interval",
|
||||||
"min_amount",
|
"min_amount",
|
||||||
"max_amount",
|
"max_amount",
|
||||||
"job_url",
|
"currency",
|
||||||
|
"is_remote",
|
||||||
|
"job_level",
|
||||||
|
"job_function",
|
||||||
|
"company_industry",
|
||||||
|
"listing_type",
|
||||||
|
"emails",
|
||||||
"description",
|
"description",
|
||||||
|
"company_url",
|
||||||
|
"logo_photo_url",
|
||||||
|
"company_url_direct",
|
||||||
|
"company_addresses",
|
||||||
|
"company_num_employees",
|
||||||
|
"company_revenue",
|
||||||
|
"company_description",
|
||||||
]
|
]
|
||||||
df = df[desired_order]
|
|
||||||
else:
|
|
||||||
df = pd.DataFrame()
|
|
||||||
|
|
||||||
return df
|
# Step 3: Ensure all desired columns are present, adding missing ones as empty
|
||||||
|
for column in desired_order:
|
||||||
|
if column not in jobs_df.columns:
|
||||||
|
jobs_df[column] = None # Add missing columns as empty
|
||||||
|
|
||||||
|
# Reorder the DataFrame according to the desired order
|
||||||
|
jobs_df = jobs_df[desired_order]
|
||||||
|
|
||||||
|
# Step 4: Sort the DataFrame as required
|
||||||
|
return jobs_df.sort_values(by=["site", "date_posted"], ascending=[True, False])
|
||||||
|
else:
|
||||||
|
return pd.DataFrame()
|
||||||
|
|||||||
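A minimal sketch of calling the reworked entry point above (parameter names come straight from the diff; the `from jobspy import scrape_jobs` import path is assumed from the package layout):

    import pandas as pd
    from jobspy import scrape_jobs  # assumed import path

    # Each site's scraper runs concurrently in the ThreadPoolExecutor shown above.
    jobs: pd.DataFrame = scrape_jobs(
        site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
        search_term="software engineer",
        location="Dallas, TX",
        results_wanted=20,
        hours_old=72,                # only postings from the last 3 days
        country_indeed="USA",        # resolved via Country.from_string
        enforce_annual_salary=True,  # hourly x 2080, daily x 260, weekly x 52, monthly x 12
    )
    print(jobs[["site", "title", "company", "min_amount", "max_amount"]].head())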
@@ -1,29 +1,199 @@
-from typing import Union, Optional
-from pydantic import BaseModel, validator
+from __future__ import annotations
+
+from typing import Optional
 from datetime import date
 from enum import Enum
+from pydantic import BaseModel


 class JobType(Enum):
-    FULL_TIME = "fulltime"
-    PART_TIME = "parttime"
-    CONTRACT = "contract"
-    TEMPORARY = "temporary"
-    INTERNSHIP = "internship"
+    FULL_TIME = (
+        "fulltime",
+        "períodointegral",
+        "estágio/trainee",
+        "cunormăîntreagă",
+        "tiempocompleto",
+        "vollzeit",
+        "voltijds",
+        "tempointegral",
+        "全职",
+        "plnýúvazek",
+        "fuldtid",
+        "دوامكامل",
+        "kokopäivätyö",
+        "tempsplein",
+        "vollzeit",
+        "πλήρηςαπασχόληση",
+        "teljesmunkaidő",
+        "tempopieno",
+        "tempsplein",
+        "heltid",
+        "jornadacompleta",
+        "pełnyetat",
+        "정규직",
+        "100%",
+        "全職",
+        "งานประจำ",
+        "tamzamanlı",
+        "повназайнятість",
+        "toànthờigian",
+    )
+    PART_TIME = ("parttime", "teilzeit", "částečnýúvazek", "deltid")
+    CONTRACT = ("contract", "contractor")
+    TEMPORARY = ("temporary",)
+    INTERNSHIP = (
+        "internship",
+        "prácticas",
+        "ojt(onthejobtraining)",
+        "praktikum",
+        "praktik",
+    )

-    PER_DIEM = "perdiem"
-    NIGHTS = "nights"
-    OTHER = "other"
-    SUMMER = "summer"
-    VOLUNTEER = "volunteer"
+    PER_DIEM = ("perdiem",)
+    NIGHTS = ("nights",)
+    OTHER = ("other",)
+    SUMMER = ("summer",)
+    VOLUNTEER = ("volunteer",)


+class Country(Enum):
+    """
+    Gets the subdomain for Indeed and Glassdoor.
+    The second item in the tuple is the subdomain (and API country code if there's a ':' separator) for Indeed
+    The third item in the tuple is the subdomain (and tld if there's a ':' separator) for Glassdoor
+    """
+
+    ARGENTINA = ("argentina", "ar", "com.ar")
+    AUSTRALIA = ("australia", "au", "com.au")
+    AUSTRIA = ("austria", "at", "at")
+    BAHRAIN = ("bahrain", "bh")
+    BELGIUM = ("belgium", "be", "fr:be")
+    BRAZIL = ("brazil", "br", "com.br")
+    CANADA = ("canada", "ca", "ca")
+    CHILE = ("chile", "cl")
+    CHINA = ("china", "cn")
+    COLOMBIA = ("colombia", "co")
+    COSTARICA = ("costa rica", "cr")
+    CZECHREPUBLIC = ("czech republic,czechia", "cz")
+    DENMARK = ("denmark", "dk")
+    ECUADOR = ("ecuador", "ec")
+    EGYPT = ("egypt", "eg")
+    FINLAND = ("finland", "fi")
+    FRANCE = ("france", "fr", "fr")
+    GERMANY = ("germany", "de", "de")
+    GREECE = ("greece", "gr")
+    HONGKONG = ("hong kong", "hk", "com.hk")
+    HUNGARY = ("hungary", "hu")
+    INDIA = ("india", "in", "co.in")
+    INDONESIA = ("indonesia", "id")
+    IRELAND = ("ireland", "ie", "ie")
+    ISRAEL = ("israel", "il")
+    ITALY = ("italy", "it", "it")
+    JAPAN = ("japan", "jp")
+    KUWAIT = ("kuwait", "kw")
+    LUXEMBOURG = ("luxembourg", "lu")
+    MALAYSIA = ("malaysia", "malaysia:my", "com")
+    MALTA = ("malta", "malta:mt", "mt")
+    MEXICO = ("mexico", "mx", "com.mx")
+    MOROCCO = ("morocco", "ma")
+    NETHERLANDS = ("netherlands", "nl", "nl")
+    NEWZEALAND = ("new zealand", "nz", "co.nz")
+    NIGERIA = ("nigeria", "ng")
+    NORWAY = ("norway", "no")
+    OMAN = ("oman", "om")
+    PAKISTAN = ("pakistan", "pk")
+    PANAMA = ("panama", "pa")
+    PERU = ("peru", "pe")
+    PHILIPPINES = ("philippines", "ph")
+    POLAND = ("poland", "pl")
+    PORTUGAL = ("portugal", "pt")
+    QATAR = ("qatar", "qa")
+    ROMANIA = ("romania", "ro")
+    SAUDIARABIA = ("saudi arabia", "sa")
+    SINGAPORE = ("singapore", "sg", "sg")
+    SOUTHAFRICA = ("south africa", "za")
+    SOUTHKOREA = ("south korea", "kr")
+    SPAIN = ("spain", "es", "es")
+    SWEDEN = ("sweden", "se")
+    SWITZERLAND = ("switzerland", "ch", "de:ch")
+    TAIWAN = ("taiwan", "tw")
+    THAILAND = ("thailand", "th")
+    TURKEY = ("türkiye,turkey", "tr")
+    UKRAINE = ("ukraine", "ua")
+    UNITEDARABEMIRATES = ("united arab emirates", "ae")
+    UK = ("uk,united kingdom", "uk:gb", "co.uk")
+    USA = ("usa,us,united states", "www:us", "com")
+    URUGUAY = ("uruguay", "uy")
+    VENEZUELA = ("venezuela", "ve")
+    VIETNAM = ("vietnam", "vn", "com")
+
+    # internal for ziprecruiter
+    US_CANADA = ("usa/ca", "www")
+
+    # internal for linkedin
+    WORLDWIDE = ("worldwide", "www")
+
+    @property
+    def indeed_domain_value(self):
+        subdomain, _, api_country_code = self.value[1].partition(":")
+        if subdomain and api_country_code:
+            return subdomain, api_country_code.upper()
+        return self.value[1], self.value[1].upper()
+
+    @property
+    def glassdoor_domain_value(self):
+        if len(self.value) == 3:
+            subdomain, _, domain = self.value[2].partition(":")
+            if subdomain and domain:
+                return f"{subdomain}.glassdoor.{domain}"
+            else:
+                return f"www.glassdoor.{self.value[2]}"
+        else:
+            raise Exception(f"Glassdoor is not available for {self.name}")
+
+    def get_glassdoor_url(self):
+        return f"https://{self.glassdoor_domain_value}/"
+
+    @classmethod
+    def from_string(cls, country_str: str):
+        """Convert a string to the corresponding Country enum."""
+        country_str = country_str.strip().lower()
+        for country in cls:
+            country_names = country.value[0].split(",")
+            if country_str in country_names:
+                return country
+        valid_countries = [country.value for country in cls]
+        raise ValueError(
+            f"Invalid country string: '{country_str}'. Valid countries are: {', '.join([country[0] for country in valid_countries])}"
+        )


 class Location(BaseModel):
-    country: str = "USA"
-    city: str = None
+    country: Country | str | None = None
+    city: Optional[str] = None
     state: Optional[str] = None

+    def display_location(self) -> str:
+        location_parts = []
+        if self.city:
+            location_parts.append(self.city)
+        if self.state:
+            location_parts.append(self.state)
+        if isinstance(self.country, str):
+            location_parts.append(self.country)
+        elif self.country and self.country not in (
+            Country.US_CANADA,
+            Country.WORLDWIDE,
+        ):
+            country_name = self.country.value[0]
+            if "," in country_name:
+                country_name = country_name.split(",")[0]
+            if country_name in ("usa", "uk"):
+                location_parts.append(country_name.upper())
+            else:
+                location_parts.append(country_name.title())
+        return ", ".join(location_parts)


 class CompensationInterval(Enum):
     YEARLY = "yearly"
@@ -32,43 +202,66 @@ class CompensationInterval(Enum):
     DAILY = "daily"
     HOURLY = "hourly"

+    @classmethod
+    def get_interval(cls, pay_period):
+        interval_mapping = {
+            "YEAR": cls.YEARLY,
+            "HOUR": cls.HOURLY,
+        }
+        if pay_period in interval_mapping:
+            return interval_mapping[pay_period].value
+        else:
+            return cls[pay_period].value if pay_period in cls.__members__ else None


 class Compensation(BaseModel):
-    interval: CompensationInterval
-    min_amount: int = None
-    max_amount: int = None
-    currency: str = "USD"
+    interval: Optional[CompensationInterval] = None
+    min_amount: float | None = None
+    max_amount: float | None = None
+    currency: Optional[str] = "USD"


+class DescriptionFormat(Enum):
+    MARKDOWN = "markdown"
+    HTML = "html"


 class JobPost(BaseModel):
+    id: str | None = None
     title: str
-    company_name: str
+    company_name: str | None
     job_url: str
+    job_url_direct: str | None = None
     location: Optional[Location]

-    description: Optional[str] = None
-    job_type: Optional[JobType] = None
-    compensation: Optional[Compensation] = None
-    date_posted: Optional[date] = None
+    description: str | None = None
+    company_url: str | None = None
+    company_url_direct: str | None = None
+
+    job_type: list[JobType] | None = None
+    compensation: Compensation | None = None
+    date_posted: date | None = None
+    emails: list[str] | None = None
+    is_remote: bool | None = None
+    listing_type: str | None = None
+
+    # linkedin specific
+    job_level: str | None = None
+
+    # linkedin and indeed specific
+    company_industry: str | None = None
+
+    # indeed specific
+    company_addresses: str | None = None
+    company_num_employees: str | None = None
+    company_revenue: str | None = None
+    company_description: str | None = None
+    logo_photo_url: str | None = None
+    banner_photo_url: str | None = None
+
+    # linkedin only atm
+    job_function: str | None = None


 class JobResponse(BaseModel):
-    success: bool
-    error: str = None
-
-    total_results: Optional[int] = None
-
     jobs: list[JobPost] = []
-
-    returned_results: int = None
-
-    @validator("returned_results", pre=True, always=True)
-    def set_returned_results(cls, v, values):
-        jobs_list = values.get("jobs")
-
-        if v is None:
-            if jobs_list is not None:
-                return len(jobs_list)
-            else:
-                return 0
-        return v
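A quick sketch of the reworked models in use; the outputs follow directly from the enum tuples and display_location logic above (the `jobspy.jobs` import path is assumed from the module layout):

    from jobspy.jobs import Country, Location  # assumed import path

    uk = Country.from_string("united kingdom")  # matches the "uk,united kingdom" tuple entry
    print(uk.indeed_domain_value)               # ('uk', 'GB'): subdomain plus API country code
    print(uk.get_glassdoor_url())               # https://www.glassdoor.co.uk/

    loc = Location(city="Austin", state="TX", country=Country.USA)
    print(loc.display_location())               # Austin, TX, USA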
@@ -1,43 +1,51 @@
-from ..jobs import Enum, BaseModel, JobType, JobResponse
-from typing import List, Optional, Any
+from __future__ import annotations

+from abc import ABC, abstractmethod

-class StatusException(Exception):
-    def __init__(self, status_code: int):
-        self.status_code = status_code
+from ..jobs import (
+    Enum,
+    BaseModel,
+    JobType,
+    JobResponse,
+    Country,
+    DescriptionFormat,
+)


 class Site(Enum):
     LINKEDIN = "linkedin"
     INDEED = "indeed"
     ZIP_RECRUITER = "zip_recruiter"
+    GLASSDOOR = "glassdoor"
+
+
+class SalarySource(Enum):
+    DIRECT_DATA = "direct_data"
+    DESCRIPTION = "description"


 class ScraperInput(BaseModel):
-    site_type: List[Site]
-    search_term: str
+    site_type: list[Site]
+    search_term: str | None = None

-    location: str = None
-    distance: Optional[int] = None
+    location: str | None = None
+    country: Country | None = Country.USA
+    distance: int | None = None
     is_remote: bool = False
-    job_type: Optional[JobType] = None
-    easy_apply: bool = None  # linkedin
+    job_type: JobType | None = None
+    easy_apply: bool | None = None
+    offset: int = 0
+    linkedin_fetch_description: bool = False
+    linkedin_company_ids: list[int] | None = None
+    description_format: DescriptionFormat | None = DescriptionFormat.MARKDOWN

     results_wanted: int = 15
+    hours_old: int | None = None


-class CommonResponse(BaseModel):
-    status: Optional[str]
-    error: Optional[str]
-    linkedin: Optional[Any] = None
-    indeed: Optional[Any] = None
-    zip_recruiter: Optional[Any] = None
-
-
-class Scraper:
-    def __init__(self, site: Site, url: str):
+class Scraper(ABC):
+    def __init__(self, site: Site, proxies: list[str] | None = None, ca_cert: str | None = None):
         self.site = site
-        self.url = url
+        self.proxies = proxies
+        self.ca_cert = ca_cert

-    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
-        ...
+    @abstractmethod
+    def scrape(self, scraper_input: ScraperInput) -> JobResponse: ...
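With Scraper now an ABC that stores proxies and ca_cert for every site scraper, each concrete scraper must subclass it and implement scrape. A minimal conforming subclass might look like this (the class name and the Site.INDEED choice are purely illustrative; import paths are assumptions):

    from jobspy.scrapers import Scraper, ScraperInput, Site, JobResponse  # assumed import path

    class NoopScraper(Scraper):
        """Hypothetical scraper that satisfies the ABC without touching the network."""

        def __init__(self, proxies=None, ca_cert=None):
            # The base class stores site/proxies/ca_cert for use by subclasses.
            super().__init__(Site.INDEED, proxies=proxies, ca_cert=ca_cert)

        def scrape(self, scraper_input: ScraperInput) -> JobResponse:
            return JobResponse(jobs=[])  # a real scraper would fetch and parse here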
src/jobspy/scrapers/exceptions.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+"""
+jobspy.scrapers.exceptions
+~~~~~~~~~~~~~~~~~~~
+
+This module contains the set of Scrapers' exceptions.
+"""
+
+
+class LinkedInException(Exception):
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with LinkedIn")
+
+
+class IndeedException(Exception):
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with Indeed")
+
+
+class ZipRecruiterException(Exception):
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with ZipRecruiter")
+
+
+class GlassdoorException(Exception):
+    def __init__(self, message=None):
+        super().__init__(message or "An error occurred with Glassdoor")
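These per-site exceptions replace the old StatusException; a short sketch of catching one (the handler itself is hypothetical, not part of the diff):

    from jobspy.scrapers.exceptions import GlassdoorException  # assumed import path

    try:
        raise GlassdoorException()  # no message given, so the default applies
    except GlassdoorException as e:
        print(e)  # "An error occurred with Glassdoor"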
src/jobspy/scrapers/glassdoor/__init__.py (new file, 364 lines)
@@ -0,0 +1,364 @@
+"""
+jobspy.scrapers.glassdoor
+~~~~~~~~~~~~~~~~~~~
+
+This module contains routines to scrape Glassdoor.
+"""
+
+from __future__ import annotations
+
+import re
+import json
+import requests
+from typing import Optional, Tuple
+from datetime import datetime, timedelta
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+from .constants import fallback_token, query_template, headers
+from .. import Scraper, ScraperInput, Site
+from ..utils import extract_emails_from_text, create_logger
+from ..exceptions import GlassdoorException
+from ..utils import (
+    create_session,
+    markdown_converter,
+)
+from ...jobs import (
+    JobPost,
+    Compensation,
+    CompensationInterval,
+    Location,
+    JobResponse,
+    JobType,
+    DescriptionFormat,
+)
+
+logger = create_logger("Glassdoor")
+
+
+class GlassdoorScraper(Scraper):
+    def __init__(
+        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
+    ):
+        """
+        Initializes GlassdoorScraper with the Glassdoor job search url
+        """
+        site = Site(Site.GLASSDOOR)
+        super().__init__(site, proxies=proxies, ca_cert=ca_cert)
+
+        self.base_url = None
+        self.country = None
+        self.session = None
+        self.scraper_input = None
+        self.jobs_per_page = 30
+        self.max_pages = 30
+        self.seen_urls = set()
+
+    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
+        """
+        Scrapes Glassdoor for jobs with scraper_input criteria.
+        :param scraper_input: Information about job search criteria.
+        :return: JobResponse containing a list of jobs.
+        """
+        self.scraper_input = scraper_input
+        self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)
+        self.base_url = self.scraper_input.country.get_glassdoor_url()
+
+        self.session = create_session(
+            proxies=self.proxies, ca_cert=self.ca_cert, is_tls=True, has_retry=True
+        )
+        token = self._get_csrf_token()
+        headers["gd-csrf-token"] = token if token else fallback_token
+        self.session.headers.update(headers)
+
+        location_id, location_type = self._get_location(
+            scraper_input.location, scraper_input.is_remote
+        )
+        if location_type is None:
+            logger.error("Glassdoor: location not parsed")
+            return JobResponse(jobs=[])
+        job_list: list[JobPost] = []
+        cursor = None
+
+        range_start = 1 + (scraper_input.offset // self.jobs_per_page)
+        tot_pages = (scraper_input.results_wanted // self.jobs_per_page) + 2
+        range_end = min(tot_pages, self.max_pages + 1)
+        for page in range(range_start, range_end):
+            logger.info(f"search page: {page} / {range_end-1}")
+            try:
+                jobs, cursor = self._fetch_jobs_page(
+                    scraper_input, location_id, location_type, page, cursor
+                )
+                job_list.extend(jobs)
+                if not jobs or len(job_list) >= scraper_input.results_wanted:
+                    job_list = job_list[: scraper_input.results_wanted]
+                    break
+            except Exception as e:
+                logger.error(f"Glassdoor: {str(e)}")
+                break
+        return JobResponse(jobs=job_list)
+
+    def _fetch_jobs_page(
+        self,
+        scraper_input: ScraperInput,
+        location_id: int,
+        location_type: str,
+        page_num: int,
+        cursor: str | None,
+    ) -> Tuple[list[JobPost], str | None]:
+        """
+        Scrapes a page of Glassdoor for jobs with scraper_input criteria
+        """
+        jobs = []
+        self.scraper_input = scraper_input
+        try:
+            payload = self._add_payload(location_id, location_type, page_num, cursor)
+            response = self.session.post(
+                f"{self.base_url}/graph",
+                timeout_seconds=15,
+                data=payload,
+            )
+            if response.status_code != 200:
+                exc_msg = f"bad response status code: {response.status_code}"
+                raise GlassdoorException(exc_msg)
+            res_json = response.json()[0]
+            if "errors" in res_json:
+                raise ValueError("Error encountered in API response")
+        except (
+            requests.exceptions.ReadTimeout,
+            GlassdoorException,
+            ValueError,
+            Exception,
+        ) as e:
+            logger.error(f"Glassdoor: {str(e)}")
+            return jobs, None
+
+        jobs_data = res_json["data"]["jobListings"]["jobListings"]
+
+        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
+            future_to_job_data = {
+                executor.submit(self._process_job, job): job for job in jobs_data
+            }
+            for future in as_completed(future_to_job_data):
+                try:
+                    job_post = future.result()
+                    if job_post:
+                        jobs.append(job_post)
+                except Exception as exc:
+                    raise GlassdoorException(f"Glassdoor generated an exception: {exc}")
+
+        return jobs, self.get_cursor_for_page(
+            res_json["data"]["jobListings"]["paginationCursors"], page_num + 1
+        )
+
+    def _get_csrf_token(self):
+        """
+        Fetches csrf token needed for API by visiting a generic page
+        """
+        res = self.session.get(f"{self.base_url}/Job/computer-science-jobs.htm")
+        pattern = r'"token":\s*"([^"]+)"'
+        matches = re.findall(pattern, res.text)
+        token = None
+        if matches:
+            token = matches[0]
+        return token
+
+    def _process_job(self, job_data):
+        """
+        Processes a single job and fetches its description.
+        """
+        job_id = job_data["jobview"]["job"]["listingId"]
+        job_url = f"{self.base_url}job-listing/j?jl={job_id}"
+        if job_url in self.seen_urls:
+            return None
+        self.seen_urls.add(job_url)
+        job = job_data["jobview"]
+        title = job["job"]["jobTitleText"]
+        company_name = job["header"]["employerNameFromSearch"]
+        company_id = job_data["jobview"]["header"]["employer"]["id"]
+        location_name = job["header"].get("locationName", "")
+        location_type = job["header"].get("locationType", "")
+        age_in_days = job["header"].get("ageInDays")
+        is_remote, location = False, None
+        date_diff = (datetime.now() - timedelta(days=age_in_days)).date()
+        date_posted = date_diff if age_in_days is not None else None
+
+        if location_type == "S":
+            is_remote = True
+        else:
+            location = self.parse_location(location_name)
+
+        compensation = self.parse_compensation(job["header"])
+        try:
+            description = self._fetch_job_description(job_id)
+        except:
+            description = None
+        company_url = f"{self.base_url}Overview/W-EI_IE{company_id}.htm"
+        company_logo = (
+            job_data["jobview"].get("overview", {}).get("squareLogoUrl", None)
+        )
+        listing_type = (
+            job_data["jobview"]
+            .get("header", {})
+            .get("adOrderSponsorshipLevel", "")
+            .lower()
+        )
+        return JobPost(
+            id=f"gd-{job_id}",
+            title=title,
+            company_url=company_url if company_id else None,
+            company_name=company_name,
+            date_posted=date_posted,
+            job_url=job_url,
+            location=location,
+            compensation=compensation,
+            is_remote=is_remote,
+            description=description,
+            emails=extract_emails_from_text(description) if description else None,
+            logo_photo_url=company_logo,
+            listing_type=listing_type,
+        )
+
+    def _fetch_job_description(self, job_id):
+        """
+        Fetches the job description for a single job ID.
+        """
+        url = f"{self.base_url}/graph"
+        body = [
+            {
+                "operationName": "JobDetailQuery",
+                "variables": {
+                    "jl": job_id,
+                    "queryString": "q",
+                    "pageTypeEnum": "SERP",
+                },
+                "query": """
+                query JobDetailQuery($jl: Long!, $queryString: String, $pageTypeEnum: PageTypeEnum) {
+                    jobview: jobView(
+                        listingId: $jl
+                        contextHolder: {queryString: $queryString, pageTypeEnum: $pageTypeEnum}
+                    ) {
+                        job {
+                            description
+                            __typename
+                        }
+                        __typename
+                    }
+                }
+                """,
+            }
+        ]
+        res = requests.post(url, json=body, headers=headers)
+        if res.status_code != 200:
+            return None
+        data = res.json()[0]
+        desc = data["data"]["jobview"]["job"]["description"]
+        if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
+            desc = markdown_converter(desc)
+        return desc
+
+    def _get_location(self, location: str, is_remote: bool) -> (int, str):
+        if not location or is_remote:
+            return "11047", "STATE"  # remote options
+        url = f"{self.base_url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
+        res = self.session.get(url)
+        if res.status_code != 200:
+            if res.status_code == 429:
+                err = f"429 Response - Blocked by Glassdoor for too many requests"
+                logger.error(err)
+                return None, None
+            else:
+                err = f"Glassdoor response status code {res.status_code}"
+                err += f" - {res.text}"
+                logger.error(f"Glassdoor response status code {res.status_code}")
+                return None, None
+        items = res.json()
+
+        if not items:
+            raise ValueError(f"Location '{location}' not found on Glassdoor")
+        location_type = items[0]["locationType"]
+        if location_type == "C":
+            location_type = "CITY"
+        elif location_type == "S":
+            location_type = "STATE"
+        elif location_type == "N":
+            location_type = "COUNTRY"
+        return int(items[0]["locationId"]), location_type
+
+    def _add_payload(
+        self,
+        location_id: int,
+        location_type: str,
+        page_num: int,
+        cursor: str | None = None,
+    ) -> str:
+        fromage = None
+        if self.scraper_input.hours_old:
+            fromage = max(self.scraper_input.hours_old // 24, 1)
+        filter_params = []
+        if self.scraper_input.easy_apply:
+            filter_params.append({"filterKey": "applicationType", "values": "1"})
+        if fromage:
+            filter_params.append({"filterKey": "fromAge", "values": str(fromage)})
+        payload = {
+            "operationName": "JobSearchResultsQuery",
+            "variables": {
+                "excludeJobListingIds": [],
+                "filterParams": filter_params,
+                "keyword": self.scraper_input.search_term,
+                "numJobsToShow": 30,
+                "locationType": location_type,
+                "locationId": int(location_id),
+                "parameterUrlInput": f"IL.0,12_I{location_type}{location_id}",
+                "pageNumber": page_num,
+                "pageCursor": cursor,
+                "fromage": fromage,
+                "sort": "date",
+            },
+            "query": query_template,
+        }
+        if self.scraper_input.job_type:
+            payload["variables"]["filterParams"].append(
+                {"filterKey": "jobType", "values": self.scraper_input.job_type.value[0]}
+            )
+        return json.dumps([payload])
+
+    @staticmethod
+    def parse_compensation(data: dict) -> Optional[Compensation]:
+        pay_period = data.get("payPeriod")
+        adjusted_pay = data.get("payPeriodAdjustedPay")
+        currency = data.get("payCurrency", "USD")
+        if not pay_period or not adjusted_pay:
+            return None
+
+        interval = None
+        if pay_period == "ANNUAL":
+            interval = CompensationInterval.YEARLY
+        elif pay_period:
+            interval = CompensationInterval.get_interval(pay_period)
+        min_amount = int(adjusted_pay.get("p10") // 1)
+        max_amount = int(adjusted_pay.get("p90") // 1)
+        return Compensation(
+            interval=interval,
+            min_amount=min_amount,
+            max_amount=max_amount,
+            currency=currency,
+        )
+
+    @staticmethod
+    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
+        for job_type in JobType:
+            if job_type_str in job_type.value:
+                return [job_type]
+
+    @staticmethod
+    def parse_location(location_name: str) -> Location | None:
+        if not location_name or location_name == "Remote":
+            return
+        city, _, state = location_name.partition(", ")
+        return Location(city=city, state=state)
+
+    @staticmethod
+    def get_cursor_for_page(pagination_cursors, page_num):
+        for cursor_data in pagination_cursors:
+            if cursor_data["pageNumber"] == page_num:
+                return cursor_data["cursor"]
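A minimal sketch of driving the new GlassdoorScraper directly rather than through scrape_jobs (import paths are assumed from the package layout; a live network connection and a location Glassdoor can resolve are required in practice):

    from jobspy.scrapers import ScraperInput, Site          # assumed import paths
    from jobspy.scrapers.glassdoor import GlassdoorScraper

    scraper = GlassdoorScraper()                            # no proxies, default CA bundle
    response = scraper.scrape(
        ScraperInput(
            site_type=[Site.GLASSDOOR],
            search_term="data engineer",
            location="Chicago, IL",
            results_wanted=30,                              # exactly one page at jobs_per_page=30
        )
    )
    for post in response.jobs:
        print(post.title, "-", post.company_name)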
src/jobspy/scrapers/glassdoor/constants.py (new file, 184 lines)
@@ -0,0 +1,184 @@
+headers = {
+    "authority": "www.glassdoor.com",
+    "accept": "*/*",
+    "accept-language": "en-US,en;q=0.9",
+    "apollographql-client-name": "job-search-next",
+    "apollographql-client-version": "4.65.5",
+    "content-type": "application/json",
+    "origin": "https://www.glassdoor.com",
+    "referer": "https://www.glassdoor.com/",
+    "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+    "sec-ch-ua-mobile": "?0",
+    "sec-ch-ua-platform": '"macOS"',
+    "sec-fetch-dest": "empty",
+    "sec-fetch-mode": "cors",
+    "sec-fetch-site": "same-origin",
+    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
+}
+query_template = """
+query JobSearchResultsQuery(
+    $excludeJobListingIds: [Long!],
+    $keyword: String,
+    $locationId: Int,
+    $locationType: LocationTypeEnum,
+    $numJobsToShow: Int!,
+    $pageCursor: String,
+    $pageNumber: Int,
+    $filterParams: [FilterParams],
+    $originalPageUrl: String,
+    $seoFriendlyUrlInput: String,
+    $parameterUrlInput: String,
+    $seoUrl: Boolean
+) {
+    jobListings(
+        contextHolder: {
+            searchParams: {
+                excludeJobListingIds: $excludeJobListingIds,
+                keyword: $keyword,
+                locationId: $locationId,
+                locationType: $locationType,
+                numPerPage: $numJobsToShow,
+                pageCursor: $pageCursor,
+                pageNumber: $pageNumber,
+                filterParams: $filterParams,
+                originalPageUrl: $originalPageUrl,
+                seoFriendlyUrlInput: $seoFriendlyUrlInput,
+                parameterUrlInput: $parameterUrlInput,
+                seoUrl: $seoUrl,
+                searchType: SR
+            }
+        }
+    ) {
+        companyFilterOptions {
+            id
+            shortName
+            __typename
+        }
+        filterOptions
+        indeedCtk
+        jobListings {
+            ...JobView
+            __typename
+        }
+        jobListingSeoLinks {
+            linkItems {
+                position
+                url
+                __typename
+            }
+            __typename
+        }
+        jobSearchTrackingKey
+        jobsPageSeoData {
+            pageMetaDescription
+            pageTitle
+            __typename
+        }
+        paginationCursors {
+            cursor
+            pageNumber
+            __typename
+        }
+        indexablePageForSeo
+        searchResultsMetadata {
+            searchCriteria {
+                implicitLocation {
+                    id
+                    localizedDisplayName
+                    type
+                    __typename
+                }
+                keyword
+                location {
+                    id
+                    shortName
+                    localizedShortName
+                    localizedDisplayName
+                    type
+                    __typename
+                }
+                __typename
+            }
+            helpCenterDomain
+            helpCenterLocale
+            jobSerpJobOutlook {
+                occupation
+                paragraph
+                __typename
+            }
+            showMachineReadableJobs
+            __typename
+        }
+        totalJobsCount
+        __typename
+    }
+}
+
+fragment JobView on JobListingSearchResult {
+    jobview {
+        header {
+            adOrderId
+            advertiserType
+            adOrderSponsorshipLevel
+            ageInDays
+            divisionEmployerName
+            easyApply
+            employer {
+                id
+                name
+                shortName
+                __typename
+            }
+            employerNameFromSearch
+            goc
+            gocConfidence
+            gocId
+            jobCountryId
+            jobLink
+            jobResultTrackingKey
+            jobTitleText
+            locationName
+            locationType
+            locId
+            needsCommission
+            payCurrency
+            payPeriod
+            payPeriodAdjustedPay {
+                p10
+                p50
+                p90
+                __typename
+            }
+            rating
+            salarySource
+            savedJobId
+            sponsored
+            __typename
+        }
+        job {
+            description
+            importConfigId
+            jobTitleId
+            jobTitleText
+            listingId
+            __typename
+        }
+        jobListingAdminDetails {
+            cpcVal
+            importConfigId
+            jobListingId
+            jobSourceId
+            userEligibleForAdminJobDetails
+            __typename
+        }
+        overview {
+            shortName
+            squareLogoUrl
+            __typename
+        }
+        __typename
+    }
+    __typename
+}
+"""
+fallback_token = "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok"
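The headers, query_template, and fallback_token above are consumed by _get_csrf_token and _add_payload; a condensed sketch of the request body _add_payload produces (the location id here is hypothetical, shown only to illustrate the shape):

    import json
    from jobspy.scrapers.glassdoor.constants import query_template  # assumed import path

    payload = json.dumps([{
        "operationName": "JobSearchResultsQuery",
        "variables": {
            "excludeJobListingIds": [],
            "filterParams": [],                  # e.g. fromAge / applicationType filters
            "keyword": "python developer",
            "numJobsToShow": 30,
            "locationType": "CITY",
            "locationId": 1128808,               # hypothetical Glassdoor location id
            "parameterUrlInput": "IL.0,12_ICITY1128808",
            "pageNumber": 1,
            "pageCursor": None,
            "fromage": None,
            "sort": "date",
        },
        "query": query_template,
    }])
    # POSTed to f"{base_url}/graph" with the headers dict (plus a gd-csrf-token).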
@@ -1,15 +1,25 @@
-import re
+"""
+jobspy.scrapers.indeed
+~~~~~~~~~~~~~~~~~~~
+
+This module contains routines to scrape Indeed.
+"""
+
+from __future__ import annotations
+
 import math
-import json
+from typing import Tuple
 from datetime import datetime
-from typing import Optional

-import tls_client
-import urllib.parse
-from bs4 import BeautifulSoup
-from bs4.element import Tag
-from concurrent.futures import ThreadPoolExecutor, Future

+from .constants import job_search_query, api_headers
+from .. import Scraper, ScraperInput, Site
+from ..utils import (
+    extract_emails_from_text,
+    get_enum_from_job_type,
+    markdown_converter,
+    create_session,
+    create_logger,
+)
 from ...jobs import (
     JobPost,
     Compensation,
@@ -17,136 +27,32 @@ from ...jobs import (
     Location,
     JobResponse,
     JobType,
+    DescriptionFormat,
 )
-from .. import Scraper, ScraperInput, Site, StatusException
-
-
-class ParsingException(Exception):
-    pass
+
+logger = create_logger("Indeed")


 class IndeedScraper(Scraper):
-    def __init__(self):
+    def __init__(
+        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
+    ):
         """
-        Initializes IndeedScraper with the Indeed job search url
+        Initializes IndeedScraper with the Indeed API url
         """
-        site = Site(Site.INDEED)
-        url = "https://www.indeed.com"
-        super().__init__(site, url)
+        super().__init__(Site.INDEED, proxies=proxies)

-        self.jobs_per_page = 15
+        self.session = create_session(
+            proxies=self.proxies, ca_cert=ca_cert, is_tls=False
+        )
+        self.scraper_input = None
+        self.jobs_per_page = 100
+        self.num_workers = 10
         self.seen_urls = set()
+        self.headers = None
+        self.api_country_code = None
+        self.base_url = None
+        self.api_url = "https://apis.indeed.com/graphql"

-    def scrape_page(
-        self, scraper_input: ScraperInput, page: int, session: tls_client.Session
-    ) -> tuple[list[JobPost], int]:
-        """
-        Scrapes a page of Indeed for jobs with scraper_input criteria
-        :param scraper_input:
-        :param page:
-        :param session:
-        :return: jobs found on page, total number of jobs found for search
-        """
-
-        job_list = []
-
-        params = {
-            "q": scraper_input.search_term,
-            "l": scraper_input.location,
-            "radius": scraper_input.distance,
-            "filter": 0,
-            "start": 0 + page * 10,
-        }
-        sc_values = []
-        if scraper_input.is_remote:
-            sc_values.append("attr(DSQF7)")
-        if scraper_input.job_type:
-            sc_values.append("jt({})".format(scraper_input.job_type.value))
-
-        if sc_values:
-            params["sc"] = "0kf:" + "".join(sc_values) + ";"
-        response = session.get(self.url + "/jobs", params=params)
-
-        if response.status_code != 200 and response.status_code != 307:
-            raise StatusException(response.status_code)
-
-        soup = BeautifulSoup(response.content, "html.parser")
-        if "did not match any jobs" in str(soup):
-            raise ParsingException("Search did not match any jobs")
-
-        jobs = IndeedScraper.parse_jobs(
-            soup
-        )  #: can raise exception, handled by main scrape function
-        total_num_jobs = IndeedScraper.total_jobs(soup)
-
-        if (
-            not jobs.get("metaData", {})
-            .get("mosaicProviderJobCardsModel", {})
-            .get("results")
-        ):
-            raise Exception("No jobs found.")
-
-        def process_job(job) -> Optional[JobPost]:
-            job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}'
-            job_url_client = f'{self.url}/viewjob?jk={job["jobkey"]}'
-            if job_url in self.seen_urls:
-                return None
-
-            snippet_html = BeautifulSoup(job["snippet"], "html.parser")
-
-            extracted_salary = job.get("extractedSalary")
-            compensation = None
-            if extracted_salary:
-                salary_snippet = job.get("salarySnippet")
-                currency = salary_snippet.get("currency") if salary_snippet else None
-                interval = (extracted_salary.get("type"),)
-                if isinstance(interval, tuple):
-                    interval = interval[0]
-
-                interval = interval.upper()
-                if interval in CompensationInterval.__members__:
-                    compensation = Compensation(
-                        interval=CompensationInterval[interval],
-                        min_amount=int(extracted_salary.get("max")),
-                        max_amount=int(extracted_salary.get("min")),
-                        currency=currency,
-                    )
-
-            job_type = IndeedScraper.get_job_type(job)
-            timestamp_seconds = job["pubDate"] / 1000
-            date_posted = datetime.fromtimestamp(timestamp_seconds)
-            date_posted = date_posted.strftime("%Y-%m-%d")
-
-            description = self.get_description(job_url, session)
-            li_elements = snippet_html.find_all("li")
-            if description is None and li_elements:
-                description = " ".join(li.text for li in li_elements)
-
-            first_li = snippet_html.find("li")
-            job_post = JobPost(
-                title=job["normTitle"],
-                description=description,
-                company_name=job["company"],
-                location=Location(
-                    city=job.get("jobLocationCity"),
-                    state=job.get("jobLocationState"),
-                ),
-                job_type=job_type,
-                compensation=compensation,
-                date_posted=date_posted,
-                job_url=job_url_client,
-            )
-            return job_post
-
-        with ThreadPoolExecutor(max_workers=10) as executor:
-            job_results: list[Future] = [
-                executor.submit(process_job, job)
-                for job in jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
-            ]
-
-            job_list = [result.result() for result in job_results if result.result()]
-
-        return job_list, total_num_jobs
-
     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
@@ -154,153 +60,292 @@ class IndeedScraper(Scraper):
         :param scraper_input:
         :return: job_response
         """
-        session = tls_client.Session(
-            client_identifier="chrome112", random_tls_extension_order=True
-        )
+        self.scraper_input = scraper_input
+        domain, self.api_country_code = self.scraper_input.country.indeed_domain_value
+        self.base_url = f"https://{domain}.indeed.com"
+        self.headers = api_headers.copy()
+        self.headers["indeed-co"] = self.scraper_input.country.indeed_domain_value
+        job_list = []
+        page = 1

-        pages_to_process = (
-            math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
-        )
+        cursor = None
+        offset_pages = math.ceil(self.scraper_input.offset / 100)
+        for _ in range(offset_pages):
+            logger.info(f"skipping search page: {page}")
+            __, cursor = self._scrape_page(cursor)
+            if not __:
+                logger.info(f"found no jobs on page: {page}")
+                break

-        try:
-            #: get first page to initialize session
-            job_list, total_results = self.scrape_page(scraper_input, 0, session)
-
-            with ThreadPoolExecutor(max_workers=10) as executor:
-                futures: list[Future] = [
-                    executor.submit(self.scrape_page, scraper_input, page, session)
-                    for page in range(1, pages_to_process + 1)
-                ]
-
-                for future in futures:
-                    jobs, _ = future.result()
-
-                    job_list += jobs
-        except StatusException as e:
-            return JobResponse(
-                success=False,
-                error=f"Indeed returned status code {e.status_code}",
+        while len(self.seen_urls) < scraper_input.results_wanted:
+            logger.info(
+                f"search page: {page} / {math.ceil(scraper_input.results_wanted / 100)}"
             )
+            jobs, cursor = self._scrape_page(cursor)
+            if not jobs:
+                logger.info(f"found no jobs on page: {page}")
+                break
+            job_list += jobs
+            page += 1
+        return JobResponse(jobs=job_list[: scraper_input.results_wanted])

-        except ParsingException as e:
-            return JobResponse(
-                success=False,
-                error=f"Indeed failed to parse response: {e}",
-            )
-        except Exception as e:
-            return JobResponse(
-                success=False,
-                error=f"Indeed failed to scrape: {e}",
-            )
-
-        if len(job_list) > scraper_input.results_wanted:
-            job_list = job_list[: scraper_input.results_wanted]
-
-        job_response = JobResponse(
-            success=True,
-            jobs=job_list,
-            total_results=total_results,
-        )
-        return job_response
-
-    def get_description(self, job_page_url: str, session: tls_client.Session) -> str:
+    def _scrape_page(self, cursor: str | None) -> Tuple[list[JobPost], str | None]:
         """
-        Retrieves job description by going to the job page url
-        :param job_page_url:
-        :param session:
-        :return: description
+        Scrapes a page of Indeed for jobs with scraper_input criteria
+        :param cursor:
+        :return: jobs found on page, next page cursor
         """
-        parsed_url = urllib.parse.urlparse(job_page_url)
-        params = urllib.parse.parse_qs(parsed_url.query)
-        jk_value = params.get("jk", [None])[0]
-        formatted_url = f"{self.url}/viewjob?jk={jk_value}&spa=1"
+        jobs = []
+        new_cursor = None
+        filters = self._build_filters()
+        search_term = (
+            self.scraper_input.search_term.replace('"', '\\"')
+            if self.scraper_input.search_term
+            else ""
+        )
+        query = job_search_query.format(
+            what=(f'what: "{search_term}"' if search_term else ""),
+            location=(
+                f'location: {{where: "{self.scraper_input.location}", radius: {self.scraper_input.distance}, radiusUnit: MILES}}'
+                if self.scraper_input.location
+                else ""
+            ),
+            dateOnIndeed=self.scraper_input.hours_old,
+            cursor=f'cursor: "{cursor}"' if cursor else "",
+            filters=filters,
+        )
+        payload = {
+            "query": query,
+        }
+        api_headers_temp = api_headers.copy()
+        api_headers_temp["indeed-co"] = self.api_country_code
+        response = self.session.post(
+            self.api_url,
+            headers=api_headers_temp,
+            json=payload,
+            timeout=10,
+        )
+        if not response.ok:
+            logger.info(
+                f"responded with status code: {response.status_code} (submit GitHub issue if this appears to be a bug)"
+            )
+            return jobs, new_cursor
+        data = response.json()
+        jobs = data["data"]["jobSearch"]["results"]
+        new_cursor = data["data"]["jobSearch"]["pageInfo"]["nextCursor"]

-        response = session.get(formatted_url, allow_redirects=True)
+        job_list = []
+        for job in jobs:
+            processed_job = self._process_job(job["job"])
+            if processed_job:
+                job_list.append(processed_job)

-        if response.status_code not in range(200, 400):
-            return None
+        return job_list, new_cursor

-        raw_description = response.json()["body"]["jobInfoWrapperModel"][
-            "jobInfoModel"
-        ]["sanitizedJobDescription"]
-        soup = BeautifulSoup(raw_description, "html.parser")
-        text_content = " ".join(soup.get_text().split()).strip()
-        return text_content
+    def _build_filters(self):
+        """
+        Builds the filters dict for job type/is_remote. If hours_old is provided, composite filter for job_type/is_remote is not possible.
+        IndeedApply: filters: { keyword: { field: "indeedApplyScope", keys: ["DESKTOP"] } }
+        """
+        filters_str = ""
+        if self.scraper_input.hours_old:
+            filters_str = """
+            filters: {{
+                date: {{
+                  field: "dateOnIndeed",
+                  start: "{start}h"
+                }}
+            }}
+            """.format(
+                start=self.scraper_input.hours_old
+            )
+        elif self.scraper_input.easy_apply:
+            filters_str = """
+            filters: {
+                keyword: {
+                  field: "indeedApplyScope",
+                  keys: ["DESKTOP"]
+                }
+            }
+            """
+        elif self.scraper_input.job_type or self.scraper_input.is_remote:
+            job_type_key_mapping = {
+                JobType.FULL_TIME: "CF3CP",
+                JobType.PART_TIME: "75GKK",
+                JobType.CONTRACT: "NJXCK",
+                JobType.INTERNSHIP: "VDTG7",
+            }
+
+            keys = []
+            if self.scraper_input.job_type:
+                key = job_type_key_mapping[self.scraper_input.job_type]
+                keys.append(key)
+
+            if self.scraper_input.is_remote:
+                keys.append("DSQF7")
+
+            if keys:
+                keys_str = '", "'.join(keys)
+                filters_str = f"""
+                filters: {{
+                  composite: {{
+                    filters: [{{
+                      keyword: {{
+                        field: "attributes",
+                        keys: ["{keys_str}"]
+                      }}
+                    }}]
+                  }}
+                }}
+                """
+        return filters_str
+
+    def _process_job(self, job: dict) -> JobPost | None:
+        """
+        Parses the job dict into JobPost model
+        :param job: dict to parse
+        :return: JobPost if it's a new job
+        """
+        job_url = f'{self.base_url}/viewjob?jk={job["key"]}'
+        if job_url in self.seen_urls:
+            return
+        self.seen_urls.add(job_url)
+        description = job["description"]["html"]
+        if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
+            description = markdown_converter(description)
+
+        job_type = self._get_job_type(job["attributes"])
+        timestamp_seconds = job["datePublished"] / 1000
+        date_posted = datetime.fromtimestamp(timestamp_seconds).strftime("%Y-%m-%d")
+        employer = job["employer"].get("dossier") if job["employer"] else None
+        employer_details = employer.get("employerDetails", {}) if employer else {}
+        rel_url = job["employer"]["relativeCompanyPageUrl"] if job["employer"] else None
+        return JobPost(
+            id=f'in-{job["key"]}',
+            title=job["title"],
+            description=description,
+            company_name=job["employer"].get("name") if job.get("employer") else None,
+            company_url=(f"{self.base_url}{rel_url}" if job["employer"] else None),
+            company_url_direct=(
+                employer["links"]["corporateWebsite"] if employer else None
+            ),
+            location=Location(
+                city=job.get("location", {}).get("city"),
+                state=job.get("location", {}).get("admin1Code"),
+                country=job.get("location", {}).get("countryCode"),
+            ),
+            job_type=job_type,
+            compensation=self._get_compensation(job["compensation"]),
+            date_posted=date_posted,
+            job_url=job_url,
+            job_url_direct=(
+                job["recruit"].get("viewJobUrl") if job.get("recruit") else None
+            ),
+            emails=extract_emails_from_text(description) if description else None,
+            is_remote=self._is_job_remote(job, description),
+            company_addresses=(
+                employer_details["addresses"][0]
+                if employer_details.get("addresses")
+                else None
+            ),
+            company_industry=(
+                employer_details["industry"]
+                .replace("Iv1", "")
+                .replace("_", " ")
+                .title()
+                .strip()
+                if employer_details.get("industry")
+                else None
+            ),
+            company_num_employees=employer_details.get("employeesLocalizedLabel"),
+            company_revenue=employer_details.get("revenueLocalizedLabel"),
+            company_description=employer_details.get("briefDescription"),
+            logo_photo_url=(
+                employer["images"].get("squareLogoUrl")
+                if employer and employer.get("images")
+                else None
+            ),
+        )

     @staticmethod
-    def get_job_type(job: dict) -> Optional[JobType]:
+    def _get_job_type(attributes: list) -> list[JobType]:
         """
-        Parses the job to get JobTypeIndeed
+        Parses the attributes to get list of job types
+        :param attributes:
+        :return: list of JobType
+        """
+        job_types: list[JobType] = []
+        for attribute in attributes:
+            job_type_str = attribute["label"].replace("-", "").replace(" ", "").lower()
+            job_type = get_enum_from_job_type(job_type_str)
+            if job_type:
+                job_types.append(job_type)
+        return job_types
+
+    @staticmethod
+    def _get_compensation(compensation: dict) -> Compensation | None:
+        """
+        Parses the job to get compensation
         :param job:
-        :return:
+        :return: compensation object
         """
-        for taxonomy in job["taxonomyAttributes"]:
-            if taxonomy["label"] == "job-types":
-                if len(taxonomy["attributes"]) > 0:
-                    job_type_str = (
-                        taxonomy["attributes"][0]["label"]
-                        .replace("-", "_")
-                        .replace(" ", "_")
-                        .upper()
-                    )
-                    return JobType[job_type_str]
-        return None
+        if not compensation["baseSalary"] and not compensation["estimated"]:

-    @staticmethod
-    def parse_jobs(soup: BeautifulSoup) -> dict:
-        """
-        Parses the jobs from the soup object
-        :param soup:
-        :return: jobs
-        """
-
-        def find_mosaic_script() -> Optional[Tag]:
-            """
-            Finds jobcards script tag
-            :return: script_tag
-            """
-            script_tags = soup.find_all("script")
-
-            for tag in script_tags:
-                if (
-                    tag.string
-                    and "mosaic.providerData" in tag.string
-                    and "mosaic-provider-jobcards" in tag.string
|
|
||||||
):
|
|
||||||
return tag
|
|
||||||
return None
|
return None
|
||||||
|
comp = (
|
||||||
script_tag = find_mosaic_script()
|
compensation["baseSalary"]
|
||||||
|
if compensation["baseSalary"]
|
||||||
if script_tag:
|
else compensation["estimated"]["baseSalary"]
|
||||||
script_str = script_tag.string
|
)
|
||||||
pattern = r'window.mosaic.providerData\["mosaic-provider-jobcards"\]\s*=\s*({.*?});'
|
if not comp:
|
||||||
p = re.compile(pattern, re.DOTALL)
|
return None
|
||||||
m = p.search(script_str)
|
interval = IndeedScraper._get_compensation_interval(comp["unitOfWork"])
|
||||||
if m:
|
if not interval:
|
||||||
jobs = json.loads(m.group(1).strip())
|
return None
|
||||||
return jobs
|
min_range = comp["range"].get("min")
|
||||||
else:
|
max_range = comp["range"].get("max")
|
||||||
raise ParsingException("Could not find mosaic provider job cards data")
|
return Compensation(
|
||||||
else:
|
interval=interval,
|
||||||
raise ParsingException(
|
min_amount=int(min_range) if min_range is not None else None,
|
||||||
"Could not find a script tag containing mosaic provider data"
|
max_amount=int(max_range) if max_range is not None else None,
|
||||||
)
|
currency=(
|
||||||
|
compensation["estimated"]["currencyCode"]
|
||||||
|
if compensation["estimated"]
|
||||||
|
else compensation["currencyCode"]
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def total_jobs(soup: BeautifulSoup) -> int:
|
def _is_job_remote(job: dict, description: str) -> bool:
|
||||||
"""
|
"""
|
||||||
Parses the total jobs for that search from soup object
|
Searches the description, location, and attributes to check if job is remote
|
||||||
:param soup:
|
|
||||||
:return: total_num_jobs
|
|
||||||
"""
|
"""
|
||||||
script = soup.find("script", string=lambda t: "window._initialData" in t)
|
remote_keywords = ["remote", "work from home", "wfh"]
|
||||||
|
is_remote_in_attributes = any(
|
||||||
|
any(keyword in attr["label"].lower() for keyword in remote_keywords)
|
||||||
|
for attr in job["attributes"]
|
||||||
|
)
|
||||||
|
is_remote_in_description = any(
|
||||||
|
keyword in description.lower() for keyword in remote_keywords
|
||||||
|
)
|
||||||
|
is_remote_in_location = any(
|
||||||
|
keyword in job["location"]["formatted"]["long"].lower()
|
||||||
|
for keyword in remote_keywords
|
||||||
|
)
|
||||||
|
return (
|
||||||
|
is_remote_in_attributes or is_remote_in_description or is_remote_in_location
|
||||||
|
)
|
||||||
|
|
||||||
pattern = re.compile(r"window._initialData\s*=\s*({.*})\s*;", re.DOTALL)
|
@staticmethod
|
||||||
match = pattern.search(script.string)
|
def _get_compensation_interval(interval: str) -> CompensationInterval:
|
||||||
total_num_jobs = 0
|
interval_mapping = {
|
||||||
if match:
|
"DAY": "DAILY",
|
||||||
json_str = match.group(1)
|
"YEAR": "YEARLY",
|
||||||
data = json.loads(json_str)
|
"HOUR": "HOURLY",
|
||||||
total_num_jobs = int(data["searchTitleBarModel"]["totalNumResults"])
|
"WEEK": "WEEKLY",
|
||||||
return total_num_jobs
|
"MONTH": "MONTHLY",
|
||||||
|
}
|
||||||
|
mapped_interval = interval_mapping.get(interval.upper(), None)
|
||||||
|
if mapped_interval and mapped_interval in CompensationInterval.__members__:
|
||||||
|
return CompensationInterval[mapped_interval]
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unsupported interval: {interval}")
|
||||||
|
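For orientation, `_get_compensation` accepts the `compensation` block exactly as the GraphQL query in `constants.py` (below) returns it. A minimal sketch of the expected behavior, assuming the package from this repo is installed and importable as `jobspy`:

from jobspy.scrapers.indeed import IndeedScraper

# Payload shaped like the `compensation` block of the GraphQL query below:
# a direct baseSalary (or an estimated fallback) with unitOfWork and range.
payload = {
    "baseSalary": {"unitOfWork": "YEAR", "range": {"min": 90000, "max": 120000}},
    "estimated": None,
    "currencyCode": "USD",
}

comp = IndeedScraper._get_compensation(payload)
# -> Compensation(interval=YEARLY, min_amount=90000, max_amount=120000, currency="USD")

# Unmapped units raise instead of silently mis-mapping:
# IndeedScraper._get_compensation_interval("FORTNIGHT")  # ValueError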
109
src/jobspy/scrapers/indeed/constants.py
Normal file
@@ -0,0 +1,109 @@
job_search_query = """
query GetJobData {{
  jobSearch(
    {what}
    {location}
    limit: 100
    {cursor}
    sort: RELEVANCE
    {filters}
  ) {{
    pageInfo {{
      nextCursor
    }}
    results {{
      trackingKey
      job {{
        source {{
          name
        }}
        key
        title
        datePublished
        dateOnIndeed
        description {{
          html
        }}
        location {{
          countryName
          countryCode
          admin1Code
          city
          postalCode
          streetAddress
          formatted {{
            short
            long
          }}
        }}
        compensation {{
          estimated {{
            currencyCode
            baseSalary {{
              unitOfWork
              range {{
                ... on Range {{
                  min
                  max
                }}
              }}
            }}
          }}
          baseSalary {{
            unitOfWork
            range {{
              ... on Range {{
                min
                max
              }}
            }}
          }}
          currencyCode
        }}
        attributes {{
          key
          label
        }}
        employer {{
          relativeCompanyPageUrl
          name
          dossier {{
            employerDetails {{
              addresses
              industry
              employeesLocalizedLabel
              revenueLocalizedLabel
              briefDescription
              ceoName
              ceoPhotoUrl
            }}
            images {{
              headerImageUrl
              squareLogoUrl
            }}
            links {{
              corporateWebsite
            }}
          }}
        }}
        recruit {{
          viewJobUrl
          detailedSalary
          workSchedule
        }}
      }}
    }}
  }}
}}
"""

api_headers = {
    "Host": "apis.indeed.com",
    "content-type": "application/json",
    "indeed-api-key": "161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8",
    "accept": "application/json",
    "indeed-locale": "en-US",
    "accept-language": "en-US,en;q=0.9",
    "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1",
    "indeed-app-info": "appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone",
}
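The doubled braces in `job_search_query` exist so the template survives `str.format`: only `{what}`, `{location}`, `{cursor}`, and `{filters}` are substituted, while `{{`/`}}` collapse back to literal GraphQL braces. A hedged sketch of how a request could be assembled from these constants; the `https://apis.indeed.com/graphql` endpoint is an assumption inferred from the `Host` header above, and the filter fragments are hypothetical examples:

import requests

from jobspy.scrapers.indeed.constants import api_headers, job_search_query

# Only the four named placeholders are filled; {{ / }} become { / }.
query = job_search_query.format(
    what='what: "software engineer"',  # hypothetical search fragment
    location='location: {where: "Dallas, TX", radius: 35, radiusUnit: MILES}',
    cursor="",   # empty on the first page; nextCursor afterwards
    filters="",  # e.g. the output of _build_filters()
)

# Endpoint is an assumption based on the Host header in api_headers.
response = requests.post(
    "https://apis.indeed.com/graphql",
    headers=api_headers,
    json={"query": query},
)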
@@ -1,29 +1,71 @@
"""
jobspy.scrapers.linkedin
~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape LinkedIn.
"""

from __future__ import annotations

import math
import time
import random
import regex as re
from typing import Optional
from datetime import datetime

from bs4.element import Tag
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urlunparse, unquote

from .constants import headers
from .. import Scraper, ScraperInput, Site
from ..exceptions import LinkedInException
from ..utils import create_session, remove_attributes, create_logger
from ...jobs import (
    JobPost,
    Location,
    JobResponse,
    JobType,
    Country,
    Compensation,
    DescriptionFormat,
)
from ..utils import (
    extract_emails_from_text,
    get_enum_from_job_type,
    currency_parser,
    markdown_converter,
)

logger = create_logger("LinkedIn")


class LinkedInScraper(Scraper):
    base_url = "https://www.linkedin.com"
    delay = 3
    band_delay = 4
    jobs_per_page = 25

    def __init__(
        self, proxies: list[str] | str | None = None, ca_cert: str | None = None
    ):
        """
        Initializes LinkedInScraper with the LinkedIn job search url
        """
        super().__init__(Site.LINKEDIN, proxies=proxies, ca_cert=ca_cert)
        self.session = create_session(
            proxies=self.proxies,
            ca_cert=ca_cert,
            is_tls=False,
            has_retry=True,
            delay=5,
            clear_cookies=True,
        )
        self.session.headers.update(headers)
        self.scraper_input = None
        self.country = "worldwide"
        self.job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
@@ -31,181 +73,230 @@ class LinkedInScraper(Scraper):
        :param scraper_input:
        :return: job_response
        """
        self.scraper_input = scraper_input
        job_list: list[JobPost] = []
        seen_ids = set()
        start = scraper_input.offset // 10 * 10 if scraper_input.offset else 0
        request_count = 0
        seconds_old = (
            scraper_input.hours_old * 3600 if scraper_input.hours_old else None
        )
        continue_search = (
            lambda: len(job_list) < scraper_input.results_wanted and start < 1000
        )
        while continue_search():
            request_count += 1
            logger.info(
                f"search page: {request_count} / {math.ceil(scraper_input.results_wanted / 10)}"
            )
            params = {
                "keywords": scraper_input.search_term,
                "location": scraper_input.location,
                "distance": scraper_input.distance,
                "f_WT": 2 if scraper_input.is_remote else None,
                "f_JT": (
                    self.job_type_code(scraper_input.job_type)
                    if scraper_input.job_type
                    else None
                ),
                "pageNum": 0,
                "start": start,
                "f_AL": "true" if scraper_input.easy_apply else None,
                "f_C": (
                    ",".join(map(str, scraper_input.linkedin_company_ids))
                    if scraper_input.linkedin_company_ids
                    else None
                ),
            }
            if seconds_old is not None:
                params["f_TPR"] = f"r{seconds_old}"

            params = {k: v for k, v in params.items() if v is not None}
            try:
                response = self.session.get(
                    f"{self.base_url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
                    params=params,
                    timeout=10,
                )
                if response.status_code not in range(200, 400):
                    if response.status_code == 429:
                        err = (
                            f"429 Response - Blocked by LinkedIn for too many requests"
                        )
                    else:
                        err = f"LinkedIn response status code {response.status_code}"
                        err += f" - {response.text}"
                    logger.error(err)
                    return JobResponse(jobs=job_list)
            except Exception as e:
                if "Proxy responded with" in str(e):
                    logger.error(f"LinkedIn: Bad proxy")
                else:
                    logger.error(f"LinkedIn: {str(e)}")
                return JobResponse(jobs=job_list)

            soup = BeautifulSoup(response.text, "html.parser")
            job_cards = soup.find_all("div", class_="base-search-card")
            if len(job_cards) == 0:
                return JobResponse(jobs=job_list)

            for job_card in job_cards:
                href_tag = job_card.find("a", class_="base-card__full-link")
                if href_tag and "href" in href_tag.attrs:
                    href = href_tag.attrs["href"].split("?")[0]
                    job_id = href.split("-")[-1]

                    if job_id in seen_ids:
                        continue
                    seen_ids.add(job_id)

                    try:
                        fetch_desc = scraper_input.linkedin_fetch_description
                        job_post = self._process_job(job_card, job_id, fetch_desc)
                        if job_post:
                            job_list.append(job_post)
                        if not continue_search():
                            break
                    except Exception as e:
                        raise LinkedInException(str(e))

            if continue_search():
                time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
                start += len(job_list)

        job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list)

    def _process_job(
        self, job_card: Tag, job_id: str, full_descr: bool
    ) -> Optional[JobPost]:
        salary_tag = job_card.find("span", class_="job-search-card__salary-info")

        compensation = None
        if salary_tag:
            salary_text = salary_tag.get_text(separator=" ").strip()
            salary_values = [currency_parser(value) for value in salary_text.split("-")]
            salary_min = salary_values[0]
            salary_max = salary_values[1]
            currency = salary_text[0] if salary_text[0] != "$" else "USD"

            compensation = Compensation(
                min_amount=int(salary_min),
                max_amount=int(salary_max),
                currency=currency,
            )

        title_tag = job_card.find("span", class_="sr-only")
        title = title_tag.get_text(strip=True) if title_tag else "N/A"

        company_tag = job_card.find("h4", class_="base-search-card__subtitle")
        company_a_tag = company_tag.find("a") if company_tag else None
        company_url = (
            urlunparse(urlparse(company_a_tag.get("href"))._replace(query=""))
            if company_a_tag and company_a_tag.has_attr("href")
            else ""
        )
        company = company_a_tag.get_text(strip=True) if company_a_tag else "N/A"

        metadata_card = job_card.find("div", class_="base-search-card__metadata")
        location = self._get_location(metadata_card)

        datetime_tag = (
            metadata_card.find("time", class_="job-search-card__listdate")
            if metadata_card
            else None
        )
        date_posted = None
        if datetime_tag and "datetime" in datetime_tag.attrs:
            datetime_str = datetime_tag["datetime"]
            try:
                date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
            except:
                date_posted = None
        job_details = {}
        if full_descr:
            job_details = self._get_job_details(job_id)

        return JobPost(
            id=f"li-{job_id}",
            title=title,
            company_name=company,
            company_url=company_url,
            location=location,
            date_posted=date_posted,
            job_url=f"{self.base_url}/jobs/view/{job_id}",
            compensation=compensation,
            job_type=job_details.get("job_type"),
            job_level=job_details.get("job_level", "").lower(),
            company_industry=job_details.get("company_industry"),
            description=job_details.get("description"),
            job_url_direct=job_details.get("job_url_direct"),
            emails=extract_emails_from_text(job_details.get("description")),
            logo_photo_url=job_details.get("logo_photo_url"),
            job_function=job_details.get("job_function"),
        )

    def _get_job_details(self, job_id: str) -> dict:
        """
        Retrieves job description and other job details by going to the job page url
        :param job_id:
        :return: dict
        """
        try:
            response = self.session.get(
                f"{self.base_url}/jobs/view/{job_id}", timeout=5
            )
            response.raise_for_status()
        except:
            return {}
        if "linkedin.com/signup" in response.url:
            return {}

        soup = BeautifulSoup(response.text, "html.parser")
        div_content = soup.find(
            "div", class_=lambda x: x and "show-more-less-html__markup" in x
        )
        description = None
        if div_content is not None:
            div_content = remove_attributes(div_content)
            description = div_content.prettify(formatter="html")
            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
                description = markdown_converter(description)

        h3_tag = soup.find(
            "h3", text=lambda text: text and "Job function" in text.strip()
        )

        job_function = None
        if h3_tag:
            job_function_span = h3_tag.find_next(
                "span", class_="description__job-criteria-text"
            )
            if job_function_span:
                job_function = job_function_span.text.strip()

        logo_photo_url = (
            logo_image.get("data-delayed-url")
            if (logo_image := soup.find("img", {"class": "artdeco-entity-image"}))
            else None
        )
        return {
            "description": description,
            "job_level": self._parse_job_level(soup),
            "company_industry": self._parse_company_industry(soup),
            "job_type": self._parse_job_type(soup),
            "job_url_direct": self._parse_job_url_direct(soup),
            "logo_photo_url": logo_photo_url,
            "job_function": job_function,
        }

    def _get_location(self, metadata_card: Optional[Tag]) -> Location:
        """
        Extracts the location data from the job metadata card.
        :param metadata_card
        :return: location
        """
        location = Location(country=Country.from_string(self.country))
        if metadata_card is not None:
            location_tag = metadata_card.find(
                "span", class_="job-search-card__location"
            )
@@ -217,6 +308,108 @@ class LinkedInScraper(Scraper):
                    location = Location(
                        city=city,
                        state=state,
                        country=Country.from_string(self.country),
                    )
                elif len(parts) == 3:
                    city, state, country = parts
                    country = Country.from_string(country)
                    location = Location(city=city, state=state, country=country)
        return location

    @staticmethod
    def _parse_job_type(soup_job_type: BeautifulSoup) -> list[JobType] | None:
        """
        Gets the job type from job page
        :param soup_job_type:
        :return: JobType
        """
        h3_tag = soup_job_type.find(
            "h3",
            class_="description__job-criteria-subheader",
            string=lambda text: "Employment type" in text,
        )
        employment_type = None
        if h3_tag:
            employment_type_span = h3_tag.find_next_sibling(
                "span",
                class_="description__job-criteria-text description__job-criteria-text--criteria",
            )
            if employment_type_span:
                employment_type = employment_type_span.get_text(strip=True)
                employment_type = employment_type.lower()
                employment_type = employment_type.replace("-", "")

        return [get_enum_from_job_type(employment_type)] if employment_type else []

    @staticmethod
    def _parse_job_level(soup_job_level: BeautifulSoup) -> str | None:
        """
        Gets the job level from job page
        :param soup_job_level:
        :return: str
        """
        h3_tag = soup_job_level.find(
            "h3",
            class_="description__job-criteria-subheader",
            string=lambda text: "Seniority level" in text,
        )
        job_level = None
        if h3_tag:
            job_level_span = h3_tag.find_next_sibling(
                "span",
                class_="description__job-criteria-text description__job-criteria-text--criteria",
            )
            if job_level_span:
                job_level = job_level_span.get_text(strip=True)

        return job_level

    @staticmethod
    def _parse_company_industry(soup_industry: BeautifulSoup) -> str | None:
        """
        Gets the company industry from job page
        :param soup_industry:
        :return: str
        """
        h3_tag = soup_industry.find(
            "h3",
            class_="description__job-criteria-subheader",
            string=lambda text: "Industries" in text,
        )
        industry = None
        if h3_tag:
            industry_span = h3_tag.find_next_sibling(
                "span",
                class_="description__job-criteria-text description__job-criteria-text--criteria",
            )
            if industry_span:
                industry = industry_span.get_text(strip=True)

        return industry

    def _parse_job_url_direct(self, soup: BeautifulSoup) -> str | None:
        """
        Gets the job url direct from job page
        :param soup:
        :return: str
        """
        job_url_direct = None
        job_url_direct_content = soup.find("code", id="applyUrl")
        if job_url_direct_content:
            job_url_direct_match = self.job_url_direct_regex.search(
                job_url_direct_content.decode_contents().strip()
            )
            if job_url_direct_match:
                job_url_direct = unquote(job_url_direct_match.group())

        return job_url_direct

    @staticmethod
    def job_type_code(job_type_enum: JobType) -> str:
        return {
            JobType.FULL_TIME: "F",
            JobType.PART_TIME: "P",
            JobType.INTERNSHIP: "I",
            JobType.CONTRACT: "C",
            JobType.TEMPORARY: "T",
        }.get(job_type_enum, "")
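The guest search endpoint above is driven entirely by query parameters: `job_type_code` maps the `JobType` enum to LinkedIn's one-letter `f_JT` codes, and `hours_old` becomes an `f_TPR=r<seconds>` filter. A small sketch of those two conversions, assuming the package and its dependencies are installed:

from jobspy.jobs import JobType
from jobspy.scrapers.linkedin import LinkedInScraper

# One-letter employment-type codes sent as the f_JT query param
assert LinkedInScraper.job_type_code(JobType.FULL_TIME) == "F"
assert LinkedInScraper.job_type_code(JobType.CONTRACT) == "C"

# hours_old is converted to seconds and sent as f_TPR=r<seconds>
hours_old = 24
print(f"f_TPR=r{hours_old * 3600}")  # f_TPR=r86400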
8
src/jobspy/scrapers/linkedin/constants.py
Normal file
@@ -0,0 +1,8 @@
headers = {
    "authority": "www.linkedin.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "cache-control": "max-age=0",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
}
266
src/jobspy/scrapers/utils.py
Normal file
@@ -0,0 +1,266 @@
from __future__ import annotations

import re
import logging
from itertools import cycle

import requests
import tls_client
import numpy as np
from markdownify import markdownify as md
from requests.adapters import HTTPAdapter, Retry

from ..jobs import CompensationInterval, JobType


def create_logger(name: str):
    logger = logging.getLogger(f"JobSpy:{name}")
    logger.propagate = False
    if not logger.handlers:
        logger.setLevel(logging.INFO)
        console_handler = logging.StreamHandler()
        format = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
        formatter = logging.Formatter(format)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    return logger


class RotatingProxySession:
    def __init__(self, proxies=None):
        if isinstance(proxies, str):
            self.proxy_cycle = cycle([self.format_proxy(proxies)])
        elif isinstance(proxies, list):
            self.proxy_cycle = (
                cycle([self.format_proxy(proxy) for proxy in proxies])
                if proxies
                else None
            )
        else:
            self.proxy_cycle = None

    @staticmethod
    def format_proxy(proxy):
        """Utility method to format a proxy string into a dictionary."""
        if proxy.startswith("http://") or proxy.startswith("https://"):
            return {"http": proxy, "https": proxy}
        return {"http": f"http://{proxy}", "https": f"http://{proxy}"}


class RequestsRotating(RotatingProxySession, requests.Session):

    def __init__(self, proxies=None, has_retry=False, delay=1, clear_cookies=False):
        RotatingProxySession.__init__(self, proxies=proxies)
        requests.Session.__init__(self)
        self.clear_cookies = clear_cookies
        self.allow_redirects = True
        self.setup_session(has_retry, delay)

    def setup_session(self, has_retry, delay):
        if has_retry:
            retries = Retry(
                total=3,
                connect=3,
                status=3,
                status_forcelist=[500, 502, 503, 504, 429],
                backoff_factor=delay,
            )
            adapter = HTTPAdapter(max_retries=retries)
            self.mount("http://", adapter)
            self.mount("https://", adapter)

    def request(self, method, url, **kwargs):
        if self.clear_cookies:
            self.cookies.clear()

        if self.proxy_cycle:
            next_proxy = next(self.proxy_cycle)
            if next_proxy["http"] != "http://localhost":
                self.proxies = next_proxy
            else:
                self.proxies = {}
        return requests.Session.request(self, method, url, **kwargs)


class TLSRotating(RotatingProxySession, tls_client.Session):

    def __init__(self, proxies=None):
        RotatingProxySession.__init__(self, proxies=proxies)
        tls_client.Session.__init__(self, random_tls_extension_order=True)

    def execute_request(self, *args, **kwargs):
        if self.proxy_cycle:
            next_proxy = next(self.proxy_cycle)
            if next_proxy["http"] != "http://localhost":
                self.proxies = next_proxy
            else:
                self.proxies = {}
        response = tls_client.Session.execute_request(self, *args, **kwargs)
        response.ok = response.status_code in range(200, 400)
        return response


def create_session(
    *,
    proxies: dict | str | None = None,
    ca_cert: str | None = None,
    is_tls: bool = True,
    has_retry: bool = False,
    delay: int = 1,
    clear_cookies: bool = False,
) -> requests.Session:
    """
    Creates a requests session with optional tls, proxy, and retry settings.
    :return: A session object
    """
    if is_tls:
        session = TLSRotating(proxies=proxies)
    else:
        session = RequestsRotating(
            proxies=proxies,
            has_retry=has_retry,
            delay=delay,
            clear_cookies=clear_cookies,
        )

    if ca_cert:
        session.verify = ca_cert

    return session


def set_logger_level(verbose: int = 2):
    """
    Adjusts the logger's level. This function allows the logging level to be changed at runtime.

    Parameters:
    - verbose: int {0, 1, 2} (default=2, all logs)
    """
    if verbose is None:
        return
    level_name = {2: "INFO", 1: "WARNING", 0: "ERROR"}.get(verbose, "INFO")
    level = getattr(logging, level_name.upper(), None)
    if level is not None:
        for logger_name in logging.root.manager.loggerDict:
            if logger_name.startswith("JobSpy:"):
                logging.getLogger(logger_name).setLevel(level)
    else:
        raise ValueError(f"Invalid log level: {level_name}")


def markdown_converter(description_html: str):
    if description_html is None:
        return None
    markdown = md(description_html)
    return markdown.strip()


def extract_emails_from_text(text: str) -> list[str] | None:
    if not text:
        return None
    email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
    return email_regex.findall(text)


def get_enum_from_job_type(job_type_str: str) -> JobType | None:
    """
    Given a string, returns the corresponding JobType enum member if a match is found.
    """
    res = None
    for job_type in JobType:
        if job_type_str in job_type.value:
            res = job_type
    return res


def currency_parser(cur_str):
    # Remove any non-numerical characters
    # except for ',' '.' or '-' (e.g. EUR)
    cur_str = re.sub("[^-0-9.,]", "", cur_str)
    # Remove any 000s separators (either , or .)
    cur_str = re.sub("[.,]", "", cur_str[:-3]) + cur_str[-3:]

    if "." in list(cur_str[-3:]):
        num = float(cur_str)
    elif "," in list(cur_str[-3:]):
        num = float(cur_str.replace(",", "."))
    else:
        num = float(cur_str)

    return np.round(num, 2)


def remove_attributes(tag):
    for attr in list(tag.attrs):
        del tag[attr]
    return tag


def extract_salary(
    salary_str,
    lower_limit=1000,
    upper_limit=700000,
    hourly_threshold=350,
    monthly_threshold=30000,
    enforce_annual_salary=False,
):
    """
    Extracts salary information from a string and returns the salary interval, min and max salary values, and currency.
    (TODO: Needs test cases as the regex is complicated and may not cover all edge cases)
    """
    if not salary_str:
        return None, None, None, None

    annual_max_salary = None
    min_max_pattern = r"\$(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)\s*[-—–]\s*(?:\$)?(\d+(?:,\d+)?(?:\.\d+)?)([kK]?)"

    def to_int(s):
        return int(float(s.replace(",", "")))

    def convert_hourly_to_annual(hourly_wage):
        return hourly_wage * 2080

    def convert_monthly_to_annual(monthly_wage):
        return monthly_wage * 12

    match = re.search(min_max_pattern, salary_str)

    if match:
        min_salary = to_int(match.group(1))
        max_salary = to_int(match.group(3))
        # Handle 'k' suffix for min and max salaries independently
        if "k" in match.group(2).lower() or "k" in match.group(4).lower():
            min_salary *= 1000
            max_salary *= 1000

        # Convert to annual if less than the hourly threshold
        if min_salary < hourly_threshold:
            interval = CompensationInterval.HOURLY.value
            annual_min_salary = convert_hourly_to_annual(min_salary)
            if max_salary < hourly_threshold:
                annual_max_salary = convert_hourly_to_annual(max_salary)

        elif min_salary < monthly_threshold:
            interval = CompensationInterval.MONTHLY.value
            annual_min_salary = convert_monthly_to_annual(min_salary)
            if max_salary < monthly_threshold:
                annual_max_salary = convert_monthly_to_annual(max_salary)

        else:
            interval = CompensationInterval.YEARLY.value
            annual_min_salary = min_salary
            annual_max_salary = max_salary

        # Ensure salary range is within specified limits
        if not annual_max_salary:
            return None, None, None, None
        if (
            lower_limit <= annual_min_salary <= upper_limit
            and lower_limit <= annual_max_salary <= upper_limit
            and annual_min_salary < annual_max_salary
        ):
            if enforce_annual_salary:
                return interval, annual_min_salary, annual_max_salary, "USD"
            else:
                return interval, min_salary, max_salary, "USD"
    return None, None, None, None
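Two of these helpers are easy to misread, so here is a short sketch of their observable behavior, assuming `jobspy.scrapers.utils` is importable: `currency_parser` strips thousands separators before deciding whether the last group is a decimal part, and `extract_salary` annualizes hourly/monthly figures (x2080 / x12) only for the bounds check unless `enforce_annual_salary` is set.

from jobspy.scrapers.utils import currency_parser, extract_salary

# Thousands separators are removed; a trailing ',dd' or '.dd' is the decimal part.
print(currency_parser("$1,500.50"))   # 1500.5
print(currency_parser("1.500,50 €"))  # 1500.5 (European formatting)

# '$60k - $80k' parses as a yearly range.
print(extract_salary("$60k - $80k a year"))
# ('yearly', 60000, 80000, 'USD')

# Hourly figures pass the limits check after annualization, but the raw
# values are returned since enforce_annual_salary defaults to False.
print(extract_salary("$30 - $40 per hour"))
# ('hourly', 30, 40, 'USD')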
@@ -1,415 +1,247 @@
|
|||||||
import math
|
"""
|
||||||
|
jobspy.scrapers.ziprecruiter
|
||||||
|
~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
This module contains routines to scrape ZipRecruiter.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
import json
|
import json
|
||||||
|
import math
|
||||||
import re
|
import re
|
||||||
|
import time
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from typing import Optional, Tuple
|
from typing import Optional, Tuple, Any
|
||||||
from urllib.parse import urlparse, parse_qs
|
|
||||||
|
from concurrent.futures import ThreadPoolExecutor
|
||||||
|
|
||||||
import tls_client
|
|
||||||
from bs4 import BeautifulSoup
|
from bs4 import BeautifulSoup
|
||||||
from bs4.element import Tag
|
|
||||||
from concurrent.futures import ThreadPoolExecutor, Future
|
|
||||||
|
|
||||||
from .. import Scraper, ScraperInput, Site, StatusException
|
from .constants import headers
|
||||||
|
from .. import Scraper, ScraperInput, Site
|
||||||
|
from ..utils import (
|
||||||
|
extract_emails_from_text,
|
||||||
|
create_session,
|
||||||
|
markdown_converter,
|
||||||
|
remove_attributes,
|
||||||
|
create_logger,
|
||||||
|
)
|
||||||
from ...jobs import (
|
from ...jobs import (
|
||||||
JobPost,
|
JobPost,
|
||||||
Compensation,
|
Compensation,
|
||||||
CompensationInterval,
|
|
||||||
Location,
|
Location,
|
||||||
JobResponse,
|
JobResponse,
|
||||||
JobType,
|
JobType,
|
||||||
|
Country,
|
||||||
|
DescriptionFormat,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
logger = create_logger("ZipRecruiter")
|
||||||
|
|
||||||
|
|
||||||
class ZipRecruiterScraper(Scraper):
|
class ZipRecruiterScraper(Scraper):
|
||||||
def __init__(self):
|
base_url = "https://www.ziprecruiter.com"
|
||||||
"""
|
api_url = "https://api.ziprecruiter.com"
|
||||||
Initializes LinkedInScraper with the ZipRecruiter job search url
|
|
||||||
"""
|
|
||||||
site = Site(Site.ZIP_RECRUITER)
|
|
||||||
url = "https://www.ziprecruiter.com"
|
|
||||||
super().__init__(site, url)
|
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self, proxies: list[str] | str | None = None, ca_cert: str | None = None
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Initializes ZipRecruiterScraper with the ZipRecruiter job search url
|
||||||
|
"""
|
||||||
|
super().__init__(Site.ZIP_RECRUITER, proxies=proxies)
|
||||||
|
|
||||||
|
self.scraper_input = None
|
||||||
|
self.session = create_session(proxies=proxies, ca_cert=ca_cert)
|
||||||
|
self.session.headers.update(headers)
|
||||||
|
self._get_cookies()
|
||||||
|
|
||||||
|
self.delay = 5
|
||||||
self.jobs_per_page = 20
|
self.jobs_per_page = 20
|
||||||
self.seen_urls = set()
|
self.seen_urls = set()
|
||||||
self.session = tls_client.Session(
|
|
||||||
client_identifier="chrome112", random_tls_extension_order=True
|
|
||||||
)
|
|
||||||
|
|
||||||
def scrape_page(
|
|
||||||
self, scraper_input: ScraperInput, page: int
|
|
||||||
) -> tuple[list[JobPost], int | None]:
|
|
||||||
"""
|
|
||||||
Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
|
|
||||||
:param scraper_input:
|
|
||||||
:param page:
|
|
||||||
:param session:
|
|
||||||
:return: jobs found on page, total number of jobs found for search
|
|
||||||
"""
|
|
||||||
|
|
||||||
job_list = []
|
|
||||||
|
|
||||||
job_type_value = None
|
|
||||||
if scraper_input.job_type:
|
|
||||||
if scraper_input.job_type.value == "fulltime":
|
|
||||||
job_type_value = "full_time"
|
|
||||||
elif scraper_input.job_type.value == "parttime":
|
|
||||||
job_type_value = "part_time"
|
|
||||||
else:
|
|
||||||
job_type_value = scraper_input.job_type.value
|
|
||||||
|
|
||||||
params = {
|
|
||||||
"search": scraper_input.search_term,
|
|
||||||
"location": scraper_input.location,
|
|
||||||
"page": page,
|
|
||||||
"form": "jobs-landing",
|
|
||||||
}
|
|
||||||
|
|
||||||
if scraper_input.is_remote:
|
|
||||||
params["refine_by_location_type"] = "only_remote"
|
|
||||||
|
|
||||||
if scraper_input.distance:
|
|
||||||
params["radius"] = scraper_input.distance
|
|
||||||
|
|
||||||
if job_type_value:
|
|
||||||
params[
|
|
||||||
"refine_by_employment"
|
|
||||||
] = f"employment_type:employment_type:{job_type_value}"
|
|
||||||
|
|
||||||
response = self.session.get(
|
|
||||||
self.url + "/jobs-search",
|
|
||||||
headers=ZipRecruiterScraper.headers(),
|
|
||||||
params=params,
|
|
||||||
)
|
|
||||||
|
|
||||||
if response.status_code != 200:
|
|
||||||
raise StatusException(response.status_code)
|
|
||||||
|
|
||||||
html_string = response.text
|
|
||||||
soup = BeautifulSoup(html_string, "html.parser")
|
|
||||||
|
|
||||||
script_tag = soup.find("script", {"id": "js_variables"})
|
|
||||||
data = json.loads(script_tag.string)
|
|
||||||
|
|
||||||
if page == 1:
|
|
||||||
job_count = int(data["totalJobCount"].replace(",", ""))
|
|
||||||
else:
|
|
||||||
job_count = None
|
|
||||||
|
|
||||||
with ThreadPoolExecutor(max_workers=10) as executor:
|
|
||||||
if "jobList" in data and data["jobList"]:
|
|
||||||
jobs_js = data["jobList"]
|
|
||||||
job_results = [
|
|
||||||
executor.submit(self.process_job_js, job) for job in jobs_js
|
|
||||||
]
|
|
||||||
else:
|
|
||||||
jobs_html = soup.find_all("div", {"class": "job_content"})
|
|
||||||
job_results = [
|
|
||||||
executor.submit(self.process_job_html, job) for job in jobs_html
|
|
||||||
]
|
|
||||||
|
|
||||||
job_list = [result.result() for result in job_results if result.result()]
|
|
||||||
|
|
||||||
return job_list, job_count
|
|
||||||
|
|
||||||
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
|
||||||
"""
|
"""
|
||||||
Scrapes ZipRecruiter for jobs with scraper_input criteria
|
Scrapes ZipRecruiter for jobs with scraper_input criteria.
|
||||||
:param scraper_input:
|
:param scraper_input: Information about job search criteria.
|
||||||
:return: job_response
|
:return: JobResponse containing a list of jobs.
|
||||||
"""
|
"""
|
||||||
|
self.scraper_input = scraper_input
|
||||||
|
job_list: list[JobPost] = []
|
||||||
|
continue_token = None
|
||||||
|
|
||||||
pages_to_process = max(
|
max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)
|
||||||
3, math.ceil(scraper_input.results_wanted / self.jobs_per_page)
|
for page in range(1, max_pages + 1):
|
||||||
)
|
if len(job_list) >= scraper_input.results_wanted:
|
||||||
|
break
|
||||||
try:
|
if page > 1:
|
||||||
#: get first page to initialize session
|
time.sleep(self.delay)
|
||||||
job_list, total_results = self.scrape_page(scraper_input, 1)
|
logger.info(f"search page: {page} / {max_pages}")
|
||||||
|
jobs_on_page, continue_token = self._find_jobs_in_page(
|
||||||
with ThreadPoolExecutor(max_workers=10) as executor:
|
scraper_input, continue_token
|
||||||
futures: list[Future] = [
|
|
||||||
executor.submit(self.scrape_page, scraper_input, page)
|
|
||||||
for page in range(2, pages_to_process + 1)
|
|
||||||
]
|
|
||||||
|
|
||||||
for future in futures:
|
|
||||||
jobs, _ = future.result()
|
|
||||||
|
|
||||||
job_list += jobs
|
|
||||||
|
|
||||||
except StatusException as e:
|
|
||||||
return JobResponse(
|
|
||||||
success=False,
|
|
||||||
error=f"ZipRecruiter returned status code {e.status_code}",
|
|
||||||
)
|
)
|
||||||
except Exception as e:
|
if jobs_on_page:
|
||||||
return JobResponse(
|
job_list.extend(jobs_on_page)
|
||||||
success=False,
|
|
||||||
error=f"ZipRecruiter failed to scrape: {e}",
|
|
||||||
)
|
|
||||||
|
|
||||||
#: note: this does not handle if the results are more or less than the results_wanted
|
|
||||||
|
|
||||||
if len(job_list) > scraper_input.results_wanted:
|
|
||||||
job_list = job_list[: scraper_input.results_wanted]
|
|
||||||
|
|
||||||
job_response = JobResponse(
|
|
||||||
success=True,
|
|
||||||
jobs=job_list,
|
|
||||||
total_results=total_results,
|
|
||||||
)
|
|
||||||
return job_response
|
|
||||||
|
|
||||||
def process_job_html(self, job: Tag) -> Optional[JobPost]:
|
|
||||||
"""
|
|
||||||
Parses a job from the job content tag
|
|
||||||
:param job: BeautifulSoup Tag for one job post
|
|
||||||
:return JobPost
|
|
||||||
"""
|
|
||||||
job_url = job.find("a", {"class": "job_link"})["href"]
|
|
||||||
if job_url in self.seen_urls:
|
|
||||||
return None
|
|
||||||
|
|
||||||
title = job.find("h2", {"class": "title"}).text
|
|
||||||
company = job.find("a", {"class": "company_name"}).text.strip()
|
|
||||||
|
|
||||||
description, updated_job_url = self.get_description(job_url)
|
|
||||||
if updated_job_url is not None:
|
|
||||||
job_url = updated_job_url
|
|
||||||
if description is None:
|
|
||||||
description = job.find("p", {"class": "job_snippet"}).text.strip()
|
|
||||||
|
|
||||||
job_type_element = job.find("li", {"class": "perk_item perk_type"})
|
|
||||||
if job_type_element:
|
|
||||||
job_type_text = (
|
|
||||||
job_type_element.text.strip().lower().replace("-", "").replace(" ", "")
|
|
||||||
)
|
|
||||||
if job_type_text == "contractor":
|
|
||||||
job_type_text = "contract"
|
|
||||||
job_type = JobType(job_type_text)
|
|
||||||
else:
|
|
||||||
job_type = None
|
|
||||||
|
|
||||||
date_posted = ZipRecruiterScraper.get_date_posted(job)
|
|
||||||
|
|
||||||
job_post = JobPost(
|
|
||||||
title=title,
|
|
||||||
description=description,
|
|
||||||
company_name=company,
|
|
||||||
location=ZipRecruiterScraper.get_location(job),
|
|
||||||
job_type=job_type,
|
|
||||||
compensation=ZipRecruiterScraper.get_compensation(job),
|
|
||||||
date_posted=date_posted,
|
|
||||||
job_url=job_url,
|
|
||||||
)
|
|
||||||
return job_post
|
|
||||||
|
|
||||||
def process_job_js(self, job: dict) -> JobPost:
|
|
||||||
# Map the job data to the expected fields by the Pydantic model
|
|
||||||
title = job.get("Title")
|
|
||||||
description = BeautifulSoup(
|
|
||||||
job.get("Snippet", "").strip(), "html.parser"
|
|
||||||
).get_text()
|
|
||||||
|
|
||||||
company = job.get("OrgName")
|
|
||||||
location = Location(city=job.get("City"), state=job.get("State"))
|
|
||||||
try:
|
|
||||||
job_type = ZipRecruiterScraper.job_type_from_string(
|
|
||||||
job.get("EmploymentType", "").replace("-", "_").lower()
|
|
||||||
)
|
|
||||||
except ValueError:
|
|
||||||
# print(f"Skipping job due to unrecognized job type: {job.get('EmploymentType')}")
|
|
||||||
return None
|
|
||||||
|
|
||||||
formatted_salary = job.get("FormattedSalaryShort", "")
|
|
||||||
salary_parts = formatted_salary.split(" ")
|
|
||||||
|
|
||||||
min_salary_str = salary_parts[0][1:].replace(",", "")
|
|
||||||
if "." in min_salary_str:
|
|
||||||
min_amount = int(float(min_salary_str) * 1000)
|
|
||||||
else:
|
|
||||||
min_amount = int(min_salary_str.replace("K", "000"))
|
|
||||||
|
|
||||||
if len(salary_parts) >= 3 and salary_parts[2].startswith("$"):
|
|
||||||
max_salary_str = salary_parts[2][1:].replace(",", "")
|
|
||||||
if "." in max_salary_str:
|
|
||||||
max_amount = int(float(max_salary_str) * 1000)
|
|
||||||
else:
|
else:
|
||||||
max_amount = int(max_salary_str.replace("K", "000"))
|
break
|
||||||
else:
|
if not continue_token:
|
||||||
max_amount = 0
|
break
|
||||||
|
return JobResponse(jobs=job_list[: scraper_input.results_wanted])
|
||||||
|
|
||||||
compensation = Compensation(
|
def _find_jobs_in_page(
|
||||||
interval=CompensationInterval.YEARLY,
|
self, scraper_input: ScraperInput, continue_token: str | None = None
|
||||||
min_amount=min_amount,
|
) -> Tuple[list[JobPost], Optional[str]]:
|
||||||
max_amount=max_amount,
|
"""
|
||||||
|
Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
|
||||||
|
:param scraper_input:
|
||||||
|
:param continue_token:
|
||||||
|
:return: jobs found on page
|
||||||
|
"""
|
||||||
|
jobs_list = []
|
||||||
|
params = self._add_params(scraper_input)
|
||||||
|
if continue_token:
|
||||||
|
params["continue_from"] = continue_token
|
||||||
|
try:
|
||||||
|
res = self.session.get(f"{self.api_url}/jobs-app/jobs", params=params)
|
||||||
|
if res.status_code not in range(200, 400):
|
||||||
|
if res.status_code == 429:
|
||||||
|
err = "429 Response - Blocked by ZipRecruiter for too many requests"
|
||||||
|
else:
|
||||||
|
err = f"ZipRecruiter response status code {res.status_code}"
|
||||||
|
err += f" with response: {res.text}" # ZipRecruiter likely not available in EU
|
||||||
|
logger.error(err)
|
||||||
|
return jobs_list, ""
|
||||||
|
except Exception as e:
|
||||||
|
if "Proxy responded with" in str(e):
|
||||||
|
logger.error(f"Indeed: Bad proxy")
|
||||||
|
else:
|
||||||
|
logger.error(f"Indeed: {str(e)}")
|
||||||
|
return jobs_list, ""
|
||||||
|
|
||||||
|
res_data = res.json()
|
||||||
|
jobs_list = res_data.get("jobs", [])
|
||||||
|
next_continue_token = res_data.get("continue", None)
|
||||||
|
with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
|
||||||
|
job_results = [executor.submit(self._process_job, job) for job in jobs_list]
|
||||||
|
|
||||||
|
job_list = list(filter(None, (result.result() for result in job_results)))
|
||||||
|
return job_list, next_continue_token
    def _process_job(self, job: dict) -> JobPost | None:
        """
        Processes an individual job dict from the response
        """
        title = job.get("name")
        job_url = f"{self.base_url}/jobs//j?lvk={job['listing_key']}"
        if job_url in self.seen_urls:
            return
        self.seen_urls.add(job_url)

        description = job.get("job_description", "").strip()
        listing_type = job.get("buyer_type", "")
        description = (
            markdown_converter(description)
            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN
            else description
        )
        company = job.get("hiring_company", {}).get("name")
        country_value = "usa" if job.get("job_country") == "US" else "canada"
        country_enum = Country.from_string(country_value)

        location = Location(
            city=job.get("job_city"), state=job.get("job_state"), country=country_enum
        )
        job_type = self._get_job_type_enum(
            job.get("employment_type", "").replace("_", "").lower()
        )
        date_posted = datetime.fromisoformat(job["posted_time"].rstrip("Z")).date()
        comp_interval = job.get("compensation_interval")
        comp_interval = "yearly" if comp_interval == "annual" else comp_interval
        comp_min = int(job["compensation_min"]) if "compensation_min" in job else None
        comp_max = int(job["compensation_max"]) if "compensation_max" in job else None
        comp_currency = job.get("compensation_currency")
        description_full, job_url_direct = self._get_descr(job_url)

        return JobPost(
            id=f'zr-{job["listing_key"]}',
            title=title,
            company_name=company,
            location=location,
            job_type=job_type,
            compensation=Compensation(
                interval=comp_interval,
                min_amount=comp_min,
                max_amount=comp_max,
                currency=comp_currency,
            ),
            date_posted=date_posted,
            job_url=job_url,
            description=description_full if description_full else description,
            emails=extract_emails_from_text(description) if description else None,
            job_url_direct=job_url_direct,
            listing_type=listing_type,
        )

Removed by this change (the posted time used to be parsed out of the save-job URL):

-        save_job_url = job.get("SaveJobURL", "")
-        posted_time_match = re.search(
-            r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url
-        )
-        if posted_time_match:
-            date_time_str = posted_time_match.group(1)
-            date_posted_obj = datetime.strptime(date_time_str, "%Y-%m-%dT%H:%M:%SZ")
-            date_posted = date_posted_obj.date()
-        else:
-            date_posted = date.today()
-        job_url = job.get("JobURL")
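For orientation, `_process_job` reads a jobs-app payload shaped roughly like the sketch below. Every value is invented for illustration; only the key names come from the code above, not from a real API response:

    # Illustrative payload only -- values are made up, keys mirror the
    # fields _process_job reads.
    sample_job = {
        "name": "Software Engineer",
        "listing_key": "lk-123abc",
        "job_description": "<p>Build and ship things.</p>",
        "buyer_type": "job_board",
        "hiring_company": {"name": "Acme Corp"},
        "job_country": "US",
        "job_city": "Austin",
        "job_state": "TX",
        "employment_type": "full_time",
        "posted_time": "2024-01-12T18:04:42Z",
        "compensation_interval": "annual",  # normalized to "yearly" above
        "compensation_min": 90000,
        "compensation_max": 120000,
        "compensation_currency": "USD",
    }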
    def _get_descr(self, job_url):
        res = self.session.get(job_url, allow_redirects=True)
        description_full = job_url_direct = None
        if res.ok:
            soup = BeautifulSoup(res.text, "html.parser")
            job_descr_div = soup.find("div", class_="job_description")
            company_descr_section = soup.find("section", class_="company_description")
            job_description_clean = (
                remove_attributes(job_descr_div).prettify(formatter="html")
                if job_descr_div
                else ""
            )
            company_description_clean = (
                remove_attributes(company_descr_section).prettify(formatter="html")
                if company_descr_section
                else ""
            )
            description_full = job_description_clean + company_description_clean
            script_tag = soup.find("script", type="application/json")
            if script_tag:
                job_json = json.loads(script_tag.string)
                job_url_val = job_json["model"].get("saveJobURL", "")
                m = re.search(r"job_url=(.+)", job_url_val)
                if m:
                    job_url_direct = m.group(1)

            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
                description_full = markdown_converter(description_full)

        return description_full, job_url_direct

    def _get_cookies(self):
        data = "event_type=session&logged_in=false&number_of_retry=1&property=model%3AiPhone&property=os%3AiOS&property=locale%3Aen_us&property=app_build_number%3A4734&property=app_version%3A91.0&property=manufacturer%3AApple&property=timestamp%3A2024-01-12T12%3A04%3A42-06%3A00&property=screen_height%3A852&property=os_version%3A16.6.1&property=source%3Ainstall&property=screen_width%3A393&property=device_model%3AiPhone%2014%20Pro&property=brand%3AApple"
        url = f"{self.api_url}/jobs-app/event"
        self.session.post(url, data=data)
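To make the `saveJobURL` parsing in `_get_descr` concrete, here is a toy value run through the same regex. The URL is fabricated; only the `job_url=` query-parameter structure is assumed:

    import re

    # Fabricated saveJobURL value. The regex captures everything after
    # "job_url=", which works when job_url is the last query parameter.
    sample = "/jobs-app/save?posted_time=2024-01-12T18:04:42Z&job_url=https://example.com/apply/123"
    m = re.search(r"job_url=(.+)", sample)
    print(m.group(1) if m else None)  # -> https://example.com/apply/123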
    @staticmethod
    def _get_job_type_enum(job_type_str: str) -> list[JobType] | None:
        for job_type in JobType:
            if job_type_str in job_type.value:
                return [job_type]

Removed counterpart:

-    @staticmethod
-    def job_type_from_string(value: str) -> Optional[JobType]:
-        if not value:
-            return None
-        if value.lower() == "contractor":
-            value = "contract"
-        normalized_value = value.replace("_", "")
-        for item in JobType:
-            if item.value == normalized_value:
-                return item
-        raise ValueError(f"Invalid value for JobType: {value}")
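Unlike the old exact-match lookup, the new matcher tests membership against each `JobType` value. A hedged sketch of the difference; the alias tuple below stands in for `JobType.FULL_TIME.value`, whose exact contents are an assumption here:

    # Sketch: if a JobType's value is a collection of normalized aliases
    # (assumed), then "full_time" -- after the .replace("_", "").lower()
    # applied in _process_job -- matches via `in`:
    assumed_full_time_aliases = ("fulltime", "full time")  # illustrative only
    job_type_str = "full_time".replace("_", "").lower()    # -> "fulltime"
    print(job_type_str in assumed_full_time_aliases)       # True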
    @staticmethod
    def _add_params(scraper_input) -> dict[str, str | Any]:
        params = {
            "search": scraper_input.search_term,
            "location": scraper_input.location,
        }
        if scraper_input.hours_old:
            params["days"] = max(scraper_input.hours_old // 24, 1)
        job_type_map = {JobType.FULL_TIME: "full_time", JobType.PART_TIME: "part_time"}
        if scraper_input.job_type:
            job_type = scraper_input.job_type
            params["employment_type"] = job_type_map.get(job_type, job_type.value[0])
        if scraper_input.easy_apply:
            params["zipapply"] = 1
        if scraper_input.is_remote:
            params["remote"] = 1
        if scraper_input.distance:
            params["radius"] = scraper_input.distance
        return {k: v for k, v in params.items() if v is not None}

Removed methods (the old HTML-scraping helpers on the left side of the diff):

-    def get_description(self, job_page_url: str) -> Tuple[Optional[str], Optional[str]]:
-        """
-        Retrieves job description by going to the job page url
-        :param job_page_url:
-        :param session:
-        :return: description or None, response url
-        """
-        response = self.session.get(
-            job_page_url, headers=ZipRecruiterScraper.headers(), allow_redirects=True
-        )
-        if response.status_code not in range(200, 400):
-            return None, None
-
-        html_string = response.content
-        soup_job = BeautifulSoup(html_string, "html.parser")
-
-        job_description_div = soup_job.find("div", {"class": "job_description"})
-        if job_description_div:
-            return job_description_div.text.strip(), response.url
-        return None, response.url
-
-    @staticmethod
-    def get_interval(interval_str: str):
-        """
-        Maps the interval alias to its appropriate CompensationInterval.
-        :param interval_str
-        :return: CompensationInterval
-        """
-        interval_alias = {"annually": CompensationInterval.YEARLY}
-        interval_str = interval_str.lower()
-
-        if interval_str in interval_alias:
-            return interval_alias[interval_str]
-
-        return CompensationInterval(interval_str)
-
-    @staticmethod
-    def get_date_posted(job: BeautifulSoup) -> Optional[datetime.date]:
-        """
-        Extracts the date a job was posted
-        :param job
-        :return: date the job was posted or None
-        """
-        button = job.find(
-            "button", {"class": "action_input save_job zrs_btn_secondary_200"}
-        )
-        if not button:
-            return None
-
-        url_time = button.get("data-href", "")
-        url_components = urlparse(url_time)
-        params = parse_qs(url_components.query)
-        posted_time_str = params.get("posted_time", [None])[0]
-
-        if posted_time_str:
-            posted_date = datetime.strptime(
-                posted_time_str, "%Y-%m-%dT%H:%M:%SZ"
-            ).date()
-            return posted_date
-
-        return None
-
-    @staticmethod
-    def get_compensation(job: BeautifulSoup) -> Optional[Compensation]:
-        """
-        Parses the compensation tag from the job BeautifulSoup object
-        :param job
-        :return: Compensation object or None
-        """
-        pay_element = job.find("li", {"class": "perk_item perk_pay"})
-        if pay_element is None:
-            return None
-        pay = pay_element.find("div", {"class": "value"}).find("span").text.strip()
-
-        def create_compensation_object(pay_string: str) -> Compensation:
-            """
-            Creates a Compensation object from a pay_string
-            :param pay_string
-            :return: compensation
-            """
-            interval = ZipRecruiterScraper.get_interval(pay_string.split()[-1])
-
-            amounts = []
-            for amount in pay_string.split("to"):
-                amount = amount.replace(",", "").strip("$ ").split(" ")[0]
-                if "K" in amount:
-                    amount = amount.replace("K", "")
-                    amount = int(float(amount)) * 1000
-                else:
-                    amount = int(float(amount))
-                amounts.append(amount)
-
-            compensation = Compensation(
-                interval=interval, min_amount=min(amounts), max_amount=max(amounts)
-            )
-
-            return compensation
-
-        return create_compensation_object(pay)
-
-    @staticmethod
-    def get_location(job: BeautifulSoup) -> Location:
-        """
-        Extracts the job location from a BeautifulSoup object
-        :param job:
-        :return: location
-        """
-        location_link = job.find("a", {"class": "company_location"})
-        if location_link is not None:
-            location_string = location_link.text.strip()
-            parts = location_string.split(", ")
-            if len(parts) == 2:
-                city, state = parts
-            else:
-                city, state = None, None
-        else:
-            city, state = None, None
-        return Location(
-            city=city,
-            state=state,
-        )
-
-    @staticmethod
-    def headers() -> dict:
-        """
-        Returns headers needed for requests
-        :return: dict - Dictionary containing headers
-        """
-        return {
-            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
-        }
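A hedged example of what `_add_params` produces. The `FakeInput` class below is a stand-in invented for illustration; the real `ScraperInput` model lives elsewhere in the package, and only the attributes this method reads are shown:

    # Sketch: assuming an input object with these attributes,
    # _add_params yields the query params shown in the comment.
    class FakeInput:
        search_term = "software engineer"
        location = "Austin, TX"
        hours_old = 72          # -> days = 3
        job_type = None
        easy_apply = False
        is_remote = True        # -> remote = 1
        distance = 25           # -> radius = 25

    print(ZipRecruiterScraper._add_params(FakeInput()))
    # {'search': 'software engineer', 'location': 'Austin, TX',
    #  'days': 3, 'remote': 1, 'radius': 25}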
10  src/jobspy/scrapers/ziprecruiter/constants.py  Normal file
@@ -0,0 +1,10 @@
headers = {
    "Host": "api.ziprecruiter.com",
    "accept": "*/*",
    "x-zr-zva-override": "100000000;vid:ZT1huzm_EQlDTVEc",
    "x-pushnotificationid": "0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0",
    "x-deviceid": "D77B3A92-E589-46A4-8A39-6EF6F1D86006",
    "user-agent": "Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)",
    "authorization": "Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==",
    "accept-language": "en-US,en;q=0.9",
}
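These headers impersonate the ZipRecruiter iOS app. A minimal sketch of how they might be wired onto a session at start-up; the module path is inferred from the file location in this diff, and the endpoint URL is assumed from the Host header, so treat both as assumptions rather than the package's actual wiring:

    import requests

    # Module path assumed from src/jobspy/scrapers/ziprecruiter/constants.py
    from jobspy.scrapers.ziprecruiter.constants import headers

    # Attach the app headers, then fire the session event the way
    # _get_cookies does so the API sets the cookies later requests need.
    session = requests.Session()
    session.headers.update(headers)
    session.post(
        "https://api.ziprecruiter.com/jobs-app/event",  # URL assumed from Host header
        data="event_type=session&logged_in=false",      # abbreviated form of the real payload
    )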
@@ -1,9 +0,0 @@
-from ..jobspy import scrape_jobs
-
-
-def test_indeed():
-    result = scrape_jobs(
-        site_name="indeed",
-        search_term="software engineer",
-    )
-    assert result is not None
@@ -1,9 +0,0 @@
-from jobspy import scrape_jobs
-
-
-def test_linkedin():
-    result = scrape_jobs(
-        site_name="linkedin",
-        search_term="software engineer",
-    )
-    assert result is not None
@@ -1,10 +0,0 @@
-from jobspy import scrape_jobs
-
-
-def test_ziprecruiter():
-    result = scrape_jobs(
-        site_name="zip_recruiter",
-        search_term="software engineer",
-    )
-
-    assert result is not None
18  tests/test_all.py  Normal file
@@ -0,0 +1,18 @@
from jobspy import scrape_jobs
import pandas as pd


def test_all():
    sites = [
        "indeed",
        "glassdoor",
    ]  # ziprecruiter/linkedin need a good ip; temp fix to pass tests on ci
    result = scrape_jobs(
        site_name=sites,
        search_term="engineer",
        results_wanted=5,
    )

    assert (
        isinstance(result, pd.DataFrame) and len(result) == len(sites) * 5
    ), "Result should be a non-empty DataFrame"
13  tests/test_glassdoor.py  Normal file
@@ -0,0 +1,13 @@
from jobspy import scrape_jobs
import pandas as pd


def test_glassdoor():
    result = scrape_jobs(
        site_name="glassdoor",
        search_term="engineer",
        results_wanted=5,
    )
    assert (
        isinstance(result, pd.DataFrame) and len(result) == 5
    ), "Result should be a non-empty DataFrame"
13  tests/test_indeed.py  Normal file
@@ -0,0 +1,13 @@
from jobspy import scrape_jobs
import pandas as pd


def test_indeed():
    result = scrape_jobs(
        site_name="indeed",
        search_term="engineer",
        results_wanted=5,
    )
    assert (
        isinstance(result, pd.DataFrame) and len(result) == 5
    ), "Result should be a non-empty DataFrame"
9  tests/test_linkedin.py  Normal file
@@ -0,0 +1,9 @@
from jobspy import scrape_jobs
import pandas as pd


def test_linkedin():
    result = scrape_jobs(site_name="linkedin", search_term="engineer", results_wanted=5)
    assert (
        isinstance(result, pd.DataFrame) and len(result) == 5
    ), "Result should be a non-empty DataFrame"
12  tests/test_ziprecruiter.py  Normal file
@@ -0,0 +1,12 @@
from jobspy import scrape_jobs
import pandas as pd


def test_ziprecruiter():
    result = scrape_jobs(
        site_name="zip_recruiter", search_term="software engineer", results_wanted=5
    )

    assert (
        isinstance(result, pd.DataFrame) and len(result) == 5
    ), "Result should be a non-empty DataFrame"