import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from numpy.typing import ArrayLike
from pandas import DataFrame, Series
from xgboost import XGBRegressor
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, r2_score
from IPython.display import display, HTML, Markdown
CSS = """
.output {
flex-direction: row;
}
"""
HTML(f"<style>{CSS}</style>")
df = pd.read_csv('/content/Employee Attrition.csv',
                 dtype={'salary': 'category', 'dept': 'category'})
df = df.drop('Emp ID', axis=1)  # drop the identifier column
df.head()
n_rows, n_columns = df.shape
total_data = n_rows * n_columns
print(f'total rows: {n_rows}\ntotal columns: {n_columns}\ntotal cells: {total_data}')
df.info()
df.isnull().sum()
df.dropna(inplace=True)
def summary_stats(dataframe: DataFrame, numeric_only=True, style=True):
    """describe() output, transposed and extended with variance; optionally styled with a gradient."""
    if numeric_only:
        summary: DataFrame = dataframe.describe().T
        summary['variance'] = dataframe.var(numeric_only=True)
        if style:
            summary = summary.style.format("{:.2f}").background_gradient(
                cmap="Blues", axis=1, subset=summary.columns.drop("count"))
    else:
        summary = dataframe.describe(exclude="number")
    return summary
numeric_cols_summary = summary_stats(df, style=True)
category_cols_summary = summary_stats(df, numeric_only=False)
display(numeric_cols_summary)
display(category_cols_summary)
numerical_cols = df.select_dtypes(include='number').columns  # include int columns so the outlier checks below cover them
sns.set(style='whitegrid')
colors = sns.color_palette("husl", len(numerical_cols))
plt.figure(figsize=(16, 12))
for i, (col, color) in enumerate(zip(numerical_cols, colors), 1):
    plt.subplot(3, 3, i)
    sns.boxplot(y=df[col], color=color)
    plt.title(f'Boxplot of {col}')
plt.tight_layout()
plt.show()
def find_outliers(column):
    """Return the rows of df whose value in `column` falls outside the 1.5 * IQR fences."""
    Q1, Q3 = df[column].quantile([0.25, 0.75])
    IQR = Q3 - Q1
    lower_bound, upper_bound = Q1 - 1.5 * IQR, Q3 + 1.5 * IQR
    outliers = df[(df[column] < lower_bound) | (df[column] > upper_bound)].copy()
    outliers['Column'] = column
    return outliers
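# Quick sanity check (a minimal sketch, not part of the original flow): print the IQR fences
# used by find_outliers so the cutoffs for each numeric column are visible.
for col in numerical_cols:
    q1, q3 = df[col].quantile([0.25, 0.75])
    iqr = q3 - q1
    print(f'{col}: lower fence = {q1 - 1.5 * iqr:.3f}, upper fence = {q3 + 1.5 * iqr:.3f}')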
all_outliers = pd.concat([find_outliers(col) for col in numerical_cols])
outliers_count = all_outliers.groupby('Column').size().reset_index(name='OutliersCount')
sns.set(style='whitegrid')
plt.figure(figsize=(12, 8))
bar_plot = sns.barplot(x='Column', y='OutliersCount', data=outliers_count, palette='viridis')
plt.title('Count of Outliers in Each Numerical Column')
plt.xlabel('Numerical Column')
plt.ylabel('Outliers Count')
plt.xticks(rotation=45, ha='right')
for p in bar_plot.patches:
    bar_plot.annotate(f'{int(p.get_height())}', (p.get_x() + p.get_width() / 2., p.get_height()),
                      ha='center', va='center', xytext=(0, 10), textcoords='offset points',
                      fontsize=17, color='black')
plt.show()
columns_have_outliers = ['Work_accident', 'promotion_last_5years', 'time_spend_company']
def remove_outliers(df, column):
    """Drop rows whose value in `column` lies outside the 1.5 * IQR fences."""
    Q1, Q3 = df[column].quantile([0.25, 0.75])
    IQR = Q3 - Q1
    lower_bound, upper_bound = Q1 - 1.5 * IQR, Q3 + 1.5 * IQR
    return df[(df[column] >= lower_bound) & (df[column] <= upper_bound)]
df_no_outliers = df.copy()
for col in columns_have_outliers:
    df_no_outliers = remove_outliers(df_no_outliers, col)
print(f'Shape before removing outliers: {df.shape}')
print(f'Shape after removing outliers: {df_no_outliers.shape}')
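# Optional check (a sketch): how many rows each column's IQR filter removes on its own,
# relative to the full cleaned DataFrame.
for col in columns_have_outliers:
    kept = remove_outliers(df, col)
    print(f'{col}: {len(df) - len(kept)} rows fall outside the IQR fences')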
plt.figure(figsize=(12, 8))
for i, col in enumerate(columns_have_outliers, 1):
    plt.subplot(2, 2, i)
    sns.boxplot(x=df_no_outliers[col])
    plt.title(f'Boxplot of {col}')
plt.tight_layout()
plt.show()
sns.set(style='whitegrid')
plt.figure(figsize=(14, 6))
plt.subplot(1, 2, 1)
ax1 = sns.countplot(x='dept', data=df, palette='viridis')
plt.title('Count Plot of Departments')
plt.xticks(rotation=45, ha='right')
for p in ax1.patches:
    ax1.annotate(f'{int(p.get_height())}', (p.get_x() + p.get_width() / 2., p.get_height()),
                 ha='center', va='baseline', fontsize=12, color='black')
plt.subplot(1, 2, 2)
ax2 = sns.countplot(x='salary', data=df, palette='magma')
plt.title('Count Plot of Salary Levels')
for p in ax2.patches:
    ax2.annotate(f'{int(p.get_height())}', (p.get_x() + p.get_width() / 2., p.get_height()),
                 ha='center', va='baseline', fontsize=12, color='black')
plt.tight_layout()
plt.show()
salary_summary = df.groupby('salary').agg({
    'satisfaction_level': 'mean',
    'last_evaluation': 'mean',
    'number_project': 'mean',
    'average_montly_hours': 'mean',
    'time_spend_company': 'mean',
    'Work_accident': 'mean',
    'promotion_last_5years': 'mean',
}).reset_index()
visible_columns = ['salary', 'satisfaction_level', 'last_evaluation', 'number_project', 'average_montly_hours', 'time_spend_company', 'Work_accident', 'promotion_last_5years']
fig = go.Figure(data=[go.Table(
    header=dict(values=visible_columns),
    cells=dict(values=[salary_summary[col].round(3) if col != 'salary' else salary_summary[col]
                       for col in visible_columns]))])
fig.update_layout(
title='Salary Summary',
height=300
)
fig.show()
sns.set(style='whitegrid')
plt.figure(figsize=(10, 6))
sns.countplot(x='dept', hue='salary', data=df)
plt.xticks(rotation=45, ha='right')
plt.title('Department Counts by Salary Level')
plt.xlabel('Department')
plt.ylabel('Count')
plt.show()
from sklearn.preprocessing import LabelEncoder
df_label_encoded = df.copy()
label_encoder = LabelEncoder()
# Note: LabelEncoder assigns integer codes in alphabetical order (e.g. high=0, low=1, medium=2),
# so the natural low < medium < high ordering of salary is not preserved.
df_label_encoded['salary'] = label_encoder.fit_transform(df['salary'])
df_label_encoded['dept'] = label_encoder.fit_transform(df['dept'])
print("Label Encoded DataFrame:")
print(df_label_encoded.head())
df_label_encoded.salary.unique()
correlation_matrix = df_label_encoded.corr()
plt.figure(figsize=(10, 8))
sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', fmt=".2f", linewidths=.5)
plt.title('Correlation Matrix')
plt.show()
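# Follow-up sketch (assumes 'satisfaction_level' is the regression target, as in the modelling
# step below, and pandas >= 1.1 for the `key` argument): rank features by the absolute strength
# of their correlation with the target.
print(correlation_matrix['satisfaction_level']
      .drop('satisfaction_level')
      .sort_values(key=abs, ascending=False))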
X = df_label_encoded.drop(columns=['satisfaction_level'])
y = df_label_encoded['satisfaction_level']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model = XGBRegressor(n_estimators=2000, learning_rate=0.005, n_jobs=100)
model.fit(X_train,
          y_train,
          eval_set=[(X_test, y_test)],
          verbose=False)
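# Optional diagnostic (a sketch, not part of the original script): which features does the
# fitted booster lean on? Uses the sklearn-wrapper attribute feature_importances_.
pd.Series(model.feature_importances_, index=X_train.columns).sort_values().plot(
    kind='barh', figsize=(8, 5), title='XGBoost feature importance')
plt.tight_layout()
plt.show()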
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f'MAE: {mae:.4f}')
print(f'R^2: {r2_score(y_test, y_pred):.4f}')
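# Optional baseline (a sketch, not in the original analysis): compare against a model that
# always predicts the training mean, to put the MAE / R^2 figures above in context.
from sklearn.dummy import DummyRegressor
baseline = DummyRegressor(strategy='mean').fit(X_train, y_train)
baseline_pred = baseline.predict(X_test)
print(f'Baseline MAE: {mean_absolute_error(y_test, baseline_pred):.4f}')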