diff --git a/Rahul Srivastava_ML/Linear Regression.ipynb b/Rahul Srivastava_ML/Linear Regression.ipynb
new file mode 100644
index 000000000..b386af22e
--- /dev/null
+++ b/Rahul Srivastava_ML/Linear Regression.ipynb
@@ -0,0 +1,540 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Boston=pd.read_csv('Boston.csv')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " Unnamed: 0 | \n",
+ " crim | \n",
+ " zn | \n",
+ " indus | \n",
+ " chas | \n",
+ " nox | \n",
+ " rm | \n",
+ " age | \n",
+ " dis | \n",
+ " rad | \n",
+ " tax | \n",
+ " ptratio | \n",
+ " black | \n",
+ " lstat | \n",
+ " medv | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 0 | \n",
+ " 1 | \n",
+ " 0.00632 | \n",
+ " 18.0 | \n",
+ " 2.31 | \n",
+ " 0 | \n",
+ " 0.538 | \n",
+ " 6.575 | \n",
+ " 65.2 | \n",
+ " 4.0900 | \n",
+ " 1 | \n",
+ " 296 | \n",
+ " 15.3 | \n",
+ " 396.90 | \n",
+ " 4.98 | \n",
+ " 24.0 | \n",
+ "
\n",
+ " \n",
+ " | 1 | \n",
+ " 2 | \n",
+ " 0.02731 | \n",
+ " 0.0 | \n",
+ " 7.07 | \n",
+ " 0 | \n",
+ " 0.469 | \n",
+ " 6.421 | \n",
+ " 78.9 | \n",
+ " 4.9671 | \n",
+ " 2 | \n",
+ " 242 | \n",
+ " 17.8 | \n",
+ " 396.90 | \n",
+ " 9.14 | \n",
+ " 21.6 | \n",
+ "
\n",
+ " \n",
+ " | 2 | \n",
+ " 3 | \n",
+ " 0.02729 | \n",
+ " 0.0 | \n",
+ " 7.07 | \n",
+ " 0 | \n",
+ " 0.469 | \n",
+ " 7.185 | \n",
+ " 61.1 | \n",
+ " 4.9671 | \n",
+ " 2 | \n",
+ " 242 | \n",
+ " 17.8 | \n",
+ " 392.83 | \n",
+ " 4.03 | \n",
+ " 34.7 | \n",
+ "
\n",
+ " \n",
+ " | 3 | \n",
+ " 4 | \n",
+ " 0.03237 | \n",
+ " 0.0 | \n",
+ " 2.18 | \n",
+ " 0 | \n",
+ " 0.458 | \n",
+ " 6.998 | \n",
+ " 45.8 | \n",
+ " 6.0622 | \n",
+ " 3 | \n",
+ " 222 | \n",
+ " 18.7 | \n",
+ " 394.63 | \n",
+ " 2.94 | \n",
+ " 33.4 | \n",
+ "
\n",
+ " \n",
+ " | 4 | \n",
+ " 5 | \n",
+ " 0.06905 | \n",
+ " 0.0 | \n",
+ " 2.18 | \n",
+ " 0 | \n",
+ " 0.458 | \n",
+ " 7.147 | \n",
+ " 54.2 | \n",
+ " 6.0622 | \n",
+ " 3 | \n",
+ " 222 | \n",
+ " 18.7 | \n",
+ " 396.90 | \n",
+ " 5.33 | \n",
+ " 36.2 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " Unnamed: 0 crim zn indus chas nox rm age dis rad \\\n",
+ "0 1 0.00632 18.0 2.31 0 0.538 6.575 65.2 4.0900 1 \n",
+ "1 2 0.02731 0.0 7.07 0 0.469 6.421 78.9 4.9671 2 \n",
+ "2 3 0.02729 0.0 7.07 0 0.469 7.185 61.1 4.9671 2 \n",
+ "3 4 0.03237 0.0 2.18 0 0.458 6.998 45.8 6.0622 3 \n",
+ "4 5 0.06905 0.0 2.18 0 0.458 7.147 54.2 6.0622 3 \n",
+ "\n",
+ " tax ptratio black lstat medv \n",
+ "0 296 15.3 396.90 4.98 24.0 \n",
+ "1 242 17.8 396.90 9.14 21.6 \n",
+ "2 242 17.8 392.83 4.03 34.7 \n",
+ "3 222 18.7 394.63 2.94 33.4 \n",
+ "4 222 18.7 396.90 5.33 36.2 "
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "Boston.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "y=Boston[['medv']]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x=Boston[['crim']]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from sklearn.linear_model import LinearRegression"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from sklearn.model_selection import train_test_split"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lr=LinearRegression()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False)"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "lr.fit(x_train,y_train)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "y_pred=lr.predict(x_test)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " medv | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 293 | \n",
+ " 23.9 | \n",
+ "
\n",
+ " \n",
+ " | 309 | \n",
+ " 20.3 | \n",
+ "
\n",
+ " \n",
+ " | 50 | \n",
+ " 19.7 | \n",
+ "
\n",
+ " \n",
+ " | 311 | \n",
+ " 22.1 | \n",
+ "
\n",
+ " \n",
+ " | 386 | \n",
+ " 10.5 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " medv\n",
+ "293 23.9\n",
+ "309 20.3\n",
+ "50 19.7\n",
+ "311 22.1\n",
+ "386 10.5"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "y_test.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([[23.71121368],\n",
+ " [23.61076456],\n",
+ " [23.70892416],\n",
+ " [23.44469495],\n",
+ " [14.55644792]])"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "y_pred[0:5]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from sklearn.metrics import mean_squared_error"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "72.33207099237366"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "mean_squared_error(y_test,y_pred)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x=Boston[['lstat']]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.4)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False)"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "lr.fit(x_train,y_train)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "y_pred=lr.predict(x_test)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " medv | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 25 | \n",
+ " 13.9 | \n",
+ "
\n",
+ " \n",
+ " | 133 | \n",
+ " 18.4 | \n",
+ "
\n",
+ " \n",
+ " | 263 | \n",
+ " 31.0 | \n",
+ "
\n",
+ " \n",
+ " | 51 | \n",
+ " 20.5 | \n",
+ "
\n",
+ " \n",
+ " | 66 | \n",
+ " 19.4 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " medv\n",
+ "25 13.9\n",
+ "133 18.4\n",
+ "263 31.0\n",
+ "51 20.5\n",
+ "66 19.4"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "y_test.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/Rahul Srivastava_ML/module2.md b/Rahul Srivastava_ML/module2.md
new file mode 100644
index 000000000..70e7a8c93
--- /dev/null
+++ b/Rahul Srivastava_ML/module2.md
@@ -0,0 +1,76 @@
+# Machine Learning
+It is a type of artificial intelligence (AI) that allows software applications to become more accurate at predicting outcomes without being explicitly programmed to do so. Basically,
+it uses historical data as input to predict new output values.
+
+## Types of Machine Learning
+There are three types of Machine learning :-
+`1` Supervised Learning
+`2` Unsupervised Learning
+`3` Reinforcement Learning
+
+## Supervised Learning
+It is an approach to creating AI , where a computer algorithm is trained on input data that has been labeled for a particular output.
+There are two types of Supervised learning :-
+`1` Regression
+`2` Classification
+
+### Regression
+Regression in machine learning consists of mathematical methods that allow data scientists to predict a continuous outcome(Y) based on the value of one or more predictor variables(X).
+There are two types of Regression :-
+`1` Linear Regression
+`2` Logistic Regression
+
+Linear Regression :- Linear Regression is one of the easiest algorithms in ML. It is a statistical model that attempts to indicate the link between the two variables, input (X) and output (Y), with an equation. The input variable is called the Independent Variable and the output variable is called the Dependent Variable. When unseen data is passed to the algorithm, it uses the function, calculates and maps the input to a continuous value for the output.
+
+Logistic Regression :- Logistic Regression is a method used to predict a categorical dependent variable, given a collection of independent variables. It does the prediction by mapping the unseen data to the logit function that has been programmed into it. The algorithm predicts the probability of the new data, so its output lies in the range of 0 to 1.
+
+### Classification
+Classification refers to a predictive modeling problem where a class label is predicted for a given example of input data.
+Some classifications algorithm are :-
+`1` Decision tree
+`2` Logistic Regression
+`3` Artificial Neural Network
+`4` Random Forest
+`5` Stochastic Gradient Descent
+`6` Naive Bayes
+`7` Support Vector Machine
+`8` K-Nearest Neighbor
+
+## Unsupervised Learning
+This type of machine learning involves algorithms that train on unlabeled data. The algorithm scans through data sets looking for any meaningful connection.
+
+### Clustering :-
+Clustering is a Machine Learning technique that involves the grouping of data points.In theory, data points that are in the same group should have similar properties and/or features, while data points in different groups should have highly dissimilar properties and/or features. Clustering is a method of unsupervised learning and is a common technique for statistical data analysis used in many fields.
+Some clustering algorithm are :-
+`1` k-means clustering
+`2` Mean-Shift Clustering
+`3` Density-Based Spatial Clustering of Applications with Noise
+`4` Expectation–Maximization (EM) Clustering using Gaussian Mixture Models
+`5` Agglomerative Hierarchical Clustering
+
+## Reinforcement Learning
+Reinforcement learning is the training of machine learning models to make a sequence of decisions. The agent learns to achieve a goal in an uncertain, potentially complex environment. In reinforcement learning, an artificial intelligence faces a game-like situation. Its goal is to maximize the total reward.
+
+
+## Performance Measure
+`1` Confusion Matrix :- A Confusion matrix is an N x N matrix used for evaluating the performance of a classification model, where N is the number of target classes. The matrix compares the actual target values with those predicted by the machine learning model.
+
+`2` Accuracy :- It is one of the most simple and common classification metrics. It is determined by the total number of correct predictions divided by total number of predictions made for a dataset.
+ total number of correct predictions
+Accuracy = ----------------------------------------
+ total number of predictions
+
+`3` Precision :- In terms of confusion matrix, precision is defined as the ratio of True Positives to all the positives predicted by the model.
+ TP
+Precision = --------------
+ TP + FP
+
+`4` Recall :- This is defined as the ratio of True Positives to all the positives in your dataset.
+ TP
+Recall = -----------
+ TP + FN
+
+
+
+### Contributor
+Rahul Srivastava