Program: Newton's forward difference interpolation (Scilab)
// Newton's forward difference interpolation on equally spaced points
function []=forward(X, Y, x0)
    n = length(X);
    h = X(2) - X(1);                // uniform step size
    for i = 1:n
        Z(i) = Y(i);                // working copy of the ordinates
        D(i,1) = Y(i);              // first column of the difference table
    end
    // build the forward difference table, one column per order
    for i = 1:n-1
        for j = 1:n-i
            d(i,j) = Z(j+1) - Z(j);
            D(j,i+1) = d(i,j);      // store the i-th differences in column i+1
        end
        for k = 1:n-i
            Z(k) = d(i,k);
        end
    end
    printf('difference table is\n')
    disp(D)
    // Newton forward formula: y = y0 + p*d1 + p(p-1)/2!*d2 + ...
    y_x = Y(1);
    p = (x0 - X(1)) / h;
    for i = 1:n-1
        pp = 1;
        for j = 1:i
            pp = pp * (p - (j-1));  // p(p-1)...(p-i+1)
        end
        y_x = y_x + (pp * d(i,1)) / factorial(i);   // d(i,1) = i-th forward difference at X(1)
    end
    printf('value of function at %.4f is %.4f', x0, y_x)
endfunction
Output:
-->X=(100:50:300);
-->Y=[958 917 865 799 712];
-->forward(X,Y,125)
difference table is
   958.  - 41.  - 11.  - 3.  - 4.
   917.  - 52.  - 14.  - 7.    0.
   865.  - 66.  - 21.    0.    0.
   799.  - 87.    0.     0.    0.
   712.    0.     0.     0.    0.
value of function at 125.0000 is 938.8438
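For a quick cross-check outside Scilab, here is a minimal Python sketch of the same Newton forward computation (an added illustration, not part of the original program); for the data above it also gives 938.84375, i.e. 938.8438 to four decimals.

import math

def newton_forward(X, Y, x0):
    # diffs[i][0] is the i-th forward difference at the first node
    n = len(X)
    h = X[1] - X[0]
    diffs = [list(Y)]
    for i in range(1, n):
        prev = diffs[-1]
        diffs.append([prev[j+1] - prev[j] for j in range(len(prev) - 1)])
    # evaluate the Newton forward polynomial at x0
    p = (x0 - X[0]) / h
    y, pp = Y[0], 1.0
    for i in range(1, n):
        pp *= p - (i - 1)
        y += pp * diffs[i][0] / math.factorial(i)
    return y

print(newton_forward([100, 150, 200, 250, 300], [958, 917, 865, 799, 712], 125))   # 938.84375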
SLIP 1
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

# Create the Position_Salaries dataset
data = {'Position': ['CEO', 'Chairman', 'Director', 'Senior Manager', 'Junior Manager', 'Intern'],
        'Level': [1, 2, 3, 4, 5, 6],
        'Salary': [50000, 80000, 110000, 150000, 200000, 250000]}
df = pd.DataFrame(data)

# Identify the independent and target variables
X = df.iloc[:, 1:2].values
y = df.iloc[:, 2].values

# Split the variables into training and testing sets with a 7:3 ratio
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Print the training and testing sets
print("X_train:\n", X_train)
print("y_train:\n", y_train)
print("X_test:\n", X_test)
print("y_test:\n", y_test)

# Build a simple linear regression model
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Print the coefficients and intercept
print("Coefficients:", regressor.coef_)
print("Intercept:", regressor.intercept_)
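As a small follow-up (an added sketch, run after the block above), the fitted line can be checked on the held-out rows with sklearn's r2_score:

from sklearn.metrics import r2_score

# Predict salaries for the test levels and score the fit
y_pred = regressor.predict(X_test)
print("Predictions:", y_pred)
print("R^2 on test set:", r2_score(y_test, y_pred))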
SLIP 2
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

# Create the Salary dataset
data = {'YearsExperience': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        'Salary': [50000, 60000, 70000, 80000, 90000, 100000, 110000, 120000, 130000, 140000]}
df = pd.DataFrame(data)

# Identify the independent and target variables
X = df.iloc[:, 0:1].values
y = df.iloc[:, 1].values

# Split the variables into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Print the training and testing sets
print("X_train:\n", X_train)
print("y_train:\n", y_train)
print("X_test:\n", X_test)
print("y_test:\n", y_test)

# Build a simple linear regression model
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Print the coefficients and intercept
print("Coefficients:", regressor.coef_)
print("Intercept:", regressor.intercept_)
SLIP 3
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Create the dataset
data = {'User ID': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        'Gender': ['Male', 'Male', 'Female', 'Female', 'Male', 'Male', 'Female', 'Female', 'Male', 'Female'],
        'Age': [19, 35, 26, 27, 19, 27, 32, 25, 33, 45],
        'Estimated Salary': [19000, 20000, 43000, 57000, 76000, 58000, 82000, 32000, 69000, 65000],
        'Purchased': [0, 0, 0, 1, 1, 0, 1, 0, 1, 1]}
df = pd.DataFrame(data)

# Encode Gender numerically so the feature matrix is all-numeric
df['Gender'] = df['Gender'].map({'Female': 0, 'Male': 1})

X = df.iloc[:, 1:4].values
y = df.iloc[:, 4].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)

# Predict a single observation (Gender, Age, Estimated Salary)
observation = [[0, 30, 87000]]
prediction = lr.predict(observation)
print(prediction)

# Predict multiple observations
observations = [[0, 30, 87000], [1, 50, 45000], [1, 22, 30000]]
predictions = lr.predict(observations)
print(predictions)
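As a quick sanity check (an added line continuing the block above; with only three test rows the number is purely illustrative), the classifier can be scored on the held-out split:

from sklearn.metrics import accuracy_score

# Accuracy on the 3-row test split
print("Test accuracy:", accuracy_score(y_test, lr.predict(X_test)))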
SLIP 4
import random
import pandas as pd
from sklearn.linear_model import LinearRegression

# Create a random dataset: 50 rows of weights for 7 fish species
fish_species = ['Tuna', 'Salmon', 'Trout', 'Bass', 'Sardine', 'Cod', 'Mackerel']
weights = []
for i in range(50):
    fish_weight = []
    for j in range(7):
        weight = random.randint(1, 20)
        fish_weight.append(weight)
    weights.append(fish_weight)
df = pd.DataFrame(weights, columns=fish_species)

# Create the linear regression model
X = df.iloc[:, :-1]  # independent variables (first six species)
y = df.iloc[:, -1]   # target variable (last species)
model = LinearRegression()
model.fit(X, y)

# Predict the weight of a new fish from the six feature values
new_fish = [[10, 12, 15, 7, 4, 8]]  # example input
predicted_weight = model.predict(new_fish)
print("Predicted weight:", predicted_weight)
SLIP 5
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Load the iris dataset
iris = load_iris()

# Create a dataframe from the dataset
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target

# View basic statistical details of the different species
print("Statistical details of Iris-setosa:")
print(df[df['target'] == 0].describe())
print("Statistical details of Iris-versicolor:")
print(df[df['target'] == 1].describe())
print("Statistical details of Iris-virginica:")
print(df[df['target'] == 2].describe())

# Split the data into training and testing sets
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Fit a logistic regression model (max_iter raised so the solver converges)
logreg = LogisticRegression(max_iter=200)
logreg.fit(X_train, y_train)

# Make predictions on the test set
y_pred = logreg.predict(X_test)

# Calculate the accuracy of the model
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy of the logistic regression model:", accuracy)
SLIP 6
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

# Create the dataset
TID = {1: ["bread", "milk"],
       2: ["bread", "diaper", "beer", "eggs"],
       3: ["milk", "diaper", "beer", "coke"],
       4: ["bread", "milk", "diaper", "beer"],
       5: ["bread", "milk", "diaper", "coke"]}
transactions = []
for key, value in TID.items():
    transactions.append(value)

# Convert the categorical values into one-hot (True/False) format
te = TransactionEncoder()
te_ary = te.fit_transform(transactions)
df = pd.DataFrame(te_ary, columns=te.columns_)

# Apply the apriori algorithm with different min_sup values
min_sup_values = [0.2, 0.4, 0.6]
for min_sup in min_sup_values:
    frequent_itemsets = apriori(df, min_support=min_sup, use_colnames=True)
    rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)
    print("Min_sup:", min_sup)
    print("Frequent Itemsets:")
    print(frequent_itemsets)
    print("Association Rules:")
    print(rules)
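To make the thresholds concrete: support({milk, diaper}) = 3/5 = 0.6 in this dataset, and confidence(diaper -> milk) = support({milk, diaper}) / support({diaper}) = 0.6 / 0.8 = 0.75, so that rule survives min_threshold = 0.7. A tiny hand count (an added check, continuing the block above) confirms the arithmetic:

# Hand-count support and confidence for one rule as a sanity check
both = sum(1 for t in transactions if "milk" in t and "diaper" in t)   # 3 transactions
diaper = sum(1 for t in transactions if "diaper" in t)                 # 4 transactions
print("support:", both / len(transactions))   # 0.6
print("confidence:", both / diaper)           # 0.75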
SLIP 7
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

# Read the dataset (one transaction per row, items spread across columns)
df = pd.read_csv('Market_Basket_Optimisation.csv', header=None)

# Drop null values: discard the empty cells within each row rather than
# dropping whole rows, since basket rows have different lengths
transactions = [[item for item in row if pd.notna(item)] for row in df.values]

# Convert categorical values to numeric using one-hot encoding
te = TransactionEncoder()
te_ary = te.fit(transactions).transform(transactions)
basket = pd.DataFrame(te_ary, columns=te.columns_)

# Generate frequent itemsets using the apriori algorithm
frequent_itemsets = apriori(basket, min_support=0.01, use_colnames=True)

# Generate association rules from frequent itemsets
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1)

# Display information
print("Original Dataset:\n")
print(df.head())
print("\nFrequent Itemsets:\n")
print(frequent_itemsets)
print("\nAssociation Rules:\n")
print(rules)
SLIP 8
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

# Load the dataset
df = pd.read_csv('market_basket.csv')

# Drop any rows with null values
df.dropna(inplace=True)

# Convert categorical values to numeric (one-hot) format
te = TransactionEncoder()
te_ary = te.fit(df.values).transform(df.values)
basket = pd.DataFrame(te_ary, columns=te.columns_)

# Generate frequent itemsets
frequent_itemsets = apriori(basket, min_support=0.01, use_colnames=True)

# Generate association rules
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1)

# Display information about the dataset
print("Dataset information:")
print(df.info())

# Display the frequent itemsets
print("\nFrequent itemsets:")
print(frequent_itemsets)

# Display the association rules
print("\nAssociation rules:")
print(rules)
SLIP 9
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

items = ['item1', 'item2', 'item3', 'item4']
transactions = [['item1', 'item2', 'item3'],
                ['item2', 'item3'],
                ['item1', 'item2', 'item4'],
                ['item1', 'item4'],
                ['item2', 'item3', 'item4'],
                ['item1', 'item3', 'item4'],
                ['item1', 'item2'],
                ['item1', 'item3'],
                ['item3', 'item4'],
                ['item2', 'item4']]

# Convert the transactions into a binary matrix
te = TransactionEncoder()
te_ary = te.fit_transform(transactions)

# Convert the binary matrix into a pandas DataFrame
df = pd.DataFrame(te_ary, columns=te.columns_)

# Generate frequent itemsets with a minimum support of 0.3
frequent_itemsets = apriori(df, min_support=0.3, use_colnames=True)

# Generate association rules with a minimum confidence of 0.7
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)

# Print the frequent itemsets and association rules
print(frequent_itemsets)
print(rules)
SLIP 10
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

# Create the dataset
dataset = {1: ["eggs", "milk", "bread"],
           2: ["eggs", "apple"],
           3: ["milk", "bread"],
           4: ["apple", "milk"],
           5: ["milk", "apple", "bread"]}
transactions = list(dataset.values())

# Convert categorical values into one-hot numeric format
te = TransactionEncoder()
te_ary = te.fit(transactions).transform(transactions)
df = pd.DataFrame(te_ary, columns=te.columns_)

# Apply the Apriori algorithm to generate frequent itemsets and association rules
min_sup = 0.4
frequent_itemsets = apriori(df, min_support=min_sup, use_colnames=True)
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.6)

# Print the frequent itemsets and association rules
print("Frequent Itemsets:\n", frequent_itemsets)
print("\nAssociation Rules:\n", rules)
SLIP 11
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

# Creating the dataset
dataset = [['butter', 'bread', 'milk'],
           ['butter', 'flour', 'milk', 'sugar'],
           ['butter', 'eggs', 'milk', 'salt'],
           ['eggs'],
           ['butter', 'flour', 'milk', 'salt']]

# Converting the categorical values into one-hot numeric format
te = TransactionEncoder()
te_ary = te.fit(dataset).transform(dataset)
df = pd.DataFrame(te_ary, columns=te.columns_)

# Generating frequent itemsets using the Apriori algorithm with different min_sup values
min_sup_values = [0.4, 0.3, 0.2]
for min_sup in min_sup_values:
    frequent_itemsets = apriori(df, min_support=min_sup, use_colnames=True)
    print("Frequent Itemsets with minimum support of", min_sup)
    print(frequent_itemsets)
    # Generating association rules
    rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)
    print("Association Rules with minimum support of", min_sup)
    print(rules)
SLIP 12
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

# Create a random dataset with 10 samples
heights = np.random.normal(170, 10, 10)
weights = np.random.normal(70, 5, 10)

# Combine the two arrays into a single dataset
dataset = pd.DataFrame({'Height': heights, 'Weight': weights})

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(dataset['Height'], dataset['Weight'], test_size=0.2, random_state=42)

# Create a Linear Regression model and fit it to the training data
lr_model = LinearRegression()
lr_model.fit(X_train.values.reshape(-1, 1), y_train)

# Print the model coefficients
print('Model Coefficients:', lr_model.coef_)

# Predict the weights for the test data and print the predictions
y_pred = lr_model.predict(X_test.values.reshape(-1, 1))
print('Predictions:', y_pred)
SLIP 13
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

# Load the dataset
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/nursery/nursery.data'
names = ['parents', 'has_nurs', 'form', 'children', 'housing', 'finance', 'social', 'health', 'class']
dataset = pd.read_csv(url, names=names)

# Identify independent and target variables; the categorical target is
# label-encoded so a regression can be fit on it
X = dataset.drop('class', axis=1)
y = pd.factorize(dataset['class'])[0]

# Convert categorical variables into numerical variables using one-hot encoding
X = pd.get_dummies(X)

# Split into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Build a linear regression model
model = LinearRegression()
model.fit(X_train, y_train)

# Print the coefficients of the model
print("Intercept: ", model.intercept_)
print("Coefficients: ", model.coef_)

# Predict the target variable for the testing set
y_pred = model.predict(X_test)

# Evaluate the model using Mean Squared Error (MSE)
mse = np.mean((y_test - y_pred) ** 2)
print("MSE: ", mse)
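Since 'class' is a categorical label, fitting a regression to its integer codes is a stretch; a classifier is arguably the more natural model here. A hedged alternative (my substitution, not the slip's method) swaps in LogisticRegression on the same split:

from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# Same one-hot features and encoded labels as above, but treated as a
# classification problem rather than a regression
clf = LogisticRegression(max_iter=1000)
clf.fit(X_train, y_train)
print("Accuracy:", accuracy_score(y_test, clf.predict(X_test)))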
SLIP 14
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori

# Create the dataset
TID = {1: ["apple", "mango", "banana"],
       2: ["mango", "banana", "cabbage", "carrots"],
       3: ["mango", "banana", "carrots"],
       4: ["mango", "carrots"]}
transactions = [TID[i] for i in TID]

# Convert the categorical values into one-hot numeric format
te = TransactionEncoder()
te_ary = te.fit(transactions).transform(transactions)
df = pd.DataFrame(te_ary, columns=te.columns_)

# Apply the apriori algorithm with different min_sup values
min_sup_values = [0.25, 0.5, 0.75]
for min_sup in min_sup_values:
    frequent_itemsets = apriori(df, min_support=min_sup, use_colnames=True)
    print("Frequent itemsets with min_sup =", min_sup)
    print(frequent_itemsets)
    print("\n")
SLIP 15
import pandas as pd
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules

# Create the dataset
data = {'No': [1, 2, 3, 4],
        'Company': ['Tata', 'MG', 'Kia', 'Hyundai'],
        'Model': ['Nexon', 'Astor', 'Seltos', 'Creta'],
        'Year': [2017, 2021, 2019, 2015]}
df = pd.DataFrame(data)

# Convert categorical values into numeric format
df['Company'] = pd.Categorical(df['Company'])
df['Model'] = pd.Categorical(df['Model'])
df['Company'] = df['Company'].cat.codes
df['Model'] = df['Model'].cat.codes
print(df)

# apriori expects a boolean one-hot matrix, not integer codes, so the
# categorical columns are one-hot encoded for the itemset-mining step
basket = pd.get_dummies(pd.DataFrame(data)[['Company', 'Model']])

# Generate frequent itemsets with min_sup = 0.5
frequent_itemsets = apriori(basket, min_support=0.5, use_colnames=True)
print(frequent_itemsets)

# Generate association rules with min_threshold = 0.7; with four distinct
# rows nothing reaches 0.5 support, so guard against an empty itemset table
if frequent_itemsets.empty:
    print("No itemsets reach min_sup = 0.5 on this data")
else:
    rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)
    print(rules)