def stochastic_gradient_descent(feature_matrix, label,
                                learning_rate=0.05, epoch=1000):
    """Fit linear-regression parameters with stochastic gradient descent.

    Args:
        feature_matrix: numpy array of shape (n, d) with ones added as the
            first column (intercept term); each row is a single data point.
        label: numpy array of shape (n,) holding the response value for
            each row of feature_matrix.
        learning_rate: step size for each update, default 0.05.
        epoch: number of single-sample updates to perform, default 1000.

    Returns:
        A numpy array of shape (d,) with the final value of theta.
    """
    n = len(label)
    theta = np.zeros(feature_matrix.shape[1])  # initialize theta to the zero vector
    for _ in range(epoch):
        # Pick one training example uniformly at random in [0, n).
        i = np.random.randint(n)
        x_i = feature_matrix[i]
        # Gradient of the squared error at this single point is error * x_i.
        error = np.dot(x_i, theta) - label[i]
        theta = theta - learning_rate * error * x_i
        # NOTE: the cost is not computed here; it is only needed if a
        # cost-based termination condition is added to the loop.
    return theta
import numpy as np
# Raw scores used as the single predictor variable.
raw_scores = np.array([310, 270, 350, 251, 410, 308, 263])
mean = raw_scores.mean()
std = raw_scores.std()
# Standardize the predictor, then prepend a column of ones so that
# theta[0] acts as the intercept term.
standardized = (raw_scores - mean) / std
feature_matrix = np.c_[np.ones(standardized.size), standardized]
# Response values corresponding to each raw score.
label = np.array([50, 43, 46, 33, 44, 40, 41])
learning_rate = 0.01
m = len(label)
# Random starting point for (intercept, slope).
theta = np.random.rand(2)
def Grad_dec(feature_matrix, label, m, learning_rate, theta, epoch=1000):
    """Batch gradient descent for linear regression.

    Args:
        feature_matrix: numpy array of shape (m, d) with a leading column
            of ones (intercept term).
        label: numpy array of shape (m,) of response values.
        m: number of data points (len(label)).
        learning_rate: step size for each update.
        theta: numpy array of shape (d,), the initial parameter vector.
        epoch: number of full-batch updates (default 1000, matching the
            previously hard-coded loop count).

    Returns:
        (cost, theta): the average squared error J(theta) = ||X@theta - y||^2 / (2m)
        of the returned theta, and the final parameter vector.
    """
    for _ in range(epoch):
        error = np.dot(feature_matrix, theta) - label
        theta = theta - (learning_rate / m) * np.dot(feature_matrix.T, error)
    # Recompute the cost AFTER the final update so that the reported cost
    # describes the theta actually returned (the original returned the cost
    # of the second-to-last iterate).
    error = np.dot(feature_matrix, theta) - label
    cost = np.dot(error, error) / (2 * m)
    return cost, theta
# Fit the model, show the learned parameters, then predict the response
# for one user-supplied raw score.
cost, theta = Grad_dec(feature_matrix, label, m, learning_rate, theta)
print(theta)
raw_value = int(input('Enter test value:'))
# Standardize the query with the training mean/std, add the intercept term.
scaled = (raw_value - mean) / std
query = np.array([1, scaled])
print(round(np.dot(query, theta), 4))
# NOTE(review): the three fragments below are leftover paste residue — two
# truncated duplicates of the gradient-descent stub above and an unrelated
# assignment prompt. They are not executable Python; preserved as comments so
# no content is lost.
# def stochastic_gradient_descent(feature_matrix, label, learning_rate = 0.05, epoch = 1000): """ Implement gradient descent algorithm for regression....
# def gradient_descent(feature_matrix, label, learning_rate = 0.05, epoch = 1000): """ Implement gradient descent algorithm for regression. Args: feature_matrix - A numpy matrix describing the given data, with ones added as the first column. Each row represents a single data point. label - The correct value of response variable, corresponding to feature_matrix. learning_rate - the learning rate with default value 0.5 epoch - the number of iterations with default value 1000 Returns: A numpy array for the...
# Python. Just work in the def sierpinski. No output needed. Will give thumbs up for any attempt beginning this code. Your task is to implement this algorithm in Python, returning a random collection of inum-100,000 points. You should then plot the points to see the structure. Please complete the following function: def sierpinski(po, v, f, inum) The four arguments are: po the initial point. You may assume this is the origin, i.e., po = [0, 0]. v:...