keras tutorial
    from keras import initializers
    my_init = initializers.Zeros()
    model = Sequential()
    model.add(Dense(512, activation='relu', input_shape=(784,),
                    kernel_initializer=my_init))
where kernel_initializer represents the initializer for the kernel of the layer.
Ones
    from keras import initializers
    my_init = initializers.Ones()
    model.add(Dense(512, activation='relu', input_shape=(784,),
                    kernel_initializer=my_init))
Constant: generates a constant value.
    from keras import initializers
    my_init = initializers.Constant(value=0)
    model.add(Dense(512, activation='relu', input_shape=(784,),
                    kernel_initializer=my_init))
where value represents the constant …
98 pages | 1.57 MB | 1 year ago
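
A minimal runnable sketch pulling the excerpt's three initializers into one model; it assumes TensorFlow's bundled Keras, and the layer sizes are illustrative:

    # Minimal sketch of the Zeros, Ones, and Constant initializers from the
    # excerpt, assuming TensorFlow's bundled Keras (tf.keras).
    from tensorflow.keras import Sequential, initializers
    from tensorflow.keras.layers import Dense

    model = Sequential([
        Dense(512, activation='relu', input_shape=(784,),
              kernel_initializer=initializers.Zeros()),             # all-zero kernel
        Dense(512, activation='relu',
              kernel_initializer=initializers.Ones()),              # all-one kernel
        Dense(10, activation='softmax',
              kernel_initializer=initializers.Constant(value=0.1)), # constant kernel
    ])
    model.summary()
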
动手学深度学习 v2.0
    Miniconda3-py39_4.12.0-Linux-x86_64.sh -b
Next, initialize the terminal shell so that we can run conda directly
(https://conda.io/en/latest/miniconda.html):
    ~/miniconda3/bin/conda init
Now close and reopen the current shell, then create a new environment with the following command:
    conda create …
    … ones([n])
Since we will frequently benchmark running times in this book, we define a timer:

    class Timer:  #@save
        """Record multiple running times."""
        def __init__(self):
            self.times = []
            self.start()

        def start(self):
            """Start the timer."""
            self.tik = time.time()

        def stop(self):
            """Stop the timer and record the time in a list."""
            self.times.append(time.time() - self.tik)
            return self.times[-1]

…used to store the number of correct predictions and the total number of predictions, respectively. As we iterate over the dataset, both accumulate over time.

    class Accumulator:  #@save
        """Accumulate sums over n variables."""
        def __init__(self, n):
            self.data = [0.0] * n

        def add(self, *args):
            self.data = [a + float(b) for a, b in zip(self.data, args)]

        def reset(self):
            self.data = [0.0] * len(self.data)

        def __getitem__(self, idx):
            return self.data[idx]

797 pages | 29.45 MB | 1 year ago
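
A short usage sketch of the two utilities, assuming the class definitions above plus `import time`; the timed workload and batch numbers are illustrative:

    # Usage sketch for Timer and Accumulator (definitions above assumed).
    timer = Timer()
    _ = sum(i * i for i in range(10**6))  # some arbitrary work to time
    elapsed = timer.stop()                # seconds spent on the workload

    metric = Accumulator(2)               # [num correct, num predictions]
    metric.add(8, 10)                     # batch 1: 8 of 10 correct
    metric.add(7, 10)                     # batch 2: 7 of 10 correct
    accuracy = metric[0] / metric[1]      # 0.75
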
【PyTorch深度学习-龙龙老师】-测试版202112
…running under TensorFlow 1.x:
    # create the session (execution environment)
    sess = tf.InteractiveSession()
    # the initialization step must itself be run as an operation
    init = tf.global_variables_initializer()
    sess.run(init)  # run the initialization op to complete initialization
    # to run the output endpoint, the input placeholders must be fed values
    c_numpy = sess.run(c_op, feed_dict={a_ph: …

    from torch import optim

    class MyNetwork(nn.Module):
        def __init__(self):
            super(MyNetwork, self).__init__()
            # create 3 fully connected layers
            self…

    fig = plt.figure('himmelblau')
    ax = fig.gca(projection='3d')  # set up the 3D axes
    ax.plot_surface(X, Y, Z)       # 3D surface plot
    ax.view_init(60, -30)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    plt.show()
439 pages | 29.91 MB | 1 year ago
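
A self-contained version of the Himmelblau surface plot from the excerpt; it swaps the deprecated fig.gca(projection='3d') call for add_subplot, and the grid range is an assumption:

    # Self-contained sketch of the Himmelblau 3D surface plot.
    import numpy as np
    import matplotlib.pyplot as plt

    x = np.arange(-6, 6, 0.1)
    y = np.arange(-6, 6, 0.1)
    X, Y = np.meshgrid(x, y)
    Z = (X**2 + Y - 11)**2 + (X + Y**2 - 7)**2  # Himmelblau function

    fig = plt.figure('himmelblau')
    ax = fig.add_subplot(projection='3d')  # 3D axes (non-deprecated API)
    ax.plot_surface(X, Y, Z)                # 3D surface plot
    ax.view_init(60, -30)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    plt.show()
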
全连接神经网络实战. pytorch 版
…to use a neural network, you need to subclass nn.Module:
    class NeuralNetwork(nn.Module):
        def __init__(self):
            super(NeuralNetwork, self).__init__()
            # flatten the input array to 1 dimension
            self.flatten = nn.Flatten()
            # …
Initializing network weights - method one. We initialize the network parameters through a custom initialization function; sometimes a good initialization brings a great benefit to training.
Define the function inside NeuralNetwork:
    def weight_init(self):
        # iterate over every layer of the network
        for m in self.modules():
            # if the layer is a fully connected (linear) layer
            if isi…
…shape is the number of neurons in the next layer. Calling it is simple: construct the network object and call the method directly:
    model = NeuralNetwork().to(device)
    model.weight_init()
We start training and find that accuracy already reaches 78% after the first epoch, and the final result reaches 81%, which shows that initializing the weights sensibly is very valuable.
If the weights were all initialized to the same value, for example: …
29 pages | 1.40 MB | 1 year ago
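
A minimal runnable sketch of the custom weight_init pattern the excerpt describes; the layer sizes and the Kaiming-normal choice for Linear layers are assumptions:

    # Sketch of a subclassed network with a custom weight_init method.
    import torch
    import torch.nn as nn

    class NeuralNetwork(nn.Module):
        def __init__(self):
            super().__init__()
            self.flatten = nn.Flatten()
            self.layers = nn.Sequential(
                nn.Linear(28 * 28, 512), nn.ReLU(),
                nn.Linear(512, 10),
            )

        def forward(self, x):
            return self.layers(self.flatten(x))

        def weight_init(self):
            for m in self.modules():          # iterate over every submodule
                if isinstance(m, nn.Linear):  # only touch linear layers
                    nn.init.kaiming_normal_(m.weight)  # assumed init scheme
                    nn.init.zeros_(m.bias)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = NeuralNetwork().to(device)
    model.weight_init()
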
Machine Learning Pytorch Tutorial
Dataset & Dataloader:
    from torch.utils.data import Dataset, DataLoader

    class MyDataset(Dataset):
        def __init__(self, file):
            self.data = ...
        def __getitem__(self, index):
            return self.data[index]
Neural network:
    import torch.nn as nn

    class MyModel(nn.Module):
        def __init__(self):
            super(MyModel, self).__init__()
            self.net = nn.Sequential(
                nn.Linear(10, 32), …
48 pages | 584.86 KB | 1 year ago
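
A runnable completion of the two patterns in the excerpt; the input width of 10 and hidden width of 32 follow the snippet, while the data, output size, and batch size are assumptions:

    # Completed Dataset/DataLoader and nn.Module patterns from the excerpt.
    import torch
    import torch.nn as nn
    from torch.utils.data import Dataset, DataLoader

    class MyDataset(Dataset):
        def __init__(self, data):
            self.data = data              # e.g., an (N, 10) float tensor
        def __len__(self):
            return len(self.data)
        def __getitem__(self, index):
            return self.data[index]

    class MyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.net = nn.Sequential(
                nn.Linear(10, 32),
                nn.ReLU(),
                nn.Linear(32, 1),
            )
        def forward(self, x):
            return self.net(x)

    loader = DataLoader(MyDataset(torch.randn(100, 10)),
                        batch_size=16, shuffle=True)
    model = MyModel()
    for batch in loader:
        out = model(batch)  # forward pass on one mini-batch
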
AI大模型千问 qwen 中文文档
    …for supervised fine-tuning."""
        def __init__(
            self, raw_data, tokenizer: transformers.PreTrainedTokenizer, max_len: int
        ):
            super(SupervisedDataset, self).__init__()
            rank0_print("Formatting inputs…
    …for supervised fine-tuning."""
        def __init__(
            self, raw_data, tokenizer: transformers.PreTrainedTokenizer, max_len: int
        ):
            super(LazySupervisedDataset, self).__init__()
            self.tokenizer = tokenizer
            self…
    temperature = 0.01
    top_p = 0.9
    history_len: int = 3

    def __init__(self):
        super().__init__()

    @property
    def _llm_type(self) -> str:
        return "Qwen"

    @property
    def _history_len(self)…
56 pages | 835.78 KB | 1 year ago
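
A simplified sketch of the eager-vs-lazy dataset pattern the excerpt contrasts: the eager variant preprocesses every example up front, while the lazy one defers the work to __getitem__. The tokenize() helper is a hypothetical stand-in for the real tokenizer-based preprocessing:

    # Eager vs. lazy preprocessing in PyTorch Dataset form (illustrative).
    from torch.utils.data import Dataset

    def tokenize(example, max_len):
        # hypothetical stand-in for tokenizer-based preprocessing
        return example[:max_len]

    class SupervisedDataset(Dataset):
        def __init__(self, raw_data, max_len):
            # eager: preprocess everything in the constructor
            self.items = [tokenize(x, max_len) for x in raw_data]
        def __len__(self):
            return len(self.items)
        def __getitem__(self, i):
            return self.items[i]

    class LazySupervisedDataset(Dataset):
        def __init__(self, raw_data, max_len):
            # lazy: keep the raw data, preprocess on demand
            self.raw_data, self.max_len = raw_data, max_len
        def __len__(self):
            return len(self.raw_data)
        def __getitem__(self, i):
            return tokenize(self.raw_data[i], self.max_len)

The lazy variant trades a little per-item latency for a much smaller startup cost and memory footprint on large corpora.
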
Keras: 基于 Python 的深度学习库
…an example of a simple multilayer perceptron written as a subclass:
    import keras

    class SimpleMLP(keras.Model):
        def __init__(self, use_bn=False, use_dp=False, num_classes=10):
            super(SimpleMLP, self).__init__(name='mlp')
            self.use_bn = use_bn
            self.use_dp = use_dp
            …
            x = self.bn(x)
            return self.dense2(x)

    model = SimpleMLP()
    model.compile(...)
    model.fit(...)
Layers are defined in __init__(self, ...), and the forward pass is specified in call(self, inputs). Inside call you can declare a custom loss by calling self.add_loss(loss_tensor):
    class MinimalRNNCell(keras.layers.Layer):
        def __init__(self, units, **kwargs):
            self.units = units
            self.state_size = units
            super(MinimalRNNCell, self).__init__(**kwargs)

        def build(self, input_shape):
            self…
257 pages | 1.19 MB | 1 year ago
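
A runnable completion of the SimpleMLP subclassing pattern, assuming TensorFlow's bundled Keras; the hidden width of 32 and the dropout rate are illustrative:

    # Completed model-subclassing example: layers built in __init__,
    # forward pass written in call().
    from tensorflow import keras

    class SimpleMLP(keras.Model):
        def __init__(self, use_bn=False, use_dp=False, num_classes=10):
            super().__init__(name='mlp')
            self.use_bn = use_bn
            self.use_dp = use_dp
            self.dense1 = keras.layers.Dense(32, activation='relu')
            self.dense2 = keras.layers.Dense(num_classes, activation='softmax')
            if use_dp:
                self.dp = keras.layers.Dropout(0.5)
            if use_bn:
                self.bn = keras.layers.BatchNormalization()

        def call(self, inputs):
            x = self.dense1(inputs)  # layers created in __init__ ...
            if self.use_dp:
                x = self.dp(x)
            if self.use_bn:
                x = self.bn(x)       # ... are applied in call()
            return self.dense2(x)

    model = SimpleMLP(use_bn=True)
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
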
《Efficient Deep Learning Book》[EDL] Chapter 7 - Automation
    …represent the convolution strides. Normal cells use stride=1 and
    reduction cells use stride=2
    """
    def __init__(self, stride=1, channels=64):
        self.channels = channels
        self.stride = stride
        self.kwargs = dict(strides=(1…
…channels double with each layer as we move towards the top of the network.
    class ChildManager():
        def __init__(self):
            self.tds = CHILD_PARAMS['train_ds'].shuffle(
                500, reshuffle_each_iteration=True
            ).bat…
…to accelerate the search in the directions which yield higher rewards.
    class Controller():
        def __init__(self):
            self.rnn = self.make_rnn()
            self.store = dict(children=[], rewards=[], accuracies=[])
            self…
33 pages | 2.48 MB | 1 year ago
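
A tiny illustrative sketch (not the book's code) of the normal-vs-reduction distinction the excerpt describes: the two cells are built identically except that a reduction cell uses stride 2 and therefore halves the spatial resolution:

    # Normal cell (stride 1) vs. reduction cell (stride 2), illustrated with
    # a single convolution; channel count is arbitrary.
    import tensorflow as tf

    def make_cell(reduction=False, channels=64):
        stride = 2 if reduction else 1  # normal: 1, reduction: 2
        return tf.keras.layers.Conv2D(
            channels, kernel_size=3, strides=(stride, stride),
            padding='same', activation='relu')

    x = tf.random.normal((1, 32, 32, 3))
    print(make_cell(reduction=False)(x).shape)  # (1, 32, 32, 64): resolution kept
    print(make_cell(reduction=True)(x).shape)   # (1, 16, 16, 64): resolution halved
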
《Efficient Deep Learning Book》[EDL] Chapter 5 - Advanced Compression Techniques
    """
    # Pick initial centroids that are evenly spaced.
    x_sorted = np.sort(x.flatten())
    centroids_init = np.linspace(x_sorted[0], x_sorted[-1], num_clusters)
    # Construct the variables in this optimization…
    # …going to be updated, thus they will be trainable.
    centroids_var = tf.Variable(initial_value=centroids_init, trainable=True)
    opt = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    for step_idx in range(num_steps): …
    …keras.CentroidInitialization
    clustering_params = {
        'number_of_clusters': 16,
        'cluster_centroids_init': CentroidInitialization.LINEAR
    }
    # Cluster a whole model
    clustered_model = cluster_weights(model_wm_…
34 pages | 3.18 MB | 1 year ago
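
A hedged end-to-end usage sketch of whole-model weight clustering with the TensorFlow Model Optimization toolkit, following the excerpt's parameters; the stand-in model and the fine-tuning step are assumptions:

    # Whole-model weight clustering with tfmot, 16 linearly spaced centroids.
    import tensorflow as tf
    import tensorflow_model_optimization as tfmot

    cluster_weights = tfmot.clustering.keras.cluster_weights
    CentroidInitialization = tfmot.clustering.keras.CentroidInitialization

    # Stand-in model; substitute your own trained network here.
    model = tf.keras.Sequential([tf.keras.layers.Dense(10, input_shape=(4,))])

    clustering_params = {
        'number_of_clusters': 16,
        'cluster_centroids_init': CentroidInitialization.LINEAR,
    }
    clustered_model = cluster_weights(model, **clustering_params)
    clustered_model.compile(optimizer='adam', loss='mse')
    # A short fine-tune usually recovers accuracy lost to clustering:
    # clustered_model.fit(train_x, train_y, epochs=1)
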
PyTorch Release Notes
…default process group via:
    torch.distributed.init_process_group(backend="ucc", kwargs)
or a side process group with any default via:
    torch.distributed.init_process_group(backend=any_backend, default_pg_kwargs)
    def init_bn(module):
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            if module.affine:
                module.weight.data.uniform_()
        for child in module.children():
            init_bn(child)
365 pages | 2.94 MB | 1 year ago
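
A small runnable sketch applying the recursive init_bn helper from the excerpt; the model it is applied to is illustrative:

    # Recursively randomize the affine scale of every batch-norm layer.
    import torch.nn as nn

    def init_bn(module):
        if isinstance(module, nn.modules.batchnorm._BatchNorm):
            if module.affine:
                module.weight.data.uniform_()  # scale ~ U[0, 1)
        for child in module.children():       # recurse into submodules
            init_bn(child)

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
    init_bn(model)
    print(model[1].weight)  # batch-norm weights are now uniform random
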
19 results in total.