• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python functional.dropout函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中torch.nn.functional.dropout函数的典型用法代码示例。如果您正苦于以下问题:Python dropout函数的具体用法?Python dropout怎么用?Python dropout使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了dropout函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: forward

    def forward(self, input, format='index'):
        """Embed the input and regularize it with dropout.

        Parameters:
            input: token indices (format='index') or one-hot vectors
                (format='onehot').
            format: selects the embedding path; 'index' or 'onehot'.

        Returns:
            The dropout-regularized embedding of ``input``.

        Raises:
            ValueError: if ``format`` is neither 'index' nor 'onehot'.
        """
        if format == 'onehot':
            out = F.dropout(self.Linear(input), self.d, training=self.training)
        elif format == 'index':
            out = F.dropout(self.word_embed(input), self.d, training=self.training)
        else:
            # Previously any other value fell through and raised
            # UnboundLocalError on `out`; fail loudly and clearly instead.
            raise ValueError("format must be 'index' or 'onehot', got %r" % (format,))

        return out
开发者ID:AashishV,项目名称:visDial.pytorch,代码行数:7,代码来源:model.py


示例2: forward

    def forward(self, inp, hidden=None, schedule=None, **kwargs):
        """
        Run the language-model decoder over a batch of sequences.

        Parameters:
        -----------
        inp: torch.Tensor (seq_len x batch_size)
        hidden: optional initial recurrent state (see torch.nn RNN/GRU/LSTM);
            a fresh state from init_hidden_for is used when falsy.
        schedule: unused here; accepted for interface compatibility.

        Returns:
        --------
        outs: torch.Tensor (seq_len * batch_size x vocab)
        hidden: see output of RNN, GRU, LSTM in torch.nn
        weights: None or list of weights (batch_size x seq_len),
            It will only be not None if attention is provided.
        """
        # Word-level dropout: randomly replace input tokens with the target
        # code before embedding; only active while self.training is True.
        inp = word_dropout(
            inp, self.target_code, p=self.word_dropout,
            reserved_codes=self.reserved_codes, training=self.training)
        emb = self.embeddings(inp)
        if self.has_dropout:
            emb = F.dropout(emb, p=self.dropout, training=self.training)
        # NOTE(review): `hidden or ...` assumes hidden is None or an object
        # with unambiguous truthiness (e.g. a tuple), not a bare tensor.
        outs, hidden = self.rnn(emb, hidden or self.init_hidden_for(emb))
        if self.has_dropout:
            outs = F.dropout(outs, p=self.dropout, training=self.training)
        weights = None
        if self.add_attn:
            outs, weights = self.attn(outs, emb)
        # Flatten time and batch dims so the projection yields one
        # vocab-sized score row per (timestep, batch) element.
        seq_len, batch, hid_dim = outs.size()
        outs = outs.view(seq_len * batch, hid_dim)
        if self.add_deepout:
            outs = self.deepout(outs)
        outs = F.log_softmax(self.project(outs))
        return outs, hidden, weights
开发者ID:mikekestemont,项目名称:seqmod,代码行数:31,代码来源:lm.py


示例3: forward

 def forward(self, x):
     """Two hidden ReLU layers with dropout, then a log-softmax output.

     Bug fix: dropout is now gated on ``self.training`` so it is disabled
     at evaluation time (the original applied it unconditionally, making
     inference non-deterministic).
     """
     x = F.relu(self.linear1(x))
     x = F.dropout(x, 0.8, training=self.training)
     x = F.relu(self.linear2(x))
     x = F.dropout(x, 0.8, training=self.training)
     x = F.log_softmax(self.linear3(x))
     return x
开发者ID:zyoohv,项目名称:zyoohv.github.io,代码行数:7,代码来源:dnn.py


示例4: forward

    def forward(self, h_out, fake_region, conv_feat, conv_feat_embed):
        """Adaptive attention over spatial conv features plus a 'visual
        sentinel' (fake_region), queried by the RNN hidden output h_out.

        Returns the attended hidden state after a tanh projection.
        """

        # View into three dimensions
        att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
        conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
        conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)

        # view neighbor from bach_size * neighbor_num x rnn_size to bach_size x rnn_size * neighbor_num
        fake_region = self.fr_linear(fake_region)
        fake_region_embed = self.fr_embed(fake_region)

        h_out_linear = self.ho_linear(h_out)
        h_out_embed = self.ho_embed(h_out_linear)

        # Replicate the text query across all att_size + 1 attention slots
        # (slot 0 is the sentinel, the rest are spatial locations).
        txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))

        # Prepend the sentinel to the spatial features in both spaces.
        img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
        img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)

        # Additive attention: tanh(image embed + text embed) -> scalar logits.
        hA = F.tanh(img_all_embed + txt_replicate)
        hA = F.dropout(hA,self.drop_prob_lm, self.training)

        hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
        PI = F.softmax(hAflat.view(-1, att_size + 1))

        # Attention-weighted sum over sentinel + spatial features.
        visAtt = torch.bmm(PI.unsqueeze(1), img_all)
        visAttdim = visAtt.squeeze(1)

        atten_out = visAttdim + h_out_linear

        h = F.tanh(self.att2h(atten_out))
        h = F.dropout(h, self.drop_prob_lm, self.training)
        return h
开发者ID:nagizeroiw,项目名称:ImageCaptioning.pytorch,代码行数:33,代码来源:AttModel.py


示例5: forward

 def forward(self, x):
     """MLP forward pass: ReLU layers with dropout, log-softmax output.

     Bug fix: the original called ``F.dropout(y, self.training)``, which
     passed the boolean training flag as the drop probability ``p`` and
     left ``training=True`` unconditionally.  Dropout now keeps the default
     probability and is correctly gated on ``self.training``.
     """
     y = F.dropout(F.relu(self.linears[0](x)), training=self.training)
     for layer in self.linears[1:-1]:
         y = F.relu(layer(y))
         y = F.dropout(y, training=self.training)
     y = F.log_softmax(self.linears[-1](y))
     return y
开发者ID:spacy-io,项目名称:thinc,代码行数:7,代码来源:pytorch_mnist_mlp.py


示例6: _forward_unpadded

    def _forward_unpadded(self, x, x_mask):
        """Faster encoding that ignores any padding."""
        # Transpose batch and sequence dims
        x = x.transpose(0, 1)

        # Encode all layers
        outputs = [x]
        for i in range(self.num_layers):
            rnn_input = outputs[-1]

            # Apply dropout to hidden input
            if self.dropout_rate > 0:
                rnn_input = F.dropout(rnn_input,
                                      p=self.dropout_rate,
                                      training=self.training)
            # Forward
            rnn_output = self.rnns[i](rnn_input)[0]
            outputs.append(rnn_output)

        # Concat hidden layers
        if self.concat_layers:
            output = torch.cat(outputs[1:], 2)
        else:
            output = outputs[-1]

        # Transpose back
        output = output.transpose(0, 1)

        # Dropout on output layer
        if self.dropout_output and self.dropout_rate > 0:
            output = F.dropout(output,
                               p=self.dropout_rate,
                               training=self.training)
        return output
开发者ID:ahiroto,项目名称:ParlAI,代码行数:34,代码来源:layers.py


示例7: forward

    def forward(self, x, encoder_padding_mask):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.

        Returns:
            encoded output of shape `(batch, src_len, embed_dim)`
        """
        # --- self-attention sub-block with residual connection ---
        shortcut = x
        x = self.maybe_layer_norm(0, x, before=True)
        x, _ = self.self_attn(query=x, key=x, value=x,
                              key_padding_mask=encoder_padding_mask)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = shortcut + x
        x = self.maybe_layer_norm(0, x, after=True)

        # --- position-wise feed-forward sub-block with residual ---
        shortcut = x
        x = self.maybe_layer_norm(1, x, before=True)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=self.relu_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = shortcut + x
        x = self.maybe_layer_norm(1, x, after=True)
        return x
开发者ID:fyabc,项目名称:fairseq,代码行数:26,代码来源:transformer.py


示例8: forward

    def forward(self, src_tokens):
        """Encode a batch of source sequences with a stack of LSTM cells.

        src_tokens: (batch, seq_len) token indices.
        Returns:
            x: per-timestep outputs, (seq_len, batch, embed_dim)
            final_hiddens: last hidden state per layer, (num_layers, batch, embed_dim)
            final_cells: last cell state per layer, (num_layers, batch, embed_dim)
        """
        bsz, seqlen = src_tokens.size()
        num_layers = len(self.layers)

        # embed tokens
        x = self.embed_tokens(src_tokens)
        x = F.dropout(x, p=self.dropout_in, training=self.training)
        embed_dim = x.size(2)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        final_hiddens, final_cells = [], []
        # `outs` starts as the per-timestep embeddings and is overwritten
        # layer by layer, so layer i+1 consumes layer i's hidden states.
        outs = [x[j] for j in range(seqlen)]
        for i, rnn in enumerate(self.layers):
            # zero-initialised (hidden, cell) state for this layer
            # (legacy torch.autograd.Variable API)
            hidden = Variable(x.data.new(bsz, embed_dim).zero_())
            cell = Variable(x.data.new(bsz, embed_dim).zero_())
            for j in range(seqlen):
                # recurrent cell
                hidden, cell = rnn(outs[j], (hidden, cell))

                # store the most recent hidden state in outs, either to be used
                # as the input for the next layer, or as the final output
                outs[j] = F.dropout(hidden, p=self.dropout_out, training=self.training)

            # save the final hidden and cell states for every layer
            final_hiddens.append(hidden)
            final_cells.append(cell)

        # collect outputs across time steps
        x = torch.cat(outs, dim=0).view(seqlen, bsz, embed_dim)
        final_hiddens = torch.cat(final_hiddens, dim=0).view(num_layers, bsz, embed_dim)
        final_cells = torch.cat(final_cells, dim=0).view(num_layers, bsz, embed_dim)

        return x, final_hiddens, final_cells
开发者ID:ahiroto,项目名称:ParlAI,代码行数:35,代码来源:lstm.py


示例9: forward

 def forward(self, x):
     """Flatten MNIST images and run the 3-layer MLP classifier head."""
     flat = x.view(-1, 28 * 28)
     hidden = F.dropout(F.relu(self.fc1(flat)), p=0.8, training=self.training)
     hidden = F.dropout(F.relu(self.fc2(hidden)), p=0.8, training=self.training)
     # Final layer emits raw logits (no activation).
     return self.fc3(hidden)
开发者ID:uptodiff,项目名称:knowledge-distillation-pytorch,代码行数:8,代码来源:teacher_mnist.py


示例10: hidden_to_idx

 def hidden_to_idx(self, hidden, is_training=False):
     """Convert hidden state vectors into indices into the dictionary."""
     # Project hidden -> embedding -> output scores, with dropout at each step.
     embedded = F.dropout(self.h2e(hidden), p=self.dropout, training=is_training)
     scores = F.dropout(self.e2o(embedded), p=self.dropout, training=is_training)
     # Exclude column 0 (the null token) before taking the argmax ...
     _best, idx = scores.narrow(2, 1, scores.size(2) - 1).max(2)
     # ... then shift indices back to account for the removed column.
     return idx.add_(1), scores


示例11: forward

 def forward(self, inputs):  # inputs (batch size, "sentence" length) bs,n
     """NNLM forward: embed context, tanh hidden layer plus direct connection.

     Relies on the module-level globals ``n`` (context length) and
     ``dropout_rate``.  Bug fix: dropout is now gated on ``self.training``
     so it is inactive at evaluation time (the original applied it
     unconditionally).
     """
     embeds = self.embeddings(inputs)  # bs,n,300
     embeds = embeds.view(-1, n * 300)  # bs,n*300
     out = F.tanh(self.h(embeds))  # bs,hidden_size
     out = self.u(F.dropout(out, p=dropout_rate, training=self.training))  # bs,|V|
     embeds = F.dropout(embeds, p=dropout_rate, training=self.training)
     # Direct (linear) connection from the raw embeddings to the output.
     out += self.w(embeds)  # bs,|V|
     return out
开发者ID:anihamde,项目名称:cs287-s18,代码行数:9,代码来源:nnlm.py


示例12: forward

    def forward(self, input):
        """GAN discriminator MLP: three LeakyReLU+dropout blocks, sigmoid out.

        Bug fixes: dropout is now gated on ``self.training`` so the
        discriminator is deterministic in eval mode (the original applied it
        unconditionally), and the deprecated ``F.sigmoid`` is replaced by
        the equivalent ``torch.sigmoid``.
        """
        x = F.leaky_relu(self.fc1(input), 0.2)
        x = F.dropout(x, 0.3, training=self.training)
        x = F.leaky_relu(self.fc2(x), 0.2)
        x = F.dropout(x, 0.3, training=self.training)
        x = F.leaky_relu(self.fc3(x), 0.2)
        x = F.dropout(x, 0.3, training=self.training)
        x = torch.sigmoid(self.fc4(x))

        return x
开发者ID:KudoLayton,项目名称:pytorch-MNIST-CelebA-GAN-DCGAN,代码行数:10,代码来源:pytorch_MNIST_GAN.py


示例13: forward

 def forward(self, x):
     """Conv stack then fully-connected classifier; returns per-class log-probs."""
     features = F.relu(self.conv1(x))             # 28x28x32 -> 26x26x32
     features = F.relu(self.conv2(features))      # 26x26x32 -> 24x24x64
     features = F.max_pool2d(features, 2)         # 24x24x64 -> 12x12x64
     features = F.dropout(features, p=0.25, training=self.training)
     flat = features.view(-1, 12 * 12 * 64)       # flatten 12x12x64 = 9216
     hidden = F.relu(self.fc1(flat))              # fc 9216 -> 128
     hidden = F.dropout(hidden, p=0.5, training=self.training)
     logits = self.fc2(hidden)                    # fc 128 -> 10
     return F.log_softmax(logits, dim=1)          # to 10 log-probabilities
开发者ID:philferriere,项目名称:dlwin,代码行数:10,代码来源:mnist_cnn_pytorch.py


示例14: _forward_padded

    def _forward_padded(self, x, x_mask):
        """Slower (significantly), but more precise,
        encoding that handles padding.

        x: padded batch, batch-major; x_mask has nonzero entries on
        padding positions (so eq(0) counts real tokens per sequence).
        """
        # Compute sorted sequence lengths
        lengths = x_mask.data.eq(0).long().sum(1).squeeze()
        # pack_padded_sequence requires decreasing-length order;
        # idx_unsort is the inverse permutation to restore batch order.
        _, idx_sort = torch.sort(lengths, dim=0, descending=True)
        _, idx_unsort = torch.sort(idx_sort, dim=0)

        lengths = list(lengths[idx_sort])
        idx_sort = Variable(idx_sort)
        idx_unsort = Variable(idx_unsort)

        # Sort x
        x = x.index_select(0, idx_sort)

        # Transpose batch and sequence dims
        x = x.transpose(0, 1)

        # Pack it up
        rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths)

        # Encode all layers
        outputs = [rnn_input]
        for i in range(self.num_layers):
            rnn_input = outputs[-1]

            # Apply dropout to input; a PackedSequence cannot be dropped out
            # directly, so drop out its flat .data tensor and re-wrap it.
            if self.dropout_rate > 0:
                dropout_input = F.dropout(rnn_input.data,
                                          p=self.dropout_rate,
                                          training=self.training)
                rnn_input = nn.utils.rnn.PackedSequence(dropout_input,
                                                        rnn_input.batch_sizes)
            outputs.append(self.rnns[i](rnn_input)[0])

        # Unpack everything
        for i, o in enumerate(outputs[1:], 1):
            outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0]

        # Concat hidden layers or take final
        if self.concat_layers:
            output = torch.cat(outputs[1:], 2)
        else:
            output = outputs[-1]

        # Transpose and unsort
        output = output.transpose(0, 1)
        output = output.index_select(0, idx_unsort)

        # Dropout on output layer
        if self.dropout_output and self.dropout_rate > 0:
            output = F.dropout(output,
                               p=self.dropout_rate,
                               training=self.training)
        return output
开发者ID:ahiroto,项目名称:ParlAI,代码行数:55,代码来源:layers.py


示例15: forward

 def forward(self, input, hidden):
     """LSTM language-model forward pass.

     input: (sentence length, batch size) token indices.
     hidden: ((n_layers,bs,hidden_size), (n_layers,bs,hidden_size)) LSTM state.
     Returns (vocab logits of shape n,bs,|V|, new hidden state).

     Relies on the module-level global ``dropout_rate``.  Bug fix: dropout
     is now gated on ``self.training`` so it is inactive at evaluation time
     (the original applied it unconditionally).
     """
     embeds = self.embedding(input)  # n,bs,300
     # batch goes along the second dimension
     out = F.dropout(embeds, p=dropout_rate, training=self.training)
     out, hidden = self.lstm(out, hidden)
     out = F.dropout(out, p=dropout_rate, training=self.training)
     # Linear projection to the vocabulary; caller applies (log-)softmax.
     out = self.linear(out)  # n,bs,|V|
     return out, hidden
开发者ID:anihamde,项目名称:cs287-s18,代码行数:12,代码来源:lstm.py


示例16: _forward

    def _forward(self, input_tokens, positions, encoder_out):
        """Convolutional (fconv-style) decoder pass.

        input_tokens / positions: target-side token and position indices.
        encoder_out: encoder output, split into the (encoder_a, encoder_b)
            pair consumed by the attention layers.
        Returns (vocabulary logits, averaged attention scores or None).
        """
        # split and transpose encoder outputs
        encoder_a, encoder_b = self._split_encoder_out(encoder_out)

        # embed tokens and positions
        x = self.embed_tokens(input_tokens) + self.embed_positions(positions)
        x = F.dropout(x, p=self.dropout, training=self.training)
        target_embedding = x

        # project to size of convolution
        x = self.fc1(x)

        # B x T x C -> T x B x C
        x = self._transpose_unless_incremental_eval(x)

        # temporal convolutions
        avg_attn_scores = None
        num_attn_layers = len(self.attention)
        for proj, conv, attention in zip(self.projections, self.convolutions, self.attention):
            # the projection (when present) aligns the residual's width
            # with this convolution's output width
            residual = x if proj is None else proj(x)

            x = F.dropout(x, p=self.dropout, training=self.training)
            x = conv(x)
            x = conv.remove_future_timesteps(x)
            x = F.glu(x)

            # attention
            if attention is not None:
                x = self._transpose_unless_incremental_eval(x)

                x, attn_scores = attention(x, target_embedding, (encoder_a, encoder_b))
                # accumulate a running average of attention across layers
                attn_scores = attn_scores / num_attn_layers
                if avg_attn_scores is None:
                    avg_attn_scores = attn_scores
                else:
                    avg_attn_scores.add_(attn_scores)

                x = self._transpose_unless_incremental_eval(x)

            # residual connection, scaled by sqrt(0.5)
            x = (x + residual) * math.sqrt(0.5)

        # T x B x C -> B x T x C
        x = self._transpose_unless_incremental_eval(x)

        # project back to size of vocabulary
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.fc3(x)

        return x, avg_attn_scores
开发者ID:ahiroto,项目名称:ParlAI,代码行数:51,代码来源:fconv.py


示例17: forward

    def forward(self, x1, x2):
        """Two-branch MLP over a pair of flattened 28x28 inputs, producing
        two sigmoid outputs.

        Bug fixes: dropout is now gated on ``self.training`` (the original
        passed only ``self.drop`` and so applied dropout at evaluation time
        too), and the deprecated ``F.sigmoid`` is replaced by the
        equivalent ``torch.sigmoid``.
        """
        # Per-view input branches on the flattened images.
        x1 = F.dropout(F.relu(self.layer1_1(x1.view(-1, 784))), self.drop,
                       training=self.training)
        x2 = F.dropout(F.relu(self.layer1_2(x2.view(-1, 784))), self.drop,
                       training=self.training)

        # Shared trunk over the concatenated branch outputs.
        x = F.dropout(F.relu(self.layer2(torch.cat((x1, x2), 1))), self.drop,
                      training=self.training)
        x = F.dropout(F.relu(self.layer3(x)), self.drop, training=self.training)
        x = F.dropout(F.relu(self.layer4(x)), self.drop, training=self.training)

        # Two output heads, each ReLU then sigmoid.
        out1 = F.relu(self.layer5_1(x))
        out1 = torch.sigmoid(self.layer6_1(out1))
        out2 = F.relu(self.layer5_2(x))
        out2 = torch.sigmoid(self.layer6_2(out2))

        return out1, out2
开发者ID:joshicha,项目名称:VIGAN,代码行数:14,代码来源:networks.py


示例18: forward

    def forward(self, xs, hidden, encoder_output, attn_mask=None):
        """Single decoder step: embed, attend, run the RNN, and score."""
        # Embed input tokens with dropout, then condition on the encoder.
        embedded = F.dropout(self.lt(xs), p=self.dropout, training=self.training)
        attended = self.attention(embedded, hidden, encoder_output, attn_mask)
        output, new_hidden = self.rnn(attended, hidden)
        # TODO: add post-attention?
        # output = self.attention(output, new_hidden, encoder_output, attn_mask)

        # Project RNN output through the embedding space to vocab scores.
        e = F.dropout(self.o2e(output), p=self.dropout, training=self.training)
        scores = F.dropout(self.e2s(e), p=self.dropout, training=self.training)
        # Argmax over all symbols except padding (index 0), then shift the
        # indices back to compensate for the removed column.
        preds = scores.narrow(2, 1, scores.size(2) - 1).max(2)[1].add_(1)

        return preds, scores, new_hidden
开发者ID:ahiroto,项目名称:ParlAI,代码行数:14,代码来源:modules.py


示例19: forward

    def forward(self, text_sequences, text_positions=None, lengths=None,
                speaker_embed=None):
        """Encode text sequences into attention (keys, values) pairs."""
        # Multi-speaker models require an explicit speaker embedding.
        assert self.n_speakers == 1 or speaker_embed is not None

        # Token embedding with input dropout.
        embedded = F.dropout(self.embed_tokens(text_sequences.long()),
                             p=self.dropout, training=self.training)

        # Speaker conditioning is not expanded here.
        speaker_embed_btc = None

        input_embedding = embedded

        # Convolutions run channels-first: B x T x C -> B x C x T.
        x = embedded.transpose(1, 2)
        for f in self.convolutions:
            # Conv1dGLU layers accept the speaker embedding; others don't.
            x = f(x, speaker_embed_btc) if isinstance(f, Conv1dGLU) else f(x)

        # Back to B x T x C.
        keys = x.transpose(1, 2)

        # scale gradients (this only affects backward, not forward)
        # Residual: add the conv output to the input embedding, scaled by
        # sqrt(0.5), to form the attention values.
        values = (keys + input_embedding) * math.sqrt(0.5)

        return keys, values
开发者ID:Saiuz,项目名称:autokeras,代码行数:28,代码来源:deepvoice3.py


示例20: forward

    def forward(self, s):
        """Board tensor -> (policy log-probs, value).

        s: batch_size x board_x x board_y input boards.
        """
        # Conv tower; spatial size shrinks by 4 overall (two valid convs).
        boards = s.view(-1, 1, self.board_x, self.board_y)
        feats = F.relu(self.bn1(self.conv1(boards)))
        feats = F.relu(self.bn2(self.conv2(feats)))
        feats = F.relu(self.bn3(self.conv3(feats)))
        feats = F.relu(self.bn4(self.conv4(feats)))
        flat = feats.view(-1, self.args.num_channels * (self.board_x - 4) * (self.board_y - 4))

        # Fully-connected head with batch-norm, ReLU and dropout.
        flat = F.dropout(F.relu(self.fc_bn1(self.fc1(flat))), p=self.args.dropout, training=self.training)
        flat = F.dropout(F.relu(self.fc_bn2(self.fc2(flat))), p=self.args.dropout, training=self.training)

        pi = self.fc3(flat)  # policy logits: batch_size x action_size
        v = self.fc4(flat)   # value head:    batch_size x 1

        return F.log_softmax(pi, dim=1), F.tanh(v)
开发者ID:vglsd,项目名称:Dots-Boxes-WhereToFindThem,代码行数:16,代码来源:DnBNNet.py



注:本文中的torch.nn.functional.dropout函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python functional.log_softmax函数代码示例发布时间:2022-05-27
下一篇:
Python functional.cross_entropy函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4 © 2001-2023 极客世界. | Sitemap