-
Notifications
You must be signed in to change notification settings - Fork 2
/
encode.h
executable file
·54 lines (49 loc) · 2.64 KB
/
encode.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
#ifndef NMT_ENCODE_H_  // NOTE(review): was "_ENCODE_" — identifiers with a leading underscore followed by an uppercase letter are reserved to the implementation
#define NMT_ENCODE_H_

#include <cstddef>
#include <string>
#include <vector>

// NOTE(review): the former header-scope "using namespace std;" was removed —
// it injected the entire std namespace into every translation unit including
// this header. All standard types below are now explicitly qualified (same
// types, so no signature/ABI change). If encode.cpp relied on the directive
// for its own code, add it (or std:: qualifications) there.

namespace NMT
{
// Transformer-style NMT encoder: owns the model hyper-parameters and weight
// buffers and exposes the primitive operations (embedding lookup, bias/mask
// construction, layer norm pre/post-processing, attention, feed-forward)
// composed by Encode(). All method bodies live in the corresponding .cpp
// file; the per-method comments below are inferred from the names and
// signatures and should be confirmed against that implementation.
class Encoder
{
private:
    std::size_t head_num;     // number of attention heads
    std::size_t hidden_num;   // hidden (model) dimension
    std::size_t layer_num;    // number of encoder layers
    std::size_t vocabe_size;  // NOTE(review): likely a typo for "vocab_size" — renaming would require updating the .cpp definitions, so left as-is
    std::size_t filter_size;  // feed-forward inner (filter) dimension
    std::vector<std::vector<std::vector<float>>> weight;  // per-layer weight tensors — exact layout TODO: confirm against .cpp
    std::vector<float> weight_embedding;  // token embedding table
    std::vector<float> weight_language;   // language embedding — presumably for multilingual input; confirm in .cpp
    std::vector<float> weight_scale;      // layer-norm scale (gamma)
    std::vector<float> weight_bias;       // layer-norm bias (beta)

public:
    // Stores the hyper-parameters and weight buffers in the encoder.
    // NOTE(review): scalars are taken by non-const size_t& — plain by-value
    // size_t would be cheaper and would also accept rvalues, but changing the
    // declared signature would no longer match the out-of-line definition in
    // the .cpp, so the signature is preserved unchanged.
    Encoder(std::size_t& head_num,
            std::size_t& hidden_num,
            std::size_t& layer_num,
            std::size_t& vocabe_size,
            std::size_t& filter_size,
            std::vector<std::vector<std::vector<float>>>& weight,
            std::vector<float>& weight_embedding,
            std::vector<float>& weight_language,
            std::vector<float>& weight_scale,
            std::vector<float>& weight_bias);

    // Presumably zeroes masked positions of `input` according to `mask` — confirm in .cpp.
    void SetZero(const std::size_t& batch_size, const std::size_t& length, float* input, int* mask);

    // Builds the additive attention bias from the padding mask — confirm in .cpp.
    void BuildBias(const std::size_t& batch_size, const std::size_t& length, int* mask, float* bias);

    // Adds `bias` onto `input` in place — confirm in .cpp.
    void AddBias(float* input, const float* bias, const std::size_t& batch_size, const std::size_t& length);

    // Slices/copies position embeddings for the current `length` into position_x.
    // NOTE(review): max_length is passed by value while the other scalars use
    // const& — harmless, but inconsistent with the rest of the API.
    void GetPositionX(const float* position_embedding, const std::size_t max_length, const std::size_t& length, std::vector<float>& position_x);

    // Multiplies input against relative-position key embeddings — confirm in .cpp.
    void MulPositionKey(const std::size_t& batch_size, const std::size_t& length, float* input, float* position_key, float* out);

    // Multiplies attention weights against relative-position value embeddings — confirm in .cpp.
    void MulPositionValue(const std::size_t& batch_size, const std::size_t& length, float* input, float* position_val, float* out);

    // Softmax over the attention logits, batched over heads.
    // NOTE(review): `head_num` parameter shadows the member of the same name.
    void BatchSoftmax(float* input_qk, int k, int head_num, const std::size_t& batch_size, const std::size_t& length);

    // Looks up token embeddings for `input` ids, filling `embedding_word`,
    // the padding `mask`, and the per-sentence `target_language_id`.
    void EmbeddingLookup(const int* input, const std::size_t& batch_size, const std::size_t& length, std::vector<float>& embedding_word, std::vector<int>& mask, std::vector<int>& target_language_id);

    // Presumably mixes the language embedding into `embedding_word` — confirm in .cpp.
    void ChangeEmbedding(std::vector<float>& embedding_word, const std::size_t& batch_size, const std::size_t& length, std::vector<int>& target_language_id);

    // Layer normalization applied before a sub-layer (pre-norm), using scale/bias.
    void LayerPreprocess(std::vector<float>& layer_input, const std::size_t& batch_size, const std::size_t& length, const float* scale, const float* bias);

    // Residual add after a sub-layer: folds `temp` back into `layer_input`.
    void LayerPostprocess(std::vector<float>& layer_input, const std::vector<float>& temp);

    // Multi-head self-attention over `layer_input`, writing to `output`.
    void Attention(float* layer_input, const std::size_t& batch_size, const std::size_t& length, const float* q_weight, const float* k_weight, const float* v_weight,
                   const float* key_weight, const float* value_weight, const float* weight, const float* bias, float* output);

    // Position-wise feed-forward sub-layer with the given `activation`
    // ("relu" or similar — confirm accepted values in .cpp).
    void FeedForward(const std::vector<float>& input, std::vector<float>& output, const std::size_t& batch_size, const std::size_t& length, int filter, const float* weight, float* bias, std::string activation);

    // Full encoder pass: embeds `input`, runs all layers, returns the encoded states.
    std::vector<float> Encode(std::vector<int>& input, const std::size_t& batch_size, const std::size_t& length, std::vector<int>& mask, std::vector<int>& language_id);
};
}  // namespace NMT
#endif  // NMT_ENCODE_H_