{"id":287,"date":"2016-10-26T18:19:43","date_gmt":"2016-10-26T10:19:43","guid":{"rendered":"http:\/\/blog.messi.moe\/?p=287"},"modified":"2016-12-18T00:02:34","modified_gmt":"2016-12-17T16:02:34","slug":"note-for-word2vec-source-code","status":"publish","type":"post","link":"https:\/\/blog.messi.moe\/?p=287","title":{"rendered":"Note for word2vec Source Code"},"content":{"rendered":"<blockquote><p>\n  Reference:<br \/>\n  https:\/\/github.com\/zhangchen-qinyinghua\/word2vector\/blob\/master\/word2vec_full_chinese_comment.c\n<\/p><\/blockquote>\n<pre><code class=\"c\">#include &lt;stdio.h&gt; \/\/ \u6587\u4ef6\u8f93\u5165\u8f93\u5165\u4f7f\u7528\u7684\u6807\u51c6\u5316\u51fd\u6570\n#include &lt;stdlib.h&gt; \n\/\/ C \u6807\u51c6\u51fd\u6570\u5e93\u7684\u5934\u6587\u4ef6\uff0c\u58f0\u660e\u4e86\u6570\u503c\u4e0e\u5b57\u7b26\u4e32\u8f6c\u6362\u51fd\u6570\uff0c\u4f2a\u968f\u673a\u6570\u751f\u6210\u51fd\u6570\uff0c\u52a8\u6001\u5185\u5b58\u5206\u914d\u51fd\u6570\uff0c\n\/\/ \u8fdb\u7a0b\u63a7\u5236\u51fd\u6570\u7b49\u516c\u5171\u51fd\u6570\u3002 \n#include &lt;string.h&gt;\n\/\/ \u6d89\u53ca\u5b57\u7b26\u4e32\u548c\u5927\u91cf\u5185\u5b58\u5904\u7406\u51fd\u6570\n#include &lt;math.h&gt;\n\/\/ \u63d0\u4f9b\u7528\u4e8e\u5e38\u7528\u9ad8\u7ea7\u6570\u5b66\u8fd0\u7b97\u7684\u8fd0\u7b97\u51fd\u6570\n#include &lt;pthread.h&gt;\n\/\/ POSIX\u7684\u7ebf\u7a0b\u6807\u51c6\uff0c\u5b9a\u4e49\u4e86\u521b\u5efa\u548c\u64cd\u7eb5\u7ebf\u7a0b\u7684\u4e00\u5957API\n\n#define MAX_STRING 100             \/\/ \u5b57\u7b26\u4e32\u6700\u5927\u5b57\u7b26\u6570\n#define EXP_TABLE_SIZE 1000        \/\/ sigmoid\u51fd\u6570\u8868\u683c\n#define MAX_EXP 6              \/\/ sigmoid\u51fd\u6570\u53ea\u8ba1\u7b97-6~6 \n#define MAX_SENTENCE_LENGTH 1000   \/\/ \u53e5\u5b50\u6700\u5927\u957f\u5ea6\uff0c\u4ee5\u6362\u884c\u7b26\u4e3a\u754c\n#define MAX_CODE_LENGTH 40     \/\/ \u54c8\u592b\u66fc\u7f16\u7801\u7684\u6700\u5927\u7801\u957f\n\nconst int vocab_hash_size = 30000000;\n\/\/ maximum 30*0.7=21M words in the vocabulary\n\/\/ \u5b9a\u4e49\u4e86\u5b57\u5178\u91cc\u8bcd\u7684\u6700\u5927\u6570\u76ee\n\ntypedef float real;\n\nstruct vocab_word {\n    long long cn;\n    \/\/ \u8bcd\u9891\u6570\n    int *point;\n    \/\/ point\u8868\u793a\u8fd9\u4e2a\u8bcd\u6c47\u5bf9\u5e94\u7684\u8f85\u52a9\u5411\u91cf\u5217\u7684index\u5e8f\u5217\uff0c\u5176\u957f\u5ea6\u662fcodelen\n    \/\/ \u8f85\u52a9\u5411\u91cf\u5b9a\u4e49\u5728syn1\u4e2d\n    char *word, *code, codelen;\n    \/\/ *word\u662f\u8bcd\u672c\u8eab\n    \/\/ *code\u662f\u8bcd\u5bf9\u5e94\u7684\u54c8\u592b\u66fc\u7f16\u7801\n    \/\/ codelen\u662fcode\u7684\u957f\u5ea6\n    \/\/ \u53c2\u89c1hierarchical softmax\u539f\u7406\n}\n\nchar train_file[MAX_STRING], output_file[MAX_STRING];\n\/\/ \u5b9a\u4e49\u4e86\u8f93\u5165\u8f93\u51fa\u6587\u4ef6\uff08\u6587\u4ef6\u540d\uff09\uff0c\u4ee5\u5b57\u7b26\u6570\u7ec4\u5f62\u5f0f\u50a8\u5b58\nchar save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING];\n\/\/ \u5b9a\u4e49\u4e86\u5b58\u53d6\u8bcd\u5178\u548c\u8bfb\u8bcd\u5178\u6587\u4ef6\uff08\u6587\u4ef6\u540d\uff09\nstruct vocab_word *vocab;\n\/\/ \u5b9a\u4e49\u4e86\u52a8\u6001\u53d8\u5316\u7684\u8bcd\u5178\u7ed3\u6784\uff0c\u5b9a\u4e49\u89c1\u7b2c*\u884c\nint binary=0, cbow=1, debug_mode=2, window=5, min_count=5, num_threads=12, min_reduce=1;\n\/\/ \u5b9a\u4e49\u9ed8\u8ba4\u53c2\u6570\n\/\/ binary: \u662f\u5426\u4f7f\u7528\u4e8c\u8fdb\u5236\n\/\/ cbow: \u662f\u5426\u4f7f\u7528cbow\uff0c\u5426\u5219\u4f7f\u7528skip-gram\n\/\/ window: \u7a97\u957f\uff0c\u9ed8\u8ba4\u4e3a5\n\/\/ min_count: \u51fa\u73b0\u9891\u6b21\u5c0f\u4e8e\u4e00\u5b9a\u503c\u7684\u8bcd\u88ab\u820d\u5f03\n\/\/ min_reduce: \u53c2\u89c1ReduceVocab\nint *vocab_hash;\n\/\/ \u8bcd\u5178\u8bcd\u7684\u54c8\u5e0c\u503c\uff0c\u4e00\u4e2a\u6570\u7ec4\nlong long vocab_max_size=1000, vocab_size=0, layer1_size=100;\n\/\/ \u8bcd\u5178\u8bcd\u6570\u6700\u5927\u503c\u4e3a1000\uff0c\u4e4b\u540e\u4e0d\u591f\u7528\u518d\u52a0\uff0c\u5f53\u524d\u8bcd\u6570\u4e3a0\n\/\/ layer1_size\u6307\u7684\u5e94\u8be5\u662fembedding vector\u7684\u7ef4\u6570\u4e3a100\uff0c\u5373cbow\u7b2c\u4e00\u5c42\u7684\u8282\u70b9\u6570\nlong long train_words=0, word_count_actual=0, iter=5, file_size=0, classes=0;\n\/\/ \u8bcd\u9891\u7edf\u8ba1\n\/\/ train_words\u662f\u6240\u6709\u6709\u6548\u8bcd\u6c47\u7684\u9891\u7387\u4e4b\u548c?\n\/\/ word_count_actual ?\n\/\/ iter\u5e94\u8be5\u662f\u8fed\u4ee3\u6b21\u6570 ?\n\/\/ file_size\u5b9a\u4e49\u6587\u4ef6\u6700\u5927\u4f53\u79ef\uff0c\u7528\u4e8e\u5e76\u884c\u5316\u65f6\u8d1f\u8f7d\u5747\u8861\n\/\/ classes ?\nreal alpha=0.025, starting_alpha, sample=1e-3;\n\/\/ alpha\u662f\u5b66\u4e60\u7387learning rate\n\/\/ starting_alpha \u521d\u59cb\u5b66\u4e60\u7387\uff0c\u53ef\u6307\u5b9a\n\/\/ sample\u53c2\u89c1\u8bcd\u6c47\u8868\u521d\u59cb\u5316\u90e8\u5206\nreal *syn0, *syn1, *syn1neg, *expTable;\n\/\/ *syn0: word2vec\u7684\u6700\u7ec8\u7ed3\u679c\uff0c\u5927\u5c0f\u662fvector\u7ef4\u6570\u00d7\u8f93\u51fa\u8bcd\u6c47\u6570\u76ee\uff0c\u4ee5*point\u4f5c\u4e3aindex\n\/\/ *syn1: \u5168\u90e8\u8f85\u52a9\u53d8\u91cf\uff0c\u4e2a\u6570\u6bd4*syn0\u5c11\u4e00\u4e2a ?\n\/\/ *syn1neg: \u968f\u673a\u8d1f\u91c7\u6837\u5f97\u5230\u7684\u5411\u91cf\u8868\n\/\/ *expTable\u662f\u9884\u5148\u8ba1\u7b97\u7684sigmoid\u51fd\u6570\u503c\uff0c\u53c2\u89c1main\u51fd\u6570\u5e95\u90e8\nclock_t start;  \/\/ \u8ba1\u65f6\u5668\uff0c\u8003\u5bdf\u7a0b\u5e8f\u65f6\u95f4\u590d\u6742\u5ea6\n\nint hs=0, negative=5;  \/\/ \u5b9a\u4e49hierarchical softmax\u548cnegative sampling\u9ed8\u8ba4\u53c2\u6570\uff0c\u524d\u8005\u9ed8\u8ba4\u5173\n\/\/ negative\u4e0d\u4e3a\u96f6\u65f6\u8868\u793a\u6bcf\u4e00\u4e2aword\u548c\u4e0a\u4e0b\u6587\u8981\u8d1f\u91c7\u6837\u7684\u4e2a\u6570\nconst int table_size=1e8;\nint *table;\n\/\/ \u7528\u4e8e\u8d1f\u91c7\u6837\uff0c\u521d\u59cb\u5316\u8d1f\u91c7\u6837\u8f85\u52a9table\uff0c\u4fdd\u8bc1\u4e00\u4e2a\u5355\u8bcd\u88ab\u9009\u4e3a\u8d1f\u6837\u672c\u7684\u6982\u7387d1\u4e3a\n\/\/ (\u8bcd\u9891\u6570)^0.75\/\u6240\u6709\u8bcd[(\u8bcd\u9891\u6570)^0.75]\u4e4b\u548c\nvoid InitUnigramTable() {\n    int a,i;\n    double train_word_pow=0;\n    double d1, power=0.75;      \/\/ \u4e3a\u4ec0\u4e48\u9009\u62e90.75\u4f5c\u4e3a\u6307\u6570\u5c1a\u4e0d\u6e05\u695a\n    table=(int *)malloc(table_size * sizeof(int));\n    for (a=0;a&lt;vocab_size;a++) train_words_pow+=pow(vocab[a].cn, power);    \/\/ ?\n    i=0;\n    d1=pow(vocab[i].cn, power)\/train_word_pow;\n    for (a=0;a&lt;vocab_size;a++) {\n        table[a]=i;\n        if (a\/(double)table_size&gt;d1) {\n            i++;\n            d1+=pow(vocab[i].cn, power)\/train_word_pow;\n        }\n        if (i&gt;=vocab_size) i=vocab_size-1;\n    }\n}\n\n\/\/ Reads a single word from a file, assuming space+tab+EOL to be word boundaries\n\/\/ \u4ece\u6587\u4ef6\u4e2d\u8bfb\u53d6\u5355\u4e2a\u8bcd\uff0c\u4ee5\u7a7a\u683c\u3001\u5236\u8868\u7b26\u548cEOL(EOF\u548c'\\n')\u4f5c\u4e3a\u8fb9\u754c\n\/\/ char(13)\u662f\u56de\u8f66('\\r)\n\/\/ \u4e0d\u540c\u7684\u64cd\u4f5c\u7cfb\u7edf\u4e2d\u6362\u884c\u7b26\u4e0d\u540c\uff0cwin\u662f\\r\\n, unix\u662f\\n, mac\u662f\\r\uff0c\u9700\u7edf\u4e00\u8f6c\u6362\u4e3a'\\n'\u6362\u884c\n\/\/ \u4e0d\u8ba4\u4e3a'\\r'\u662f\u6709\u6548\u7b26\u53f7\n\/\/ \u5355\u4e2a\u6362\u884c\u7b26\u53f7'\\n'\u88ab\u8ba4\u4e3a\u662f\u6709\u6548\u7b26\u53f7\uff0c\u5e76\u88ab\u8bb0\u4e3a\u8bcd\"&lt;\/s&gt;\"\n\/\/ \u9047\u5230\u6362\u884c\u7b26\u4f5c\u4e3a\u7ed3\u5c3e\uff0c\u6362\u884c\u7b26\u5c06\u88ab\u56de\u9000\u5230\u8f93\u5165\u6d41\uff0c\u5f62\u6210\u4e00\u4e2a\u5355\u4e2a\u6362\u884c\u7b26\uff0c\u4e0b\u4e00\u6b21\u8bfb\u4e3a\"&lt;\/s&gt;\"\nvoid ReadWord(char *word, FILE *fin) {\n    int a=0, ch;\n    while (!feof(fin)) {\n        ch=fgetc(fin);\n        if(ch==13) continue;\n        if((ch==' ')||(ch=='\\t')||(ch=='\\n')) {\n            if (a&gt;0) {\n                if (ch=='\\n') ungetc(ch,fin); \/\/ \u6362\u884c\u7b26\u88ab\u56de\u7f29\u5230\u8f93\u5165\u6d41\u4e2d\n                break;\n            }\n            if(ch=='\\n') {\n                strcpy(word,(char*)\"&lt;\/s&gt;\");\n                return;\n            } else continue;\n        }\n        word[a]=ch;\n        a++;\n        if (a&gt;=MAX_STRING-1) a--;  \/\/ Truncate too long words \n        \/\/ \u5355\u4e2a\u8bcd\u6c47\u592a\u957f\u65f6\u9700\u8981\u622a\u65ad\n    }\n    word[a]=0;\n}\n\n\/\/ Return hash value of a word\n\/\/ \u8ba1\u7b97\u8bcd\u7684hash\u503c\nint GetWordHash(char *word) {\n    unsigned long long a, hash=0;\n    for (a=0;a&lt;strlen(word);a++) hash=hash*257+word[a];\n    hash=hash%vocab_hash_size;\n    return hash;\n}\n\n\/\/ Return position of a word in the vocabulary; if the word is not found, return -1\n\/\/ \u6839\u636ehash\u8868\u67e5\u627e\u8bcd\u5728\u8bcd\u5178\u91cc\u7684\u4f4d\u7f6e\nint SearchVocab(char *word) {\n    unsigned int hash=GetWordHash(word);\n    while(1) {\n        if (vocab_hash[hash]==-1) return -1;\n        if (!strcmp(word, vocab[vocab_hash[hash]].word) return vocab_hash[hash];\n        hash=(hash+1)%vocab_hash_size;\n    }\n    return -1;\n}\n\n\/\/ Reads a word and returns its index in the vocabulary\n\/\/ \u4ece\u8f93\u5165\u6587\u4ef6\u4e2d\u8bfb\u53d6\u4e00\u4e2a\u5355\u8bcd\uff0c\u8fd4\u56de\u5176\u5728\u8bcd\u5178\u4e2d\u7684\u4f4d\u7f6e\nint ReadWordIndex(FILE *fin) {\n    char word[MAX_STRING];\n    ReadWord(word, fin);\n    if (feof(fin)) return -1;\n    return SearchVocab(word);\n}\n\n\/\/ Adds a word to the vocabulary\nint AddWordToVocab(char *word) {\n    unsigned int hash, length=strlen(word)+1;\n    if (length&gt;MAX_STRING) length=MAX_STRING; \/\/ \u622a\u65ad\u8bcd\u4f7f\u5176\u957f\u5ea6\u4e0d\u8d85\u8fc7MAX_STRING\n    vocab[vocab_size].word=(char *)calloc(length, sizeof(char));\n    strcpy(vocab[vocab_size].word, word);\n    vocab[vocab_size].cn=0;\n    vocab_size++;\n    \/\/ Reallocate memory if needed\n    if (vocab_size+2&gt;=vocab_max_size) {\n        vocab_max_size+=1000;\n        vocab=(struct vocab_word *)realloc(vocab, vocab_max_size*sizeof(struct vocab_word));\n    }\n    hash=GetWordHash(word);     \/\/ \u83b7\u53d6\u8bcd\u7684hash\n    while (vocab_hash[hash]!=-1) hash=(hash+1)%vocab_hash_size;\n    \/\/ \u5728hash\u8868\u4e2d\u7ebf\u6027\u63a2\u6d4b\uff0c\u5982\u679c\u6709\u4f4d\u7f6e\u4e3a-1(\u5373\u6ca1\u6709\u8bcd)\u5c31\u5c06\u5f53\u524d\u8bcd\u63d2\u5165\u5230\u8fd9\u4e2a\u4f4d\u7f6e\n    vocab_hash[hash]=vocab_size-1;\n    return vocab_size-1;\n}\n\n\/\/ Used later for sorting by word counts\n\/\/ \u6bd4\u8f83\u8bcd\u9891\nint VocabCompare(count void *a, const void *b) {\n    return ((struct vocab_word *)b)-&gt;cn - ((struct vocab_word *)a)-&gt;cn;\n}\n\n\/\/ Sorts the vocabulary by frequency using word counts\n\/\/ \u91cd\u6392\u8bcd\u5178\nvoid SortVocab() {\n    int a, size;\n    unsigned int hash;\n    \/\/ Sort the vocabulary and keep &lt;\/s&gt; at the first position\n    qsort(&amp;vocab[1], vocab_size-1, sizeof(struct vocab_word), VocabCompare);\n    \/\/ \u7528\u5feb\u901f\u6392\u5e8f\uff0c\u6309\u7167\u8bcd\u9891\u5bf9\u8bcd\u5178\u91cc\u7684\u8bcd\u8fdb\u884c\u6392\u5e8f\uff0c\u8bcd\u9891\u5927\u7684\u6392\u524d\u9762\n    for (a=0;a&lt;vocab_hash_size;a++) vocab_hash[a]=-1;       \/\/ \u91cd\u7f6ehash\u8868\u4e3a\u7a7a\n    size=vocab_size;\n    train_words=0;\n    for (a=0;a&lt;size;a++) {\n        \/\/ Words occuring less than min_count times will be discarded from the vocab\n        \/\/ \u8bcd\u9891\u5c0f\u4e8e\u67d0\u4e00\u4e2a\u9608\u503c\u7684\u8bcd\uff0c\u4e22\u5f03\n        if ((vocab[a].cn&lt;min_count) &amp;&amp; (a!=0)) {\n            vocab_size--;\n            free(vocab[a].word);\n        } else {\n            \/\/ Hash will be recomputed, as after the sorting it is not actual\n            \/\/ \u5269\u4e0b\u7684\u8bcd\u91cd\u65b0\u6392\u5217\u5728hash\u8868\u4e2d\n            hash=GetWordHash(vocab[a].word);\n            while(vocab_hash[hash]!=-1) hash=(hash+1)%vocab_hash_size;\n            vocab_hash[hash]=a;\n            train_words+=vocab[a].cn;\n            \/\/ train_words\u662f\u8bcd\u5178\u4e2d\u6240\u6709\u6709\u6548\u8bcd\u7684\u8bcd\u9891\u4e4b\u548c\n        }\n    }\n    \/\/ \u7cbe\u7b80\u8bcd\u5178\u5360\u7528\u7684\u5185\u5b58\n    vocab=(struct vocab_word *)realloc(vocab,(vocab_size+1)*sizeof(struct vocab_word));\n    \/\/ Allocate memory for the binary tree construction\n    \/\/ \u4e3a\u54c8\u592b\u66fc\u6811\u5206\u914d\u5185\u5b58\n    for (a=0;a&lt;vocab_size;a++) {\n        vocab[a].code=(char *)calloc(MAX_CODE_LENGTH,sizeof(char));\n        vocab[a].point=(int *)calloc(MAX_CODE_LENGTH,sizeof(int));\n    }\n}\n\n\/\/ Reduces the vocabulary by removing infrequent tokens\nvoid ReduceVocab() {\n    int a,b=0;\n    unsigned int hash;\n    \/\/ \u5bf9\u4e8e\u8bcda\uff0c\u82e5\u8bcd\u9891\u5927\u4e8e\u9608\u503c\uff0c\u5219\u79fb\u52a8\u5230\u8bcd\u5178\u4f4d\u7f6eb\uff0c\u5426\u5219\u820d\u5f03\n    for (a=0;a&lt;vocab_size;a++) {\n        if (vocab[a].cn &gt; min_reduce) {\n            vocab[b].cn=vocab[a].cn;\n            vocab[b].word=vocab[a].word;\n            b++\n        } else free(vocab[a].word);\n        vocab_size=b;\n    }\n    for (a=0;a&lt;vocab_hash_size;a++) vocab_hash[a]=-1;\n    for (a=0;a&lt;vocab_size;a++) {\n        \/\/ Hash will be recomputed, as it is not actual\n        \/\/ \u91cd\u6784hash\u8868\n        hash=GetWordHash(vocab[a].word);\n        while(vocab_hash[hash]!=-1) hash=(hash+1)%vocab_hash_size;\n        vocab_hash[hash]=a;\n    }\n    fflush(stdout);\n    min_reduce++;       \/\/ \u6bcf\u6267\u884c\u4e00\u6b21\uff0c\u820d\u5f03\u9608\u503c+1 why?\n}\n\n\/\/ Create binary Huffman tree using the word counts\n\/\/ Frequent words will have short unique binary codes\n\/\/ \u6784\u9020\u54c8\u592b\u66fc\u6811\uff0c\u7ed9\u6bcf\u4e2a\u8bcd\u8d4b\u4e88\u4e00\u4e2a\u54c8\u592b\u66fc\u7f16\u7801\uff0c\u8bcd\u9891\u9ad8\u8005\u7801\u957f\u77ed\n\/\/ \u54c8\u592b\u66fc\u6811\u7684\u6784\u5efa\u4f7f\u7528\u8d2a\u5fc3\u6cd5\n\/*\nHuffman\u6811\uff1a\u6700\u4f18\u4e8c\u53c9\u6811\uff0cWPL=\u2211\u7801\u957fx\u6743\u503c \u6700\u4f4e\n\u8d2a\u5fc3\u6cd5\u6784\u9020Huffman\u6811\u7684\u8fc7\u7a0b\uff1a\n\u5bf9\u4e8e\u5df2\u77e5\u7684\u4e00\u7ec4\u53f6\u5b50\u8282\u70b9\uff0c\u6743\u503c\u5206\u522b\u4e3aW1,W2,...,Wn\uff0c\u7ea6\u5b9a\u6743\u503c\u5c0f\u7684\u4e3a\u5de6\u5b50\u6811\uff0c\u5927\u7684\u4e3a\u53f3\u5b50\u6811\n\n\uff081\uff09\u9996\u5148\u628an\u4e2a\u53f6\u5b50\u8282\u70b9\u770b\u6210n\u68f5\u6811\uff08\u4ec5\u6709\u4e00\u4e2a\u8282\u70b9\u7684\u4e8c\u53c9\u6811\uff09\uff0c\u628a\u5b83\u4eec\u770b\u505a\u4e00\u4e2a\u68ee\u6797\n\n    \u4f8b\uff1a   2 4 5 8\n\n\uff082\uff09\u5408\u5e76\u68ee\u6797\u4e2d\u6700\u5c0f\u548c\u6b21\u5c0f\u7684\u4e24\u68f5\u6811\uff0c\u8be5\u6811\u6839\u8282\u70b9\u7684\u6743\u503c\u4e3a\u4e24\u68f5\u5b50\u6811\u4e4b\u548c\uff0c\u6b64\u65f6\u68ee\u6797\u4e2d\u8fd8\u6709n-1\u68f5\u6811\n\n    i.    6    5   8\n         \/ \\\n        2   4\n\n\uff083\uff09\u91cd\u590d\u7b2c\uff082\uff09\u6b65\u76f4\u5230\u68ee\u6797\u4e2d\u53ea\u6709\u4e00\u68f5\u6811\u4e3a\u6b62\n\n    ii.      11        8\n             \/\\\n            5  6\n              \/ \\\n             2   4\n\n    iii.           19\n                  \/  \\\n                 8   11\n                     \/\\\n                    5  6\n                      \/ \\\n                     2   4\n    (done)\n\n*\/\nvoid CreateBinaryTree() {\n    long long a,b,i,min1i,min2i,pos1,pos2,point[MAX_CODE_LENGTH];\n    \/\/ min1i,min2i\u662f\u4e24\u4e2a\u6743\u503c\u6700\u5c0f\u7684\u8282\u70b9\uff0c\u6743\u503c\u4e3avocab[a].cn\u5373\u6bcf\u4e2a\u8bcd\u7684\u8bcd\u9891\n    char code[MAX_CODE_LENGTH];\n    long long *count=(long long *)calloc(vocab_size*2+1,sizeof(long long));\n    \/\/ \u6bcf\u4e2a\u8bcd\u7684\u8bcd\u9891\n    long long *binary=(long long *)calloc(vocab_size*2+1,sizeof(long long));\n    \/\/ \u6bcf\u4e2a\u8bcd\u7684Huffman\u7f16\u7801\uff0c\u4e3a\u4e86\u5bf9\u9f50\u7528\u4e86\u66f4\u5927\u7684\u957f\u5ea6(\u6700\u5927\u7801\u957f\u4e3aMAX_CODE_LENGTH=40)\n    long long *parent_node=(long long *)calloc(vocab_size*2+1,sizeof(long long));\n    \/\/ \u8bb0\u5f55\u6240\u6709\u5b50\u6811\u7684\u7236\u8282\u70b9\n    for (a=0;a&lt;vocab_size;a++) count[a]=vocab[a].cn;\n    for (a=vocab_size;a&lt;vocab_size*2;a++) count[a]=1e15;\n    \/\/ \u5269\u4e0b\u8282\u70b9\u8d4b\u6743\u4e00\u4e2a\u5927\u6811\uff0c\u65b9\u4fbf\u83b7\u53d6\u4e24\u4e2a\u6700\u5c0f\u7684\u6743\u503c\n    pos1=vocab_size-1;\n    pos2=vocab_size;\n    \/\/ Following algorithm constructs the Huffman tree by adding one node at a time\n    \/\/ \u6bcf\u6b21\u5408\u5e76\u4e24\u68f5\u6743\u503c\u6700\u5c0f\u7684\u5b50\u6811\uff0c\u76f4\u5230\u5408\u5e76vocab_size-1\u6b21\n    for (a=0;a&lt;vocab_size-1;a++) {\n        \/\/ First, find two smallest nodes 'min1,min2'\n        if (pos1&gt;=0) {\n            if (count[pos1]&lt;count[pos2]) {\n                min1i=pos1; pos1--;\n            } else {\n                min1i=pos2; pos2++;\n            }\n        } else {\n            min1i=pos2; pos2++;\n        }\n        if (pos1&gt;=0) {\n            if (count[pos1]&lt;count[pos2]) {\n                min2i=pos1; pos1--;\n            } else {\n                min1i=pos2; pos2++;\n            }\n\n        } else {\n            min2i=pos2; pos2++;\n        }\n        count[vocab_size+a]=count[min1i]+count[min2i];\n        parent_node[min1i]=vocab_size+a;\n        parent_node[min2i]=vocab_size+a;\n        binary[min2i]=1;\n        \/\/ binary[min1i]=0, \u5373\u5de6\u5b50\u6811\u7f16\u78010\uff0c\u53f3\u5b50\u6811\u7f16\u78011\n    }\n    \/\/ Now assign binary code to each vocabulary word \n    for (a=0;a&lt;vocab_size;a++) {\n        b=a;\n        i=0;\n        while(1) {\n            code[i]=binary[b];\n            point[i]=b;\n            i++;\n            b=parent_node[b];\n            if (b==vocab_size*2-2) break;\n        }\n        vocab[a].codelen=i;\n        vocab[a].point[0]=vocab_size-2;\n        for (b=0;b&lt;i;b++) {\n            vocab[a].code[i-b-1]=code[b];\n            vocab[a].point[i-b]=point[b]-vocab_size;\n        }\n    }\n    free(count);\n    free(binary);\n    free(parent_node);\n}\n\n\/\/ \u4ece\u8bad\u7ec3\u6587\u4ef6\u4e2d\u83b7\u53d6\u8bcd\u8bed\uff0c\u540c\u65f6\u8bfb\u53d6\u8bad\u7ec3\u6587\u4ef6\u5927\u5c0f\u7528\u4e8e\u5e76\u884c\u8ba1\u7b97\u65f6\u8d1f\u8f7d\u5747\u8861\nvoid LearnVocabFromTrainFile() {\n    char word[MAX_STRING];\n    FILE *fin;\n    long long a,i;\n    for (a=0;a&lt;vocab_hash_size;a++) vocab_hash[a]=-1;   \/\/ \u521d\u59cb\u5316hash\u8868\n    fin=fopen(train_file,\"rb\");\n    if (fin==NULL) {\n        printf(\"ERROR: training data file not found!\\n\");\n        exit(1);\n    }\n    vocab_size=0;\n    AddWordToVocab((char *)\"&lt;\/s&gt;\");\n    \/\/ \u9996\u5148\u52a0\u5165\u4e00\u4e2a\u7279\u6b8a\u8bcd\u6c47&lt;\/s&gt;, \u8fd9\u4e2a\u8bcd\u8868\u793a\u6362\u884c\u7b26\uff0c\u7528\u4e8e\u5206\u9694\u884c\u7684\u6807\u8bb0\n    while (1) {\n        ReadWord(word,fin);     \/\/ \u8bfb\u5165\u4e00\u4e2a\u8bcd\n        if(feof(fin)) break;\n        train_words++;          \/\/ train_words\u662f\u8bcd\u5178\u4e2d\u8bcd\u7684\u6570\u76ee\n        if ((debug_mode)&gt;1 &amp;&amp; (train_words%100000==0)) {\n            printf(\"%lldK%c\",train_words\/1000,13);\n            fflush(stdout);\n        }\n        i=SearchVocab(word);\n        if(i==-1) {\n            a=AddWordToVocab(word);\n            vocab[a].cn=1;\n        }\n        else vocab[i].cn++;\n        if (vocab_size&gt;vocab_hash_size*0.7) ReduceVocab();\n        \/\/ \u5982\u679c\u8bcd\u5178\u4e2d\u8bcd\u6c47\u6570\u76ee\u8fbe\u5230\u4e00\u5b9a\u6570\u91cf\uff0c\u5219\u6e05\u7406\u6389\u4e00\u4e9b\u8bcd\u9891\u8f83\u5c0f\u7684\u8bcd\n    }\n    SortVocab();        \/\/ \u6392\u5e8f\n    if(debug_mode&gt;0) {\n        printf(\"Vocab size: %lld\\n\", vocab_size);\n        printf(\"Words in train file: %lld\\n\", train_words);\n    }\n    file_size=ftell(fin);\n    fclose(fin);\n}\n\n\/\/ \u5199\u5165\u8bcd\u6c47\u5230\u6587\u4ef6save_vocab_file\n\/\/ \u683c\u5f0f\uff1a\u8bcd\u6c47 \u8bcd\u9891\\n\nvoid SaveVocab() {\n    long long i;\n    FILE *fo=fopen(save_vocab_file,\"wb\");\n    for(i=0;i&lt;vocab_size;i++) fprintf(fo, \"%s %lld\\n\",vocab[i].word, vocab[i].cn);\n    fclose(fo);\n}\n\n\/\/ \u4ece\u683c\u5f0f\u4e3a\u201c\u8bcd\u6c47 \u8bcd\u9891\\n\u201d\u7684\u6587\u4ef6\u4e2d\u8bfb\u53d6\u8bcd\u6c47\nvoid ReadVocab() {\n    long long a,i=0;\n    char c;\n    char word[MAX_STRING];\n    FILE *fin=fopen(read_vocab_file,\"rb\");\n    if (fin==NULL) {\n        printf(\"Vocabulary file not found\\n\");\n        exit(1);\n    }\n    for (a=0;a&lt;vocab_hash_size;a++) vocab_hash[a]=-1;\n    vocab_size=0;\n    while(1) {\n        ReadWord(word,fin);\n        if(feof(fin)) break;\n        a=AddWordToVocab(word);\n        fscanf(fin,\"%lld%c\", &amp;vocab[a].cn, &amp;c);\n         \/\/ \u8bfb\u5165\u8bcd\u9891\uff0c\u7531\u4e8efscanf\u548cReadWord\u7684\u95ee\u9898\uff0c\u9700\u8981\u9000\u6389\u8bcd\u9891\u540e\u9762\u8868\u793a\u8fb9\u754c\u7684\u5b57\u7b26\uff0c\u5426\u5219scanf\u4e0d\u4f1a\n         \/\/ \u8bfb\u5165\u8fd9\u4e9b\u7b26\u53f7\uff0cReadWord\u4f1a\u8bfb\u5165\n        i++;\n    }\n    SortVocab();        \/\/ \u91cd\u6392\u8bcd\u5178\n    if (debug_mode&gt;0) {\n        printf(\"Vocab size: %lld\\n\", vocab_size);\n        printf(\"Words in train file: %lld\\n\", train_words);\n    }\n    fin=fopen(train_file,\"rb\");\n    if (fin==NULL) {\n        printf(\"ERROR: training data file not found!\\n\");\n        exit(1);\n    }\n    fseek(fin,0,SEEK_END);\n    file_size=ftell(fin);\n    fclose(fin);\n}\n\nvoid InitNet() {\n    long long a,b;\n    a=posix_memalign((void **)&amp;syn0, 128, (long long)vocab_size*layer1_size*sizeof(real));\n    \/\/ \u5185\u5b58\u52a8\u6001\u5bf9\u9f50\n    \/\/ syn0\u5c31\u662fword2vec\u7684\u8ba1\u7b97\u7ed3\u679c\uff0c\u5373\u8bcd\u7684\u5411\u91cf\u5316\u8868\u793a\uff0c\u5411\u91cf\u7ef4\u6570\u4e3a100\uff0calignment=128\u8981\u6c42\u4e3a2\u7684\u6574\u6570\u6b21\u5e42\n    if (syn1==NULL) {\n        printf(\"Memory allocation failed\\n\");\n        exit(1);\n    }\n\n    \/\/hierarchical softmax \n    if (hs) {\n        a=posix_memalign((void **)&amp;syn1, 128, (long long)vocab_size*layer1_size*sizeof(real));\n        \/\/ syn1\u662f\u8f85\u52a9\u5411\u91cf\uff0c\u5927\u5c0f\u4e3alayer1_size\uff0c\u6570\u91cf\u6bd4syn0\u5c11\u4e00\u4e2a\uff0c\u5168\u96f6\u521d\u59cb\u5316\n        if (syn1==NULL) {\n            printf(\"Memory allocation failed\\n\");\n            exit(1);\n        }\n        for(b=0;b&lt;layer1_size;b++) {\n            for(a=0;a&lt;vocab_size;a++) syn1[a*layer1_size+b]=0;\n        }\n    }\n\n    \/\/ negative sampling \n    if (negative&gt;0) {\n        a=posix_memalign((void **)&amp;syn1neg,128, (long long)vocab_size*layer1_size*sizeof(real));\n        \/\/ \u6bcf\u4e2a\u8bcd\u4e00\u4e2a\u8d1f\u91c7\u6837\u5411\u91cf\n        if (syn1neg==NULL) {\n            printf(\"Memory allocation failed\\n\");\n            exit(1);\n        }\n        for(b=0;b&lt;layer1_size;b++) {\n            for(a=0;a&lt;vocab_size;a++) syn1neg[a*layer1_size+b]=0;\n        }\n    }\n\n    \/\/ \u6240\u6709\u8bcd\u7684\u5bf9\u5e94\u5411\u91cf\u521d\u59cb\u5316\uff0c\u968f\u673a\u6570\uff0c\u8303\u56f4\u5728(-0.5\/\u7ef4\u5ea6)~(0.5\/\u7ef4\u5ea6)\u4e0a\u5747\u5300\u5206\u5e03\n    for(b=0;b&lt;layer1_size;b++) {\n        for(a=0;a&lt;vocab_size;a++) {\n            syn0[a*layer1_size+b]=(rand()\/(real)RAND_MAX-0.5)\/layer1_size;\n        }\n    }\n\n    \/\/\u6784\u9020Huffman\u6811\n    CreateBinaryTree();\n}\n\n\/\/ word2vec\u6a21\u578b\u8bad\u7ec3\u7684\u4e3b\u8981\u8fc7\u7a0b\nvoid *TrainModelThread(void *id) {\n    long long a,b,d,word,last_word,sentence_length=0,sentence_position=0;\n    \/\/ word\u662f\u8bcd\u5728\u8bcd\u5178\u4e2d\u7684\u4f4d\u7f6e\n    long long word_count=0, last_word_count=0, sen[MAX_SENTENCE_LENGTH+1];\n    \/\/ word_count\u7edf\u8ba1\u5b66\u4e60\u4e86\u591a\u5c11\u8bcd\uff0c\u6bcf\u5b66\u4e00\u4e2a\u5c31\u52a0\u4e00\n    \/\/ last_word_count\u7684\u4f5c\u7528\u662f\u52a8\u6001\u53d8\u5316\u5b66\u4e60\u7387\n    \/\/ \u6bcf\u5b66\u4e6010000\u4e2a\u6837\u672c\uff0c\u5b66\u4e60\u7387\u5c31\u53d8\u5c0f\u4e00\u6b21\uff0c\u76f4\u5230\u53d8\u5c0f\u5230starting_alpha*0.0001\n    long long l1,l2,c,target,label,local_iter=iter;\n    \/\/ local_iter ?\n    unsigned long long next_random=(long long)id;\n    \/\/ \u968f\u673a\u6570\u4ee5\u516c\u5f0f next_random=next_random*(unsigned long long)*25214903917+11 \u4ea7\u751f\n    \/\/ 25214903917\u662f\u4e00\u4e2a\u5927\u8d28\u6570\n    real f,g;\n    clock_t now;\n    real *neu1=(real *)calloc(layer1_size, sizeof(real));\n    \/\/ neu1\u662f\u6240\u6709\u8f93\u5165\u7684\u4e0a\u4e0b\u6587\u5411\u91cf\u4e4b\u548c\n    \/\/ neu1e\u662f\u4e00\u8f6e\u8fed\u4ee3\u4e4b\u540e\u6bcf\u4e00\u4e2aword\u5bf9\u5e94\u7684\u5411\u91cf\u7684\u589e\u91cf\n    real *neu1e=(real *)calloc(layer1_size, sizeof(real));\n    FILE *f1=fopen(train_file,\"rb\");\n    fseek(f1,file_size\/(long long)num_threads*(long long)id, SEEK_SET();    \/\/ \u5e76\u884c\u8ba1\u7b97\n    while(1) {\n        \/\/ \u5b66\u4e60\u7387\u66f4\u65b0\uff0c\u65b0\u53e5\u5b50\u8bfb\u53d6\uff0c\u6839\u636e\u4e00\u4e2a\u8bcd\u53ca\u5176\u4e0a\u4e0b\u6587\u8fdb\u884c\u6a21\u578b\u5b66\u4e60\n\n        \/\/ \u5b66\u4e60\u7387\u66f4\u65b0 \n        if (word_count-last_word_count&gt;10000) {\n            \/\/ \u6bcf\u5b66\u4e6010000\u4e2a\u8bcd\uff0c\u5b66\u4e60\u7387\u5c31\u53d8\u5c0f\u4e00\u6b21\n            word_count_actual += word_count+last_word_count;\n            last_word_count=word_count;\n            if ((debug_mode)&gt;1)) {\n                now=clock();\n                printf(\"%cAlpha: %f Progress: %.2f%% Words\/thread\/sec: %.2fk  \",13,alpha,\n                  word_count_actual\/(real)(iter*train_words+1)*100,\n                  word_count_actual\/((real)(now-start+1)\/(real)CLOCKS_PER_SEC*1000));\n                fflush(stdout);\n            }\n            alpha=starting_alpha*(1-word_count_actual\/(real)(iter*train_words+1));\n            \/\/ \u5b66\u4e60\u7387\u53d8\u5316\u516c\u5f0f\n            if(alpha&lt;starting_alpha*0.0001) alpha=starting_alpha*0.0001;\n        }\n\n        \/\/ \u65b0\u53e5\u5b50\u8bfb\u53d6 \n        if(sentence_length==0) {\n            while(1) {\n                word=ReadWordIndex(fi);     \/\/ \u8bfb\u5165\u4e00\u4e2a\u65b0\u53e5\u5b50\n                if(feof(fi)) break;\n                if(word==-1) continue;\n                word_count++;   \/\/ \u8bfb\u5230\u4e00\u4e2a\u6709\u6548\u8bcd\uff0cword_count\u589e\u52a01\n                if(word==0) break;\n                \/\/ The subsampling randomly discards frequent words while \n                \/\/ keeping the ranking same\n                if(sample&gt;0) {\n                    \/\/ \u9ad8\u9891\u8bcd\u4e9a\u91c7\u6837\n                    real ran=(sqrt(vocab[word].cn\/(sample*train_words))+1)*\n                             (sample*train_words)\/vocab[word].cn;\n                    \/\/ ran\u662f\u9ad8\u9891\u8bcd\u4e0d\u88ab\u820d\u5f03\u7684\u6982\u7387, sample\u662f\u4e00\u4e2a\u53c2\u6570\n                    \/\/ ran=sqrt(f\/s)*s\/f=sqrt(s\/f)=sqrt(sample\/frequency)\n                    \/\/ \u5176\u4e2ds\u4e3a\u91c7\u6837\u6570sample*train_words, f=vocab[word].cn\n                    \/\/ \u5206\u6bcd\u7528s+1\u53ea\u662f\u4e3a\u4e86\u9632\u6b62\u5176\u4e3a\u96f6\uff0c\u5b9e\u9645\u51e0\u4e4e\u4e0d\u5f71\u54cd\u6570\u503c\n                    \/\/ \u51fa\u73b0\u9891\u7387\u8d8a\u9ad8\u7684\u8bcd\uff0c\u88ab\u820d\u5f03\u7684\u6982\u7387\u8d8a\u5927\n                    \/\/ \u4e8e\u662f\u9ad8\u9891\u8bcd\u88ab\u820d\u5f03\u7684\u6982\u7387\u4e3amax{0,1-ran} (?)\n                    next_random=next_random*(unsigned long long)25214903917+11;\n                    if(ran&lt;(next_random&amp;0xFFFF)\/(real)65536) continue;\n                    \/\/ \u5c06next_random\u7f29\u653e\u5230(0,1)\n                    \/\/ \u5982\u679c\u8fd9\u4e2a\u968f\u673a\u6570\u5927\u4e8eran(\u5373\u88ab\u820d\u5f03\u6982\u73871-ran)\uff0c\u5373\u88ab\u820d\u5f03\n                }\n                sen[sentence_length]=word;\n                sentence_length++;\n                if(sentence_length&gt;=MAX_SENTENCE_LENGTH) break;\n                \/\/ \u5982\u679c\u53e5\u5b50\u8fc7\u957f\uff0c\u5219\u53ea\u53d6\u524dMAX_SENTENCE_LENGTH\n            }\n            sentence_position=0;\n        }\n\n        \/\/ \u5b66\u4e60\u4e00\u4e2a\u8bcd\u53ca\u5176\u4e0a\u4e0b\u6587\n        if(feof(fi)||(word_count&gt;train_words\/num_threads)) {\n            word_count_actual += word_count-last_word_count;\n            local_iter--;   \/\/ \u8fd9\u6bb5\u7684\u610f\u601d\u5e94\u8be5\u662f\u91cd\u590d\u8fed\u4ee3\u5b66\u4e60\u4e00\u5b9a\u6b21\u6570\n            if(local_iter==0) break;\n            word_count=0;\n            last_word_count=0;\n            sentence_length=0;\n            fseek(fi,file_size\/(long long)num_threads*(long long)id, SEEK_SET);\n            continue;\n        }\n        word=sen[sentence_position];\n        \/\/ \u4ecesentence_position=0\u5f00\u59cb\u4e00\u4e2a\u4e00\u4e2a\u8bcd\u5b66\u4e60\n        if(word==-1) continue;      \/\/ ?\n        for(c=0;c&lt;layer1_size;c++) neu1[c]=0;       \/\/ \u521d\u59cb\u5316 \n        for(c=0;c&lt;layer1_size;c++) neu1e[c]=0;      \/\/ \u521d\u59cb\u5316 \n        next_random=next_random*(unsigned long long)25214903917+11;\n        b=next_random%window;\n        \/\/ b\u662f0\u5230window-1\u7684\u6570\n        \/\/ \u9009\u53d6\u7684\u4e0a\u4e0b\u6587\u662f(sentence_position-(window-b),sentence_postion+(window-b))\u7684\u53bb\u5fc3\u96c6\u5408\n        \/\/ -----------------------------------------------------------------------------------------\n        \/\/ \u4ee5\u4e0b\u90e8\u5206\u81ea\u5df1\u6572\u4e86\u4e00\u904d\uff0c\u770b\u4e86\u4e00\u904dReference\u63d0\u4f9b\u7684\u6ce8\u91ca\uff0c\u4f46\u662f\u8fd8\u662f\u4e0d\u592a\u61c2\uff0c\u8fd8\u9700\u8981\u8fb9\u8bfbpapaer\u8fb9\u505a\u7b14\u8bb0\n        \/\/ \u76ee\u524d\u627e\u5230\u7684\u6253\u7b97\u8bfb\u7684paper\uff1a\n        \/\/\n        \/\/ [1] Goldberg Y, Levy O. word2vec Explained: deriving Mikolov et al.'s negative-sampling \n        \/\/     word-embedding method[J]. arXiv preprint arXiv:1402.3722, 2014.\n        \/\/ [2] Mikolov T, Sutskever I, Chen K, et al. Distributed representations of words and \n        \/\/     phrases and their compositionality[C]\/\/Advances in neural information processing\n        \/\/     systems. 2013: 3111-3119.\n        \/\/ [3] Morin F, Bengio Y. Hierarchical Probabilistic Neural Network Language Model[C]\/\/\n        \/\/     Aistats. 2005, 5: 246-252.\n        \/\/ [4] Mikolov T, Chen K, Corrado G, et al. Efficient estimation of word representations \n        \/\/     in vector space[J]. arXiv preprint arXiv:1301.3781, 2013.\n        \/\/\n        \/\/ -----------------------------------------------------------------------------------------\n        if(cbow) {\n            \/\/ train the cbow architeture\n            \/\/ in-&gt;hidden\n            \/\/ CBOW\u6a21\u578b\uff0c\u6839\u636e\u4e0a\u4e0b\u6587\u5b66\u4e60\n            \/\/ \u9996\u5148\u83b7\u53d6word\u7684\u4e0a\u4e0b\u6587\uff0c\u52a0\u548c\u5230neu1\n            \/\/ \u7136\u540e\u6839\u636e\u662f\u5426\u91c7\u7528hierarchical softmax\u548cnegative sampling\u4f7f\u7528\u4e0d\u540c\u7684\u68af\u5ea6\u516c\u5f0f\n            \/\/ \u4f9d\u6b21\u6839\u636e\u4e24\u79cd\u68af\u5ea6\u516c\u5f0f\u66f4\u65b0\u8f85\u52a9\u5411\u91cfsyn1\uff0c\u7136\u540e\u518d\u66f4\u65b0word\u5411\u91cfsyn0\n            \/\/\n            \/\/ CBOW:                        Skip-Gram:\n            \/\/  input projection output        input   projection  output\n            \/\/  w(t-2)                                           w(t-2)\n            \/\/ w(t-1) \u2572                                    \u2197  w(t-1)\n            \/\/       \u2572 \u2572                                     \u2571  \u2197\n            \/\/         \u2198 \u2198                               \u2571  \u2571  \n            \/\/           [sum] \u2500\u2192 w(t)          w(t) \u2500\u2192  [     ]\n            \/\/         \u2197  \u2197                            \u2572  \u2572\n            \/\/       \u2571  \u2571                                    \u2572  \u2198\n            \/\/ w(t+1)  \u2571                                       \u2198  w(t+1)\n            \/\/   w(t+2)                                       w(t+2)\n            \/\/\n\n            cw=0;\n            for(a=b;a&lt;window*2+1-b;a++) if(a!=window) {\n                c=sentence_position-window+a;\n                if(c&lt;0) continue;\n                if(c&gt;=sentence_length) continue;\n                last_word=sen[c];\n                if(last_word==-1) continue;\n                for(c=0;c&lt;layer1_size;c++) neu1[c]+=syn0[c+last_word*layer1_size];\n                cw++;\n            }\n            if(cw) {\n                for(c=0;c&lt;layer1_size;c++) neu1[c]\/=cw;\n                if(hs) for(d=0;d&lt;vocab[word].codelen;d++) {\n                    f=0;\n                    l2=vocab[word].point[d]*layer1_size;\n                    \/\/ Propagate hidden -&gt; output\n                    for(c=0;c&lt;layer1_size;c++) f+=neu1[c]*syn1[c+l2];\n                    if(f&lt;=-MAX_EXP) continue;\n                    else if(f&gt;=MAX_EXP) continue;\n                    else f=expTable[(int)((f+MAX_EXP)*(EXP_TABLE_SIZE\/MAX_EXP\/2))];\n                    \/\/ 'g' is the gradient multiplied by learning rate\n                    g=(1-vocab[word].code[d]-f)*alpha;\n                    \/\/ Propagate errors output -&gt; hidden \n                    for(c=0;c&lt;layer1_size;c++) neu1e[c]+=g*syn1[c+l2];\n                    \/\/ Learn weights hedden -&gt; output \n                    for(c=0;c&lt;layer1_size;c++) syn1[c+l2]+=g*neu1[c];\n                }\n                \/\/ Negative Sampling\n                if(negative&gt;0) for(d=0;d&lt;negative+1;d++) {\n                    if(d==0) {target=word; label=1;} else {\n                        next_random=next_random*(unsigned long long)25214903917+11;\n                        target=table[(next_random&gt;&gt;16)%table_size];\n                        if(target==0) target=next_random%(vocab_size-1)+1;\n                        if(target==word) continue;\n                        label=0;\n                    }\n                    l2=target*layer1_size;\n                    f=0;\n                    for(c=0;c&lt;layer1_size;c++) f+=neu1[c]*syn1neg[c+l2];\n                    if(f&gt;MAX_EXP) g=(label-1)*alpha;\n                    else if(f&lt;-MAX_EXP) g=(label-0)*alpha;\n                    else g=(label-expTable[(int)((f+MAX_EXP)*(EXP_TABLE_SIZE\/MAX_EXP\/2))])*alpha;\n                    for(c=0;c&lt;layer1_size;c++) neu1e[c]+=g*syn1neg[c+l2];\n                    for(c=0;c&lt;layer1_size;c++) syn1neg[c+l2]+=g*neu1[c];\n                }\n                \/\/ hidden -&gt; in\n                for(a=b;a&lt;window*2+1-b;a++) if(a!=window) {\n                    c=sentence_position-window+a;\n                    if(c&lt;0) continue;\n                    if(c&gt;=sentence_length) continue;\n                    last_word=sen[c];\n                    if(last_word==-1) continue;\n                    for(c=0;c&lt;layer1_size;c++) syn0[c+last_word*layer1_size]+=neu1e[c];\n                }\n            }\n        } else {\n            \/\/ train skip-gram\n            for(a=b;a&lt;window*2+1-b;a++) if(a!=window) {\n                c=sentence_position-window+a;\n                if(c&lt;0) continue;\n                if(c&gt;=sentence_length) continue;\n                last_word=sen[c];\n                if(last_word==-1) continue;\n                l1=last_word*layer1_size;\n                for(c=0;c&lt;layer1_size;c++) neu1e[c]=0;\n                \/\/ hierarchical softmax\n                if (hs) for (d=0;d&lt;vocab[word].codelen;d++) {\n                    f=0;\n                    l2=vocab[word].point[d]*layer1_size;\n                    \/\/ Propagate hidden -&gt; output\n                    for(c=0;c&lt;layer1_size;c++) f+=syn0[c+l1]*syn1[c+l2];\n                    if(f&lt;=-MAX_EXP) continue;\n                    else if(f&gt;=MAX_EXP) continue;\n                    else f=expTable[(int)((f+MAX_EXP)*(EXP_TABLE_SIZE\/MAX_EXP\/2))];\n                    \/\/ 'g' is the gradient multiplied by the learning rate\n                    g=(1-vocab[word].code[d]-f)*alpha;\n                    \/\/ Propagate errors output -&gt; hidden\n                    for(c=0;c&lt;layer1_size;c++) neu1e[c]+=g*syn1[c+l2];\n                    \/\/ Learn weights hidden -&gt; output\n                    for(c=0;c&lt;layer1_size;c++) syn1[c+l2]+=g*syn0[c+l1];\n                }\n                \/\/ Negative Sampling\n                if(negative&gt;0) for (d=0;d&lt;negative+1;d++) {\n                    if(d==0) {target=word; label=1;} \n                    else {\n                        next_random=next_random*(unsigned long long)25214903917+11;\n                        target=table[(next_random&gt;&gt;16)%table_size];\n                        if(target==0) target=next_random%(vocab_size-1)+1;\n                        if(target==word) continue;\n                        label=0;\n                    }\n                    l2=target*layer1_size;\n                    f=0;\n                    for(c=0;c&lt;layer1_size;c++) f+=syn0[c+l1]*syn1neg[c+l2];\n                    if(f&gt;MAX_EXP) g=(label-1)*alpha;\n                    else if(f&lt;-MAX_EXP) g=(label-0)*alpha;\n                    else g=(label-expTable[(int)((f+MAX_EXP)*(EXP_TABLE_SIZE\/MAX_EXP\/2))])*alpha;\n                    for(c=0;c&lt;layer1_size;c++) neu1e[c]+=g*syn1neg[c+l2];\n                    for(c=0;c&lt;layer1_size;c++) syn1neg[c+l2]+=g*syn0[c+l1];\n                }\n                \/\/ Learn weights input -&gt; hidden\n                for(c=0;c&lt;layer1_size;c++) syn0[c+l1]+=neu1e[c];\n            }\n        }\n        sentence_position++;\n        if(sentence_position&gt;=sentence_length) {\n            sentence_length=0;\n            continue;\n        }\n    }\n    fclose(fi);\n    free(neu1);\n    free(neu1e);\n    pthread_exit(NULL);\n}\n\n<\/code><\/pre>\n<p>[1] Goldberg Y, Levy O. word2vec Explained: deriving Mikolov et al.&#8217;s negative-sampling<br \/>\n    word-embedding method[J]. arXiv preprint arXiv:1402.3722, 2014.<\/p>\n","protected":false},"excerpt":{"rendered":"<blockquote><p>\n  Reference:<br \/>\n  https:\/\/github.com\/zhangchen-qinyinghua\/word2vector\/blob\/master\/word2vec_full_chinese_comment.c\n<\/p><\/blockquote>\n<pre><code class=\"c\">#include &lt;stdio.h&gt; \/\/ \u6587\u4ef6\u8f93\u5165\u8f93\u5165\u4f7f\u7528\u7684\u6807\u51c6\u5316\u51fd\u6570\n#include &lt;stdlib.h&gt; \n\/\/ C \u6807\u51c6\u51fd\u6570\u5e93\u7684\u5934\u6587\u4ef6\uff0c\u58f0\u660e\u4e86\u6570\u503c\u4e0e\u5b57\u7b26\u4e32\u8f6c\u6362\u51fd\u6570\uff0c\u4f2a\u968f\u673a\u6570\u751f\u6210\u51fd\u6570\uff0c\u52a8\u6001\u5185\u5b58\u5206\u914d\u51fd\u6570\uff0c\n\/\/ \u8fdb\u7a0b\u63a7\u5236\u51fd\u6570\u7b49\u516c\u5171\u51fd\u6570\u3002 \n#include &lt;string.h&gt;\n\/\/ \u6d89\u53ca\u5b57\u7b26\u4e32\u548c\u5927\u91cf\u5185\u5b58\u5904\u7406\u51fd\u6570\n#include &lt;math.h&gt;\n\/\/ \u63d0\u4f9b\u7528\u4e8e\u5e38\u7528\u9ad8\u7ea7\u6570\u5b66\u8fd0\u7b97\u7684\u8fd0\u7b97\u51fd\u6570\n#include &lt;pthread.h&gt;\n\/\/ POSIX\u7684\u7ebf\u7a0b\u6807\u51c6\uff0c\u5b9a\u4e49\u4e86\u521b\u5efa\u548c\u64cd\u7eb5\u7ebf\u7a0b\u7684\u4e00\u5957API\n\n#define MAX_STRING 100             \/\/ \u5b57\u7b26\u4e32\u6700\u5927\u5b57\u7b26\u6570\n#define EXP_TABLE_SIZE 1000        \/\/ sigmoid\u51fd\u6570\u8868\u683c\n#define MAX_EXP 6              \/\/ sigmoid\u51fd\u6570\u53ea\u8ba1\u7b97-6~6 \n#define MAX_SENTENCE_LENGTH 1000   \/\/ \u53e5\u5b50\u6700\u5927\u957f\u5ea6\uff0c\u4ee5\u6362\u884c\u7b26\u4e3a\u754c\n#define MAX_CODE_LENGTH 40     \/\/ \u54c8\u592b\u66fc\u7f16\u7801\u7684\u6700\u5927\u7801\u957f\n\nconst int vocab_hash_size = 30000000;\n\/\/ maximum 30*0.7=21M words in the vocabulary\n\/\/ \u5b9a\u4e49\u4e86\u5b57\u5178\u91cc\u8bcd\u7684\u6700\u5927\u6570\u76ee\n\ntypedef float real;\n\nstruct vocab_word {\n    long long cn;\n    \/\/ \u8bcd\u9891\u6570\n    int *point;\n    \/\/ point\u8868\u793a\u8fd9\u4e2a\u8bcd\u6c47\u5bf9\u5e94\u7684\u8f85\u52a9\u5411\u91cf\u5217\u7684index\u5e8f\u5217\uff0c\u5176\u957f\u5ea6\u662fcodelen\n    \/\/ \u8f85\u52a9\u5411\u91cf\u5b9a\u4e49\u5728syn1\u4e2d\n    char *word, *code, codelen;\n    \/\/ *word\u662f\u8bcd\u672c\u8eab\n    \/\/ *code\u662f\u8bcd\u5bf9\u5e94\u7684\u54c8\u592b\u66fc\u7f16\u7801\n    \/\/ codelen\u662fcode\u7684\u957f\u5ea6\n    \/\/ \u53c2\u89c1hierarchical softmax\u539f\u7406\n}\n\nchar train_file[MAX_STRING], output_file[MAX_STRING];\n\/\/ \u5b9a\u4e49\u4e86\u8f93\u5165\u8f93\u51fa\u6587\u4ef6\uff08\u6587\u4ef6\u540d\uff09\uff0c\u4ee5\u5b57\u7b26\u6570\u7ec4\u5f62\u5f0f\u50a8\u5b58\nchar save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING];\n\/\/ \u5b9a\u4e49\u4e86\u5b58\u53d6\u8bcd\u5178\u548c\u8bfb\u8bcd\u5178\u6587\u4ef6\uff08\u6587\u4ef6\u540d\uff09\nstruct vocab_word *vocab;\n\/\/ \u5b9a\u4e49\u4e86\u52a8\u6001\u53d8\u5316\u7684\u8bcd\u5178\u7ed3\u6784\uff0c\u5b9a\u4e49\u89c1\u7b2c*\u884c\nint binary=0, cbow=1, debug_mode=2, window=5, min_count=5, num_threads=12, min_reduce=1;\n\/\/ \u5b9a\u4e49\u9ed8\u8ba4\u53c2\u6570\n\/\/ binary: \u662f\u5426\u4f7f\u7528\u4e8c\u8fdb\u5236\n\/\/ cbow: \u662f\u5426\u4f7f\u7528cbow\uff0c\u5426\u5219\u4f7f\u7528skip-gram\n\/\/ window: \u7a97\u957f\uff0c\u9ed8\u8ba4\u4e3a5\n\/\/ min_count: \u51fa\u73b0\u9891\u6b21\u5c0f\u4e8e\u4e00\u5b9a\u503c\u7684\u8bcd\u88ab\u820d\u5f03\n\/\/ min_reduce: \u53c2\u89c1ReduceVocab\nint *vocab_hash;\n\/\/ \u8bcd\u5178\u8bcd\u7684\u54c8\u5e0c\u503c\uff0c\u4e00\u4e2a\u6570\u7ec4\nlong long vocab_max_size=1000, vocab_size=0, layer1_size=100;\n\/\/ \u8bcd\u5178\u8bcd\u6570\u6700\u5927\u503c\u4e3a1000\uff0c\u4e4b\u540e\u4e0d\u591f\u7528\u518d\u52a0\uff0c\u5f53\u524d\u8bcd\u6570\u4e3a0\n\/\/ layer1_size\u6307\u7684\u5e94\u8be5\u662fembedding vector\u7684\u7ef4\u6570\u4e3a100\uff0c\u5373cbow\u7b2c\u4e00\u5c42\u7684\u8282\u70b9\u6570\nlong long train_words=0, word_count_actual=0, iter=5, file_size=0, classes=0;\n\/\/ \u8bcd\u9891\u7edf\u8ba1\n\/\/ train_words\u662f\u6240\u6709\u6709\u6548\u8bcd\u6c47\u7684\u9891\u7387\u4e4b\u548c?\n\/\/ word_count_actual ?\n\/\/ iter\u5e94\u8be5\u662f\u8fed\u4ee3\u6b21\u6570 ?\n\/\/ file_size\u5b9a\u4e49\u6587\u4ef6\u6700\u5927\u4f53\u79ef\uff0c\u7528\u4e8e\u5e76\u884c\u5316\u65f6\u8d1f\u8f7d\u5747\u8861\n\/\/ classes ?\nreal alpha=0.025, starting_alpha, sample=1e-3;\n\/\/ alpha\u662f\u5b66\u4e60\u7387learning rate\n\/\/ starting_alpha \u521d\u59cb\u5b66\u4e60\u7387\uff0c\u53ef\u6307\u5b9a\n\/\/ sample\u53c2\u89c1\u8bcd\u6c47\u8868\u521d\u59cb\u5316\u90e8\u5206\nreal *syn0, *syn1, *syn1neg, *expTable;\n\/\/ *syn0: word2vec\u7684\u6700\u7ec8\u7ed3\u679c\uff0c\u5927\u5c0f\u662fvector\u7ef4\u6570\u00d7\u8f93\u51fa\u8bcd\u6c47\u6570\u76ee\uff0c\u4ee5*point\u4f5c\u4e3aindex\n\/\/ *syn1: \u5168\u90e8\u8f85\u52a9\u53d8\u91cf\uff0c\u4e2a\u6570\u6bd4*syn0\u5c11\u4e00\u4e2a ?\n\/\/ *syn1neg: \u968f\u673a\u8d1f\u91c7\u6837\u5f97\u5230\u7684\u5411\u91cf\u8868\n\/\/ *expTable\u662f\u9884\u5148\u8ba1\u7b97\u7684sigmoid\u51fd\u6570\u503c\uff0c\u53c2\u89c1main\u51fd\u6570\u5e95\u90e8\nclock_t start;  \/\/ \u8ba1\u65f6\u5668\uff0c\u8003\u5bdf\u7a0b\u5e8f\u65f6\u95f4\u590d\u6742\u5ea6\n\nint hs=0, negative=5;  \/\/ \u5b9a\u4e49hierarchical softmax\u548cnegative sampling\u9ed8\u8ba4\u53c2\u6570\uff0c\u524d\u8005\u9ed8\u8ba4\u5173\n\/\/ negative\u4e0d\u4e3a\u96f6\u65f6\u8868\u793a\u6bcf\u4e00\u4e2aword\u548c\u4e0a\u4e0b\u6587\u8981\u8d1f\u91c7\u6837\u7684\u4e2a\u6570\nconst int table_size=1e8;\nint *table;\n\/\/ \u7528\u4e8e\u8d1f\u91c7\u6837\uff0c\u521d\u59cb\u5316\u8d1f\u91c7\u6837\u8f85\u52a9table\uff0c\u4fdd\u8bc1\u4e00\u4e2a\u5355\u8bcd\u88ab\u9009\u4e3a\u8d1f\u6837\u672c\u7684\u6982\u7387d1\u4e3a\n\/\/ (\u8bcd\u9891\u6570)^0.75\/\u6240\u6709\u8bcd[(\u8bcd\u9891\u6570)^0.75]\u4e4b\u548c\nvoid InitUnigramTable() {\n    int a,i;\n    double train_word_pow=0;\n    double d1, power=0.75;      \/\/ \u4e3a\u4ec0\u4e48\u9009\u62e90.75\u4f5c\u4e3a\u6307\u6570\u5c1a\u4e0d\u6e05\u695a\n    table=(int *)malloc(table_size * sizeof(int));\n    for (a=0;a&lt;vocab_size;a++) train_words_pow+=pow(vocab[a].cn, power);    \/\/ ?\n    i=0;\n    d1=pow(vocab[i].cn, power)\/train_word_pow;\n    for (a=0;a&lt;vocab_size;a++) {\n        table[a]=i;\n        if (a\/(double)table_size&gt;d1) {\n            i++;\n            d1+=pow(vocab[i].cn, power)\/train_word_pow;\n        }\n        if (i&gt;=vocab_size) i=vocab_size-1;\n    }\n}\n\n\/\/ Reads a single word from a file, assuming space+tab+EOL to be word boundaries\n\/\/ \u4ece\u6587\u4ef6\u4e2d\u8bfb\u53d6\u5355\u4e2a\u8bcd\uff0c\u4ee5\u7a7a\u683c\u3001\u5236\u8868\u7b26\u548cEOL(EOF\u548c'\\n')\u4f5c\u4e3a\u8fb9\u754c\n\/\/ char(13)\u662f\u56de\u8f66('\\r)\n\/\/ \u4e0d\u540c\u7684\u64cd\u4f5c\u7cfb\u7edf\u4e2d\u6362\u884c\u7b26\u4e0d\u540c\uff0cwin\u662f\\r\\n, unix\u662f\\n, mac\u662f\\r\uff0c\u9700\u7edf\u4e00\u8f6c\u6362\u4e3a'\\n'\u6362\u884c\n\/\/ \u4e0d\u8ba4\u4e3a'\\r'\u662f\u6709\u6548\u7b26\u53f7\n\/\/ \u5355\u4e2a\u6362\u884c\u7b26\u53f7'\\n'\u88ab\u8ba4\u4e3a\u662f\u6709\u6548\u7b26\u53f7\uff0c\u5e76\u88ab\u8bb0\u4e3a\u8bcd\"&lt;\/s&gt;\"\n\/\/ \u9047\u5230\u6362\u884c\u7b26\u4f5c\u4e3a\u7ed3\u5c3e\uff0c\u6362\u884c\u7b26\u5c06\u88ab\u56de\u9000\u5230\u8f93\u5165\u6d41\uff0c\u5f62\u6210\u4e00\u4e2a\u5355\u4e2a\u6362\u884c\u7b26\uff0c\u4e0b\u4e00\u6b21\u8bfb\u4e3a\"&lt;\/s&gt;\"\nvoid ReadWord(char *word, FILE *fin) {\n    int a=0, ch;\n    while (!feof(fin)) {\n        ch=fgetc(fin);\n        if(ch==13) continue;\n        if((ch==' ')||(ch=='\\t')||(ch=='\\n')) {\n            if (a&gt;0) {\n                if (ch=='\\n') ungetc(ch,fin); \/\/ \u6362\u884c\u7b26\u88ab\u56de\u7f29\u5230\u8f93\u5165\u6d41\u4e2d\n                break;\n            }\n            if(ch=='\\n') {\n                strcpy(word,(char*)\"&lt;\/s&gt;\");\n                return;\n            } else continue;\n        }\n        word[a]=ch;\n        a++;\n        if (a&gt;=MAX_STRING-1) a--;  \/\/ Truncate too long words \n        \/\/ \u5355\u4e2a\u8bcd\u6c47\u592a\u957f\u65f6\u9700\u8981\u622a\u65ad\n    }\n    word[a]=0;\n}\n\n\/\/ Return hash value of a word\n\/\/ \u8ba1\u7b97\u8bcd\u7684hash\u503c\nint GetWordHash(char *word) {\n    unsigned long long a, hash=0;\n    for (a=0;a&lt;strlen(word);a++) hash=hash*257+word[a];\n    hash=hash%vocab_hash_size;\n    return hash;\n}\n\n\/\/ Return position of a word in the vocabulary; if the word is not found, return -1\n\/\/ \u6839\u636ehash\u8868\u67e5\u627e\u8bcd\u5728\u8bcd\u5178\u91cc\u7684\u4f4d\u7f6e\nint SearchVocab(char *word) {\n    unsigned int hash=GetWordHash(word);\n    while(1) {\n        if (vocab_hash[hash]==-1) return -1;\n        if (!strcmp(word, vocab[vocab_hash[hash]].word) return vocab_hash[hash];\n        hash=(hash+1)%vocab_hash_size;\n    }\n    return -1;\n}\n\n\/\/ Reads a word and returns its index in the vocabulary\n\/\/ \u4ece\u8f93\u5165\u6587\u4ef6\u4e2d\u8bfb\u53d6\u4e00\u4e2a\u5355\u8bcd\uff0c\u8fd4\u56de\u5176\u5728\u8bcd\u5178\u4e2d\u7684\u4f4d\u7f6e\nint ReadWordIndex(FILE *fin) {\n    char word[MAX_STRING];\n    ReadWord(word, fin);\n    if (feof(fin)) return -1;\n    return SearchVocab(word);\n}\n\n\/\/ Adds a word to the vocabulary\nint AddWordToVocab(char *word) {\n    unsigned int hash, length=strlen(word)+1;\n    if (length&gt;MAX_STRING) length=MAX_STRING; \/\/ \u622a\u65ad\u8bcd\u4f7f\u5176\u957f\u5ea6\u4e0d\u8d85\u8fc7MAX_STRING\n    vocab[vocab_size].word=(char *)calloc(length, sizeof(char));\n    strcpy(vocab[vocab_size].word, word);\n    vocab[vocab_size].cn=0;\n    vocab_size++;\n    \/\/ Reallocate memory if needed\n    if (vocab_size+2&gt;=vocab_max_size) {\n        vocab_max_size+=1000;\n        vocab=(struct vocab_word *)realloc(vocab, vocab_max_size*sizeof(struct vocab_word));\n    }\n    hash=GetWordHash(word);     \/\/ \u83b7\u53d6\u8bcd\u7684hash\n    while (vocab_hash[hash]!=-1) hash=(hash+1)%vocab_hash_size;\n    \/\/ \u5728hash\u8868\u4e2d\u7ebf\u6027\u63a2\u6d4b\uff0c\u5982\u679c\u6709\u4f4d\u7f6e\u4e3a-1(\u5373\u6ca1\u6709\u8bcd)\u5c31\u5c06\u5f53\u524d\u8bcd\u63d2\u5165\u5230\u8fd9\u4e2a\u4f4d\u7f6e\n    vocab_hash[hash]=vocab_size-1;\n    return vocab_size-1;\n}\n\n\/\/ Used later for sorting by word counts\n\/\/ \u6bd4\u8f83\u8bcd\u9891\nint VocabCompare(count void *a, const void *b) {\n    return ((struct vocab_word *)b)-&gt;cn - ((struct vocab_word *)a)-&gt;cn;\n}\n\n\/\/ Sorts the vocabulary by frequency using word counts\n\/\/ \u91cd\u6392\u8bcd\u5178\nvoid SortVocab() {\n    int a, size;\n    unsigned int hash;\n    \/\/ Sort the vocabulary and keep &lt;\/s&gt; at the first position\n    qsort(&amp;vocab[1], vocab_size-1, sizeof(struct vocab_word), VocabCompare);\n    \/\/ \u7528\u5feb\u901f\u6392\u5e8f\uff0c\u6309\u7167\u8bcd\u9891\u5bf9\u8bcd\u5178\u91cc\u7684\u8bcd\u8fdb\u884c\u6392\u5e8f\uff0c\u8bcd\u9891\u5927\u7684\u6392\u524d\u9762\n    for (a=0;a&lt;vocab_hash_size;a++) vocab_hash[a]=-1;       \/\/ \u91cd\u7f6ehash\u8868\u4e3a\u7a7a\n    size=vocab_size;\n    train_words=0;\n    for (a=0;a&lt;size;a++) {\n        \/\/ Words occuring less than min_count times will be discarded from the vocab\n        \/\/ \u8bcd\u9891\u5c0f\u4e8e\u67d0\u4e00\u4e2a\u9608\u503c\u7684\u8bcd\uff0c\u4e22\u5f03\n        if ((vocab[a].cn&lt;min_count) &amp;&amp; (a!=0)) {\n            vocab_size--;\n            free(vocab[a].word);\n        } else {\n            \/\/ Hash will be recomputed, as after the sorting it is not actual\n            \/\/ \u5269\u4e0b\u7684\u8bcd\u91cd\u65b0\u6392\u5217\u5728hash\u8868\u4e2d\n            hash=GetWordHash(vocab[a].word);\n            while(vocab_hash[hash]!=-1) hash=(hash+1)%vocab_hash_size;\n            vocab_hash[hash]=a;\n            train_words+=vocab[a].cn;\n            \/\/ train_words\u662f\u8bcd\u5178\u4e2d\u6240\u6709\u6709\u6548\u8bcd\u7684\u8bcd\u9891\u4e4b\u548c\n        }\n    }\n    \/\/ \u7cbe\u7b80\u8bcd\u5178\u5360\u7528\u7684\u5185\u5b58\n    vocab=(struct vocab_word *)realloc(vocab,(vocab_size+1)*sizeof(struct vocab_word));\n    \/\/ Allocate memory for the binary tree construction\n    \/\/ \u4e3a\u54c8\u592b\u66fc\u6811\u5206\u914d\u5185\u5b58\n    for (a=0;a&lt;vocab_size;a++) {\n        vocab[a].code=(char *)calloc(MAX_CODE_LENGTH,sizeof(char));\n        vocab[a].point=(int *)calloc(MAX_CODE_LENGTH,sizeof(int));\n    }\n}\n\n\/\/ Reduces the vocabulary by removing infrequent tokens\nvoid ReduceVocab() {\n    int a,b=0;\n    unsigned int hash;\n    \/\/ \u5bf9\u4e8e\u8bcda\uff0c\u82e5\u8bcd\u9891\u5927\u4e8e\u9608\u503c\uff0c\u5219\u79fb\u52a8\u5230\u8bcd\u5178\u4f4d\u7f6eb\uff0c\u5426\u5219\u820d\u5f03\n    for (a=0;a&lt;vocab_size;a++) {\n        if (vocab[a].cn &gt; min_reduce) {\n            vocab[b].cn=vocab[a].cn;\n            vocab[b].word=vocab[a].word;\n            b++\n        } else free(vocab[a].word);\n        vocab_size=b;\n    }\n    for (a=0;a&lt;vocab_hash_size;a++) vocab_hash[a]=-1;\n    for (a=0;a&lt;vocab_size;a++) {\n        \/\/ Hash will be recomputed, as it is not actual\n        \/\/ \u91cd\u6784hash\u8868\n        hash=GetWordHash(vocab[a].word);\n        while(vocab_hash[hash]!=-1) hash=(hash+1)%vocab_hash_size;\n        vocab_hash[hash]=a;\n    }\n    fflush(stdout);\n    min_reduce++;       \/\/ \u6bcf\u6267\u884c\u4e00\u6b21\uff0c\u820d\u5f03\u9608\u503c+1 why?\n}\n\n\/\/ Create binary Huffman tree using the word counts\n\/\/ Frequent words will have short unique binary codes\n\/\/ \u6784\u9020\u54c8\u592b\u66fc\u6811\uff0c\u7ed9\u6bcf\u4e2a\u8bcd\u8d4b\u4e88\u4e00\u4e2a\u54c8\u592b\u66fc\u7f16\u7801\uff0c\u8bcd\u9891\u9ad8\u8005\u7801\u957f\u77ed\n\/\/ \u54c8\u592b\u66fc\u6811\u7684\u6784\u5efa\u4f7f\u7528\u8d2a\u5fc3\u6cd5\n\/*\nHuffman\u6811\uff1a\u6700\u4f18\u4e8c\u53c9\u6811\uff0cWPL=\u2211\u7801\u957fx\u6743\u503c \u6700\u4f4e\n\u8d2a\u5fc3\u6cd5\u6784\u9020Huffman\u6811\u7684\u8fc7\u7a0b\uff1a\n\u5bf9\u4e8e\u5df2\u77e5\u7684\u4e00\u7ec4\u53f6\u5b50\u8282\u70b9\uff0c\u6743\u503c\u5206\u522b\u4e3aW1,W2,...,Wn\uff0c\u7ea6\u5b9a\u6743\u503c\u5c0f\u7684\u4e3a\u5de6\u5b50\u6811\uff0c\u5927\u7684\u4e3a\u53f3\u5b50\u6811\n\n\uff081\uff09\u9996\u5148\u628an\u4e2a\u53f6\u5b50\u8282\u70b9\u770b\u6210n\u68f5\u6811\uff08\u4ec5\u6709\u4e00\u4e2a\u8282\u70b9\u7684\u4e8c\u53c9\u6811\uff09\uff0c\u628a\u5b83\u4eec\u770b\u505a\u4e00\u4e2a\u68ee\u6797\n\n    \u4f8b\uff1a   2 4 5 8\n\n\uff082\uff09\u5408\u5e76\u68ee\u6797\u4e2d\u6700\u5c0f\u548c\u6b21\u5c0f\u7684\u4e24\u68f5\u6811\uff0c\u8be5\u6811\u6839\u8282\u70b9\u7684\u6743\u503c\u4e3a\u4e24\u68f5\u5b50\u6811\u4e4b\u548c\uff0c\u6b64\u65f6\u68ee\u6797\u4e2d\u8fd8\u6709n-1\u68f5\u6811\n\n    i.    6    5   8\n         \/ \\\n        2   4\n\n\uff083\uff09\u91cd\u590d\u7b2c\uff082\uff09\u6b65\u76f4\u5230\u68ee\u6797\u4e2d\u53ea\u6709\u4e00\u68f5\u6811\u4e3a\u6b62\n\n    ii.      11        8\n             \/\\\n            5  6\n              \/ \\\n             2   4\n\n    iii.           19\n                  \/  \\\n                 8   11\n                     \/\\\n                    5  6\n                      \/ \\\n                     2   4\n    (done)\n\n*\/\nvoid CreateBinaryTree() {\n    long long a,b,i,min1i,min2i,pos1,pos2,point[MAX_CODE_LENGTH];\n    \/\/ min1i,min2i\u662f\u4e24\u4e2a\u6743\u503c\u6700\u5c0f\u7684\u8282\u70b9\uff0c\u6743\u503c\u4e3avocab[a].cn\u5373\u6bcf\u4e2a\u8bcd\u7684\u8bcd\u9891\n    char code[MAX_CODE_LENGTH];\n    long long *count=(long long *)calloc(vocab_size*2+1,sizeof(long long));\n    \/\/ \u6bcf\u4e2a\u8bcd\u7684\u8bcd\u9891\n    long long *binary=(long long *)calloc(vocab_size*2+1,sizeof(long long));\n    \/\/ \u6bcf\u4e2a\u8bcd\u7684Huffman\u7f16\u7801\uff0c\u4e3a\u4e86\u5bf9\u9f50\u7528\u4e86\u66f4\u5927\u7684\u957f\u5ea6(\u6700\u5927\u7801\u957f\u4e3aMAX_CODE_LENGTH=40)\n    long long *parent_node=(long long *)calloc(vocab_size*2+1,sizeof(long long));\n    \/\/ \u8bb0\u5f55\u6240\u6709\u5b50\u6811\u7684\u7236\u8282\u70b9\n    for (a=0;a&lt;vocab_size;a++) count[a]=vocab[a].cn;\n    for (a=vocab_size;a&lt;vocab_size*2;a++) count[a]=1e15;\n    \/\/ \u5269\u4e0b\u8282\u70b9\u8d4b\u6743\u4e00\u4e2a\u5927\u6811\uff0c\u65b9\u4fbf\u83b7\u53d6\u4e24\u4e2a\u6700\u5c0f\u7684\u6743\u503c\n    pos1=vocab_size-1;\n    pos2=vocab_size;\n    \/\/ Following algorithm constructs the Huffman tree by adding one node at a time\n    \/\/ \u6bcf\u6b21\u5408\u5e76\u4e24\u68f5\u6743\u503c\u6700\u5c0f\u7684\u5b50\u6811\uff0c\u76f4\u5230\u5408\u5e76vocab_size-1\u6b21\n    for (a=0;a&lt;vocab_size-1;a++) {\n        \/\/ First, find two smallest nodes 'min1,min2'\n        if (pos1&gt;=0) {\n            if (count[pos1]&lt;count[pos2]) {\n                min1i=pos1; pos1--;\n            } else {\n                min1i=pos2; pos2++;\n            }\n        } else {\n            min1i=pos2; pos2++;\n        }\n        if (pos1&gt;=0) {\n            if (count[pos1]&lt;count[pos2]) {\n                min2i=pos1; pos1--;\n            } else {\n                min1i=pos2; pos2++;\n            }\n\n        } else {\n            min2i=pos2; pos2++;\n        }\n        count[vocab_size+a]=count[min1i]+count[min2i];\n        parent_node[min1i]=vocab_size+a;\n        parent_node[min2i]=vocab_size+a;\n        binary[min2i]=1;\n        \/\/ binary[min1i]=0, \u5373\u5de6\u5b50\u6811\u7f16\u78010\uff0c\u53f3\u5b50\u6811\u7f16\u78011\n    }\n    \/\/ Now assign binary code to each vocabulary word \n    for (a=0;a&lt;vocab_size;a++) {\n        b=a;\n        i=0;\n        while(1) {\n            code[i]=binary[b];\n            point[i]=b;\n            i++;\n            b=parent_node[b];\n            if (b==vocab_size*2-2) break;\n        }\n        vocab[a].codelen=i;\n        vocab[a].point[0]=vocab_size-2;\n        for (b=0;b&lt;i;b++) {\n            vocab[a].code[i-b-1]=code[b];\n            vocab[a].point[i-b]=point[b]-vocab_size;\n        }\n    }\n    free(count);\n    free(binary);\n    free(parent_node);\n}\n\n\/\/ \u4ece\u8bad\u7ec3\u6587\u4ef6\u4e2d\u83b7\u53d6\u8bcd\u8bed\uff0c\u540c\u65f6\u8bfb\u53d6\u8bad\u7ec3\u6587\u4ef6\u5927\u5c0f\u7528\u4e8e\u5e76\u884c\u8ba1\u7b97\u65f6\u8d1f\u8f7d\u5747\u8861\nvoid LearnVocabFromTrainFile() {\n    char word[MAX_STRING];\n    FILE *fin;\n    long long a,i;\n    for (a=0;a&lt;vocab_hash_size;a++) vocab_hash[a]=-1;   \/\/ \u521d\u59cb\u5316hash\u8868\n    fin=fopen(train_file,\"rb\");\n    if (fin==NULL) {\n        printf(\"ERROR: training data file not found!\\n\");\n        exit(1);\n    }\n    vocab_size=0;\n    AddWordToVocab((char *)\"&lt;\/s&gt;\");\n    \/\/ \u9996\u5148\u52a0\u5165\u4e00\u4e2a\u7279\u6b8a\u8bcd\u6c47&lt;\/s&gt;, \u8fd9\u4e2a\u8bcd\u8868\u793a\u6362\u884c\u7b26\uff0c\u7528\u4e8e\u5206\u9694\u884c\u7684\u6807\u8bb0\n    while (1) {\n        ReadWord(word,fin);     \/\/ \u8bfb\u5165\u4e00\u4e2a\u8bcd\n        if(feof(fin)) break;\n        train_words++;          \/\/ train_words\u662f\u8bcd\u5178\u4e2d\u8bcd\u7684\u6570\u76ee\n        if ((debug_mode)&gt;1 &amp;&amp; (train_words%100000==0)) {\n            printf(\"%lldK%c\",train_words\/1000,13);\n            fflush(stdout);\n        }\n        i=SearchVocab(word);\n        if(i==-1) {\n            a=AddWordToVocab(word);\n            vocab[a].cn=1;\n        }\n        else vocab[i].cn++;\n        if (vocab_size&gt;vocab_hash_size*0.7) ReduceVocab();\n        \/\/ \u5982\u679c\u8bcd\u5178\u4e2d\u8bcd\u6c47\u6570\u76ee\u8fbe\u5230\u4e00\u5b9a\u6570\u91cf\uff0c\u5219\u6e05\u7406\u6389\u4e00\u4e9b\u8bcd\u9891\u8f83\u5c0f\u7684\u8bcd\n    }\n    SortVocab();        \/\/ \u6392\u5e8f\n    if(debug_mode&gt;0) {\n        printf(\"Vocab size: %lld\\n\", vocab_size);\n        printf(\"Words in train file: %lld\\n\", train_words);\n    }\n    file_size=ftell(fin);\n    fclose(fin);\n}\n\n\/\/ \u5199\u5165\u8bcd\u6c47\u5230\u6587\u4ef6save_vocab_file\n\/\/ \u683c\u5f0f\uff1a\u8bcd\u6c47 \u8bcd\u9891\\n\nvoid SaveVocab() {\n    long long i;\n    FILE *fo=fopen(save_vocab_file,\"wb\");\n    for(i=0;i&lt;vocab_size;i++) fprintf(fo, \"%s %lld\\n\",vocab[i].word, vocab[i].cn);\n    fclose(fo);\n}\n\n\/\/ \u4ece\u683c\u5f0f\u4e3a\u201c\u8bcd\u6c47 \u8bcd\u9891\\n\u201d\u7684\u6587\u4ef6\u4e2d\u8bfb\u53d6\u8bcd\u6c47\nvoid ReadVocab() {\n    long long a,i=0;\n    char c;\n    char word[MAX_STRING];\n    FILE *fin=fopen(read_vocab_file,\"rb\");\n    if (fin==NULL) {\n        printf(\"Vocabulary file not found\\n\");\n        exit(1);\n    }\n    for (a=0;a&lt;vocab_hash_size;a++) vocab_hash[a]=-1;\n    vocab_size=0;\n    while(1) {\n        ReadWord(word,fin);\n        if(feof(fin)) break;\n        a=AddWordToVocab(word);\n        fscanf(fin,\"%lld%c\", &amp;vocab[a].cn, &amp;c);\n         \/\/ \u8bfb\u5165\u8bcd\u9891\uff0c\u7531\u4e8efscanf\u548cReadWord\u7684\u95ee\u9898\uff0c\u9700\u8981\u9000\u6389\u8bcd\u9891\u540e\u9762\u8868\u793a\u8fb9\u754c\u7684\u5b57\u7b26\uff0c\u5426\u5219scanf\u4e0d\u4f1a\n         \/\/ \u8bfb\u5165\u8fd9\u4e9b\u7b26\u53f7\uff0cReadWord\u4f1a\u8bfb\u5165\n        i++;\n    }\n    SortVocab();        \/\/ \u91cd\u6392\u8bcd\u5178\n    if (debug_mode&gt;0) {\n        printf(\"Vocab size: %lld\\n\", vocab_size);\n        printf(\"Words in train file: %lld\\n\", train_words);\n    }\n    fin=fopen(train_file,\"rb\");\n    if (fin==NULL) {\n        printf(\"ERROR: training data file not found!\\n\");\n        exit(1);\n    }\n    fseek(fin,0,SEEK_END);\n    file_size=ftell(fin);\n    fclose(fin);\n}\n\nvoid InitNet() {\n    long long a,b;\n    a=posix_memalign((void **)&amp;syn0, 128, (long long)vocab_size*layer1_size*sizeof(real));\n    \/\/ \u5185\u5b58\u52a8\u6001\u5bf9\u9f50\n    \/\/ syn0\u5c31\u662fword2vec\u7684\u8ba1\u7b97\u7ed3\u679c\uff0c\u5373\u8bcd\u7684\u5411\u91cf\u5316\u8868\u793a\uff0c\u5411\u91cf\u7ef4\u6570\u4e3a100\uff0calignment=128\u8981\u6c42\u4e3a2\u7684\u6574\u6570\u6b21\u5e42\n    if (syn1==NULL) {\n        printf(\"Memory allocation failed\\n\");\n        exit(1);\n    }\n\n    \/\/hierarchical softmax \n    if (hs) {\n        a=posix_memalign((void **)&amp;syn1, 128, (long long)vocab_size*layer1_size*sizeof(real));\n        \/\/ syn1\u662f\u8f85\u52a9\u5411\u91cf\uff0c\u5927\u5c0f\u4e3alayer1_size\uff0c\u6570\u91cf\u6bd4syn0\u5c11\u4e00\u4e2a\uff0c\u5168\u96f6\u521d\u59cb\u5316\n        if (syn1==NULL) {\n            printf(\"Memory allocation failed\\n\");\n            exit(1);\n        }\n        for(b=0;b&lt;layer1_size;b++) {\n            for(a=0;a&lt;vocab_size;a++) syn1[a*layer1_size+b]=0;\n        }\n    }\n\n    \/\/ negative sampling \n    if (negative&gt;0) {\n        a=posix_memalign((void **)&amp;syn1neg,128, (long long)vocab_size*layer1_size*sizeof(real));\n        \/\/ \u6bcf\u4e2a\u8bcd\u4e00\u4e2a\u8d1f\u91c7\u6837\u5411\u91cf\n        if (syn1neg==NULL) {\n            printf(\"Memory allocation failed\\n\");\n            exit(1);\n        }\n        for(b=0;b&lt;layer1_size;b++) {\n            for(a=0;a&lt;vocab_size;a++) syn1neg[a*layer1_size+b]=0;\n        }\n    }\n\n    \/\/ \u6240\u6709\u8bcd\u7684\u5bf9\u5e94\u5411\u91cf\u521d\u59cb\u5316\uff0c\u968f\u673a\u6570\uff0c\u8303\u56f4\u5728(-0.5\/\u7ef4\u5ea6)~(0.5\/\u7ef4\u5ea6)\u4e0a\u5747\u5300\u5206\u5e03\n    for(b=0;b&lt;layer1_size;b++) {\n        for(a=0;a&lt;vocab_size;a++) {\n            syn0[a*layer1_size+b]=(rand()\/(real)RAND_MAX-0.5)\/layer1_size;\n        }\n    }\n\n    \/\/\u6784\u9020Huffman\u6811\n    CreateBinaryTree();\n}\n\n\/\/ word2vec\u6a21\u578b\u8bad\u7ec3\u7684\u4e3b\u8981\u8fc7\u7a0b\nvoid *TrainModelThread(void *id) {\n    long long a,b,d,word,last_word,sentence_length=0,sentence_position=0;\n    \/\/ word\u662f\u8bcd\u5728\u8bcd\u5178\u4e2d\u7684\u4f4d\u7f6e\n    long long word_count=0, last_word_count=0, sen[MAX_SENTENCE_LENGTH+1];\n    \/\/ word_count\u7edf\u8ba1\u5b66\u4e60\u4e86\u591a\u5c11\u8bcd\uff0c\u6bcf\u5b66\u4e00\u4e2a\u5c31\u52a0\u4e00\n    \/\/ last_word_count\u7684\u4f5c\u7528\u662f\u52a8\u6001\u53d8\u5316\u5b66\u4e60\u7387\n    \/\/ \u6bcf\u5b66\u4e6010000\u4e2a\u6837\u672c\uff0c\u5b66\u4e60\u7387\u5c31\u53d8\u5c0f\u4e00\u6b21\uff0c\u76f4\u5230\u53d8\u5c0f\u5230starting_alpha*0.0001\n    long long l1,l2,c,target,label,local_iter=iter;\n    \/\/ local_iter ?\n    unsigned long long next_random=(long long)id;\n    \/\/ \u968f\u673a\u6570\u4ee5\u516c\u5f0f next_random=next_random*(unsigned long long)*25214903917+11 \u4ea7\u751f\n    \/\/ 25214903917\u662f\u4e00\u4e2a\u5927\u8d28\u6570\n    real f,g;\n    clock_t now;\n    real *neu1=(real *)calloc(layer1_size, sizeof(real));\n    \/\/ neu1\u662f\u6240\u6709\u8f93\u5165\u7684\u4e0a\u4e0b\u6587\u5411\u91cf\u4e4b\u548c\n    \/\/ neu1e\u662f\u4e00\u8f6e\u8fed\u4ee3\u4e4b\u540e\u6bcf\u4e00\u4e2aword\u5bf9\u5e94\u7684\u5411\u91cf\u7684\u589e\u91cf\n    real *neu1e=(real *)calloc(layer1_size, sizeof(real));\n    FILE *f1=fopen(train_file,\"rb\");\n    fseek(f1,file_size\/(long long)num_threads*(long long)id, SEEK_SET();    \/\/ \u5e76\u884c\u8ba1\u7b97\n    while(1) {\n        \/\/ \u5b66\u4e60\u7387\u66f4\u65b0\uff0c\u65b0\u53e5\u5b50\u8bfb\u53d6\uff0c\u6839\u636e\u4e00\u4e2a\u8bcd\u53ca\u5176\u4e0a\u4e0b\u6587\u8fdb\u884c\u6a21\u578b\u5b66\u4e60\n\n        \/\/ \u5b66\u4e60\u7387\u66f4\u65b0 \n        if (word_count-last_word_count&gt;10000) {\n            \/\/ \u6bcf\u5b66\u4e6010000\u4e2a\u8bcd\uff0c\u5b66\u4e60\u7387\u5c31\u53d8\u5c0f\u4e00\u6b21\n            word_count_actual += word_count+last_word_count;\n            last_word_count=word_count;\n            if ((debug_mode)&gt;1)) {\n                now=clock();\n                printf(\"%cAlpha: %f Progress: %.2f%% Words\/thread\/sec: %.2fk  \",13,alpha,\n                  word_count_actual\/(real)(iter*train_words+1)*100,\n                  word_count_actual\/((real)(now-start+1)\/(real)CLOCKS_PER_SEC*1000));\n                fflush(stdout);\n            }\n            alpha=starting_alpha*(1-word_count_actual\/(real)(iter*train_words+1));\n            \/\/ \u5b66\u4e60\u7387\u53d8\u5316\u516c\u5f0f\n            if(alpha&lt;starting_alpha*0.0001) alpha=starting_alpha*0.0001;\n        }\n\n        \/\/ \u65b0\u53e5\u5b50\u8bfb\u53d6 \n        if(sentence_length==0) {\n            while(1) {\n                word=ReadWordIndex(fi);     \/\/ \u8bfb\u5165\u4e00\u4e2a\u65b0\u53e5\u5b50\n                if(feof(fi)) break;\n                if(word==-1) continue;\n                word_count++;   \/\/ \u8bfb\u5230\u4e00\u4e2a\u6709\u6548\u8bcd\uff0cword_count\u589e\u52a01\n                if(word==0) break;\n                \/\/ The subsampling randomly discards frequent words while \n                \/\/ keeping the ranking same\n                if(sample&gt;0) {\n                    \/\/ \u9ad8\u9891\u8bcd\u4e9a\u91c7\u6837\n                    real ran=(sqrt(vocab[word].cn\/(sample*train_words))+1)*\n                             (sample*train_words)\/vocab[word].cn;\n                    \/\/ ran\u662f\u9ad8\u9891\u8bcd\u4e0d\u88ab\u820d\u5f03\u7684\u6982\u7387, sample\u662f\u4e00\u4e2a\u53c2\u6570\n                    \/\/ ran=sqrt(f\/s)*s\/f=sqrt(s\/f)=sqrt(sample\/frequency)\n                    \/\/ \u5176\u4e2ds\u4e3a\u91c7\u6837\u6570sample*train_words, f=vocab[word].cn\n                    \/\/ \u5206\u6bcd\u7528s+1\u53ea\u662f\u4e3a\u4e86\u9632\u6b62\u5176\u4e3a\u96f6\uff0c\u5b9e\u9645\u51e0\u4e4e\u4e0d\u5f71\u54cd\u6570\u503c\n                    \/\/ \u51fa\u73b0\u9891\u7387\u8d8a\u9ad8\u7684\u8bcd\uff0c\u88ab\u820d\u5f03\u7684\u6982\u7387\u8d8a\u5927\n                    \/\/ \u4e8e\u662f\u9ad8\u9891\u8bcd\u88ab\u820d\u5f03\u7684\u6982\u7387\u4e3amax{0,1-ran} (?)\n                    next_random=next_random*(unsigned long long)25214903917+11;\n                    if(ran&lt;(next_random&amp;0xFFFF)\/(real)65536) continue;\n                    \/\/ \u5c06next_random\u7f29\u653e\u5230(0,1)\n                    \/\/ \u5982\u679c\u8fd9\u4e2a\u968f\u673a\u6570\u5927\u4e8eran(\u5373\u88ab\u820d\u5f03\u6982\u73871-ran)\uff0c\u5373\u88ab\u820d\u5f03\n                }\n                sen[sentence_length]=word;\n                sentence_length++;\n                if(sentence_length&gt;=MAX_SENTENCE_LENGTH) break;\n                \/\/ \u5982\u679c\u53e5\u5b50\u8fc7\u957f\uff0c\u5219\u53ea\u53d6\u524dMAX_SENTENCE_LENGTH\n            }\n            sentence_position=0;\n        }\n\n        \/\/ \u5b66\u4e60\u4e00\u4e2a\u8bcd\u53ca\u5176\u4e0a\u4e0b\u6587\n        if(feof(fi)||(word_count&gt;train_words\/num_threads)) {\n            word_count_actual += word_count-last_word_count;\n            local_iter--;   \/\/ \u8fd9\u6bb5\u7684\u610f\u601d\u5e94\u8be5\u662f\u91cd\u590d\u8fed\u4ee3\u5b66\u4e60\u4e00\u5b9a\u6b21\u6570\n            if(local_iter==0) break;\n            word_count=0;\n            last_word_count=0;\n            sentence_length=0;\n            fseek(fi,file_size\/(long long)num_threads*(long long)id, SEEK_SET);\n            continue;\n        }\n        word=sen[sentence_position];\n        \/\/ \u4ecesentence_position=0\u5f00\u59cb\u4e00\u4e2a\u4e00\u4e2a\u8bcd\u5b66\u4e60\n        if(word==-1) continue;      \/\/ ?\n        for(c=0;c&lt;layer1_size;c++) neu1[c]=0;       \/\/ \u521d\u59cb\u5316 \n        for(c=0;c&lt;layer1_size;c++) neu1e[c]=0;      \/\/ \u521d\u59cb\u5316 \n        next_random=next_random*(unsigned long long)25214903917+11;\n        b=next_random%window;\n        \/\/ b\u662f0\u5230window-1\u7684\u6570\n        \/\/ \u9009\u53d6\u7684\u4e0a\u4e0b\u6587\u662f(sentence_position-(window-b),sentence_postion+(window-b))\u7684\u53bb\u5fc3\u96c6\u5408\n        \/\/ -----------------------------------------------------------------------------------------\n        \/\/ \u4ee5\u4e0b\u90e8\u5206\u81ea\u5df1\u6572\u4e86\u4e00\u904d\uff0c\u770b\u4e86\u4e00\u904dReference\u63d0\u4f9b\u7684\u6ce8\u91ca\uff0c\u4f46\u662f\u8fd8\u662f\u4e0d\u592a\u61c2\uff0c\u8fd8\u9700\u8981\u8fb9\u8bfbpapaer\u8fb9\u505a\u7b14\u8bb0\n        \/\/ \u76ee\u524d\u627e\u5230\u7684\u6253\u7b97\u8bfb\u7684paper\uff1a\n        \/\/\n        \/\/ [1] Goldberg Y, Levy O. word2vec Explained: deriving Mikolov et al.'s negative-sampling \n        \/\/     word-embedding method[J]. arXiv preprint arXiv:1402.3722, 2014.\n        \/\/ [2] Mikolov T, Sutskever I, Chen K, et al. Distributed representations of words and \n        \/\/     phrases and their compositionality[C]\/\/Advances in neural information processing\n        \/\/     systems. 2013: 3111-3119.\n        \/\/ [3] Morin F, Bengio Y. Hierarchical Probabilistic Neural Network Language Model[C]\/\/\n        \/\/     Aistats. 2005, 5: 246-252.\n        \/\/ [4] Mikolov T, Chen K, Corrado G, et al. Efficient estimation of word representations \n        \/\/     in vector space[J]. arXiv preprint arXiv:1301.3781, 2013.\n        \/\/\n        \/\/ -----------------------------------------------------------------------------------------\n        if(cbow) {\n            \/\/ train the cbow architeture\n            \/\/ in-&gt;hidden\n            \/\/ CBOW\u6a21\u578b\uff0c\u6839\u636e\u4e0a\u4e0b\u6587\u5b66\u4e60\n            \/\/ \u9996\u5148\u83b7\u53d6word\u7684\u4e0a\u4e0b\u6587\uff0c\u52a0\u548c\u5230neu1\n            \/\/ \u7136\u540e\u6839\u636e\u662f\u5426\u91c7\u7528hierarchical softmax\u548cnegative sampling\u4f7f\u7528\u4e0d\u540c\u7684\u68af\u5ea6\u516c\u5f0f\n            \/\/ \u4f9d\u6b21\u6839\u636e\u4e24\u79cd\u68af\u5ea6\u516c\u5f0f\u66f4\u65b0\u8f85\u52a9\u5411\u91cfsyn1\uff0c\u7136\u540e\u518d\u66f4\u65b0word\u5411\u91cfsyn0\n            \/\/\n            \/\/ CBOW:                        Skip-Gram:\n            \/\/  input projection output        input   projection  output\n            \/\/  w(t-2)                                           w(t-2)\n            \/\/ w(t-1) \u2572                                    \u2197  w(t-1)\n            \/\/       \u2572 \u2572                                     \u2571  \u2197\n            \/\/         \u2198 \u2198                               \u2571  \u2571  \n            \/\/           [sum] \u2500\u2192 w(t)          w(t) \u2500\u2192  [     ]\n            \/\/         \u2197  \u2197                            \u2572  \u2572\n            \/\/       \u2571  \u2571                                    \u2572  \u2198\n            \/\/ w(t+1)  \u2571                                       \u2198  w(t+1)\n            \/\/   w(t+2)                                       w(t+2)\n            \/\/\n\n            cw=0;\n            for(a=b;a&lt;window*2+1-b;a++) if(a!=window) {\n                c=sentence_position-window+a;\n                if(c&lt;0) continue;\n                if(c&gt;=sentence_length) continue;\n                last_word=sen[c];\n                if(last_word==-1) continue;\n                for(c=0;c&lt;layer1_size;c++) neu1[c]+=syn0[c+last_word*layer1_size];\n                cw++;\n            }\n            if(cw) {\n                for(c=0;c&lt;layer1_size;c++) neu1[c]\/=cw;\n                if(hs) for(d=0;d&lt;vocab[word].codelen;d++) {\n                    f=0;\n                    l2=vocab[word].point[d]*layer1_size;\n                    \/\/ Propagate hidden -&gt; output\n                    for(c=0;c&lt;layer1_size;c++) f+=neu1[c]*syn1[c+l2];\n                    if(f&lt;=-MAX_EXP) continue;\n                    else if(f&gt;=MAX_EXP) continue;\n                    else f=expTable[(int)((f+MAX_EXP)*(EXP_TABLE_SIZE\/MAX_EXP\/2))];\n                    \/\/ 'g' is the gradient multiplied by learning rate\n                    g=(1-vocab[word].code[d]-f)*alpha;\n                    \/\/ Propagate errors output -&gt; hidden \n                    for(c=0;c&lt;layer1_size;c++) neu1e[c]+=g*syn1[c+l2];\n                    \/\/ Learn weights hedden -&gt; output \n                    for(c=0;c&lt;layer1_size;c++) syn1[c+l2]+=g*neu1[c];\n                }\n                \/\/ Negative Sampling\n                if(negative&gt;0) for(d=0;d&lt;negative+1;d++) {\n                    if(d==0) {target=word; label=1;} else {\n                        next_random=next_random*(unsigned long long)25214903917+11;\n                        target=table[(next_random&gt;&gt;16)%table_size];\n                        if(target==0) target=next_random%(vocab_size-1)+1;\n                        if(target==word) continue;\n                        label=0;\n                    }\n                    l2=target*layer1_size;\n                    f=0;\n                    for(c=0;c&lt;layer1_size;c++) f+=neu1[c]*syn1neg[c+l2];\n                    if(f&gt;MAX_EXP) g=(label-1)*alpha;\n                    else if(f&lt;-MAX_EXP) g=(label-0)*alpha;\n                    else g=(label-expTable[(int)((f+MAX_EXP)*(EXP_TABLE_SIZE\/MAX_EXP\/2))])*alpha;\n                    for(c=0;c&lt;layer1_size;c++) neu1e[c]+=g*syn1neg[c+l2];\n                    for(c=0;c&lt;layer1_size;c++) syn1neg[c+l2]+=g*neu1[c];\n                }\n                \/\/ hidden -&gt; in\n                for(a=b;a&lt;window*2+1-b;a++) if(a!=window) {\n                    c=sentence_position-window+a;\n                    if(c&lt;0) continue;\n                    if(c&gt;=sentence_length) continue;\n                    last_word=sen[c];\n                    if(last_word==-1) continue;\n                    for(c=0;c&lt;layer1_size;c++) syn0[c+last_word*layer1_size]+=neu1e[c];\n                }\n            }\n        } else {\n            \/\/ train skip-gram\n            for(a=b;a&lt;window*2+1-b;a++) if(a!=window) {\n                c=sentence_position-window+a;\n                if(c&lt;0) continue;\n                if(c&gt;=sentence_length) continue;\n                last_word=sen[c];\n                if(last_word==-1) continue;\n                l1=last_word*layer1_size;\n                for(c=0;c&lt;layer1_size;c++) neu1e[c]=0;\n                \/\/ hierarchical softmax\n                if (hs) for (d=0;d&lt;vocab[word].codelen;d++) {\n                    f=0;\n                    l2=vocab[word].point[d]*layer1_size;\n                    \/\/ Propagate hidden -&gt; output\n                    for(c=0;c&lt;layer1_size;c++) f+=syn0[c+l1]*syn1[c+l2];\n                    if(f&lt;=-MAX_EXP) continue;\n                    else if(f&gt;=MAX_EXP) continue;\n                    else f=expTable[(int)((f+MAX_EXP)*(EXP_TABLE_SIZE\/MAX_EXP\/2))];\n                    \/\/ 'g' is the gradient multiplied by the learning rate\n                    g=(1-vocab[word].code[d]-f)*alpha;\n                    \/\/ Propagate errors output -&gt; hidden\n                    for(c=0;c&lt;layer1_size;c++) neu1e[c]+=g*syn1[c+l2];\n                    \/\/ Learn weights hidden -&gt; output\n                    for(c=0;c&lt;layer1_size;c++) syn1[c+l2]+=g*syn0[c+l1];\n                }\n                \/\/ Negative Sampling\n                if(negative&gt;0) for (d=0;d&lt;negative+1;d++) {\n                    if(d==0) {target=word; label=1;} \n                    else {\n                        next_random=next_random*(unsigned long long)25214903917+11;\n                        target=table[(next_random&gt;&gt;16)%table_size];\n                        if(target==0) target=next_random%(vocab_size-1)+1;\n                        if(target==word) continue;\n                        label=0;\n                    }\n                    l2=target*layer1_size;\n                    f=0;\n                    for(c=0;c&lt;layer1_size;c++) f+=syn0[c+l1]*syn1neg[c+l2];\n                    if(f&gt;MAX_EXP) g=(label-1)*alpha;\n                    else if(f&lt;-MAX_EXP) g=(label-0)*alpha;\n                    else g=(label-expTable[(int)((f+MAX_EXP)*(EXP_TABLE_SIZE\/MAX_EXP\/2))])*alpha;\n                    for(c=0;c&lt;layer1_size;c++) neu1e[c]+=g*syn1neg[c+l2];\n                    for(c=0;c&lt;layer1_size;c++) syn1neg[c+l2]+=g*syn0[c+l1];\n                }\n                \/\/ Learn weights input -&gt; hidden\n                for(c=0;c&lt;layer1_size;c++) syn0[c+l1]+=neu1e[c];\n            }\n        }\n        sentence_position++;\n        if(sentence_position&gt;=sentence_length) {\n            sentence_length=0;\n            continue;\n        }\n    }\n    fclose(fi);\n    free(neu1);\n    free(neu1e);\n    pthread_exit(NULL);\n}\n\n<\/code><\/pre>\n<p>[1] Goldberg Y, Levy O. word2vec Explained: deriving Mikolov et al.&#8217;s negative-sampling<br \/>\n    word-embedding method[J]. arXiv preprint arXiv:1402.3722, 2014.<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":[],"categories":[1],"tags":[],"_links":{"self":[{"href":"https:\/\/blog.messi.moe\/index.php?rest_route=\/wp\/v2\/posts\/287"}],"collection":[{"href":"https:\/\/blog.messi.moe\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/blog.messi.moe\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/blog.messi.moe\/index.php?rest_route=\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/blog.messi.moe\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=287"}],"version-history":[{"count":10,"href":"https:\/\/blog.messi.moe\/index.php?rest_route=\/wp\/v2\/posts\/287\/revisions"}],"predecessor-version":[{"id":342,"href":"https:\/\/blog.messi.moe\/index.php?rest_route=\/wp\/v2\/posts\/287\/revisions\/342"}],"wp:attachment":[{"href":"https:\/\/blog.messi.moe\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=287"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/blog.messi.moe\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=287"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/blog.messi.moe\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=287"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}