How to turn my machine learning model into a Node REST API

Problem description

Here is my code. How can I turn this model into a Node REST API? I have already created the training set and saved the model. Can anyone help me with the API part? I have tried but without success.

# (this snippet assumes `words`, `classes`, `documents`, and `stemmer`,
# e.g. an NLTK LancasterStemmer, were built in an earlier preprocessing step)
import random
import numpy as np
import tensorflow as tf
import tflearn
import nltk

training = []

output = []

# create an empty array for our output

output_empty = [0] * len(classes)

# training set, bag of words for each sentence

for doc in documents:

    # initialize our bag of words

    bag = []

    # list of tokenized words for the pattern

    pattern_words = doc[0]

    # stem each word

    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]

    # create our bag of words array

    for w in words:

        bag.append(1 if w in pattern_words else 0)



    # output is a '0' for each tag and '1' for current tag

    output_row = list(output_empty)

    output_row[classes.index(doc[1])] = 1



    training.append([bag, output_row])



# shuffle our features and turn into np.array

random.shuffle(training)

training = np.array(training)



# create train and test lists

train_x = list(training[:,0])

train_y = list(training[:,1])

tf.reset_default_graph()

# Build neural network

net = tflearn.input_data(shape=[None, len(train_x[0])])

net = tflearn.fully_connected(net, 8)

net = tflearn.fully_connected(net, 8)

net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')

net = tflearn.regression(net)

# define model and set up tensorboard

model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')

# Start training (apply gradient descent algorithm)

model.fit(train_x, train_y, n_epoch=4000, batch_size=8, show_metric=True)

# save the model

model.save('model.tflearn')

# save all of our data structures
import pickle
pickle.dump( {'words':words, 'classes':classes, 'train_x':train_x, 'train_y':train_y}, open( "training_data", "wb" ) )

import pickle

data = pickle.load( open( "training_data", "rb" ) )

words = data['words']

classes = data['classes']

train_x = data['train_x']

train_y = data['train_y']



# import our chat-bot intents file

import json

with open('D:\\android\\ad.json') as json_data:

    intents = json.load(json_data)

def clean_up_sentence(sentence):
    

    # tokenize the pattern

    sentence_words = nltk.word_tokenize(sentence)

    # stem each word

    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]

    return sentence_words



# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence

def bow(sentence, words, show_details=False):

    # tokenize the pattern

    sentence_words = clean_up_sentence(sentence)

    # bag of words

    bag = [0]*len(words)  

    for s in sentence_words:

        for i,w in enumerate(words):

            if w == s: 

                bag[i] = 1

                if show_details:

                    print ("found in bag: %s" % w)



    return np.array(bag)

ERROR_THRESHOLD = 0.25

# classify the input sentence

def classify(sentence):

    # generate probabilities from the model

    results = model.predict([bow(sentence, words)])[0]

    # filter out predictions below a threshold

    results = [[i,r] for i,r in enumerate(results) if r>ERROR_THRESHOLD]

    # sort by strength of probability

    results.sort(key=lambda x: x[1], reverse=True)

    return_list = []

    for r in results:

        return_list.append((classes[r[0]], r[1]))

    # return tuple of intent and probability

    return return_list




def response(sentence, userID='123', show_details=False):

    results = classify(sentence)

    # if we have a classification then find the matching intent tag

    if results:

        # loop as long as there are matches to process

        while results:

            for i in intents['intents']:

                # find a tag matching the first result

                if i['tag'] == results[0][0]:

                    # return a random response from the matched intent

                    return random.choice(i['response'])

            # no intent matched this result; drop it and try the next

            results.pop(0)

Tags: machine-learning

Solution


According to the Tflearn documentation, the library remains compatible with TensorFlow. Google has released TensorFlow.js, which is available both as a browser library and as a Node.js JavaScript library.

A TensorFlow model can be loaded into TensorFlow.js as described at this link:

https://js.tensorflow.org/tutorials/import-saved-model.html

For reference: the model first needs to be converted to the TensorFlow.js format.

- You first need to install TensorFlow.js into your Python environment:

pip install tensorflowjs

- Convert the existing TensorFlow model to the TensorFlow.js web format:

tensorflowjs_converter \
--input_format=tf_saved_model \
--output_node_names='Some/Model/Name' \
--saved_model_tags=serve \
/my/saved_model \
/my/web_model

- Load the saved model in a Node.js environment:

const model = await tf.loadModel('file:///mypath/mymodel.json');
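
From there, exposing the loaded model as a REST endpoint is a matter of wrapping the prediction in an HTTP handler. Below is a minimal sketch using Express and @tensorflow/tfjs-node; the /classify route, the web_model/model.json and words.json paths, and the buildBag helper are illustrative assumptions, not part of the original code (words.json would be the `words` list from the pickled training_data, exported to JSON on the Python side). Note that the input encoding must reproduce the Python bag-of-words exactly, stemming included, for predictions to be meaningful; the tokenizer below is deliberately simplified. Also, newer TensorFlow.js versions load SavedModel conversions with tf.loadGraphModel and Keras-format models with tf.loadLayersModel; tf.loadModel is the older name.

const express = require('express');
const tf = require('@tensorflow/tfjs-node');
// hypothetical JSON export of the pickled `words` list
const words = require('./words.json');

// rebuild the bag-of-words encoding used at training time
// (simplified: no NLTK tokenizing/stemming, unlike the Python side)
function buildBag(sentence) {
    const tokens = sentence.toLowerCase().split(/\s+/);
    return words.map(w => (tokens.includes(w) ? 1 : 0));
}

async function main() {
    // for a model converted from a SavedModel; Keras-format
    // conversions would use tf.loadLayersModel instead
    const model = await tf.loadGraphModel('file://./web_model/model.json');

    const app = express();
    app.use(express.json());

    // POST /classify with body { "sentence": "..." }
    app.post('/classify', (req, res) => {
        const bag = buildBag(req.body.sentence || '');
        const input = tf.tensor2d([bag]);              // shape [1, len(words)]
        const probs = model.predict(input).dataSync(); // softmax score per class
        input.dispose();
        res.json({ probabilities: Array.from(probs) });
    });

    app.listen(3000, () => console.log('model API listening on :3000'));
}

main();

Running this with node and POSTing JSON such as {"sentence": "hello"} to localhost:3000/classify returns one probability per class; mapping indices back to intent tags can reuse the same `classes` list, exported alongside `words`.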
