Training set:
{sentence: "张三是华为公司的员工", entities: [{"start": 0, "end": 2, "type": "person"}, {"start": 3, "end": 7, "type": "company"}]}
{sentence: "今天是2021年10月1日", entities: [{"start": 3, "end": 13, "type": "date"}]}
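The annotations above use character offsets over the raw sentence, while the CRF example below works on pre-tokenized sentences with token-level spans. The following is a minimal sketch (not part of the original article; the helper name char_spans_to_token_spans is made up for illustration) of one way to convert character-offset entities into token-level spans, assuming the tokens concatenate back to the original sentence and end offsets are exclusive.

# Illustrative helper (assumption): convert character-offset entity annotations
# into token-level spans, given that the tokens join back into the original text.
def char_spans_to_token_spans(tokens, entities):
    # Record the character range covered by each token
    token_ranges = []
    pos = 0
    for tok in tokens:
        token_ranges.append((pos, pos + len(tok)))
        pos += len(tok)
    converted = []
    for ent in entities:
        # A token belongs to the entity if its character range overlaps the entity span
        covered = [i for i, (s, e) in enumerate(token_ranges)
                   if s < ent['end'] and e > ent['start']]
        if covered:
            converted.append({'start': covered[0], 'end': covered[-1] + 1,
                              'type': ent['type']})
    return converted

# Example with the first annotated sentence, tokenized
tokens = ["张三", "是", "华为", "公司", "的", "员工"]
entities = [{'start': 0, 'end': 2, 'type': 'person'},
            {'start': 3, 'end': 7, 'type': 'company'}]
print(char_spans_to_token_spans(tokens, entities))
# [{'start': 0, 'end': 1, 'type': 'person'}, {'start': 2, 'end': 4, 'type': 'company'}]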
import sklearn_crfsuite

# Define the feature function: simple features for the token at position i
def word2features(sentence, i):
    word = sentence[i]
    features = {
        'word': word,
        'is_capitalized': word[0].upper() == word[0],
        'is_all_lower': word.lower() == word,
        # add more features here
    }
    return features

# Extract features and per-token labels
def extract_features_and_labels(sentences):
    X = []
    y = []
    for sentence in sentences:
        tokens = sentence['sentence']
        X_sentence = [word2features(tokens, i) for i in range(len(tokens))]
        y_sentence = []
        for i in range(len(tokens)):
            # A token gets the type of the entity span covering it, otherwise 'O'
            label = 'O'
            for entity in sentence['entities']:
                if entity['start'] <= i < entity['end']:
                    label = entity['type']
                    break
            y_sentence.append(label)
        X.append(X_sentence)
        y.append(y_sentence)
    return X, y

# Prepare the training data (entity spans are token indices, end exclusive)
train_sentences = [
    {'sentence': ["张三", "是", "华为", "公司", "的", "员工"],
     'entities': [{'start': 0, 'end': 1, 'type': 'person'},
                  {'start': 2, 'end': 4, 'type': 'company'}]},
    {'sentence': ["今天", "是", "2021", "年", "10", "月", "1", "日"],
     'entities': [{'start': 2, 'end': 8, 'type': 'date'}]}
]
X_train, y_train = extract_features_and_labels(train_sentences)

# Train the model
model = sklearn_crfsuite.CRF()
model.fit(X_train, y_train)

# Predict entities
test_sentence = ["张三", "是", "华为", "公司", "的", "员工"]
X_test = [word2features(test_sentence, i) for i in range(len(test_sentence))]
y_pred = model.predict_single(X_test)

# Print the predicted entities (one span per labelled token)
entities = []
for i in range(len(y_pred)):
    if y_pred[i] != 'O':
        entities.append({'start': i, 'end': i + 1, 'type': y_pred[i]})
print(entities)
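The printing loop above emits one span per labelled token, so a multi-token entity such as "华为 公司" comes out as two separate company spans. A small follow-up sketch (the helper merge_adjacent_labels is an illustration, not from the article) merges adjacent tokens carrying the same predicted label into one entity; this is reasonable here because the labels are plain types rather than BIO tags.

# Illustrative helper (assumption): adjacent tokens with the same predicted
# label are treated as one entity, since no BIO scheme is used above.
def merge_adjacent_labels(labels):
    entities = []
    i = 0
    while i < len(labels):
        if labels[i] == 'O':
            i += 1
            continue
        # Extend the span while the label stays the same
        j = i + 1
        while j < len(labels) and labels[j] == labels[i]:
            j += 1
        entities.append({'start': i, 'end': j, 'type': labels[i]})
        i = j
    return entities

# For example, with labels like those predicted for the test sentence above:
print(merge_adjacent_labels(['person', 'O', 'company', 'company', 'O', 'O']))
# [{'start': 0, 'end': 1, 'type': 'person'}, {'start': 2, 'end': 4, 'type': 'company'}]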