
An image-to-image search engine built on Elasticsearch with the aliyun-knn plugin

This example is based on Elasticsearch 6.7 with the aliyun-knn plugin installed; the image feature vectors are 512-dimensional.
If you run a self-hosted Elasticsearch, the aliyun-knn plugin is not available; for self-hosted clusters, ES 7.x is recommended, with the fast-elasticsearch-vector-scoring plugin installed (https://github.com/lior-k/fast-elasticsearch-vector-scoring/).

Since my Python skills are limited, the image feature extraction in this article uses the VGGNet code from yongyuan.name; many thanks to the author!

1. ES design

1.1 Index structure

# Create an image index
PUT images_v2
{
  "aliases": {
    "images": {}
  }, 
  "settings": {
    "index.codec": "proxima",
    "index.vector.algorithm": "hnsw",
    "index.number_of_replicas":1,
    "index.number_of_shards":3
  },
  "mappings": {
    "_doc": {
      "properties": {
        "feature": {
          "type": "proxima_vector",
          "dim": 512
        },
        "relation_id": {
          "type": "keyword"
        },
        "image_path": {
          "type": "keyword"
        }
      }
    }
  }
}

1.2 Query DSL

GET images/_search
{
  "query": {
    "hnsw": {
      "feature": {
        "vector": [255,....255],
        "size": 3,
        "ef": 1
      }
    }
  },
  "from": 0,
  "size": 20, 
  "sort": [
    {
      "_score": {
        "order": "desc"
      }
    }
  ], 
  "collapse": {
    "field": "relation_id"
  },
  "_source": {
    "includes": [
      "relation_id",
      "image_path"
    ]
  }
}

2. Image features

extract_cnn_vgg16_keras.py

# -*- coding: utf-8 -*-
# Author: yongyuan.name
import numpy as np
from numpy import linalg as LA
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
class VGGNet:
    def __init__(self):
        # weights: 'imagenet'
        # pooling: 'max' or 'avg'
        # input_shape: (width, height, 3), width and height should >= 48
        self.input_shape = (224, 224, 3)
        self.weight = 'imagenet'
        self.pooling = 'max'
        self.model = VGG16(weights = self.weight, input_shape = (self.input_shape[0], self.input_shape[1], self.input_shape[2]), pooling = self.pooling, include_top = False)
        self.model.predict(np.zeros((1, 224, 224 , 3)))
    '''
    Use vgg16 model to extract features
    Output normalized feature vector
    '''
    def extract_feat(self, img_path):
        img = image.load_img(img_path, target_size=(self.input_shape[0], self.input_shape[1]))
        img = image.img_to_array(img)
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
        feat = self.model.predict(img)
        norm_feat = feat[0]/LA.norm(feat[0])
        return norm_feat
# Extract the feature vector for an image
from extract_cnn_vgg16_keras import VGGNet
model = VGGNet()
file_path = "./demo.jpg"
queryVec = model.extract_feat(file_path)
feature = queryVec.tolist()
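
As a quick sanity check (a sketch of my own, not part of the original code), the pooled VGG16 output has 512 channels, which matches the "dim": 512 declared in the mapping, and extract_feat L2-normalizes the vector:

import numpy as np
assert len(feature) == 512          # matches "dim": 512 in the images_v2 mapping
print(np.linalg.norm(feature))      # ~1.0, because extract_feat divides by the L2 norm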

3. Writing image features into ES

helper.py

import re
import urllib.request
def strip(path):
    """
    需要清洗的文件夹名字
    清洗掉Windows系统非法文件夹名字的字符串
    :param path:
    :return:
    """
    path = re.sub(r'[?\\*|“<>:/]', '', str(path))
    return path

def getfilename(url):
    """
    通过url获取最后的文件名
    :param url:
    :return:
    """
    filename = url.split('/')[-1]
    filename = strip(filename)
    return filename

def urllib_download(url, filename):
    """
    下载
    :param url:
    :param filename:
    :return:
    """
    return urllib.request.urlretrieve(url, filename)

train.py

# coding=utf-8
import mysql.connector
import os
from helper import urllib_download, getfilename
from elasticsearch5 import Elasticsearch, helpers
from extract_cnn_vgg16_keras import VGGNet
model = VGGNet()
http_auth = ("elastic", "123455")
es = Elasticsearch("http://127.0.0.1:9200", http_auth=http_auth)
mydb = mysql.connector.connect(
    host="127.0.0.1",  # database host
    user="root",  # database user
    passwd="123456",  # database password
    database="images"
)
mycursor = mydb.cursor()
image_path = "./images/"
def get_data(page=1):
    page_size = 20
    offset = (page - 1) * page_size
    sql = """
    SELECT id, relation_id, photo FROM  images  LIMIT {0},{1}
    """
    mycursor.execute(sql.format(offset, page_size))
    myresult = mycursor.fetchall()
    return myresult

def train_image_feature(myresult):
    indexName = "images"
    photo_path = "http://域名/{0}"  # "域名" is a placeholder for your image host domain
    actions = []
    for x in myresult:
        id = str(x[0])
        relation_id = x[1]
        # photo = x[2].decode(encoding="utf-8")
        photo = x[2]
        full_photo = photo_path.format(photo)
        filename = image_path + getfilename(full_photo)
        if not os.path.exists(filename):
            try:
                urllib_download(full_photo, filename)
            except BaseException as e:
                print("id:{0} image {1} could not be downloaded".format(id, full_photo))
                continue
        if not os.path.exists(filename):
            continue
        try:
            feature = model.extract_feat(filename).tolist()
            action = {
                "_op_type": "index",
                "_index": indexName,
                "_type": "_doc",
                "_id": id,
                "_source": {
                    "relation_id": relation_id,
                    "feature": feature,
                    "image_path": photo
                }
            }
            actions.append(action)
        except BaseException as e:
            print("id:{0} image {1} feature extraction failed".format(id, full_photo))
            continue
    # print(actions)
    succeed_num = 0
    for ok, response in helpers.streaming_bulk(es, actions):
        if not ok:
            print(ok)
            print(response)
        else:
            succeed_num += 1
    print("indexed {0} documents in this batch".format(succeed_num))
    es.indices.refresh(indexName)

page = 1
while True:
    print("当前第{0}页".format(page))
    myresult = get_data(page=page)
    if not myresult:
        print("没有获取到数据了,退出")
        break
    train_image_feature(myresult)
    page += 1

4. Searching by image

import requests
import json
import os
import time
from elasticsearch5 import Elasticsearch
from extract_cnn_vgg16_keras import VGGNet
# Note: this snippet is meant to run inside a Flask view, so `request` has to come from Flask
from flask import request
model = VGGNet()
http_auth = ("elastic", "123455")
es = Elasticsearch("http://127.0.0.1:9200", http_auth=http_auth)
# save the uploaded image
upload_image_path = "./runtime/"
upload_image = request.files.get("image")
upload_image_type = upload_image.content_type.split('/')[-1]
file_name = str(time.time())[:10] + '.' + upload_image_type
file_path = upload_image_path + file_name
upload_image.save(file_path)
# compute the feature vector of the image
queryVec = model.extract_feat(file_path)
feature = queryVec.tolist()
# remove the temporary image
os.remove(file_path)
# search ES with the feature vector
body = {
    "query": {
        "hnsw": {
            "feature": {
                "vector": feature,
                "size": 5,
                "ef": 10
            }
        }
    },
    # "collapse": {
    # "field": "relation_id"
    # },
    "_source": {"includes": ["relation_id", "image_path"]},
    "from": 0,
    "size": 40
}
indexName = "images"
res = es.search(indexName, body=body)
# It is best to filter out low-scoring hits according to your own data; in my tests, scores of 0.65 and above were good matches
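
For reference, a minimal sketch of such post-filtering on the response above (the 0.65 threshold is just the empirical value from my tests, not a fixed rule):

SCORE_THRESHOLD = 0.65
results = [
    {
        "relation_id": hit["_source"]["relation_id"],
        "image_path": hit["_source"]["image_path"],
        "score": hit["_score"],
    }
    for hit in res["hits"]["hits"]
    if hit["_score"] >= SCORE_THRESHOLD
]
print(json.dumps(results, ensure_ascii=False, indent=2))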

5. Required packages

mysql_connector_repackaged
elasticsearch
Pillow
tensorflow
requests
pandas
Keras
numpy

Community Daily No. 882 (2020-03-15)

1. Python Flask Celery + ELK.
http://t.cn/A6zakWIP
2. ELK Stack tutorial: discover, analyze and visualize your data effectively.
http://t.cn/RTqOKYy
3. (requires a VPN) 32+ funny code comments that people actually wrote.
http://t.cn/A6zSvmhc

Editor: 至尊宝
Archive: https://ela.st/cn-daily-all
Subscribe: https://ela.st/cn-daily-sub
Meetup: https://ela.st/cn-meetup

Community Daily No. 881 (2020-03-14)

1. An example of operating ES from PHP

http://t.cn/A6z66a9g

2. Lucene 8.2.0 internals: an analysis of the ByteBlockPool structure

http://t.cn/A6z66a9d

3. The difference between search_as_you_type and Context Suggester fields in ES

http://t.cn/A6z66a9r


Community Daily No. 880 (2020-03-13)

1. A guide to Elasticsearch snapshots (requires a VPN)
http://t.cn/A6zI6iOI
2. Machine learning on Elasticsearch with Spark and ES-Hadoop (requires a VPN)
http://t.cn/A6zI6axs
3. Geospatial queries in Elasticsearch (requires a VPN)
http://t.cn/A6zI6Kwp

Editor: 铭毅天下
Archive: https://ela.st/cn-daily-all
Subscribe: https://ela.st/cn-daily-sub
Meetup: https://ela.st/cn-meetup

Community Daily No. 879 (2020-03-12)

1. Running Filebeat in Docker
http://t.cn/A6z5WgQt
2. A practical guide to smoothly decommissioning Elasticsearch nodes
http://t.cn/A6z5WsPL
3. How to search dynamic fields with Elasticsearch
http://t.cn/A6z5lhdO

Editor: 金桥
Archive: https://ela.st/cn-daily-all
Subscribe: https://ela.st/cn-daily-sub
Meetup: https://ela.st/cn-meetup

Community Daily No. 878 (2020-03-11)

1. How a post-95s ops engineer passed the Elastic certification exam in 20+ days
http://t.cn/A6zbKLpy
2. Elasticsearch tuning practice behind 1.6 billion health-code displays at Tencent
http://t.cn/A6zbKcx2
3. Elasticsearch use cases: cross_fields
http://t.cn/A67FEMGY

Editor: 江水
Archive: https://ela.st/cn-daily-all
Subscribe: https://ela.st/cn-daily-sub
Meetup: https://ela.st/cn-meetup
 

An approach to handling object arrays in Elasticsearch

Current situation

Elasticsearch has two ways of handling arrays of objects, array and nested, but both have shortcomings.
Take the following document as an example:

{
  "user": [
    {
      "first": "John",
      "last": "Smith"
    },
    {
      "first": "Alice",
      "last": "White"
    }
  ]
}

If the field is mapped as an array, what actually gets indexed is:

user.first:["John","Alice"]
user.last:["Smith","White"]

If you query user.first:John and user.last:White with a must clause, this document also matches, which is not what we expect.

If the field is mapped as nested, Elasticsearch treats each object as a separate document; this example would store 3 docs, which significantly hurts indexing and query performance.

The Flatten format

The storage scheme I came up with is simple: flatten the object array into a string array of keyword type, hence the name Flatten format. Using the document above, the object array is converted to the following format:

"user.flatten": [
    "first:John",
    "last:Smith",
    "first:John&last:Smith",
    "first:Alice",
    "last:White",
    "first:Alice&last:White"
  ]

With this, a must query for user.first:John and user.last:White can be rewritten as a single term query on first:John&last:White, which does not match this document (see the sketch below).
At the same time, the document is still stored as a single doc, avoiding the drawback of nested.
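
A minimal sketch of what the rewritten query could look like with the Python client (the index name flatten_demo and the host are placeholders of my own, not from the original article):

from elasticsearch import Elasticsearch

es = Elasticsearch("http://127.0.0.1:9200")  # placeholder host

# One term on the combined value replaces the two separate must clauses
# on user.first and user.last.
body = {
    "query": {
        "term": {
            "user.flatten": "first:John&last:White"
        }
    }
}
res = es.search(index="flatten_demo", body=body)
print(len(res["hits"]["hits"]))  # 0 for the example document above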

A few notes on the flatten format

Size of the user.flatten array

If there are M user objects, each with N attributes, the array size is (2^N-1)*M.

How to handle missing attributes

It is recommended to store them as a literal null, for example:

    {
              "first": "John",
             "last": null
    }

After conversion:

    [
        "first:John",
        "last:null",
        "first:John&last:null",
    ]

Indexing and querying must process the attributes in the same order

The examples above store the values in first&last order, so a must query for user.first:John and user.last:White must also be issued as first:John&last:White, never as last:White&first:John. A sketch of a helper that builds the flattened values in a fixed order follows below.
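
A minimal sketch (my own helper, not from the original article) that builds the user.flatten strings with a fixed key order and null handling; for N keys it produces (2^N - 1) strings per object:

from itertools import combinations

def flatten_objects(objs, keys):
    """Build flatten strings for a list of objects, using `keys` as the fixed attribute order."""
    out = []
    for obj in objs:
        parts = ["{0}:{1}".format(k, obj[k] if obj.get(k) is not None else "null") for k in keys]
        for r in range(1, len(parts) + 1):
            for combo in combinations(parts, r):
                out.append("&".join(combo))
    return out

user = [
    {"first": "John", "last": "Smith"},
    {"first": "Alice", "last": "White"},
]
print(flatten_objects(user, ["first", "last"]))
# ['first:John', 'last:Smith', 'first:John&last:Smith',
#  'first:Alice', 'last:White', 'first:Alice&last:White']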

Drawbacks

  • You have to write your own code to convert the JSON objects into the string array
  • You have to write your own code to rewrite the queries
  • Only term queries are supported

Community Daily No. 877 (2020-03-10)

1. Connecting Kafka to Elasticsearch in practice.
http://t.cn/A673xv8b
2. The right way to get started with ElasticSearch, Kibana and Logstash.
http://t.cn/A6z2U2px
3. Deploying an Elasticsearch cluster with Docker.
http://t.cn/A6z2UyIs

Editor: 叮咚光军
Archive: https://ela.st/cn-daily-all
Subscribe: https://ela.st/cn-daily-sub

Community Daily No. 876 (2020-03-09)

1. Tuning Elasticsearch OOM problems
http://t.cn/A67sslcO
2. Notes from an Elasticsearch cluster disaster recovery
http://t.cn/EAlk7Et
3. Why developers should care about the ES cluster state
http://t.cn/A6zvh4Yk

Editor: cyberdak
Archive: https://ela.st/cn-daily-all
Subscribe: https://ela.st/cn-daily-sub
Meetup: https://ela.st/cn-meetup

Community Daily No. 875 (2020-03-08)

1. An Elasticsearch and NodeJS tutorial.
http://t.cn/A67DPpga
2. How to build a real-time search engine with Node, Vue and ElasticSearch.
http://t.cn/A67DhWIj
3. (requires a VPN) Understand why Ethereum exists, and you will see why it matters.
http://t.cn/A67D7Z3o

Editor: 至尊宝
Archive: https://ela.st/cn-daily-all
Subscribe: https://ela.st/cn-daily-sub
Meetup: https://ela.st/cn-meetup

Community Daily No. 874 (2020-03-07)

1. Enriching data with Logstash transformation filters http://t.cn/A67djezW

2. A postmortem of an ES read-only index incident http://t.cn/A67djezT

3. A tool that connects Kafka to ES http://t.cn/A67djezn


Elasticsearch 7.6: detecting text language with machine learning

The newly released Elasticsearch 7.6 contains many exciting features, and the one that interests me most is language detection powered by machine learning.

A first look

Detecting the language of a text is nothing new. Back when I worked on crawlers I had to detect the language of page content, and there are plenty of mature solutions. The best of them is the CLD family (Compact Language Detector) open-sourced by the Google Chrome team, which can detect more than 80 languages. I have used CLD2, which is implemented in C++ with a Bayesian classifier; CLD3 is based on a neural network and is undoubtedly more accurate. Elasticsearch now bundles this small but useful capability directly in the default distribution, which is a real convenience for users.

The multilingual pain point

In real business scenarios many of you have probably run into a single field that holds text in several languages, especially in products that go global. Take a Dianping-style review app: under one restaurant, users from all over the world leave reviews. German users write in German, French users in French, Guangzhou users in Cantonese. That puts the backend engineers in a tough spot: if all of these reviews live in one field, it is hard to pick a single analyzer, because different languages have different tokenization rules. The simplest example is Chinese versus English: with the wrong analyzer, query results become inaccurate.

Many people have used the workaround of copying the field several times, one copy analyzed as English, one as Chinese, implemented with multi-fields. That solves part of the problem, but it wastes storage and resources, and at query time you still have to decide which field to search.

With this new feature in 7.6, Elasticsearch can infer the language of the content at index time and shape the indexed document accordingly, so that text in a given language lands in the matching field, improving both search experience and performance. The Elastic website also has a blog post about this feature with detailed examples.

Hands on

It looks good on paper, but as the saying (jokingly misattributed to Lu Xun) goes, what you learn from reading always feels shallow; to truly understand it you have to do it yourself. So let's run through it today and see how it is used.

How the feature works

First of all, the feature is called Language identification. It is a machine learning feature, but it cannot be used on its own; it has to be combined with the inference ingest processor of an Ingest node. Ingest processors are data pre-processors that run inside Elasticsearch, partly overlapping with Logstash's parsing features; for simple data manipulation they can replace Logstash entirely and simplify the deployment architecture.

The 7.6 distribution ships with a pre-trained machine learning model, the language detection model that Language identification calls. Its name is fixed: lang_ident_model_1. It is also the first model bundled with Elasticsearch, which is good to know.

So where does this model package live? Let's dissect the distribution:

$unzip /usr/share/elasticsearch/modules/x-pack-ml/x-pack-ml-7.6.0.jar 
$/org/elasticsearch/xpack/ml/inference$ tree
.
|-- ingest
|   |-- InferenceProcessor$Factory.class
|   `-- InferenceProcessor.class
|-- loadingservice
|   |-- LocalModel$1.class
|   |-- LocalModel.class
|   |-- Model.class
|   `-- ModelLoadingService.class
`-- persistence
    |-- InferenceInternalIndex.class
    |-- TrainedModelDefinitionDoc$1.class
    |-- TrainedModelDefinitionDoc$Builder.class
    |-- TrainedModelDefinitionDoc.class
    |-- TrainedModelProvider.class
    `-- lang_ident_model_1.json

3 directories, 12 files

As you can see, the persistence directory contains the model package in JSON format; inside it there is a compressed, binary-encoded field holding the model definition.

(screenshot: the lang_ident_model_1.json file with its compressed model definition field)

Inspecting the model

We can also fetch the model information through the new API, which will be more useful once there are more models:

GET _ml/inference/lang_ident_model_1
{
  "count" : 1,
  "trained_model_configs" : [
    {
      "model_id" : "lang_ident_model_1",
      "created_by" : "_xpack",
      "version" : "7.6.0",
      "description" : "Model used for identifying language from arbitrary input text.",
      "create_time" : 1575548914594,
      "tags" : [
        "lang_ident",
        "prepackaged"
      ],
      "input" : {
        "field_names" : [
          "text"
        ]
      },
      "estimated_heap_memory_usage_bytes" : 1053992,
      "estimated_operations" : 39629,
      "license_level" : "basic"
    }
  ]
}

Simulating an Ingest Pipeline

That covers the basics; time to get hands-on. Since the feature has to be combined with Ingest, we need to define an Ingest Pipeline, that is, a set of processing rules invoked at index time on incoming documents. Debugging an Ingest Pipeline can be awkward, but fortunately Ingest provides a simulate API, so let's test with it:


POST _ingest/pipeline/_simulate
{
   "pipeline":{
      "processors":[
         {
            "inference":{
               "model_id":"lang_ident_model_1", 
               "inference_config":{
                  "classification":{
                     "num_top_classes":5 
                  }
               },
               "field_mappings":{

               }
            }
         }
      ]
   },
   "docs":[
      {
         "_source":{ 
            "text":"新冠病毒让你在家好好带着,你服不服"
         }
      }
   ]
}

The request above uses the Ingest inference processor to simulate running the machine learning model on a piece of text. The first part defines the processors, here a single inference processor that performs language detection; the second part, docs, supplies a JSON document as the test input. The result is as follows:

{
  "docs" : [
    {
      "doc" : {
        "_index" : "_index",
        "_type" : "_doc",
        "_id" : "_id",
        "_source" : {
          "text" : "新冠病毒让你在家好好带着,你服不服",
          "ml" : {
            "inference" : {
              "top_classes" : [
                {
                  "class_name" : "zh",
                  "class_probability" : 0.9999872511022145,
                  "class_score" : 0.9999872511022145
                },
                {
                  "class_name" : "ja",
                  "class_probability" : 1.061491174235718E-5,
                  "class_score" : 1.061491174235718E-5
                },
                {
                  "class_name" : "hy",
                  "class_probability" : 6.304673023324264E-7,
                  "class_score" : 6.304673023324264E-7
                },
                {
                  "class_name" : "ta",
                  "class_probability" : 4.1374037676410867E-7,
                  "class_score" : 4.1374037676410867E-7
                },
                {
                  "class_name" : "te",
                  "class_probability" : 2.0709260170937159E-7,
                  "class_score" : 2.0709260170937159E-7
                }
              ],
              "predicted_value" : "zh",
              "model_id" : "lang_ident_model_1"
            }
          }
        },
        "_ingest" : {
          "timestamp" : "2020-03-06T15:58:44.783736Z"
        }
      }
    }
  ]
}

Looking at the first entry of the result, zh means Chinese, with a probability of 0.9999872511022145, which is practically certain: this is Chinese text. The second and remaining entries score clearly lower. If their scores seem to start with 1.x or 6.x and look larger at first glance, look again: they carry an E- exponent at the end, i.e. scientific notation, so the values are actually far smaller than 1.

Creating a Pipeline

The simulation shows the feature works; now let's see how to use it for real.

First, create a Pipeline:


PUT _ingest/pipeline/lang_detect_add_tag
{
  "description": "检测文本,添加语种标签",
  "processors": [
    {
      "inference": {
        "model_id": "lang_ident_model_1",
        "inference_config": {
          "classification": {
            "num_top_classes": 2
          }
        },
        "field_mappings": {
            "contents": "text"
        }
      }
    },
    {
      "set": {
        "field": "tag",
        "value": "{{ml.inference.predicted_value}}"
      }
    }
  ]
}

Here we defined an Ingest Pipeline with the ID lang_detect_add_tag, configured the inference model to return only the top 2 classes, and mapped the contents field as the text to detect. We also added a set processor, which creates a field named tag whose value is a template reference to another field; in other words, the detected language is stored in a tag field.
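
If you prefer to manage the pipeline from code rather than Kibana, a sketch with the official Python client could look like this (host and credentials are placeholders, not from the article):

from elasticsearch import Elasticsearch

es = Elasticsearch("http://127.0.0.1:9200")  # placeholder host

pipeline = {
    "description": "detect text language and add a language tag",
    "processors": [
        {
            "inference": {
                "model_id": "lang_ident_model_1",
                "inference_config": {"classification": {"num_top_classes": 2}},
                "field_mappings": {"contents": "text"},
            }
        },
        {"set": {"field": "tag", "value": "{{ml.inference.predicted_value}}"}},
    ],
}
es.ingest.put_pipeline(id="lang_detect_add_tag", body=pipeline)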

Testing the Pipeline

Once the Pipeline is created, we can simulate it too; the advantage of simulation is that no index is actually created, which makes debugging easier.

POST /_ingest/pipeline/lang_detect_add_tag/_simulate
{
  "docs": [
    {
      "_index": "index",
      "_id": "id",
      "_source": {
        "contents": "巴林境内新型冠状病毒肺炎确诊病例累计达56例"
      }
    },
    {
      "_index": "index",
      "_id": "id",
      "_source": {
        "contents": "Watch live: WHO gives a coronavirus update as global cases top 100,000"
      }
    }
  ]
}

The response:

{
  "docs" : [
    {
      "doc" : {
        "_index" : "index",
        "_type" : "_doc",
        "_id" : "id",
        "_source" : {
          "tag" : "zh",
          "contents" : "巴林境内新型冠状病毒肺炎确诊病例累计达56例",
          "ml" : {
            "inference" : {
              "top_classes" : [
                {
                  "class_name" : "zh",
                  "class_probability" : 0.999812378112116,
                  "class_score" : 0.999812378112116
                },
                {
                  "class_name" : "ja",
                  "class_probability" : 1.8175264877915687E-4,
                  "class_score" : 1.8175264877915687E-4
                }
              ],
              "predicted_value" : "zh",
              "model_id" : "lang_ident_model_1"
            }
          }
        },
        "_ingest" : {
          "timestamp" : "2020-03-06T16:21:26.981249Z"
        }
      }
    },
    {
      "doc" : {
        "_index" : "index",
        "_type" : "_doc",
        "_id" : "id",
        "_source" : {
          "tag" : "en",
          "contents" : "Watch live: WHO gives a coronavirus update as global cases top 100,000",
          "ml" : {
            "inference" : {
              "top_classes" : [
                {
                  "class_name" : "en",
                  "class_probability" : 0.9896669173070857,
                  "class_score" : 0.9896669173070857
                },
                {
                  "class_name" : "tg",
                  "class_probability" : 0.0033122788575614993,
                  "class_score" : 0.0033122788575614993
                }
              ],
              "predicted_value" : "en",
              "model_id" : "lang_ident_model_1"
            }
          }
        },
        "_ingest" : {
          "timestamp" : "2020-03-06T16:21:26.981261Z"
        }
      }
    }
  ]
}

Refining the Pipeline

Both documents were identified correctly and got the corresponding tag field. At this point the ml object inside the documents is somewhat redundant, and we can use a remove processor to drop it.

PUT _ingest/pipeline/lang_detect_add_tag
{
  "description": "检测文本,添加语种标签",
  "processors": [
    {
      "inference": {
        "model_id": "lang_ident_model_1",
        "inference_config": {
          "classification": {
            "num_top_classes": 2
          }
        },
        "field_mappings": {
          "contents": "text"
        }
      }
    },
    {
      "set": {
        "field": "tag",
        "value": "{{ml.inference.predicted_value}}"
      }
    },
    {
      "remove": {
        "field": "ml"
      }
    }
  ]
}

Indexing a document through the Pipeline

So how is this Pipeline used when indexing? See the example below:


POST news/_doc/1?pipeline=lang_detect_add_tag
{
  "contents":"""
  On Friday, he added: "In a globalised world, the only option is to stand together. All countries should really make sure that we stand together." Meanwhile, Italy—the country worst affected in Europe—reported 41 new COVID-19 deaths in just 24 hours. The country's civil protection agency said on Thursday evening that 3,858 people had been infected and 148 had died.
  """
}

GET news/_doc/1

I won't paste the response for this example; try it yourself. A sketch of doing the same with the Python client follows below.
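
For completeness, a sketch of indexing through the pipeline with the Python client (the host is a placeholder; the news index and pipeline name come from the example above):

from elasticsearch import Elasticsearch

es = Elasticsearch("http://127.0.0.1:9200")  # placeholder host

doc = {"contents": "In a globalised world, the only option is to stand together."}
# The pipeline parameter tells Elasticsearch to run lang_detect_add_tag before indexing.
es.index(index="news", id=1, body=doc, pipeline="lang_detect_add_tag")
print(es.get(index="news", id=1)["_source"])  # should now include "tag": "en"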

Another example

Back to the original scenario: how do you store the text into different fields depending on the detected language? Here is the example from the official blog:

POST _ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      {
        "inference": {
          "model_id": "lang_ident_model_1",
          "inference_config": {
            "classification": {
              "num_top_classes": 1
            }
          },
          "field_mappings": {
            "contents": "text"
          },
          "target_field": "_ml.lang_ident"
        }
      },
      {
        "rename": {
          "field": "contents",
          "target_field": "contents.default"
        }
      },
      {
        "rename": {
          "field": "_ml.lang_ident.predicted_value",
          "target_field": "contents.language"
        }
      },
      {
        "script": {
          "lang": "painless",
          "source": "ctx.contents.supported = (['de', 'en', 'ja', 'ko', 'zh'].contains(ctx.contents.language))"
        }
      },
      {
        "set": {
          "if": "ctx.contents.supported",
          "field": "contents.{{contents.language}}",
          "value": "{{contents.default}}",
          "override": false
        }
      }
    ]
  },
  "docs": [
    {
      "_source": {
        "contents": "Das leben ist kein Ponyhof"
      }
    },
    {
      "_source": {
        "contents": "The rain in Spain stays mainly in the plains"
      }
    },
    {
      "_source": {
        "contents": "オリンピック大会"
      }
    },
    {
      "_source": {
        "contents": "로마는 하루아침에 이루어진 것이 아니다"
      }
    },
    {
      "_source": {
        "contents": "授人以鱼不如授人以渔"
      }
    },
    {
      "_source": {
        "contents": "Qui court deux lievres a la fois, n’en prend aucun"
      }
    },
    {
      "_source": {
        "contents": "Lupus non timet canem latrantem"
      }
    },
    {
      "_source": {
        "contents": "This is mostly English but has a touch of Latin since we often just say, Carpe diem"
      }
    }
  ]
}

The response:

{
  "docs" : [
    {
      "doc" : {
        "_index" : "_index",
        "_type" : "_doc",
        "_id" : "_id",
        "_source" : {
          "contents" : {
            "de" : "Das leben ist kein Ponyhof",
            "default" : "Das leben ist kein Ponyhof",
            "language" : "de",
            "supported" : true
          },
          "_ml" : {
            "lang_ident" : {
              "top_classes" : [
                {
                  "class_name" : "de",
                  "class_probability" : 0.9996006023972855,
                  "class_score" : 0.9996006023972855
                }
              ],
              "model_id" : "lang_ident_model_1"
            }
          }
        },
        "_ingest" : {
          "timestamp" : "2020-03-06T16:31:36.211596Z"
        }
      }
    },
    {
      "doc" : {
        "_index" : "_index",
        "_type" : "_doc",
        "_id" : "_id",
        "_source" : {
          "contents" : {
            "en" : "The rain in Spain stays mainly in the plains",
            "default" : "The rain in Spain stays mainly in the plains",
            "language" : "en",
            "supported" : true
          },
          "_ml" : {
            "lang_ident" : {
              "top_classes" : [
                {
                  "class_name" : "en",
                  "class_probability" : 0.9988809847231199,
                  "class_score" : 0.9988809847231199
                }
              ],
              "model_id" : "lang_ident_model_1"
            }
          }
        },
        "_ingest" : {
          "timestamp" : "2020-03-06T16:31:36.211611Z"
        }
      }
    },
    {
      "doc" : {
        "_index" : "_index",
        "_type" : "_doc",
        "_id" : "_id",
        "_source" : {
          "contents" : {
            "default" : "オリンピック大会",
            "language" : "ja",
            "ja" : "オリンピック大会",
            "supported" : true
          },
          "_ml" : {
            "lang_ident" : {
              "top_classes" : [
                {
                  "class_name" : "ja",
                  "class_probability" : 0.9993823252841599,
                  "class_score" : 0.9993823252841599
                }
              ],
              "model_id" : "lang_ident_model_1"
            }
          }
        },
        "_ingest" : {
          "timestamp" : "2020-03-06T16:31:36.211618Z"
        }
      }
    },
    {
      "doc" : {
        "_index" : "_index",
        "_type" : "_doc",
        "_id" : "_id",
        "_source" : {
          "contents" : {
            "default" : "로마는 하루아침에 이루어진 것이 아니다",
            "language" : "ko",
            "ko" : "로마는 하루아침에 이루어진 것이 아니다",
            "supported" : true
          },
          "_ml" : {
            "lang_ident" : {
              "top_classes" : [
                {
                  "class_name" : "ko",
                  "class_probability" : 0.9999939196272863,
                  "class_score" : 0.9999939196272863
                }
              ],
              "model_id" : "lang_ident_model_1"
            }
          }
        },
        "_ingest" : {
          "timestamp" : "2020-03-06T16:31:36.211624Z"
        }
      }
    },
    {
      "doc" : {
        "_index" : "_index",
        "_type" : "_doc",
        "_id" : "_id",
        "_source" : {
          "contents" : {
            "default" : "授人以鱼不如授人以渔",
            "language" : "zh",
            "zh" : "授人以鱼不如授人以渔",
            "supported" : true
          },
          "_ml" : {
            "lang_ident" : {
              "top_classes" : [
                {
                  "class_name" : "zh",
                  "class_probability" : 0.9999810103320087,
                  "class_score" : 0.9999810103320087
                }
              ],
              "model_id" : "lang_ident_model_1"
            }
          }
        },
        "_ingest" : {
          "timestamp" : "2020-03-06T16:31:36.211629Z"
        }
      }
    },
    {
      "doc" : {
        "_index" : "_index",
        "_type" : "_doc",
        "_id" : "_id",
        "_source" : {
          "contents" : {
            "default" : "Qui court deux lievres a la fois, n’en prend aucun",
            "language" : "fr",
            "supported" : false
          },
          "_ml" : {
            "lang_ident" : {
              "top_classes" : [
                {
                  "class_name" : "fr",
                  "class_probability" : 0.9999669852240882,
                  "class_score" : 0.9999669852240882
                }
              ],
              "model_id" : "lang_ident_model_1"
            }
          }
        },
        "_ingest" : {
          "timestamp" : "2020-03-06T16:31:36.211635Z"
        }
      }
    },
    {
      "doc" : {
        "_index" : "_index",
        "_type" : "_doc",
        "_id" : "_id",
        "_source" : {
          "contents" : {
            "default" : "Lupus non timet canem latrantem",
            "language" : "la",
            "supported" : false
          },
          "_ml" : {
            "lang_ident" : {
              "top_classes" : [
                {
                  "class_name" : "la",
                  "class_probability" : 0.614050940088811,
                  "class_score" : 0.614050940088811
                }
              ],
              "model_id" : "lang_ident_model_1"
            }
          }
        },
        "_ingest" : {
          "timestamp" : "2020-03-06T16:31:36.21164Z"
        }
      }
    },
    {
      "doc" : {
        "_index" : "_index",
        "_type" : "_doc",
        "_id" : "_id",
        "_source" : {
          "contents" : {
            "en" : "This is mostly English but has a touch of Latin since we often just say, Carpe diem",
            "default" : "This is mostly English but has a touch of Latin since we often just say, Carpe diem",
            "language" : "en",
            "supported" : true
          },
          "_ml" : {
            "lang_ident" : {
              "top_classes" : [
                {
                  "class_name" : "en",
                  "class_probability" : 0.9997901768317939,
                  "class_score" : 0.9997901768317939
                }
              ],
              "model_id" : "lang_ident_model_1"
            }
          }
        },
        "_ingest" : {
          "timestamp" : "2020-03-06T16:31:36.211646Z"
        }
      }
    }
  ]
}

As you can see, ingest processors are very flexible and powerful. All of the related handling can be done inside ingest processors; combined with a small script for the conditional logic and a rename of the original field, this meets our document-processing needs.

Summary

Today we looked at the Language identification feature and at how to use Ingest Pipelines. Pretty handy, isn't it? If you have a similar use case, give it a try. One caveat: very short texts may be misidentified; CLD3 is designed for inputs longer than about 200 characters.

Related links


Implementing same-paragraph and same-sentence search with Elasticsearch

Same-sentence search requires that, when searching for several keywords, the returned documents not only contain all the keywords but contain them within the same sentence.
Same-paragraph search is similar, except the scope is a single paragraph.

SpanQuery

I could not find a way to implement same-paragraph or same-sentence search with the usual term or match queries.
Elasticsearch provides span queries, which the official documentation introduces as follows:

Span queries are low-level positional queries which provide expert control over the order and proximity of the specified terms. These are typically used to implement very specific queries on legal documents or patents.

As mentioned above, span queries are typically used for specialized searches over legal documents or patents, domains that often need same-paragraph / same-sentence search.
Let's look at three types of span query and see whether they can meet our needs.

Preparing the data

PUT article

POST article/_mapping
{
  "properties": {
    "maincontent": {
      "type": "text"
    }
  }
}

POST article/_doc/1
{
   "maincontent":"the quick red fox jumps over the sleepy cat"
}

POST article/_doc/2
{
   "maincontent":"the quick brown fox jumps over the lazy dog"
}

SpanTermQuery

A span_term query is similar to a term query. The query below returns the doc with _id 1: the quick red fox jumps over the sleepy cat

POST article/_search
{
  "profile": "true",
  "query": {
    "span_term": {
      "maincontent": {
        "value": "red"
      }
    }
  }
}

SpanNearQuery

A span_near query performs proximity search: it checks whether several terms appear near each other. slop sets the allowed distance between them; with slop set to 0 the terms must be adjacent, which is roughly equivalent to match_phrase. The in_order parameter requires the terms to appear in the document in the same order as in the query.

POST article/_search
{
  "query": {
    "span_near": {
      "clauses": [
        {
          "span_term": {
            "maincontent": {
              "value": "quick"
            }
          }
        },
        {
          "span_term": {
            "maincontent": {
              "value": "brown"
            }
          }
        }
      ],
      "slop": 0,
      "in_order": true
    }
  }
}

The query above returns the doc with _id 2.

the quick brown fox jumps over the lazy dog

SpanNotQuery

The span_not query is the key piece here: it requires that the spans matched by two span queries do not overlap.
Consider the following example:

  • include: the span query that must match; in this example, a span_near query requiring both quick and fox.
  • exclude: a span query whose matches must not overlap the spans matched by include.
POST article/_search
{
  "query": {
    "span_not": {
      "include": {
        "span_near": {
          "clauses": [
            {
              "span_term": {
                "maincontent": {
                  "value": "quick"
                }
              }
            },
            {
              "span_term": {
                "maincontent": {
                  "value": "fox"
                }
              }
            }
          ],
          "slop": 1,
          "in_order": true
        }
      },
      "exclude": {
        "span_term": {
          "maincontent": {
            "value": "red"
          }
        }
      }
    }
  }
}

The query above returns the doc with _id 2.
For the doc with _id 1, although quick red fox matches the include span query, red also matches the exclude span query, so that document is filtered out: the quick red fox jumps over the sleepy cat

How same-sentence / same-paragraph search works

Put the other way around, same-sentence search means the search terms must not cross a sentence boundary; more concretely, no sentence-ending punctuation such as 。, ?, or ! may appear between them.
The corresponding query looks something like this:

POST article/_search
{
  "query": {
    "span_not": {
      "include": {
        "span_near": {
          "clauses": [
            {
              "span_term": {
                "maincontent": {
                  "value": "word1"
                }
              }
            },
            {
              "span_term": {
                "maincontent": {
                  "value": "word2"
                }
              }
            }
          ],
          "slop": 1,
          "in_order": true
        }
      },
      "exclude": {
        "span_term": {
          "maincontent": {
            "value": "。/?/!"
          }
        }
      }
    }
  }
}

Same-paragraph search is analogous; the separator simply becomes \n, or <p> and </p>.

Implementing same-paragraph / same-sentence search

HTML text

Creating the index

PUT sample1
{
  "settings": {
    "number_of_replicas": 0,
    "number_of_shards": 1,
    "analysis": {
      "analyzer": {
        "maincontent_analyzer": {
          "type": "custom",
          "char_filter": [
            "sentence_paragrah_mapping",
            "html_strip"
          ],
          "tokenizer": "ik_max_word"
        }
      },
      "char_filter": {
        "sentence_paragrah_mapping": {
          "type": "mapping",
          "mappings": [
            """<h1> => \u0020paragraph\u0020""",
            """</h1> => \u0020sentence\u0020paragraph\u0020 """,
            """<h2> => \u0020paragraph\u0020""",
            """</h2> => \u0020sentence\u0020paragraph\u0020 """,
            """<p> => \u0020paragraph\u0020""",
            """</p> => \u0020sentence\u0020paragraph\u0020 """,
            """! => \u0020sentence\u0020 """,
            """? => \u0020sentence\u0020 """,
            """。 => \u0020sentence\u0020 """,
            """? => \u0020sentence\u0020 """,
            """! => \u0020sentence\u0020"""
          ]
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "mainContent": {
        "type": "text",
        "analyzer": "maincontent_analyzer",
        "search_analyzer": "ik_smart"
      }
    }
  }
}

We created a char filter named sentence_paragrah_mapping; it serves two purposes:

  • Replace the p, h1, and h2 tags with a unified paragraph separator token: paragraph
  • Replace the Chinese and English sentence-ending punctuation (。, !, ?, etc.) with a unified sentence separator token: sentence

A few details are worth noting:

  • Spaces must be added before and after both paragraph and sentence, and the space has to be written as the Unicode escape \u0020.

    # Expected
    hello world! => hello world sentence
    # A badly written mapping may instead produce
    hello world! => hello worldsentence
  • The closing tags </p>, </h1>, and </h2> need both separators, sentence and paragraph, to cover the case where the text in front of them ends without punctuation.
# Expected
<h1>hello world</h1> <p>hello china</p> => paragraph hello world sentence paragraph hello china sentence

# Result if </p>, </h1>, </h2> are replaced with paragraph only
# hello world and hello china then fall into the same sentence
<h1>hello world</h1> <p>hello china</p> => paragraph hello world  paragraph hello china sentence

# The configured mapping is slightly redundant: it produces two consecutive paragraph tokens
# If the HTML is guaranteed to be well formed, you can replace only </p>, </h1>, </h2> and skip <p>, <h1>, <h2>
<h1>hello world</h1> <p>hello china</p> => paragraph hello world sentence paragraph paragraph hello china sentence
  • Note the order of sentence_paragrah_mapping and html_strip in the char_filter list: the mapping must run before html_strip, otherwise the tags are stripped before they can be turned into separator tokens (see the sketch below).
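As a quick illustration of why the order matters (this request is not part of the original article), the sketch below reuses the char filters defined in sample1 but lists html_strip first; the tags are then stripped before sentence_paragrah_mapping runs, so no paragraph or sentence tokens should appear in the output.

# Sketch only: wrong char_filter order for comparison
POST sample1/_analyze
{
  "text": ["<p>java python</p>"],
  "tokenizer": "ik_max_word",
  "char_filter": [
    "html_strip",
    "sentence_paragrah_mapping"
  ]
}
# Expected effect: only the tokens java and python come back, with no
# paragraph / sentence separator tokens, because the tags are already gone
# by the time the mapping char filter runs.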

Inserting test data

POST sample1/_doc/1
{
  "mainContent":"<p>java python javascript</p><p>oracle mysql sqlserver</p>"
} 

# Test the analyzer
POST sample1/_analyze
{
  "text": ["<p>java python javascript</p><p>oracle mysql sqlserver</p>"],
  "analyzer": "maincontent_analyzer"
}

# Response
{
  "tokens" : [
    {
      "token" : "paragraph",
      "start_offset" : 1,
      "end_offset" : 2,
      "type" : "ENGLISH",
      "position" : 0
    },
    {
      "token" : "java",
      "start_offset" : 3,
      "end_offset" : 7,
      "type" : "ENGLISH",
      "position" : 1
    },
    {
      "token" : "python",
      "start_offset" : 8,
      "end_offset" : 14,
      "type" : "ENGLISH",
      "position" : 2
    },
    {
      "token" : "javascript",
      "start_offset" : 15,
      "end_offset" : 25,
      "type" : "ENGLISH",
      "position" : 3
    },
    {
      "token" : "sentence",
      "start_offset" : 26,
      "end_offset" : 28,
      "type" : "ENGLISH",
      "position" : 4
    },
    {
      "token" : "paragraph",
      "start_offset" : 28,
      "end_offset" : 28,
      "type" : "ENGLISH",
      "position" : 5
    },
    {
      "token" : "paragraph",
      "start_offset" : 30,
      "end_offset" : 31,
      "type" : "ENGLISH",
      "position" : 6
    },
    {
      "token" : "oracle",
      "start_offset" : 32,
      "end_offset" : 38,
      "type" : "ENGLISH",
      "position" : 7
    },
    {
      "token" : "mysql",
      "start_offset" : 39,
      "end_offset" : 44,
      "type" : "ENGLISH",
      "position" : 8
    },
    {
      "token" : "sqlserver",
      "start_offset" : 45,
      "end_offset" : 54,
      "type" : "ENGLISH",
      "position" : 9
    },
    {
      "token" : "sentence",
      "start_offset" : 55,
      "end_offset" : 57,
      "type" : "ENGLISH",
      "position" : 10
    },
    {
      "token" : "paragraph",
      "start_offset" : 57,
      "end_offset" : 57,
      "type" : "ENGLISH",
      "position" : 11
    }
  ]
}

Test queries

  • Same-paragraph query: java python
GET sample1/_search
{
  "query": {
    "span_not": {
      "include": {
        "span_near": {
          "clauses": [
            {
              "span_term": {
                "mainContent": {
                  "value": "java"
                }
              }
            },
            {
              "span_term": {
                "mainContent": {
                  "value": "python"
                }
              }
            }
          ],
          "slop": 12,
          "in_order": false
        }
      },
      "exclude": {
        "span_term": {
          "mainContent": {
            "value": "paragraph"
          }
        }
      }
    }
  }
}

// Result
{
  "took" : 0,
  "timed_out" : false,
  "_shards" : {
    "total" : 1,
    "successful" : 1,
    "skipped" : 0,
    "failed" : 0
  },
  "hits" : {
    "total" : {
      "value" : 1,
      "relation" : "eq"
    },
    "max_score" : 0.1655603,
    "hits" : [
      {
        "_index" : "sample1",
        "_type" : "_doc",
        "_id" : "1",
        "_score" : 0.1655603,
        "_source" : {
          "mainContent" : "<p>java python javascript</p><p>oracle mysql sqlserver</p>"
        }
      }
    ]
  }
}
  • Same-paragraph query: java oracle
GET sample1/_search
{
  "query": {
    "span_not": {
      "include": {
        "span_near": {
          "clauses": [
            {
              "span_term": {
                "mainContent": {
                  "value": "java"
                }
              }
            },
            {
              "span_term": {
                "mainContent": {
                  "value": "oracle"
                }
              }
            }
          ],
          "slop": 12,
          "in_order": false
        }
      },
      "exclude": {
        "span_term": {
          "mainContent": {
            "value": "paragraph"
          }
        }
      }
    }
  }
}

# Result: no documents returned
{
  "took" : 0,
  "timed_out" : false,
  "_shards" : {
    "total" : 1,
    "successful" : 1,
    "skipped" : 0,
    "failed" : 0
  },
  "hits" : {
    "total" : {
      "value" : 0,
      "relation" : "eq"
    },
    "max_score" : null,
    "hits" : [ ]
  }
}

Plain text

The only difference between plain text and HTML here is the paragraph separator, which becomes \n.

Creating the index

PUT sample2
{
  "settings": {
    "number_of_replicas": 0,
    "number_of_shards": 1,
    "analysis": {
      "analyzer": {
        "maincontent_analyzer": {
          "type": "custom",
          "char_filter": [
            "sentence_paragrah_mapping"
          ],
          "tokenizer": "ik_max_word"
        }
      },
      "char_filter": {
        "sentence_paragrah_mapping": {
          "type": "mapping",
          "mappings": [
            """\n => \u0020sentence\u0020paragraph\u0020 """,
            """! => \u0020sentence\u0020 """,
            """? => \u0020sentence\u0020 """,
            """。 => \u0020sentence\u0020 """,
            """? => \u0020sentence\u0020 """,
            """! => \u0020sentence\u0020"""
          ]
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "mainContent": {
        "type": "text",
        "analyzer": "maincontent_analyzer",
        "search_analyzer": "ik_smart"
      }
    }
  }
}

Testing the analyzer

POST sample2/_analyze
{
  "text": ["java python javascript\noracle mysql sqlserver"],
  "analyzer": "maincontent_analyzer"
}

# Result
{
  "tokens" : [
    {
      "token" : "java",
      "start_offset" : 0,
      "end_offset" : 4,
      "type" : "ENGLISH",
      "position" : 0
    },
    {
      "token" : "python",
      "start_offset" : 5,
      "end_offset" : 11,
      "type" : "ENGLISH",
      "position" : 1
    },
    {
      "token" : "javascript",
      "start_offset" : 12,
      "end_offset" : 22,
      "type" : "ENGLISH",
      "position" : 2
    },
    {
      "token" : "sentence",
      "start_offset" : 22,
      "end_offset" : 22,
      "type" : "ENGLISH",
      "position" : 3
    },
    {
      "token" : "paragraph",
      "start_offset" : 22,
      "end_offset" : 22,
      "type" : "ENGLISH",
      "position" : 4
    },
    {
      "token" : "oracle",
      "start_offset" : 23,
      "end_offset" : 29,
      "type" : "ENGLISH",
      "position" : 5
    },
    {
      "token" : "mysql",
      "start_offset" : 30,
      "end_offset" : 35,
      "type" : "ENGLISH",
      "position" : 6
    },
    {
      "token" : "sqlserver",
      "start_offset" : 36,
      "end_offset" : 45,
      "type" : "ENGLISH",
      "position" : 7
    }
  ]
}
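
The original post stops at the analyzer test for sample2. As a sketch only (derived from the earlier sample1 queries, not from the article), a same-sentence search against sample2 would follow the same span_not pattern but exclude the sentence token instead of paragraph; the document below is indexed purely for this illustration.

# Index the text from the _analyze example (illustration only)
POST sample2/_doc/1
{
  "mainContent": "java python javascript\noracle mysql sqlserver"
}

# Same-sentence query: java python (both terms sit in the first sentence)
GET sample2/_search
{
  "query": {
    "span_not": {
      "include": {
        "span_near": {
          "clauses": [
            {
              "span_term": {
                "mainContent": {
                  "value": "java"
                }
              }
            },
            {
              "span_term": {
                "mainContent": {
                  "value": "python"
                }
              }
            }
          ],
          "slop": 12,
          "in_order": false
        }
      },
      "exclude": {
        "span_term": {
          "mainContent": {
            "value": "sentence"
          }
        }
      }
    }
  }
}
# Swapping python for oracle should return no hits: the \n between the two
# lines is mapped to "sentence paragraph", so a sentence token falls inside
# the matched span and span_not rejects it.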

Community Daily, Issue 873 (2020-03-06)

1. How should Logstash sync data into a List-type ES object field?
http://t.cn/A673I0Tj
2. Connecting Kafka to Elasticsearch in practice
http://t.cn/A673xv8b
3. A hands-on guide to deploying Elastic APM (proxy required)
http://t.cn/A673x7D4

Editor: 铭毅天下
Archive: https://ela.st/cn-daily-all
Subscribe: https://ela.st/cn-daily-sub
Meetups: https://ela.st/cn-meetup

Community Daily, Issue 872 (2020-03-05)

1. Elasticsearch: text similarity search based on text embeddings
http://t.cn/A67HlfKF
2. Cross-cluster replication
http://t.cn/A67HlJgg
3. An interesting exercise with ES and matrix-transform aggregations
http://t.cn/A67HlSqu

Editor: 金桥
Archive: https://ela.st/cn-daily-all
Subscribe: https://ela.st/cn-daily-sub
Meetups: https://ela.st/cn-meetup