ES(Elasticsearch)的应用与代码示例
一、引言
- Elasticsearch在现代化应用中的核心作用
- 典型应用场景分析(日志分析/全文检索/数据聚合)
二、环境准备(前提条件)
- Elasticsearch 8.x集群部署要点
- IK中文分词插件配置指南
- Ingest Attachment插件安装说明
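安装Ingest Attachment插件后,还需要在集群中创建一条名为attachment的处理管道,后文建立文件索引时会通过.pipeline("attachment")引用它。下面是使用Java API Client创建该管道的示意代码(仅为参考写法,类名为示例命名,管道参数需按实际需求调整):

// 创建attachment处理管道(示意代码,假设集群已安装ingest-attachment插件)
// 管道从content字段(base64编码的文件内容)解析出正文、作者等元数据,写入attachment对象
import co.elastic.clients.elasticsearch.ElasticsearchClient;

import java.io.IOException;

public class AttachmentPipelineInitializer {

    public static void createPipeline(ElasticsearchClient client) throws IOException {
        client.ingest().putPipeline(p -> p
                .id("attachment")
                .description("Extract attachment information")
                .processors(pr -> pr.attachment(a -> a
                        // 与后文FileData.content字段对应:文档以base64写入该字段
                        .field("content"))));
    }
}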
三、核心代码结构解析
src/main/
├── java/
│ ├── config/
│ │ └── ElasticSearchConfig.java
│ ├── controller/
│ │ └── ElasticsearchController.java
│ ├── service/
│ │ ├── ElasticsearchService.java
│ │ └── ElasticsearchServiceImpl.java
│ ├── model/
│ │ ├── FileData.java
│ │ ├── Attachment.java
│ │ └── SearchResult.java
│ ├── dto/
│ │ └── WarningInfoDto.java
│ └── util/
│ └── ElasticSearchUtils.java
四、核心组件实现(含代码示例)
1. 配置中心(ElasticSearchConfig.java)
application.properties(或 application.yml)配置示例:
es.uri=192.168.1.1
es.port=9200
es.username=
es.password=
package com.zbxsoft.wds.config;

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.json.JsonpMapper;
import co.elastic.clients.json.jackson.JacksonJsonpMapper;
import co.elastic.clients.transport.ElasticsearchTransport;
import co.elastic.clients.transport.rest_client.RestClientTransport;
import com.baomidou.mybatisplus.core.toolkit.StringUtils;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * es配置
 */
@Configuration
@ConfigurationProperties(prefix = "es")
public class ElasticSearchConfig {

    private String uri;
    private Integer port;
    private String username;
    private String password;

    public String getUri() { return uri; }
    public void setUri(String uri) { this.uri = uri; }
    public Integer getPort() { return port; }
    public void setPort(Integer port) { this.port = port; }
    public String getUsername() { return username; }
    public void setUsername(String username) { this.username = username; }
    public String getPassword() { return password; }
    public void setPassword(String password) { this.password = password; }

    @Bean
    public ElasticsearchClient elasticsearchClient(@Autowired(required = false) JsonpMapper jsonpMapper,
                                                   @Autowired(required = false) ObjectMapper objectMapper) {
        // 解析host配置信息,创建HttpHost数组存放es主机和端口
        HttpHost[] httpHostArray = new HttpHost[1];
        httpHostArray[0] = new HttpHost(uri, port);
        RestClientBuilder builder = RestClient.builder(httpHostArray);

        // 配置了账号密码时,把凭证挂载到RestClientBuilder上,使Basic认证生效
        if (StringUtils.isNotBlank(username) && StringUtils.isNotBlank(password)) {
            final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
            credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(username, password));
            builder.setHttpClientConfigCallback(httpClientBuilder ->
                    httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider));
        }

        RestClient restClient = builder.build();

        // 优先使用容器中已有的JsonpMapper/ObjectMapper,否则退回默认的Jackson映射
        ElasticsearchTransport transport;
        if (jsonpMapper != null) {
            transport = new RestClientTransport(restClient, jsonpMapper);
        } else if (objectMapper != null) {
            transport = new RestClientTransport(restClient, new JacksonJsonpMapper(objectMapper));
        } else {
            transport = new RestClientTransport(restClient, new JacksonJsonpMapper());
        }
        return new ElasticsearchClient(transport);
    }
}
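配置类生效后,可以注入上面的ElasticsearchClient做一次连通性自检。下面是一段示意代码(类名EsHealthCheck为示例命名):

// 连通性自检示意代码:启动时打印ES版本,确认客户端配置可用
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

@Component
public class EsHealthCheck implements CommandLineRunner {

    @Autowired
    private ElasticsearchClient client;

    @Override
    public void run(String... args) throws Exception {
        // info()返回集群名称与版本信息
        System.out.println("ES version: " + client.info().version().number());
    }
}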
2. 数据处理模型(FileData.java)
package com.zbxsoft.wds.config;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.zbxsoft.wds.fileupload.Attachment;
import lombok.Data;
import org.springframework.data.elasticsearch.annotations.Document;
import org.springframework.data.elasticsearch.annotations.Field;
import org.springframework.data.elasticsearch.annotations.FieldType;

import java.io.Serializable;
import java.util.Objects;

@Data
@Document(indexName = "file_data", createIndex = false)
@JsonIgnoreProperties(ignoreUnknown = true)
public class FileData implements Serializable {

    @Field(name = "file_id", type = FieldType.Text)
    private String file_id;

    @Field(name = "file_type", type = FieldType.Text)
    private String file_type;

    @Field(name = "file_name", type = FieldType.Text)
    private String file_name;

    @Field(name = "file_url", type = FieldType.Text)
    private String file_url;

    @Field(name = "file_size", type = FieldType.Text)
    private String file_size;

    @Field(name = "group_file_id", type = FieldType.Text)
    private String group_file_id;

    @Field(name = "file_suffix", type = FieldType.Text)
    private String file_suffix;

    @Field(name = "file_dir_name", type = FieldType.Text)
    private String file_dir_name;

    // 保存时使用
    @Field(name = "attachment.content", type = FieldType.Text)
    private String content;

    // 检索时使用
    private Attachment attachment;
}
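由于@Document(createIndex = false)关闭了自动建索引,file_data索引需要手动创建。下面是一段建索引映射的示意代码(分词器沿用后文检索代码中提到的ik_max_word/ik_smart,这里只列出两个检索字段,实际映射需补全其余字段):

// 手动创建file_data索引映射的示意代码(假设集群已安装IK分词插件)
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.indices.CreateIndexResponse;

import java.io.IOException;

public class FileDataIndexInitializer {

    public static void createFileDataIndex(ElasticsearchClient client) throws IOException {
        CreateIndexResponse response = client.indices().create(c -> c
                .index("file_data")
                .mappings(m -> m
                        // 文件名使用ik_max_word细粒度分词,便于联想与高亮
                        .properties("file_name", p -> p.text(t -> t.analyzer("ik_max_word")))
                        // 附件正文由attachment管道写入,使用ik_smart粗粒度分词
                        .properties("attachment", p -> p.object(o -> o
                                .properties("content", cp -> cp.text(t -> t.analyzer("ik_smart")))))));
        System.out.println("acknowledged: " + response.acknowledged());
    }
}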
3. 附件解析组件(Attachment.java)
package com.zbxsoft.wds.fileupload;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.ToString;

import java.io.Serializable;

@Data
@AllArgsConstructor
@NoArgsConstructor
@ToString
public class Attachment implements Serializable {
    private String date;
    private String content_type;
    private String author;
    private String language;
    private String title;
    private String content;
    private String content_length;
}
4. 服务层接口(ElasticsearchService.java)
package com.zbxsoft.wds.fileupload;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.zbxsoft.wds.config.FileData;
import org.springframework.http.HttpEntity;
import org.springframework.web.multipart.MultipartFile;

import java.io.IOException;
import java.util.List;
import java.util.Map;

public interface ElasticsearchService {

    HttpEntity<?> createFileIndex(MultipartFile file) throws IOException;

    String createFileIndex(FileData fileData) throws IOException;

    Map<String, Double> queryWord(String keyword) throws IOException;

    IPage<SearchResult> queryWord(WarningInfoDto warningInfoDto) throws IOException;

    List<String> getAssociationalWordOther(WarningInfoDto warningInfoDto);

    String updateFileIndex(String id, FileData fileData) throws IOException;
}
5. 工具类(ElasticSearchUtils.java)
package com.zbxsoft.wds.config;

import cn.hutool.extra.spring.SpringUtil;
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch._types.Result;
import co.elastic.clients.elasticsearch._types.SortOptions;
import co.elastic.clients.elasticsearch._types.SortOrder;
import co.elastic.clients.elasticsearch._types.aggregations.Aggregate;
import co.elastic.clients.elasticsearch._types.aggregations.Aggregation;
import co.elastic.clients.elasticsearch._types.mapping.DateProperty;
import co.elastic.clients.elasticsearch._types.mapping.Property;
import co.elastic.clients.elasticsearch._types.mapping.TextProperty;
import co.elastic.clients.elasticsearch._types.mapping.TypeMapping;
import co.elastic.clients.elasticsearch._types.query_dsl.Query;
import co.elastic.clients.elasticsearch.core.*;
import co.elastic.clients.elasticsearch.core.bulk.BulkResponseItem;
import co.elastic.clients.elasticsearch.core.search.HitsMetadata;
import co.elastic.clients.elasticsearch.core.search.TotalHits;
import co.elastic.clients.elasticsearch.core.search.TotalHitsRelation;
import co.elastic.clients.elasticsearch.core.search.TrackHits;
import co.elastic.clients.elasticsearch.indices.*;
import com.baomidou.mybatisplus.core.toolkit.StringUtils;
import lombok.extern.slf4j.Slf4j;

import java.io.IOException;
import java.io.StringReader;
import java.lang.annotation.Annotation;
import java.lang.reflect.Field;
import java.util.*;
import java.util.stream.Collectors;

@Slf4j
public class ElasticSearchUtils<T> {

    public static ElasticsearchClient elasticsearchClient;

    public String index;
    public Class obj;
    public String idField;
    public Set<String> fields;

    static {
        ElasticSearchConfig elasticSearchConfig = SpringUtil.getBean(ElasticSearchConfig.class);
        // elasticsearchClient = elasticSearchConfig.elasticsearchClient();
    }

    /**
     * 获取id字段的值
     */
    private String getIdValue(T t) throws Exception {
        Field field = t.getClass().getDeclaredField(idField);
        field.setAccessible(true);
        Object object = field.get(t);
        return object.toString();
    }

    /**
     * 判断索引是否存在
     */
    public Boolean getIndex() {
        try {
            GetIndexRequest getIndexRequest = GetIndexRequest.of(builder -> builder.index(index));
            GetIndexResponse getIndexResponse = elasticsearchClient.indices().get(getIndexRequest);
            log.info("getIndexResponse:{}", getIndexResponse);
            return true;
        } catch (IndexOutOfBoundsException | IOException e) {
            log.info("getIndexResponse:{}", e.getMessage());
            return false;
        }
    }

    /**
     * 删除索引
     */
    public void deleteIndex() throws IOException {
        // 1.创建删除索引请求
        DeleteIndexRequest request = DeleteIndexRequest.of(builder -> builder.index(index));
        // 2.执行删除请求,acknowledged为true表示删除成功
        DeleteIndexResponse delete = elasticsearchClient.indices().delete(request);
        log.info("DeleteIndexRequest:{}", delete);
    }

    /**
     * 插入数据
     */
    public void push(T t) throws Exception {
        String id = getIdValue(t);
        IndexRequest<T> indexRequest = IndexRequest.of(b -> b
                .index(index)
                .id(id)
                .document(t)
                // 刷新可以立刻搜索到,但消耗性能
                /*.refresh(Refresh.True)*/);
        elasticsearchClient.index(indexRequest);
    }

    /**
     * 按id查询索引信息
     */
    public T query(String id) throws IOException {
        GetResponse<T> response = elasticsearchClient.get(g -> g.index(index).id(id), obj);
        if (response.found()) {
            return response.source();
        }
        return null;
    }

    /**
     * 分页条件查询
     */
    public HitsMetadata<T> queryList(int page, int pageSize, Query query, SortOptions... sortOptions) throws IOException {
        SearchResponse<T> search = elasticsearchClient.search(s -> s
                .index(index)
                .query(query)
                .trackTotalHits(TrackHits.of(i -> i.enabled(true)))
                .sort(Arrays.asList(sortOptions))
                .from((page - 1) * pageSize)
                .size(pageSize), obj);
        return search.hits();
    }

    /**
     * 删除文档
     */
    public boolean del(String id) throws IOException {
        DeleteResponse delete = elasticsearchClient.delete(d -> d.index(index).id(id));
        Result result = delete.result();
        return "deleted".equals(result.jsonValue()) || "not_found".equals(result.jsonValue());
    }

    /**
     * 批量删除
     */
    public Map<String, String> batchDel(Set<String> ids) throws Exception {
        BulkRequest.Builder br = new BulkRequest.Builder();
        for (String id : ids) {
            br.operations(op -> op.delete(d -> d.index(index).id(id)));
        }
        return requestBulk(br);
    }

    /**
     * 批量新增
     */
    public Map<String, String> batchAdd(List<T> list) throws Exception {
        BulkRequest.Builder br = new BulkRequest.Builder();
        for (T t : list) {
            String idValue = getIdValue(t);
            br.operations(op -> op.index(idx -> idx.index(index).id(idValue).document(t)));
        }
        return requestBulk(br);
    }

    /**
     * 处理批量请求,返回失败项的id与原因
     */
    private Map<String, String> requestBulk(BulkRequest.Builder br) throws IOException {
        // 刷新可以立刻搜索到,但消耗性能
        // br.refresh(Refresh.True);
        BulkResponse result = elasticsearchClient.bulk(br.build());
        log.info("bulk result:{}", result);
        Map<String, String> returnResult = new HashMap<>();
        if (result.errors()) {
            returnResult = result.items().stream()
                    .filter(e -> e.error() != null)
                    .collect(Collectors.toMap(BulkResponseItem::id, b -> b.error().reason()));
        }
        return returnResult;
    }

    /**
     * 分组聚合
     */
    public Map<String, Aggregate> buildAggregate(Map<String, Aggregation> map) throws IOException {
        SearchResponse<T> search = elasticsearchClient.search(s -> s.index(index).size(0).aggregations(map), obj);
        return search.aggregations();
    }

    /**
     * 条件计数
     */
    public Long queryCount(Query query) throws IOException {
        CountResponse search = elasticsearchClient.count(s -> s.index(index).query(query));
        return search.count();
    }
}
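该工具类的典型用法如下(示意代码,假设静态字段elasticsearchClient已在应用启动阶段完成赋值):

// ElasticSearchUtils使用示意代码
import com.zbxsoft.wds.config.ElasticSearchUtils;
import com.zbxsoft.wds.config.FileData;

import java.io.IOException;

public class ElasticSearchUtilsDemo {

    public static void demo() throws IOException {
        ElasticSearchUtils<FileData> utils = new ElasticSearchUtils<>();
        utils.index = "file_data";      // 操作的索引名
        utils.obj = FileData.class;     // 查询结果反序列化的目标类型
        utils.idField = "file_id";      // 反射取文档id用的字段名

        // 按id查询一条文档;push/queryList/batchAdd等方法用法类似
        FileData fileData = utils.query("some-file-id");
        System.out.println(fileData);
    }
}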
6. 服务层实现(ElasticsearchServiceImpl.java)
package com.zbxsoft.wds.fileupload;

import cn.hutool.core.collection.CollectionUtil;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.lang.UUID;
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.core.*;
import co.elastic.clients.elasticsearch.core.search.*;
import com.aliyun.oss.ServiceException;
import com.baomidou.mybatisplus.core.conditions.update.LambdaUpdateWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.zbxsoft.wds.aneo4j.TextAnalysis;
import com.zbxsoft.wds.aneo4j.rep.CustomNeo4jRepository;
import com.zbxsoft.wds.filequery.FileQuery;
import com.zbxsoft.wds.mapper.FileQueryMapper;
import com.zbxsoft.wds.config.FileData;
import com.zbxsoft.xwk.vo.R;
import lombok.extern.slf4j.Slf4j;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.text.PDFTextStripper;
import org.jetbrains.annotations.NotNull;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.ResourceLoader;
import org.springframework.data.util.Pair;
import org.springframework.http.HttpEntity;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.URL;
import java.util.*;

@Service
@Slf4j
public class ElasticsearchServiceImpl implements ElasticsearchService {

    @Autowired
    private ElasticsearchClient client;
    @Autowired
    private ResourceLoader resourceLoader;
    @Autowired
    protected FileQueryMapper fileQueryMapper;
    @Value("${file.server.address}")
    private String fileServerAddress;
    @Autowired
    private CustomNeo4jRepository customNeo4jRepository;

    /**
     * 创建索引
     * @param fileData 文件数据
     * @return 索引id
     * @throws IOException exp
     */
    @Override
    public String createFileIndex(FileData fileData) throws IOException {
        String path = fileData.getFile_url();
        String url = fileServerAddress + path;
        log.info("已上传文件地址:" + url);
        // 上传并返回新文件名称
        String fileName = FileUtil.getName(url);
        String prefix = fileName.substring(fileName.lastIndexOf(".") + 1);
        File file = File.createTempFile(fileName, prefix);
        IOUtils.copy(new URL(url), file);
        try (PDDocument document = PDDocument.load(file)) {
            // 提取pdf中的文本内容
            PDFTextStripper pdfTextStripper = new PDFTextStripper();
            String text = pdfTextStripper.getText(document);
            // 该主键在es中用作数据唯一id,在neo4j中作为实体的type
            String id = fileData.getFile_id();
            // 提取文章中的关键词
            List<String> keywords = TextAnalysis.getKeywords(text);
            if (CollectionUtil.isNotEmpty(keywords)) {
                // 构建知识图谱
                customNeo4jRepository.buildArticleKeywordGraph(id, keywords);
                // 更新keywords字段,file_id就是数据库中的主键
                updateKeywords(id, String.join(",", keywords));
            }
        } catch (IOException e) {
            log.error("提取文本或创建关键词图谱失败,异常信息: {}", e.toString());
        }
        // 最后建立该文章的索引
        return createIndex(file, fileData);
    }

    /**
     * 更新文件对象的keyword
     * @param id 主键
     * @param keyword 关键词
     */
    private void updateKeywords(String id, String keyword) {
        LambdaUpdateWrapper<FileQuery> updateWrapper = new LambdaUpdateWrapper<>();
        updateWrapper.eq(FileQuery::getFileQueryId, id).set(FileQuery::getKeyword, keyword);
        fileQueryMapper.update(updateWrapper);
    }

    /**
     * 创建索引
     * @param file 文件
     * @param fileData 文件对象
     * @return 索引id
     */
    private String createIndex(File file, FileData fileData) {
        try {
            byte[] bytes = getContent(file);
            String base64 = Base64.getEncoder().encodeToString(bytes);
            fileData.setContent(base64);
            IndexRequest<Object> requestData = IndexRequest.of(i -> i
                    .index("file_data")
                    .pipeline("attachment")
                    .id(fileData.getFile_id())
                    .document(fileData));
            IndexResponse indexResponse = client.index(requestData);
            log.info("indexResponse:" + indexResponse);
            return indexResponse.id();
        } catch (IOException e) {
            log.error("文件上传异常,异常信息: {}", e.toString());
            return "";
        }
    }

    /**
     * 更新索引,在更新文件的时候发生
     * @param id 索引id 也是关系库主键
     * @param fileData 文件对象
     */
    @Override
    public String updateFileIndex(String id, FileData fileData) throws IOException {
        // 直接删除索引中的旧文档,重建
        DeleteRequest deleteRequest = DeleteRequest.of(s -> s.index("file_data").id(id));
        client.delete(deleteRequest);
        return createFileIndex(fileData);
    }

    /**
     * 读取文件字节(供转base64使用)
     * @param file 文件对象
     * @return buffer
     */
    private byte[] getContent(File file) throws IOException {
        long fileSize = file.length();
        if (fileSize > Integer.MAX_VALUE) {
            log.info("file too big...");
            return null;
        }
        FileInputStream fi = new FileInputStream(file);
        byte[] buffer = new byte[(int) fileSize];
        int offset = 0;
        int numRead = 0;
        while (offset < buffer.length
                && (numRead = fi.read(buffer, offset, buffer.length - offset)) >= 0) {
            offset += numRead;
        }
        // 确保所有数据均被读取
        if (offset != buffer.length) {
            throw new ServiceException("Could not completely read file " + file.getName());
        }
        return buffer;
    }

    /**
     * 给定keyword检索数据,返回命中id与评分
     */
    @Override
    public Map<String, Double> queryWord(String keyword) throws IOException {
        SearchResponse<FileData> response = doQuery(keyword);
        log.info("检索结果: {}", response.hits().hits());
        Map<String, Double> idsWithScore = Maps.newHashMap();
        List<Hit<FileData>> hits = response.hits().hits();
        for (Hit<FileData> hit : hits) {
            Double score = hit.score();
            String id = hit.id();
            score = score == null ? 0.0d : score;
            idsWithScore.put(id, score);
        }
        log.info("查询数据: {}", idsWithScore);
        return idsWithScore;
    }

    /**
     * es attachment.content 检索封装
     * @param keyword 关键词
     * @return 检索结果
     */
    private SearchResponse<FileData> doQuery(String keyword) throws IOException {
        Map<String, HighlightField> map = new HashMap<>();
        map.put("file_name", new HighlightField.Builder().preTags("").postTags("").build());
        map.put("attachment.content", HighlightField.of(hf -> hf.preTags("").postTags("").numberOfFragments(4)));
        Highlight highlight = Highlight.of(h -> h
                .type(HighlighterType.of(ht -> ht.builtin(BuiltinHighlighterType.Unified)))
                .fields(map)
                .fragmentSize(50)
                .numberOfFragments(5));
        // 索引中 file_name 的分词器为ik_max_word,颗粒度较细;attachment.content 使用ik_smart分词器,颗粒度相对粗一点
        return client.search(s -> s
                .index("file_data")
                .highlight(highlight)
                .query(q -> q.bool(b -> b
                        .should(sh -> sh.match(t -> t.field("file_name").query(keyword)))
                        .should(sh -> sh.match(t -> t.field("attachment.content").query(keyword))))), FileData.class);
    }

    /**
     * 高亮分词搜索其它类型文档
     */
    @Override
    public IPage<SearchResult> queryWord(WarningInfoDto warningInfoDto) throws IOException {
        SearchResponse<FileData> response = doQuery(warningInfoDto.getKeyword());
        // 手动创建分页对象
        return getFileDataIPage(warningInfoDto, response);
    }

    /**
     * 获取检索数据,手动分页
     */
    @NotNull
    private static IPage<SearchResult> getFileDataIPage(WarningInfoDto warningInfoDto, SearchResponse<FileData> response) {
        List<Hit<FileData>> hits = response.hits().hits();
        TotalHits total = response.hits().total();
        List<SearchResult> resultList = new LinkedList<>();
        // 处理返回内容:收集每条命中的高亮片段
        for (Hit<FileData> hit : hits) {
            Map<String, List<String>> map = hit.highlight();
            List<String> highLightWords = Lists.newArrayList();
            map.forEach((k, v) -> highLightWords.addAll(v));
            FileData fileData = hit.source();
            resultList.add(new SearchResult(fileData, highLightWords));
        }
        // 手动分页返回信息
        IPage<SearchResult> warningInfoIPage = new Page<>();
        assert total != null;
        warningInfoIPage.setTotal(total.value());
        warningInfoIPage.setRecords(resultList);
        warningInfoIPage.setCurrent(warningInfoDto.getPageIndex());
        warningInfoIPage.setSize(warningInfoDto.getPageSize());
        // 总页数按总条数除以每页数量向上取整
        warningInfoIPage.setPages((warningInfoIPage.getTotal() + warningInfoDto.getPageSize() - 1) / warningInfoDto.getPageSize());
        return warningInfoIPage;
    }

    /**
     * 文档信息关键词联想(根据输入框的词语联想文件名称)
     */
    @Override
    public List<String> getAssociationalWordOther(WarningInfoDto warningInfoDto) {
// //需要查询的字段
// BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery()
// .should(QueryBuilders.matchBoolPrefixQuery("fileName", warningInfoDto.getKeyword()));
// //contentType标签内容过滤
// boolQueryBuilder.must(QueryBuilders.termsQuery("contentType", warningInfoDto.getContentType()));
// //构建高亮查询
// NativeSearchQuery searchQuery = new NativeSearchQueryBuilder()
// .withQuery(boolQueryBuilder)
// .withHighlightFields(
// new HighlightBuilder.Field("fileName")
// )
// .withHighlightBuilder(new HighlightBuilder().preTags("<span style='color:red'>").postTags("</span>"))
// .build();
// //查询
// SearchHits<FileData> search = null;
// try {
// search = client.search(searchQuery);
// } catch (Exception ex) {
// ex.printStackTrace();
// throw new ServiceException(String.format("操作错误,请联系管理员!%s", ex.getMessage()));
// }
// //设置一个最后需要返回的实体类集合
// List<String> resultList = new LinkedList<>();
// //遍历返回的内容进行处理
// for (org.springframework.data.elasticsearch.core.SearchHit<FileInfo> searchHit : search.getSearchHits()) {
// //高亮的内容
// Map<String, List<String>> highlightFields = searchHit.getHighlightFields();
// //将高亮的内容填充到content中
// searchHit.getContent().setFileName(highlightFields.get("fileName") == null ? searchHit.getContent().getFileName() : highlightFields.get("fileName").get(0));
// if (highlightFields.get("fileName") != null) {
// resultList.add(searchHit.getContent().getFileName());
// }
// }
// //list去重
// List<String> newResult = null;
// if (!FastUtils.checkNullOrEmpty(resultList)) {
// if (resultList.size() > 9) {
// newResult = resultList.stream().distinct().collect(Collectors.toList()).subList(0, 9);
// } else {
// newResult = resultList.stream().distinct().collect(Collectors.toList());
// }
//        }
        return new ArrayList<>();
    }

    /**
     * 上传文件并进行文件内容识别上传到es 测试使用
     * @param file
     * @return
     */
    @Override
    public HttpEntity<?> createFileIndex(MultipartFile file) throws IOException {
        // http://192.168.100.243//file/wds/fileQuery/2024-12/1734319529179/downloadFile.pdf
        // InputStream inputStream = resourceLoader.getResource(url).getInputStream();
        String fileName = file.getOriginalFilename();
        String prefix = fileName.substring(fileName.lastIndexOf(".") + 1);
        File tempFile = File.createTempFile(fileName, prefix);
        // 上传文件路径
        file.transferTo(tempFile);
        FileData fileData = new FileData();
        fileData.setFile_url("http://localhost:9000/bucket/p2");
        fileData.setFile_id(UUID.fastUUID().toString(true));
        fileData.setFile_name("张飞吃豆芽333.pdf");
        fileData.setFile_suffix(".pdf");
        fileData.setFile_type("pdf");
        fileData.setFile_size("44");
        fileData.setFile_dir_name("p2");
        fileData.setGroup_file_id("g1");
        String _docId = createIndex(tempFile, fileData);
        return StringUtils.isBlank(_docId) ? R.badRequest("文件上传异常") : R.success("文件上传成功");
    }
}
五、数据传输
1. 前端请求数据传输(WarningInfoDto.java)
package com.zbxsoft.wds.fileupload;

import lombok.Data;

import java.util.List;

/**
 * 前端请求数据传输
 * WarningInfo
 * @author luoY
 */
@Data
public class WarningInfoDto {
    /** 页数 */
    private Integer pageIndex;
    /** 每页数量 */
    private Integer pageSize;
    /** 查询关键词 */
    private String keyword;
    /** 内容类型 */
    private List<String> contentType;
    /** 用户手机号 */
    private String phone;
}
2. 文档全文检索接口(ElasticsearchController.java)
package com.zbxsoft.wds.fileupload;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.zbxsoft.wds.common.R;
import org.springframework.http.HttpEntity;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.util.List;

/**
 * es搜索引擎
 *
 * @author luoy
 */
@RestController
@RequestMapping("es")
public class ElasticsearchController {

    @Resource
    private ElasticsearchService elasticsearchService;

    @PostMapping("/uploadFile")
    public HttpEntity<?> uploadFile(@RequestParam(required = false) MultipartFile file) throws IOException {
        return elasticsearchService.createFileIndex(file);
    }

    /**
     * 告警信息关键词联想
     *
     * @param warningInfoDto
     * @return
     */
    @PostMapping("getAssociationalWordDoc")
    public HttpEntity<?> getAssociationalWordDoc(@RequestBody WarningInfoDto warningInfoDto, HttpServletRequest request) {
        List<String> words = elasticsearchService.getAssociationalWordOther(warningInfoDto);
        return R.list(words);
    }

    /**
     * 告警信息高亮分词分页查询
     *
     * @param warningInfoDto
     * @return
     */
    @PostMapping("queryHighLightWordDoc")
    public HttpEntity<?> queryHighLightWordDoc(@RequestBody WarningInfoDto warningInfoDto, HttpServletRequest request) throws IOException {
        IPage<SearchResult> warningInfoListPage = elasticsearchService.queryWord(warningInfoDto);
        return R.entity(warningInfoListPage);
    }
}
3. 数据返回(SearchResult.java)
package com.zbxsoft.wds.fileupload;

import com.zbxsoft.wds.config.FileData;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.ToString;

import java.io.Serializable;
import java.util.List;

@Data
@AllArgsConstructor
@NoArgsConstructor
@ToString
public class SearchResult implements Serializable {
    private FileData fileData;
    private List<String> highLightWords;
}
六、高级功能实现
- 多条件复合查询构建技巧
- 聚合统计实现方案
- 搜索高亮显示配置
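以上三点可以直接基于前文注入的ElasticsearchClient组合实现。下面给出一段示意代码(类名为示例命名;聚合字段file_type.keyword仅作演示,需按实际索引映射调整):

// 复合查询 + 聚合统计 + 高亮的组合示意代码,索引与字段沿用上文file_data
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.core.SearchResponse;
import com.zbxsoft.wds.config.FileData;

import java.io.IOException;

public class AdvancedSearchDemo {

    public static void search(ElasticsearchClient client, String keyword) throws IOException {
        SearchResponse<FileData> response = client.search(s -> s
                .index("file_data")
                // 复合查询:文件名或正文命中其一即可
                .query(q -> q.bool(b -> b
                        .should(sh -> sh.match(m -> m.field("file_name").query(keyword)))
                        .should(sh -> sh.match(m -> m.field("attachment.content").query(keyword)))
                        .minimumShouldMatch("1")))
                // 聚合统计:按文件类型分桶计数
                .aggregations("type_count", a -> a.terms(t -> t.field("file_type.keyword")))
                // 高亮:对文件名命中片段加红色标签
                .highlight(h -> h.fields("file_name", f -> f
                        .preTags("<span style='color:red'>")
                        .postTags("</span>"))),
                FileData.class);

        long typeBuckets = response.aggregations().get("type_count").sterms().buckets().array().size();
        System.out.println("命中条数:" + response.hits().hits().size() + ",类型桶数:" + typeBuckets);
    }
}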
七、性能优化建议
- 索引分片策略设计
- 查询DSL优化技巧
- 批量操作最佳实践
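围绕这几点,下面给出分片设置与批量写入的示意代码(分片/副本数量为示例值,应按数据规模与节点数评估;批量写入思路与前文ElasticSearchUtils中的batchAdd/requestBulk一致):

// 分片设置与批量写入示意代码
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.core.BulkRequest;
import co.elastic.clients.elasticsearch.core.BulkResponse;
import com.zbxsoft.wds.config.FileData;

import java.io.IOException;
import java.util.List;

public class EsTuningDemo {

    /** 建索引时显式指定分片与副本,避免默认值不适合数据规模 */
    public static void createIndexWithShards(ElasticsearchClient client) throws IOException {
        client.indices().create(c -> c
                .index("file_data")
                .settings(s -> s.numberOfShards("3").numberOfReplicas("1")));
    }

    /** 批量写入优先使用bulk,减少网络往返;失败项可从response.items()中取出 */
    public static void bulkIndex(ElasticsearchClient client, List<FileData> list) throws IOException {
        BulkRequest.Builder br = new BulkRequest.Builder();
        for (FileData data : list) {
            br.operations(op -> op.index(idx -> idx
                    .index("file_data")
                    .id(data.getFile_id())
                    .document(data)));
        }
        BulkResponse response = client.bulk(br.build());
        if (response.errors()) {
            response.items().forEach(item -> {
                if (item.error() != null) {
                    System.out.println("失败id:" + item.id() + ",原因:" + item.error().reason());
                }
            });
        }
    }
}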
八、总结与展望
- 当前架构优势分析
- 后续演进方向建议
(注:各代码示例需配合完整的类定义和import语句,实际开发中需补充异常处理、日志记录等生产级代码要素)