(一)、Setting Up the Environment
0. Start ElasticSearch, elasticsearch-head, and Kibana
(1). Start ElasticSearch (9200)
(2). Start elasticsearch-head (9101)
(3). Start Kibana (5602)
1. Project Dependencies
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.5.5</version>
<relativePath/> <!-- lookup parent from repository -->
</parent>
<groupId>com.jsxs</groupId>
<artifactId>Jsxs-es-JD</artifactId>
<version>0.0.1-SNAPSHOT</version>
<name>Jsxs-es-JD</name>
<description>Demo project for Spring Boot</description>
<properties>
<java.version>1.8</java.version>
<!-- Pin the ES client version so it matches the local ElasticSearch installation -->
<elasticsearch.version>7.6.2</elasticsearch.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-devtools</artifactId>
<scope>runtime</scope>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-elasticsearch</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<!-- fastjson for JSON serialization -->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>2.0.26</version>
</dependency>
<!-- Thymeleaf starter (let the Spring Boot parent manage the version) -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-thymeleaf</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<excludes>
<exclude>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</project>
2. Start and Test
(二)、The Crawler
1. Where Does the Data Come From?
- From a database.
- From a message queue.
- From a web crawler.
2. Add the Crawler Dependency
The tika library is used to parse rich files such as media documents; jsoup is used to parse HTML pages. This project only needs jsoup.
<!-- jsoup: HTML parser -->
<dependency>
<groupId>org.jsoup</groupId>
<artifactId>jsoup</artifactId>
<version>1.10.2</version>
</dependency>
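For anyone new to jsoup, here is a minimal usage sketch (the HTML fragment and selectors are made up for illustration); the real crawler below uses the same parse/select/attr calls against the JD search page:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class JsoupQuickStart {
    public static void main(String[] args) {
        // Parse an HTML string into a DOM-like Document
        String html = "<ul id=\"list\"><li><a href=\"/a\">first</a></li><li><a href=\"/b\">second</a></li></ul>";
        Document doc = Jsoup.parse(html);
        // Select elements with CSS-style selectors and read their text / attributes
        for (Element a : doc.select("#list li a")) {
            System.out.println(a.text() + " -> " + a.attr("href"));
        }
    }
}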
3. Writing the Crawler Utility Class
(1). Entity Class
package com.jsxs.pojo;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* @Author Jsxs
* @Date 2023/6/30 13:06
* @PackageName:com.jsxs.pojo
* @ClassName: Content
* @Description: TODO
* @Version 1.0
*/
@Data
@NoArgsConstructor
@AllArgsConstructor
public class Content {
private String title;
private String img;
private String price;
}
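The service layer below serializes each Content into a JSON string before indexing it into ES. A quick sketch of that step, using the same fastjson import as the service code (the field values and class name here are placeholders):

import com.alibaba.fastjson2.JSON;
import com.jsxs.pojo.Content;

public class ContentJsonDemo {
    public static void main(String[] args) {
        // Placeholder values, just to show the shape of the JSON document stored in ES
        Content content = new Content("码出高效", "//img.example.com/placeholder.jpg", "99.00");
        System.out.println(JSON.toJSONString(content)); // e.g. {"img":"...","price":"...","title":"..."}
    }
}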
(2). Utility Class, First Version (deprecated ⭐)
package com.jsxs.utils;
import com.jsxs.pojo.Content;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.stereotype.Component;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
/**
* @Author Jsxs
* @Date 2023/6/30 12:40
* @PackageName:com.jsxs.utils
* @ClassName: HtmlParseUtil
* @Description: TODO
* @Version 1.0
*/
@Component
public class HtmlParseUtil {
public List<Content> parseJD(String keywords) throws Exception {
// 1. Build the request URL
String url = "https://search.jd.com/Search?keyword="+keywords;
// 2. Parse the page; the returned Document behaves like the browser's DOM Document
Document document = Jsoup.parse(new URL(url), 3000);
// 3. Use the Document like the JS DOM -> grab the element that wraps the whole goods list
Element element = document.getElementById("J_goodsList");
// 4. Get all li elements (a collection)
Elements elements = element.getElementsByTag("li");
// Create a list to hold the crawled records
ArrayList<Content> contents = new ArrayList<>();
// 5. Extract the fields from each element
for (Element li : elements) {
// Get the image; JD lazy-loads images, so read the data-lazy-img attribute instead of src
String img = li.getElementsByTag("img").eq(0).attr("data-lazy-img");
// Get the price
String price = li.getElementsByClass("p-price").eq(0).text();
// Get the product title
String title = li.getElementsByClass("p-name").eq(0).text();
// Add the crawled record to the list
contents.add(new Content(title,img,price));
}
return contents;
}
public static void main(String[] args) throws Exception {
for (Content java : new HtmlParseUtil().parseJD("码出高效")) {
System.out.println(java);
}
}
}
(3). Utility Class - Working Around JD's Anti-Bot Protection
package com.jsxs.utils;
import com.jsxs.pojo.Content;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.stereotype.Component;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* @Author Jsxs
* @Date 2023/6/30 12:40
* @PackageName:com.jsxs.utils
* @ClassName: HtmlParseUtil
* @Description: TODO
* @Version 1.0
*/
@Component
public class HtmlParseUtil {
public List<Content> parseJD(String keywords) throws Exception {
// 1. Build the request URL
String url = "https://search.jd.com/Search?keyword="+keywords;
System.out.println(url);
// Set the login cookie so that JD does not redirect the request to its login page
Map<String, String> cookies = new HashMap<String, String>();
cookies.put("thor", "35C0A430DD191386DC5C6605461B820975545DB4E7B5F6CD3717B58D8F3B4CF548ED5F724A0CFF52528BCC4C1382E38FDD39F7714D356D73C80DBC98E351588E74A77B0CB8B5348847042F8AB08B9D4BC87539F45579E34614217BFD76FCEEBEC829173EEA7B4D51FAA162DD62B98376375C46B24B2FAAC96C7C733BC0F3B6165DB89F97C62170FD0838A7F72212B95CD38FC61DEF2B38C36A1F8C252C2809C8");
// 2. Parse the page; the returned Document behaves like the browser's DOM Document
Document document = Jsoup.connect(url).cookies(cookies).get();
// 3. Use the Document like the JS DOM -> grab the element that wraps the whole goods list
Element element = document.getElementById("J_goodsList");
System.out.println("***************"+element);
// 4. Get all li elements (a collection)
Elements elements = element.getElementsByTag("li");
// Create a list to hold the crawled records
ArrayList<Content> contents = new ArrayList<>();
// 5. Extract the fields from each element
for (Element li : elements) {
// Get the image; JD lazy-loads images, so read the data-lazy-img attribute instead of src
String img = li.getElementsByTag("img").eq(0).attr("data-lazy-img");
// Get the price
String price = li.getElementsByClass("p-price").eq(0).text();
// Get the product title
String title = li.getElementsByClass("p-name").eq(0).text();
// Add the crawled record to the list
contents.add(new Content(title,img,price));
}
return contents;
}
public static void main(String[] args) throws Exception {
for (Content java : new HtmlParseUtil().parseJD("java")) {
System.out.println(java);
}
}
}
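If the cookie alone is not enough, the request can also be made to look more like a real browser. This is a sketch, not part of the original code; the User-Agent string and timeout value are arbitrary examples, and the snippet would replace the Jsoup.connect(...) line inside parseJD:

// Sketch: same request as above, but with a browser-style User-Agent and an explicit timeout
Document document = Jsoup.connect(url)
        .userAgent("Mozilla/5.0 (Windows NT 10.0; Win64; x64)") // example UA string
        .timeout(5000)      // give up after 5 seconds instead of hanging
        .cookies(cookies)   // the thor login cookie set above
        .get();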
4. Add the Configuration Class
package com.jsxs.config;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* @Author Jsxs
* @Date 2023/6/30 14:13
* @PackageName:com.jsxs.config
* @ClassName: ElasticSearchClientConfig
* @Description: TODO
* @Version 1.0
*/
@Configuration
public class ElasticSearchClientConfig {
@Bean
public RestHighLevelClient restHighLevelClient(){
RestHighLevelClient client = new RestHighLevelClient(
RestClient.builder(
new HttpHost("localhost", 9200, "http")));
return client;
}
}
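A quick way to confirm that this bean can actually reach the local ES instance is to ping it. A minimal sketch (the test class name is made up; it assumes ES is running on localhost:9200 as configured above):

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;
import javax.annotation.Resource;
import java.io.IOException;

@SpringBootTest
class ClientPingTest {   // hypothetical test class, not part of the original project
    @Resource
    RestHighLevelClient client;

    @Test
    void ping() throws IOException {
        // true means the cluster answered the ping
        System.out.println(client.ping(RequestOptions.DEFAULT));
    }
}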
(三)、Storing the Crawled Data in ES
1. Create the Service Layer
ContentService.java
package com.jsxs.service;
import com.alibaba.fastjson2.JSON;
import com.jsxs.pojo.Content;
import com.jsxs.utils.HtmlParseUtil;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.common.xcontent.XContentType;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.List;
/**
* @Author Jsxs
* @Date 2023/6/30 14:08
* @PackageName:com.jsxs.service
* @ClassName: ContentService
* @Description: TODO
* @Version 1.0
*/
@Service
public class ContentService {
@Resource
RestHighLevelClient client;
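// Note: calling parseContent from the main() below throws a NullPointerException,
// because `client` is only injected when Spring manages this class;
// use the @SpringBootTest in the next step instead.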
public static void main(String[] args) throws Exception {
System.out.println(new ContentService().parseContent("java"));
}
// 1. Crawl the data and put it into our ES index
public Boolean parseContent(String keywords) throws Exception {
List<Content> list = new HtmlParseUtil().parseJD(keywords);
// 2. Bulk-insert the crawled data into ES
BulkRequest bulkRequest = new BulkRequest();
// 3. Set the bulk request timeout
bulkRequest.timeout("2s");
// 4. Create a new index named jd_goods ⭐⭐ On the second run, remove or guard these two lines, because creating an index that already exists throws an exception
CreateIndexRequest request = new CreateIndexRequest("jd_goods");
client.indices().create(request, RequestOptions.DEFAULT);
// 5. Add each document to the bulk request and set its id
for (int i = 0; i < list.size(); i++) {
bulkRequest.add(new IndexRequest("jd_goods")
.id(""+i+1)
.source(JSON.toJSONString(list.get(i)), XContentType.JSON)
);
}
BulkResponse bulk = client.bulk(bulkRequest, RequestOptions.DEFAULT);
// Return true if no item in the bulk request failed
return !bulk.hasFailures();
}
}
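To avoid having to delete the index-creation lines on the second run (the ⭐⭐ note in step 4), the creation can be guarded with an existence check. A sketch using the same high-level client (not in the original tutorial; it needs an extra import of org.elasticsearch.client.indices.GetIndexRequest):

// Only create the jd_goods index if it does not exist yet
GetIndexRequest existsRequest = new GetIndexRequest("jd_goods");
if (!client.indices().exists(existsRequest, RequestOptions.DEFAULT)) {
    client.indices().create(new CreateIndexRequest("jd_goods"), RequestOptions.DEFAULT);
}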
2. Test Whether the Data Was Stored in ES Successfully
package com.jsxs;
import com.jsxs.service.ContentService;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;
import javax.annotation.Resource;
@SpringBootTest
class JsxsEsJdApplicationTests {
@Resource
ContentService contentService;
@Test
void contextLoads() throws Exception {
System.out.println(contentService.parseContent("java"));
}
}
(四)、Paginated Search from ES (the keyword cannot be Chinese here)
Keep in mind that we can only read back data that has already been stored in ES; querying for anything that was never indexed simply returns an empty result.
1. Reading Data from ES
(1). ContentService Layer
package com.jsxs.service;
import com.alibaba.fastjson2.JSON;
import com.jsxs.pojo.Content;
import com.jsxs.utils.HtmlParseUtil;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* @Author Jsxs
* @Date 2023/6/30 14:08
* @PackageName:com.jsxs.service
* @ClassName: ContentService
* @Description: TODO
* @Version 1.0
*/
@Service
public class ContentService {
@Resource
RestHighLevelClient client;
// 1. Crawl the data and put it into our ES index
public Boolean parseContent(String keywords) throws Exception {
List<Content> list = new HtmlParseUtil().parseJD(keywords);
// 2. Bulk-insert the crawled data into ES
BulkRequest bulkRequest = new BulkRequest();
// 3. Set the bulk request timeout
bulkRequest.timeout("2s");
// 4. Create a new index named jd_goods (already created in the previous step, so commented out here)
// CreateIndexRequest request = new CreateIndexRequest("jd_goods");
// client.indices().create(request, RequestOptions.DEFAULT);
// 5. Add each document to the bulk request and set its id
for (int i = 0; i < list.size(); i++) {
bulkRequest.add(new IndexRequest("jd_goods")
.id(i+1+"")
.source(JSON.toJSONString(list.get(i)), XContentType.JSON)
);
}
BulkResponse bulk = client.bulk(bulkRequest, RequestOptions.DEFAULT);
// Return true if no item in the bulk request failed
return !bulk.hasFailures();
}
// 2. Search content from ES
public List<Map<String,Object>> searchesPage(String keywords,int pageNo,int pageSize) throws IOException {
if (pageNo<=1){
pageNo=1;
}
// 1. Build the search request ⭐
SearchRequest request = new SearchRequest("jd_goods");
// 2. Build the search source (query conditions) ⭐⭐
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
// 3. Pagination ⭐⭐⭐ (note: from() takes a document offset, so (pageNo - 1) * pageSize is the usual value)
searchSourceBuilder.from(pageNo);
searchSourceBuilder.size(pageSize);
// 4. Exact term match: the first argument is the field name, the second is the search term ⭐⭐⭐⭐
TermQueryBuilder query = QueryBuilders.termQuery("title", keywords);
searchSourceBuilder.query(query);
searchSourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS));
// 5. Execute the search ⭐⭐⭐⭐⭐
request.source(searchSourceBuilder);
SearchResponse searchResponse = client.search(request, RequestOptions.DEFAULT); // this returns the search response
// 6. Parse the results ⭐⭐⭐⭐⭐⭐
SearchHits hits = searchResponse.getHits(); // getHits() returns a wrapper object that contains the hits array
ArrayList<Map<String,Object>> list = new ArrayList<>();
for (SearchHit hit : searchResponse.getHits().getHits()) {
list.add(hit.getSourceAsMap());
}
System.out.println(list);
return list;
}
}
(2). ContentController Layer
package com.jsxs.controller;
import com.jsxs.service.ContentService;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Resource;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
* @Author Jsxs
* @Date 2023/6/30 14:08
* @PackageName:com.jsxs.controller
* @ClassName: ContentController
* @Description: TODO
* @Version 1.0
*/
@RestController
public class ContentController {
@Resource
private ContentService contentService;
// Crawl JD and index the results into ES
@GetMapping("/parse/{keywords}")
public Boolean parse(@PathVariable("keywords") String keywords) throws Exception {
return contentService.parseContent(keywords);
}
// Paginated search (highlighting is added in a later section)
@GetMapping("/search/{keyword}/{pageNo}/{pageSize}")
public List<Map<String,Object>> search(@PathVariable("keyword") String keyword,@PathVariable("pageNo") int pageNo,@PathVariable("pageSize") int pageSize) throws IOException {
return contentService.searchesPage(keyword,pageNo,pageSize);
}
//
}
2. Error Demo (querying data that is not in ES)
1. The keyword we stored in ES was java, but the keyword we query for is 夏装 (summer clothing).
2. No data comes back for 夏装.
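The section heading also says the keyword cannot be Chinese. That is because termQuery looks up the exact term in the inverted index, and with ES's default standard analyzer a Chinese title is tokenized into single characters, so a multi-character Chinese keyword never matches even when the data is there. One alternative, sketched here as an assumption (the original code keeps termQuery), is a full-text matchQuery, ideally combined with an IK-analyzed title field:

// Sketch: replace the exact-term query in searchesPage with a full-text match query,
// which analyzes the keyword the same way the indexed field was analyzed.
// Needs: import org.elasticsearch.index.query.MatchQueryBuilder;
MatchQueryBuilder query = QueryBuilders.matchQuery("title", keywords);
searchSourceBuilder.query(query);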
(五)、Front-End / Back-End Integration
1. Add Two JS Files (vue.js and axios.min.js)
2. Modify the Original Page
(1). The Modified Front-End Page
index.html
<!DOCTYPE html>
<html xmlns:th="http://www.thymeleaf.org">
<head>
<meta charset="utf-8"/>
<title>狂神说Java-ES仿京东实战</title>
<link rel="stylesheet" th:href="@{/css/style.css}"/>
</head>
<body class="pg">
<div class="page" id="app">
<div id="mallPage" class=" mallist tmall- page-not-market ">
<!-- 头部搜索 -->
<div id="header" class=" header-list-app">
<div class="headerLayout">
<div class="headerCon ">
<!-- Logo-->
<h1 id="mallLogo">
<img th:src="@{/images/jdlogo.png}" alt="">
</h1>
<div class="header-extra">
<!-- Search box -->
<div id="mallSearch" class="mall-search">
<form name="searchTop" class="mallSearch-form clearfix">
<fieldset>
<legend>天猫搜索</legend>
<div class="mallSearch-input clearfix">
<div class="s-combobox" id="s-combobox-685">
<div class="s-combobox-input-wrap">
<input v-model="keywords" type="text" autocomplete="off" value="dd" id="mq"
class="s-combobox-input" aria-haspopup="true">
</div>
</div>
<button type="submit" @click.prevent="searchKey" id="searchbtn">搜索</button>
</div>
</fieldset>
</form>
<ul class="relKeyTop">
<li><a>狂神说Java</a></li>
<li><a>狂神说前端</a></li>
<li><a>狂神说Linux</a></li>
<li><a>狂神说大数据</a></li>
<li><a>狂神聊理财</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<!-- Product listing area -->
<div id="content">
<div class="main">
<!-- Brand filter -->
<form class="navAttrsForm">
<div class="attrs j_NavAttrs" style="display:block">
<div class="brandAttr j_nav_brand">
<div class="j_Brand attr">
<div class="attrKey">
品牌
</div>
<div class="attrValues">
<ul class="av-collapse row-2">
<li><a href="#"> 狂神说 </a></li>
<li><a href="#"> Java </a></li>
</ul>
</div>
</div>
</div>
</div>
</form>
<!-- Sort options -->
<div class="filter clearfix">
<a class="fSort fSort-cur">综合<i class="f-ico-arrow-d"></i></a>
<a class="fSort">人气<i class="f-ico-arrow-d"></i></a>
<a class="fSort">新品<i class="f-ico-arrow-d"></i></a>
<a class="fSort">销量<i class="f-ico-arrow-d"></i></a>
<a class="fSort">价格<i class="f-ico-triangle-mt"></i><i class="f-ico-triangle-mb"></i></a>
</div>
<!-- Product cards -->
<div class="view grid-nosku">
<div class="product" v-for="result in results">
<div class="product-iWrap">
<!-- Product image -->
<div class="productImg-wrap">
<a class="productImg">
<img :src="result.img">
</a>
</div>
<!-- Price -->
<p class="productPrice">
<em><b>¥</b>{{result.price}}</em>
</p>
<!-- Title -->
<p class="productTitle">
<a> {{result.title}} </a>
</p>
<!-- Shop name -->
<div class="productShop">
<span>店铺: 狂神说Java </span>
</div>
<!-- Sales info -->
<p class="productStatus">
<span>月成交<em>999笔</em></span>
<span>评价 <a>3</a></span>
</p>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- The front end uses Vue to decouple the front end from the back end ⭐⭐⭐ -->
<script th:src="@{/js/axios.min.js}"></script>
<script th:src="@{/js/vue.js}"></script>
<script>
new Vue({
el:'#app',
data:{
keywords: '', // the search keyword
results:[] // the search results
},
methods: {
searchKey() {
var keyword = this.keywords;
console.log(keyword);
// Call the back-end endpoint and fetch the results asynchronously
axios.get('search/' + keyword + "/1/10").then(response => {
console.log(response);
this.results = response.data; // bind the data
})
}
}
})
</script>
</body>
</html>
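The tutorial does not show the controller that serves this Thymeleaf template. Something along the following lines is assumed (the class name IndexController and the mapped paths are guesses, not from the original):

package com.jsxs.controller;

import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.GetMapping;

// Hypothetical controller that renders templates/index.html through Thymeleaf
@Controller
public class IndexController {
    @GetMapping({"/", "/index"})
    public String index() {
        return "index";
    }
}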
3. Comment Out the Manual id When Bulk-Inserting (let ES generate the document ids)
4. Run and Test: Success
Everything displayed is read from ES; if the data is not in ES, the search returns nothing.
(六)、Implementing Search Highlighting
1. Implement Search Highlighting
(1). ContentService Layer
package com.jsxs.service;
import com.alibaba.fastjson2.JSON;
import com.jsxs.pojo.Content;
import com.jsxs.utils.HtmlParseUtil;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* @Author Jsxs
* @Date 2023/6/30 14:08
* @PackageName:com.jsxs.service
* @ClassName: ContentService
* @Description: TODO
* @Version 1.0
*/
@Service
public class ContentService {
@Resource
RestHighLevelClient client;
// 1. Crawl the data and put it into our ES index
public Boolean parseContent(String keywords) throws Exception {
List<Content> list = new HtmlParseUtil().parseJD(keywords);
// 2. Bulk-insert the crawled data into ES
BulkRequest bulkRequest = new BulkRequest();
// 3. Set the bulk request timeout
bulkRequest.timeout("2s");
// 4. Create a new index named jd_goods (already exists, so commented out here)
// CreateIndexRequest request = new CreateIndexRequest("jd_goods");
// client.indices().create(request, RequestOptions.DEFAULT);
// 5. Add each document to the bulk request
for (int i = 0; i < list.size(); i++) {
bulkRequest.add(new IndexRequest("jd_goods")
// .id(i+1+"")   better to drop the manual id and let ES generate one
.source(JSON.toJSONString(list.get(i)), XContentType.JSON)
);
}
BulkResponse bulk = client.bulk(bulkRequest, RequestOptions.DEFAULT);
// Return true if no item in the bulk request failed
return !bulk.hasFailures();
}
// 2. Basic search against ES
public List<Map<String,Object>> searchesPage(String keywords,int pageNo,int pageSize) throws IOException {
if (pageNo<=1){
pageNo=1;
}
// 1. Build the search request
SearchRequest request = new SearchRequest("jd_goods");
// 2. Build the search source (query conditions)
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
// 3. Pagination
searchSourceBuilder.from(pageNo);
searchSourceBuilder.size(pageSize);
// 4. Exact term match: the first argument is the field name, the second is the search term
TermQueryBuilder query = QueryBuilders.termQuery("title", keywords);
searchSourceBuilder.query(query);
searchSourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS));
// 5. Execute the search
request.source(searchSourceBuilder);
SearchResponse searchResponse = client.search(request, RequestOptions.DEFAULT); // this returns the search response
// 6. Parse the results
SearchHits hits = searchResponse.getHits(); // getHits() returns a wrapper object that contains the hits array
ArrayList<Map<String,Object>> list = new ArrayList<>();
for (SearchHit hit : searchResponse.getHits().getHits()) {
list.add(hit.getSourceAsMap());
}
return list;
}
// 3. Search against ES with highlighting + pagination
public List<Map<String,Object>> searchesPageHight(String keywords,int pageNo,int pageSize) throws IOException {
if (pageNo<=1){
pageNo=1;
}
// 1. Build the search request
SearchRequest request = new SearchRequest("jd_goods");
// 2. Build the search source (query conditions)
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
// 3. Pagination
searchSourceBuilder.from(pageNo);
searchSourceBuilder.size(pageSize);
// 4. Exact term match: the first argument is the field name, the second is the search term
TermQueryBuilder query = QueryBuilders.termQuery("title", keywords);
searchSourceBuilder.query(query);
searchSourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS));
// 5. Configure highlighting ⭐
HighlightBuilder highlightBuilder = new HighlightBuilder();
highlightBuilder.requireFieldMatch(false); // do not require the query to match this exact field before highlighting it
highlightBuilder.field("title"); // which field to highlight
highlightBuilder.preTags("<span style='color:yellow'>"); // tag inserted before each highlighted fragment
highlightBuilder.postTags("</span>"); // tag inserted after each highlighted fragment
// 6. Attach the highlight settings to the search source ⭐⭐
searchSourceBuilder.highlighter(highlightBuilder);
// 7. Execute the search
request.source(searchSourceBuilder);
SearchResponse searchResponse = client.search(request, RequestOptions.DEFAULT); // this returns the search response
// 8. Replace the original field content with the highlighted fragments ⭐⭐⭐
SearchHits hits = searchResponse.getHits(); // getHits() returns a wrapper object that contains the hits array
ArrayList<Map<String,Object>> list = new ArrayList<>();
for (SearchHit hit : searchResponse.getHits().getHits()) {
Map<String, HighlightField> highlightFields = hit.getHighlightFields();
HighlightField title = highlightFields.get("title");
Map<String, Object> sourceAsMap = hit.getSourceAsMap(); // the original _source
// Parse the highlighted field and swap it in for the original title
if (title!=null){
Text[] fragments = title.fragments();
String n_title="";
for (Text text : fragments) {
n_title+=text;
}
sourceAsMap.put("title",n_title); // replace the original title with the highlighted version
}
list.add(sourceAsMap);
}
return list;
}
}
(2). ContentController Layer
package com.jsxs.controller;
import com.jsxs.service.ContentService;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Resource;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
* @Author Jsxs
* @Date 2023/6/30 14:08
* @PackageName:com.jsxs.controller
* @ClassName: ContentController
* @Description: TODO
* @Version 1.0
*/
@RestController
public class ContentController {
@Resource
private ContentService contentService;
// Crawl JD and index the results into ES
@GetMapping("/parse/{keywords}")
public Boolean parse(@PathVariable("keywords") String keywords) throws Exception {
return contentService.parseContent(keywords);
}
// Paginated search with highlighting
@GetMapping("/search/{keyword}/{pageNo}/{pageSize}")
public List<Map<String,Object>> search(@PathVariable("keyword") String keyword,@PathVariable("pageNo") int pageNo,@PathVariable("pageSize") int pageSize) throws IOException {
return contentService.searchesPageHight(keyword,pageNo,pageSize);
}
}
(3). We find that the highlighted results are not rendered (the span tags show up as plain text)
2. Fixing the Unrendered Highlight Markup
The Vue page does not render the highlight tags as HTML, because the {{ }} text interpolation escapes them; the field just needs to be rendered as HTML instead, as sketched below.
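A sketch of the change in index.html, assuming title is the only highlighted field (v-html tells Vue to insert the string as raw HTML):

<!-- Before: {{result.title}} shows the <span> highlight tags as escaped text -->
<!-- After: v-html lets the browser interpret the highlight markup -->
<p class="productTitle">
<a v-html="result.title"></a>
</p>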