Big Data Graduation Project Topic Recommendation: A Tourism Website User Behavior Data Analysis System Based on Big Data (Hadoop / Spark / Data Visualization / BigData)
✨ Author homepage: IT毕设梦工厂 ✨
About the author: formerly a computer-science training instructor, experienced in hands-on projects with Java, Python, PHP, .NET, Node.js, Go, WeChat Mini Programs, Android, and more. Available for custom project development, code walkthroughs, thesis-defense coaching, documentation writing, and similarity-rate reduction.
☑ Get the source code at the end of this article ☑
Recommended columns ⬇⬇⬇
Java projects
Python projects
Android projects
WeChat Mini Program projects
Table of Contents
- 1. Foreword
- 2. Development Environment
- 3. System Interface Showcase
- 4. Selected Code Design
- 5. System Video
- Conclusion
1. Foreword
XXXX
2. Development Environment
- Big data framework: Hadoop + Spark (Hive is not used in this build; customization is supported)
- Development languages: Python + Java (both versions are supported)
- Backend frameworks: Django + Spring Boot (Spring + Spring MVC + MyBatis) (both versions are supported)
- Frontend: Vue + ElementUI + ECharts + HTML + CSS + JavaScript + jQuery
- Key technologies: Hadoop, HDFS, Spark, Spark SQL, Pandas, NumPy
- Database: MySQL (a minimal sketch of how Spark results reach MySQL for the ECharts frontend follows this list)
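To show how the pieces of this stack connect, here is a minimal sketch of the typical data path: Spark aggregates raw behavior logs and writes the result table to MySQL, where the Django or Spring Boot backend reads it and serves it to the Vue + ECharts charts. The HDFS path, database name, table name, and credentials below are illustrative assumptions, not fixed project settings.

```python
# Minimal sketch (assumptions: local MySQL, a "tourism_analysis" database,
# and behavior logs already on HDFS; adjust names and credentials to your setup).
from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .appName("TourismResultSink")
         # The MySQL JDBC driver must be on the classpath; pinning it via
         # spark.jars.packages is one option (the version is an assumption).
         .config("spark.jars.packages", "mysql:mysql-connector-java:8.0.33")
         .getOrCreate())

# Aggregate one example metric: user count per preferred device.
result_df = (spark.read.csv("hdfs:///tourism/user_behavior.csv",  # assumed path
                            header=True, inferSchema=True)
             .groupBy("preferred_device").count())

# Persist the result so the web backend can expose it to ECharts.
(result_df.write.format("jdbc")
 .option("url", "jdbc:mysql://localhost:3306/tourism_analysis?useSSL=false")
 .option("dbtable", "device_distribution")  # hypothetical result table
 .option("user", "root")                    # placeholder credentials
 .option("password", "123456")
 .mode("overwrite")
 .save())

spark.stop()
```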
3. System Interface Showcase
- Interface showcase of the big-data tourism website user behavior data analysis system:
4. Selected Code Design
- Hands-on code reference. The core analysis module below defines three PySpark functions: basic user-profile statistics (`user_basic_feature_analysis`), interaction-behavior analysis (`user_interaction_behavior_analysis`), and RFM segmentation with K-Means clustering (`user_segmentation_rfm_analysis`):
```python
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, sum, avg, when, desc
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import VectorAssembler

spark = (SparkSession.builder
         .appName("TourismUserBehaviorAnalysis")
         .config("spark.sql.adaptive.enabled", "true")
         .getOrCreate())


def user_basic_feature_analysis(data_path):
    # Profile-level statistics: device, location, work status, and family structure.
    df = spark.read.csv(data_path, header=True, inferSchema=True)
    total_users = df.count()
    # Share of users and purchase conversion per preferred device.
    device_analysis = df.groupBy("preferred_device").agg(
        count("*").alias("user_count"),
        (count("*") * 100.0 / total_users).alias("percentage"),
        sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)).alias("purchase_count"),
        (sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)) * 100.0 / count("*")).alias("conversion_rate")
    ).orderBy(desc("user_count"))
    # Engagement and purchases per preferred location type.
    location_analysis = df.groupBy("preferred_location_type").agg(
        count("*").alias("user_count"),
        avg("Daily_Avg_mins_spend_on_traveling_page").alias("avg_time_spent"),
        sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)).alias("purchase_count")
    )
    # Effect of working status on purchase rate and page views.
    working_impact = df.groupBy("working_flag").agg(
        count("*").alias("total_users"),
        sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)).alias("purchasers"),
        (sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)) * 100.0 / count("*")).alias("purchase_rate"),
        avg("Yearly_avg_view_on_travel_page").alias("avg_page_views")
    )
    # Family size versus buying behavior and engagement time.
    family_analysis = df.groupBy("member_in_family").agg(
        count("*").alias("user_count"),
        sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)).alias("buyers"),
        avg("Daily_Avg_mins_spend_on_traveling_page").alias("avg_engagement_time")
    ).orderBy("member_in_family")
    # Cross distribution of adult flag and preferred location type.
    cross_analysis = df.groupBy("Adult_flag", "preferred_location_type").agg(
        count("*").alias("user_count"),
        (count("*") * 100.0 / total_users).alias("distribution_rate")
    ).orderBy("Adult_flag", desc("user_count"))
    return {
        "device_preference": device_analysis.collect(),
        "location_preference": location_analysis.collect(),
        "working_impact": working_impact.collect(),
        "family_structure": family_analysis.collect(),
        "adult_location_cross": cross_analysis.collect()
    }


def user_interaction_behavior_analysis(data_path):
    # Interaction behavior: dwell time, commenting, check-ins, and social balance.
    df = spark.read.csv(data_path, header=True, inferSchema=True)
    # Bucket users by average daily minutes spent on travel pages.
    df = df.withColumn("engagement_level",
        when(col("Daily_Avg_mins_spend_on_traveling_page") < 5, "Short stay (0-5 min)")
        .when(col("Daily_Avg_mins_spend_on_traveling_page") < 15, "Medium stay (5-15 min)")
        .otherwise("Long stay (>15 min)"))
    engagement_analysis = df.groupBy("engagement_level").agg(
        count("*").alias("user_count"),
        avg("Yearly_avg_view_on_travel_page").alias("avg_page_views"),
        sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)).alias("conversions"),
        (sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)) * 100.0 / count("*")).alias("conversion_rate")
    ).orderBy(desc("conversion_rate"))
    # Comment activity buckets versus time spent and purchase rate.
    comment_behavior = df.withColumn("comment_activity_level",
        when(col("Yearly_avg_comment_on_travel_page") < 10, "Low comment activity (1-10/month)")
        .when(col("Yearly_avg_comment_on_travel_page") < 20, "Medium comment activity (11-20/month)")
        .otherwise("High comment activity (>20/month)")
    ).groupBy("comment_activity_level").agg(
        count("*").alias("user_count"),
        avg("Daily_Avg_mins_spend_on_traveling_page").alias("avg_time_spent"),
        (sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)) * 100.0 / count("*")).alias("purchase_rate")
    )
    # Out-of-town check-in frequency versus likes given and conversion.
    checkin_analysis = df.withColumn("checkin_frequency",
        when(col("yearly_avg_Outstation_checkins") == 0, "No check-ins")
        .when(col("yearly_avg_Outstation_checkins") < 5, "Low frequency (1-4)")
        .when(col("yearly_avg_Outstation_checkins") < 10, "Medium frequency (5-9)")
        .otherwise("High frequency (>=10)")
    ).groupBy("checkin_frequency").agg(
        count("*").alias("user_count"),
        avg("total_likes_on_outstation_checkin_given").alias("avg_likes_given"),
        (sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)) * 100.0 / count("*")).alias("conversion_rate")
    )
    # Social balance: likes received minus likes given on out-of-town check-ins.
    social_interaction = df.withColumn("social_balance",
        col("total_likes_on_outofstation_checkin_received") - col("total_likes_on_outstation_checkin_given")
    ).select("social_balance", "Buy_ticket", "following_company_page")
    return {
        "engagement_analysis": engagement_analysis.collect(),
        "comment_behavior": comment_behavior.collect(),
        "checkin_patterns": checkin_analysis.collect(),
        "social_interaction_balance": social_interaction.collect()
    }


def user_segmentation_rfm_analysis(data_path):
    # RFM-style segmentation plus K-Means clustering on behavioral features.
    df = spark.read.csv(data_path, header=True, inferSchema=True)
    # Fill missing recency with the column mean; frequency/engagement default to zero.
    mean_recency = df.agg(avg("week_since_last_outstation_checkin")).collect()[0][0]
    df_cleaned = df.fillna({
        "week_since_last_outstation_checkin": mean_recency,
        "yearly_avg_Outstation_checkins": 0,
        "Daily_Avg_mins_spend_on_traveling_page": 0
    })
    # Tercile boundaries (33rd/67th percentiles) for the R, F, and M dimensions.
    recency_percentiles = df_cleaned.approxQuantile("week_since_last_outstation_checkin", [0.33, 0.67], 0.01)
    frequency_percentiles = df_cleaned.approxQuantile("yearly_avg_Outstation_checkins", [0.33, 0.67], 0.01)
    monetary_percentiles = df_cleaned.approxQuantile("Daily_Avg_mins_spend_on_traveling_page", [0.33, 0.67], 0.01)
    # Recency is scored inversely: fewer weeks since the last check-in is better.
    rfm_df = df_cleaned.withColumn("R_Score",
        when(col("week_since_last_outstation_checkin") <= recency_percentiles[0], 3)
        .when(col("week_since_last_outstation_checkin") <= recency_percentiles[1], 2)
        .otherwise(1)
    ).withColumn("F_Score",
        when(col("yearly_avg_Outstation_checkins") >= frequency_percentiles[1], 3)
        .when(col("yearly_avg_Outstation_checkins") >= frequency_percentiles[0], 2)
        .otherwise(1)
    ).withColumn("M_Score",
        when(col("Daily_Avg_mins_spend_on_traveling_page") >= monetary_percentiles[1], 3)
        .when(col("Daily_Avg_mins_spend_on_traveling_page") >= monetary_percentiles[0], 2)
        .otherwise(1))
    # Map R/F/M score combinations to named user segments.
    rfm_segments = rfm_df.withColumn("RFM_Segment",
        when((col("R_Score") == 3) & (col("F_Score") == 3) & (col("M_Score") >= 2), "High-value users")
        .when((col("R_Score") >= 2) & (col("F_Score") >= 2) & (col("M_Score") >= 2), "Mid-to-high-value users")
        .when((col("R_Score") >= 2) & (col("F_Score") == 1) & (col("M_Score") >= 2), "Potential users")
        .when((col("R_Score") == 1) & (col("F_Score") >= 2), "Churn-risk users")
        .otherwise("Low-value users"))
    segment_analysis = rfm_segments.groupBy("RFM_Segment").agg(
        count("*").alias("user_count"),
        avg("Yearly_avg_view_on_travel_page").alias("avg_page_views"),
        avg("total_likes_on_outstation_checkin_given").alias("avg_social_activity"),
        (sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)) * 100.0 / count("*")).alias("conversion_rate"),
        avg("R_Score").alias("avg_recency"),
        avg("F_Score").alias("avg_frequency"),
        avg("M_Score").alias("avg_monetary")
    ).orderBy(desc("conversion_rate"))
    # K-Means clustering on four behavioral features as a cross-check on the RFM segments.
    feature_cols = ["Yearly_avg_view_on_travel_page", "yearly_avg_Outstation_checkins",
                    "Daily_Avg_mins_spend_on_traveling_page", "total_likes_on_outstation_checkin_given"]
    # handleInvalid="skip" drops rows with nulls in feature columns not filled above.
    assembler = VectorAssembler(inputCols=feature_cols, outputCol="features", handleInvalid="skip")
    feature_df = assembler.transform(rfm_df).select("features", "Buy_ticket")
    kmeans = KMeans(k=4, seed=42)
    model = kmeans.fit(feature_df)
    clustered_df = model.transform(feature_df)
    cluster_analysis = clustered_df.groupBy("prediction").agg(
        count("*").alias("cluster_size"),
        (sum(when(col("Buy_ticket") == "Yes", 1).otherwise(0)) * 100.0 / count("*")).alias("purchase_rate")
    ).orderBy("prediction")
    return {
        "rfm_segments": segment_analysis.collect(),
        "kmeans_clusters": cluster_analysis.collect(),
        "segment_distribution": rfm_segments.groupBy("RFM_Segment").count().collect()
    }
```
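For reference, here is a short, hypothetical driver showing how the three functions above might be invoked from the same module; the dataset path is an assumption, and the printed fields follow the dictionaries returned above.

```python
# Hypothetical driver for the three analysis functions above.
# Replace data_path with the actual HDFS or local path to the dataset.
if __name__ == "__main__":
    data_path = "hdfs:///tourism/user_behavior.csv"  # assumed location

    basic = user_basic_feature_analysis(data_path)
    for row in basic["device_preference"]:
        print(row["preferred_device"], row["user_count"],
              f'{row["conversion_rate"]:.2f}%')

    behavior = user_interaction_behavior_analysis(data_path)
    for row in behavior["engagement_analysis"]:
        print(row["engagement_level"], f'{row["conversion_rate"]:.2f}%')

    segments = user_segmentation_rfm_analysis(data_path)
    for row in segments["rfm_segments"]:
        print(row["RFM_Segment"], row["user_count"],
              f'{row["conversion_rate"]:.2f}%')

    spark.stop()
```

One design note: the RFM scoring uses approxQuantile at the 33rd and 67th percentiles, which splits each dimension into three roughly equal bands regardless of how skewed the raw distributions are, and recency is scored inversely (fewer weeks since the last check-in means a higher R score). If the K-Means clusters end up dominated by the largest-scale feature, standardizing the assembled vector (for example with Spark ML's StandardScaler) before fitting is a reasonable refinement.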
5. System Video
- Project video of the big-data tourism website user behavior data analysis system:
Big Data Graduation Project Topic Recommendation: A Tourism Website User Behavior Data Analysis System Based on Big Data (Hadoop / Spark / Data Visualization / BigData)
Conclusion
Big Data Graduation Project Topic Recommendation: A Tourism Website User Behavior Data Analysis System Based on Big Data (Hadoop / Spark / Data Visualization / BigData)
If you would like to see other types of computer-science graduation projects, just let me know. Thank you, everyone!
For technical questions, feel free to discuss in the comments section or message me directly.
Likes, bookmarks, follows, and comments are all very much appreciated!
Source code: ⬇⬇⬇