diff --git a/app/_components/home/Welcome.tsx b/app/_components/home/Welcome.tsx
index e2d677a..63aa9d9 100644
--- a/app/_components/home/Welcome.tsx
+++ b/app/_components/home/Welcome.tsx
@@ -1,9 +1,11 @@
import { useState } from "react";
import Player from "../common/Player";
import { PlayCircle } from "lucide-react";
+import { useScreenSize } from "@/app/_hooks/useScreenSize";
export default function Welcome() {
const [viewPromotion, setViewPromotion] = useState(false);
+ const { isMobile } = useScreenSize();
return (
@@ -22,8 +24,8 @@ export default function Welcome() {
{viewPromotion ? (
) : (
@@ -37,9 +39,9 @@ export default function Welcome() {
-
)
diff --git a/app/_hooks/useMediaQuery.ts b/app/_hooks/useMediaQuery.ts
new file mode 100644
index 0000000..91f40f3
--- /dev/null
+++ b/app/_hooks/useMediaQuery.ts
@@ -0,0 +1,48 @@
+import { useIsomorphicLayoutEffect } from "framer-motion";
+import React from "react";
+
+type UseMediaQueryOptions = {
+  /** Value reported when matchMedia is unavailable (e.g. during SSR). */
+  defaultValue?: boolean;
+  /** Read matchMedia during the initial render; set to false to avoid SSR hydration mismatches. */
+  initializeWithValue?: boolean;
+};
+
+export function useMediaQuery(
+  query: string,
+  {
+    defaultValue = false,
+    initializeWithValue = true,
+  }: UseMediaQueryOptions = {},
+): boolean {
+  const isServer = typeof window === "undefined";
+
+  const getMatches = (query: string): boolean => {
+    if (isServer) return defaultValue;
+    return window.matchMedia(query).matches;
+  };
+
+  const [matches, setMatches] = React.useState(() => {
+    if (initializeWithValue) {
+      return getMatches(query);
+    }
+    return defaultValue;
+  });
+
+  const handleChange = () => setMatches(getMatches(query));
+
+  // Layout effect in the browser, plain effect during SSR: sync once on mount
+  // and subscribe to changes of the media query.
+  useIsomorphicLayoutEffect(() => {
+    if (isServer) return;
+
+    const matchMedia = window.matchMedia(query);
+
+    handleChange();
+    matchMedia.addEventListener("change", handleChange);
+
+    return () => matchMedia.removeEventListener("change", handleChange);
+  }, [query]);
+
+  return matches;
+}
diff --git a/app/_hooks/useScreenSize.ts b/app/_hooks/useScreenSize.ts
new file mode 100644
index 0000000..0e4ad34
--- /dev/null
+++ b/app/_hooks/useScreenSize.ts
@@ -0,0 +1,16 @@
+import { useMediaQuery } from "./useMediaQuery";
+
+export function useScreenSize() {
+  const sm = useMediaQuery(`(max-width: ${980 - 1}px)`);
+  const md = useMediaQuery(
+    `(min-width: ${980}px) and (max-width: ${1280 - 1}px)`,
+  );
+  const lg = useMediaQuery(`(min-width: ${1280}px)`);
+
+  return {
+    bp: sm ? "sm" : md ? "md" : "lg",
+    isMobile: sm,
+    isTablet: md,
+    isDesktop: lg,
+  };
+}
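
For orientation, here is a minimal sketch of how a component might consume the new hook. It assumes a Next.js client component; `HeroImage` and the desktop image path are hypothetical and not part of this diff (only `public/images/main_mobile.png` is added below), and the actual `Welcome.tsx` usage is in the hunks above.

```tsx
"use client";

// Hypothetical usage sketch; not part of this diff.
import { useScreenSize } from "@/app/_hooks/useScreenSize";

export default function HeroImage() {
  const { isMobile } = useScreenSize();

  // main_mobile.png is added in this PR; the desktop path is assumed for illustration.
  const src = isMobile ? "/images/main_mobile.png" : "/images/main.png";

  return <img src={src} alt="Main visual" />;
}
```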
diff --git a/content/meet-our-new-member.mdx b/content/meet-our-new-member.mdx
index 670446f..7524d00 100644
--- a/content/meet-our-new-member.mdx
+++ b/content/meet-our-new-member.mdx
@@ -35,7 +35,13 @@ But what if we could leverage AI technology to bring the conversational depth an
# Real Thoughts Emerge Through Conversations
-
+
The Wrtn Labs team leveraged [**Agentica**](https://wrtnlabs.io/agentica/) to develop and test an interactive **Interview AI** that dynamically generates follow-up questions based on user responses. The approach involves clearly defining interview goals, the type of information desired, and the insights valuable to the product team. If a user provides a vague or brief response, the AI agent is designed to ask follow-up questions to gather more detailed and relevant information.
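
As a rough illustration of what "clearly defining interview goals, the type of information desired, and the insights valuable to the product team" could look like in code, here is a hedged sketch. The type and field names are hypothetical; the post does not show the team's actual prompt or configuration.

```ts
// Hypothetical shape for an interview definition; not the team's actual config.
type InterviewGoal = {
  objective: string;            // what the product team wants to learn
  desiredInformation: string[]; // concrete data points to collect
  followUpPolicy: string;       // when and how to probe vague or brief answers
};

const churnInterview: InterviewGoal = {
  objective: "Understand why users stop using the product within their first week",
  desiredInformation: [
    "The moment the user decided to stop",
    "What they were trying to accomplish at that moment",
    "Which alternative, if any, they switched to",
  ],
  followUpPolicy:
    "If an answer is vague or only a few words long, ask one concrete follow-up before moving on",
};
```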
@@ -47,7 +53,13 @@ Yet, not every user initially provided detailed feedback. Those without specific
# Can AI Really Ask the Right Questions to Users?
-
+
To effectively capture interview objectives and desired information, while dynamically generating appropriate follow-up questions based on user responses, meticulous refinement of the prompts was necessary. We analyzed questions and response patterns from actual face-to-face interviews and reflected various examples of interview guides, primary questions, and potential follow-up questions in the prompts.
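
A hedged sketch of the kind of prompt fragment this describes follows: primary questions plus follow-up guidance distilled from face-to-face interview patterns. The wording is illustrative only; the team's actual prompts are not published.

```ts
// Illustrative prompt fragment; the actual prompts used by the team are not shown in the post.
const interviewerPrompt = `
You are conducting a product interview.

Primary questions:
1. When did you last use the product, and what were you trying to do?
2. What made you stop or hesitate?

Follow-up guidance (modeled on patterns from face-to-face interviews):
- If the answer names a problem without context, ask for a concrete recent example.
- If the answer is "it was inconvenient", ask which specific step felt inconvenient and why.
- Ask at most two follow-ups per topic, then move to the next primary question.
`;
```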
@@ -59,7 +71,13 @@ Of course, due to the nature of Large Language Models (LLMs), prompts are not re
# Organizing Insights Through AI Agents
-
+
However, unlike structured surveys, conversational data gathered from many users proved challenging to organize clearly. Since insights were recorded conversationally, it was essential to structure them into charts or allow queries to focus on specific data points. Here, Agentica’s core functionality—the Connector—played a pivotal role. Within Agentica, any callable function can be turned into a Connector accessible by the AI agents whenever required. For example, when an Insight Extraction agent received a command such as "Show me user churn complaints," it could invoke a Connector to access records from the Interview agent, retrieve relevant data, and summarize the results clearly in text form. Thus, agents with different purposes and tools could communicate seamlessly to fulfill user requests.
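
To make the Connector idea concrete, the sketch below shows the general pattern of exposing an ordinary callable function to agents through a registry. It deliberately does not reproduce Agentica's real API (see the documentation at wrtnlabs.io/agentica); `ConnectorRegistry`, `invoke`, and `getInterviewRecords` are hypothetical names.

```ts
// Generic illustration of "any callable function can become a Connector".
// This is NOT Agentica's actual API; every name below is hypothetical.
type Connector = (input: string) => Promise<string>;

class ConnectorRegistry {
  private connectors = new Map<string, Connector>();

  register(name: string, fn: Connector): void {
    this.connectors.set(name, fn);
  }

  // An agent resolves a tool by name and calls it like any other function.
  async invoke(name: string, input: string): Promise<string> {
    const fn = this.connectors.get(name);
    if (!fn) throw new Error(`Unknown connector: ${name}`);
    return fn(input);
  }
}

// e.g. the Insight Extraction agent pulling records written by the Interview agent:
const registry = new ConnectorRegistry();
registry.register("getInterviewRecords", async (topic) => {
  // A real implementation would query stored interview transcripts about `topic`
  // (such as churn complaints) and return them for summarization.
  return `interview records about: ${topic}`;
});

// await registry.invoke("getInterviewRecords", "user churn complaints");
```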
diff --git a/public/images/main_mobile.png b/public/images/main_mobile.png
new file mode 100644
index 0000000..c75ba79
Binary files /dev/null and b/public/images/main_mobile.png differ