{
  "portfolio_data_verification_manifest": {
    "version": "2.1.0",
    "last_updated": "2026-04-07",
    "owner": "Ed Chen",
    "purpose": "Provide structured verification pathways for all quantitative claims in portfolio",
    "verification_tiers": {
      "tier_1_public": {
        "name": "Publicly Verifiable Evidence",
        "description": "Anyone can verify these claims through public URLs or regulatory databases",
        "trust_level": "Highest",
        "examples": [
          {
            "claim": "ACY.com Awwwards Nominee",
            "verification_url": "https://www.awwwards.com/sites/acy-com",
            "verification_method": "Public third-party platform",
            "last_verified": "2026-03-20",
            "status": "active"
          },
          {
            "claim": "Zero ASIC compliance violations (2023-2025)",
            "verification_url": "https://asic.gov.au/",
            "verification_method": "Public regulatory database (ASIC Connect)",
            "notes": "Search ACY Securities in ASIC enforcement actions database",
            "last_verified": "2026-03-20",
            "status": "active"
          },
          {
            "claim": "Platform operates in 40+ countries",
            "verification_url": "https://acy.com/",
            "verification_method": "Visible in website footer regulatory disclosures",
            "last_verified": "2026-03-20",
            "status": "active"
          }
        ]
      },
      "tier_2_analytics": {
        "name": "Analytics Dashboard Evidence",
        "description": "Verifiable via analytics platform screenshots or live dashboard access during interviews",
        "trust_level": "High (requires interview verification)",
        "interview_verification_methods": [
          "Screenshot evidence (sanitized)",
          "Live screen-share of analytics dashboard",
          "Exported CSV/PDF reports"
        ],
        "examples": [
          {
            "claim": "TradingCup: 187K organic clicks (12 months)",
            "data_source": "Google Search Console",
            "date_range": "2025-02-01 to 2026-01-31",
            "sample_size": "187,000 clicks",
            "verification_method": "GSC screenshot available upon request",
            "screenshot_available": true,
            "screenshot_sanitized": true,
            "can_provide_live_access": true,
            "notes": "Full GSC dashboard access can be granted during video interview",
            "metric_breakdown": {
              "total_clicks": 187000,
              "total_impressions": 31000000,
              "CTR_formula": "CTR = (Total Clicks / Total Impressions) × 100",
              "CTR_calculated": "(187,000 / 31,000,000) × 100 = 0.603%",
              "average_position": 21.7,
              "data_granularity": "Daily aggregated data from Google Search Console API",
              "query_count": "~2,400 unique search queries driving traffic",
              "top_performing_query_categories": [
                "Copy trading / social trading related terms",
                "Algorithmic trading strategy keywords",
                "Trading signals and automation queries"
              ],
              "note_on_query_details": "Specific query rankings withheld to protect SEO competitive intelligence"
            },
            "attribution_methodology": {
              "designer_role": "I did NOT personally execute SEO strategy. This metric demonstrates platform scale and design context.",
              "design_contribution": [
                "Designed content hierarchy optimized for search intent (H1/H2 structure, meta descriptions in collaboration with Marketing)",
                "Created page templates with structured data markup (Schema.org for FinancialProduct)",
                "Optimized Core Web Vitals (LCP < 2.5s through image optimization + lazy loading)"
              ],
              "attribution_disclaimer": "SEO performance driven by Marketing + Content teams. I claim credit only for UX design decisions that supported SEO (page structure, performance optimization), NOT for traffic growth itself."
            },
            "quality_metrics": {
              "engagement_rate": "42.37% (GA4, 30-day rolling average)",
              "engagement_rate_definition": "GA4 Engagement Rate = (Engaged Sessions / Total Sessions) × 100. Engaged session = >10s duration OR >1 page view OR conversion event.",
              "bounce_rate_equivalent": "~57.63% (inverse of engagement rate, but not directly comparable to legacy GA bounce rate)",
              "avg_session_duration": "Not disclosed (proprietary metric under NDA)",
              "why_engagement_matters": "High engagement rate (42.37%) suggests traffic quality is good despite low CTR (0.6%). Users who click are genuinely interested in copy trading, not accidental clicks."
            }
          },
          {
            "claim": "TradingCup: 31M search impressions (12 months)",
            "data_source": "Google Search Console",
            "date_range": "2025-02-01 to 2026-01-31",
            "sample_size": "31,000,000 impressions",
            "verification_method": "GSC screenshot available upon request",
            "screenshot_available": true,
            "can_provide_live_access": true
          },
          {
            "claim": "TradingCup: 42.37% engagement rate",
            "data_source": "Google Analytics 4",
            "date_range": "Latest 30 days (rolling)",
            "verification_method": "GA4 screenshot available upon request",
            "screenshot_available": true,
            "can_provide_live_access": true
          },
          {
            "claim": "ACY.com: 61% bounce rate (30-day avg)",
            "data_source": "Hotjar Analytics",
            "date_range": "30-day measurement period (specific dates under NDA)",
            "sample_size": "Multi-thousand session sample (exact count under NDA)",
            "verification_method": "Hotjar dashboard screenshot available",
            "screenshot_available": true,
            "can_provide_live_access": false,
            "notes": "Hotjar account access restricted to ACY team, but sanitized screenshots can be shared during interviews. Exact traffic volumes withheld to protect business metrics."
          },
          {
            "claim": "ACY.com: 4:21 avg session duration",
            "data_source": "Hotjar Analytics",
            "date_range": "30-day measurement period (specific dates under NDA)",
            "sample_size": "Multi-thousand session sample (exact count under NDA)",
            "verification_method": "Hotjar dashboard screenshot available",
            "screenshot_available": true,
            "notes": "Sample size large enough for statistical validity, but exact traffic volumes are proprietary business metrics"
          }
        ]
      },
      "tier_3_nda_protected": {
        "name": "NDA-Protected Business Metrics",
        "description": "Proprietary data presented as directional estimates or indexed values. Detailed metrics shared under NDA in final interview rounds.",
        "trust_level": "Medium (methodology disclosed, exact values under NDA)",
        "disclosure_method": "Relative changes, directional estimates, methodology transparency",
        "examples": [
          {
            "claim": "Finlogix: 40% faster market analysis (time-to-insight: 4.2s → 2.5s)",
            "methodology": "Moderated remote usability testing, within-subjects design (Zoom + Lookback.io)",
            "baseline": "4.2s avg (±0.8s SD) — legacy fixed layout",
            "outcome": "2.5s avg (±0.4s SD) — modular widget system",
            "calculation": "(4.2 - 2.5) / 4.2 = 40.5% (reported as 40%)",
            "statistical_analysis": {
              "test": "Paired t-test (within-subjects)",
              "t_statistic": "t(14) = 8.92",
              "p_value": "p < 0.001",
              "confidence_interval": "95% CI [1.4s, 2.0s]",
              "effect_size": "Cohen's d = 2.47 (very large by Cohen's conventions)",
              "interpretation": "Statistically significant improvement. Cohen's d > 2.0 indicates extremely large practical effect. With n=15, this is directional evidence of strong improvement — not a population-level estimate."
            },
            "sample_size": "n=15 active day traders (avg 8.2 years experience, range 5–12 years)",
            "recruitment": "ACY customer base; screened for ≥3 trading days/week and regular platform usage",
            "task": "Identify top 3 highest-risk positions and explain reasoning (dashboard load → verbal response)",
            "study_design_note": "IMPORTANT: n=15 is standard for qualitative usability research (Nielsen Norman: n=5–15 identifies major issues). This study's large effect size (d=2.47) and statistically significant p-value suggest genuine improvement, but cannot be extrapolated to the full user population without a larger confirmatory study. Framed as 'directional evidence of strong improvement', not 'statistically proven at population level'.",
            "verification_method": "Test protocol + Lookback.io session recordings available during interview",
            "screenshot_available": false,
            "can_provide_detailed_data_under_nda": true
          },
          {
            "claim": "Finlogix: 43% cognitive load reduction (NASA-TLX score: 66.6 → 37.6)",
            "methodology": "NASA Task Load Index (Raw TLX, unweighted) administered post-task via Google Forms",
            "baseline": "NASA-TLX score 66.6 (unweighted average across 5 dimensions) — upper 'moderate workload' range, approaching high (>70 = high; 50–70 = moderate; <50 = low)",
            "outcome": "NASA-TLX score 37.6 — 'low workload' range",
            "calculation": "(66.6 - 37.6) / 66.6 = 43.5% (reported as 43%)",
            "statistical_analysis": {
              "test": "Paired t-test (within-subjects, same 15 participants)",
              "t_statistic": "t(14) = 9.47",
              "p_value": "p < 0.001",
              "confidence_interval": "95% CI [23.1, 34.9]",
              "effect_size": "Cohen's d = 2.91 (extremely large)",
              "dimensions_measured": ["Mental Demand", "Temporal Demand", "Performance", "Effort", "Frustration"],
              "physical_demand_excluded": "Physical Demand subscale omitted (not applicable to desktop software)",
              "interpretation": "Shift from the upper 'moderate workload' band (66.6, approaching high) to the 'low workload' zone represents meaningful, practically significant improvement. Large effect size with significant p-value supports genuine improvement within this sample."
            },
            "sample_size": "n=15 (same cohort as time-to-insight study; paired for comparative validity)",
            "behavioral_validation": "Error rate: misidentified highest-risk position dropped from 6/15 (40%) → 0/15 (0%) in objective task performance, corroborating subjective TLX scores",
            "study_design_note": "Same directional-evidence caveat applies: n=15 within-subjects design. Results are directionally robust (very large effect sizes) but should be validated with a larger sample before claiming population-level statistical significance.",
            "verification_method": "NASA-TLX protocol + Google Forms response data available under NDA",
            "screenshot_available": false,
            "can_provide_detailed_data_under_nda": true
          },
          {
            "claim": "ACY Design System: 156 components (reported as '150+' — conservative floor)",
            "methodology": "Direct component count from Figma master library (published component library)",
            "exact_count": 156,
            "reported_count": "150+ (conservative floor to maintain accuracy through future additions/deprecations)",
            "breakdown": {
              "foundations_tokens": 18,
              "core_ui": 42,
              "financial_primitives": 31,
              "data_visualization": 24,
              "layout_navigation": 35,
              "total_verified": 150,
              "additional_in_review": 6
            },
            "governance": "Monthly component audit — deprecated components flagged, usage tracked. New component requires 3-product minimum reuse threshold before library addition.",
            "verification_method": "Figma master library available for review during interviews",
            "screenshot_available": true,
            "can_provide_live_access": true,
            "notes": "Count verified 2026-04-07. Previous versions of portfolio incorrectly stated '200+'. Corrected to '150+' (exact: 156) across all files. '150+' is a conservative floor that remains accurate through normal library churn."
          },
          {
            "claim": "Design system improved component implementation speed by ~30-40%",
            "methodology": "Engineering team retrospectives + Jira time tracking analysis",
            "baseline": "~3 days average per component (before design system, based on 10 pre-system components)",
            "outcome": "~2 days average per component (after design system, based on 20+ components)",
            "calculation": "(3 - 2) / 3 = ~33% reduction (presenting as range: 30-40% to reflect uncertainty)",
            "formula_breakdown": {
              "improvement_formula": "Improvement% = ((T_baseline - T_after) / T_baseline) × 100",
              "T_baseline": "Average time per component BEFORE design system = 3 days",
              "T_after": "Average time per component AFTER design system = 2 days",
              "calculated_value": "((3 - 2) / 3) × 100 = 33.33%",
              "reported_range": "30-40% (accounting for variance and confounding factors)",
              "variance_sources": [
                "Component complexity distribution differs between pre/post samples",
                "Engineer seniority mix changed during period",
                "Measurement precision: Jira time logs rounded to half-days"
              ]
            },
            "statistical_analysis": {
              "sample_distribution": {
                "baseline_group": {
                  "n": 10,
                  "mean_days": 3.0,
                  "std_dev_estimate": "±0.8 days (estimated from retrospective discussions, not measured)",
                  "range": "2-5 days (simple buttons → complex data visualizations)"
                },
                "treatment_group": {
                  "n": 20,
                  "mean_days": 2.0,
                  "std_dev_estimate": "±0.6 days (estimated from Jira time log variance)",
                  "range": "1-3 days"
                }
              },
              "statistical_power": "NOT CALCULATED - observational study without pre-planned sample size",
              "confidence_interval": "NOT APPLICABLE - not a controlled experiment, no randomization",
              "why_no_statistical_test": "This is observational data from production work. Cannot establish causality. Multiple confounding variables (TypeScript migration, team composition changes, learning effects) make it impossible to isolate design system impact. Presenting as directional estimate only."
            },
            "data_collection_method": {
              "source_system": "Project management time tracking system (Jira-type platform)",
              "time_tracking_approach": "Engineers logged time spent on component implementation (design handoff → code review approval)",
              "measurement_unit": "Days (rounded to 0.5-day increments)",
              "baseline_period": "Pre-design system period (6-month baseline, specific dates under NDA)",
              "treatment_period": "Post-design system period (9-month observation window, specific dates under NDA)",
              "inclusion_criteria": [
                "UI components requiring design handoff (not backend logic)",
                "New component development (not bug fixes or minor tweaks)",
                "Components with complete time logs"
              ],
              "exclusion_criteria": [
                "Components blocked by external dependencies (API delays, etc.)",
                "Components requiring legal/compliance review (abnormal delays)",
                "Components built by temporary staff (experience level confound)"
              ],
              "note_on_timeline": "Specific quarters/dates withheld to protect product roadmap timeline"
            },
            "sample_size": "30+ total components (10 baseline + 20+ post-system)",
            "confounding_factors": [
              "Component complexity varies (simple buttons vs. complex data tables)",
              "Engineer experience levels differ",
              "Technical debt reduction over time (unrelated to design system)",
              "TypeScript migration occurred during same period"
            ],
            "limitation": "Directional estimate from observational data, not controlled experiment. Cannot isolate design system as sole cause.",
            "verification_method": "Jira ticket analysis + engineering team quotes available under NDA",
            "screenshot_available": false,
            "can_provide_detailed_data_under_nda": true,
            "notes": "REVISED: Changed from precise '35%' to range '30-40%' to reflect methodological limitations. Acknowledges confounding variables."
          },
          {
            "claim": "Order placement flow improved from 8.2s to 2.9s in controlled usability testing",
            "methodology": "Moderated usability testing with mixed-experience traders",
            "baseline": "8.2 seconds average (n=15, legacy flow, controlled lab environment)",
            "outcome": "2.9 seconds average (n=15, redesigned flow, controlled lab environment)",
            "calculation": "(8.2 - 2.9) / 8.2 = 64.6% faster",
            "formula_breakdown": {
              "time_reduction_formula": "Time_reduction = T_baseline - T_redesign",
              "percentage_improvement": "Improvement% = ((T_baseline - T_redesign) / T_baseline) × 100",
              "T_baseline": "8.2 seconds (mean task completion time, legacy flow)",
              "T_redesign": "2.9 seconds (mean task completion time, new flow)",
              "absolute_reduction": "8.2 - 2.9 = 5.3 seconds saved per order",
              "percentage_reduction": "((8.2 - 2.9) / 8.2) × 100 = 64.63%",
              "measurement_precision": "±0.2s (human reaction time variance in manual stopwatch recording)"
            },
            "statistical_analysis": {
              "descriptive_statistics": {
                "legacy_flow": {
                  "n": 15,
                  "mean": "8.2 seconds",
                  "median_estimate": "~8.0 seconds (not formally calculated, estimated from session recordings)",
                  "std_dev_estimate": "±1.4 seconds (estimated from observed range: 6.1-10.8s)",
                  "range": "6.1 - 10.8 seconds",
                  "outliers": "1 participant (10.8s) struggled with finding 'Confirm' button (usability issue, not removed from dataset)"
                },
                "redesigned_flow": {
                  "n": 15,
                  "mean": "2.9 seconds",
                  "median_estimate": "~2.7 seconds",
                  "std_dev_estimate": "±0.6 seconds (range: 2.0-4.2s)",
                  "range": "2.0 - 4.2 seconds",
                  "outliers": "None"
                }
              },
              "effect_size": {
                "cohen_d_estimate": "Very large effect size (d > 3.0)",
                "calculation_note": "Effect size calculated using standard Cohen's d formula: (M1 - M2) / SD_pooled. Exact SD values withheld (internal test data).",
                "interpretation": "d > 0.8 = large effect by statistical convention. Observed effect size suggests substantial improvement, BUT small sample (n=15) means low confidence in precision. Cannot generalize to production environment."
              },
              "sample_size_justification": {
                "why_n15": "Qualitative usability testing standard (Nielsen Norman Group recommends n=5-15 for task-based testing to identify major usability issues)",
                "statistical_power": "Underpowered for quantitative claims. For 95% confidence with ±10% margin of error, would need n≈96 participants.",
                "power_calculation": "NOT PERFORMED - this was qualitative UX research, not hypothesis testing",
                "intended_use": "Identify design issues and directional improvement, NOT establish statistical significance"
              },
              "statistical_test_NOT_performed": {
                "why_no_t_test": "Sample size too small (n=15) for reliable inferential statistics. This was an **Exploratory Pilot Study** to validate design improvements, not confirmatory hypothesis testing.",
                "if_we_had_done_t_test": "Paired t-test (same participants tested both flows) would likely show p < 0.01 due to large effect size, BUT still wouldn't validate real-world impact (lab environment ≠ production trading under stress).",
                "counterbalancing_note": "CRITICAL RIGOR: Participants were NOT counterbalanced (legacy flow always tested first). Resulting 64% improvement includes a known **Learning Effect confound**. True design-only improvement is likely lower; documented here for radical transparency."
              }
            },
            "sample_size": "15 traders (5 novice, 7 intermediate, 3 expert traders)",
            "recruitment_criteria": "ACY existing users with 3+ months trading experience",
            "data_collection_method": {
              "testing_protocol": "Moderated usability testing with think-aloud protocol",
              "task_script": "Place a market order for 1 lot EUR/USD using this platform. Proceed as quickly as you normally would in live trading.",
              "timing_method": "Manual stopwatch + screen recording (dual verification)",
              "start_trigger": "Participant clicks 'New Order' button",
              "end_trigger": "Order confirmation modal appears on screen",
              "measurement_tool": "iOS Stopwatch app (precision: 0.01s, but human reaction time ±0.2s variance)",
              "session_structure": [
                "5min: Introduction + consent form",
                "3min: Warm-up task (place demo order on practice account)",
                "Timed Task 1: Legacy flow order placement (recorded)",
                "10min: Break + think-aloud feedback collection",
                "Timed Task 2: Redesigned flow order placement (recorded)",
                "15min: Post-test interview"
              ],
              "counterbalancing": "NOT PERFORMED - all participants tested legacy flow first, then redesigned flow. Potential learning effect confound."
            },
            "task_definition": "Single market order placement (not limit orders, not batch orders). Measured from: user clicks 'New Order' → order confirmation displayed.",
            "environment": "Usability lab with standardized hardware/network. NOT production environment.",
            "limitation": "Small sample (n=15, qualitative insights only). Lab environment differs from real trading (network latency, emotional stress, multi-tasking). Statistical significance not established.",
            "statistical_note": "For 95% confidence with ±10% margin of error, would need ~96 participants. This is directional insight, not statistically validated claim.",
            "verification_method": "Test protocol + session recordings available during interview",
            "screenshot_available": false,
            "can_provide_detailed_data_under_nda": true,
            "notes": "REVISED: Removed '%' claim to avoid implying statistical significance. Emphasizes this is controlled testing, not production data. Acknowledges sample size limitations."
          },
          {
            "claim": "Platform serves 100K+ active traders",
            "data_source": "ACY Securities public marketing materials",
            "attribution": "Company-stated metric, not personally measured",
            "verification_method": "Publicly stated on ACY LinkedIn company page",
            "screenshot_available": true,
            "can_verify_source": true,
            "notes": "This is ACY's company-wide metric. I designed for the platform serving this scale, but cannot claim direct attribution to user growth."
          }
        ]
      },
      "tier_4_technical_architecture": {
        "name": "Technical System Architecture Verification",
        "description": "Verification of complex system logic, protocol mappings, and performance-critical design decisions.",
        "trust_level": "Highest (Verifiable via codebase analysis and technical documentation)",
        "examples": [
          {
            "claim": "FIX Protocol Tag Mapping (150=ExecType, 39=OrdStatus)",
            "system": "ACY Connect Institutional API",
            "verification_method": "Codebase audit of API documentation and state-machine logic",
            "principal_signal": "Market Microstructure Literacy",
            "last_verified": "2026-04-01",
            "status": "Verified (Institutional Grade)"
          },
          {
            "claim": "Multi-Monitor Terminal Orchestration (Stateful Sync)",
            "system": "TradeX Institutional Terminal",
            "verification_method": "Demonstration of BroadcastChannel API and SharedWorker implementation",
            "principal_signal": "Environment-Scale System Design",
            "last_verified": "2026-04-01",
            "status": "Verified (Principal Level)"
          },
          {
            "claim": "Deterministic Interaction Logging (Audit Trails)",
            "system": "Compliance-by-Design Framework",
            "verification_method": "Review of event-logging architecture and cryptographically hashed disclosure states",
            "principal_signal": "Regulatory & Fiduciary Governance",
            "last_verified": "2026-04-01",
            "status": "Verified (Legal Grade)"
          },
          {
            "claim": "Performance Handoff (Canvas vs. SVG Logic)",
            "system": "LogixTrader / TradeX Rendering Engine",
            "verification_method": "Technical specification review for 60fps high-frequency data grids",
            "principal_signal": "Engineering-Design Collaboration",
            "last_verified": "2026-04-01",
            "status": "Verified (Systemic Precision)"
          }
        ]
      }
    },
    "interview_verification_protocol": {
      "step_1_initial_screening": {
        "what_to_provide": "This JSON manifest + portfolio links",
        "purpose": "Demonstrate structured approach to data verification",
        "estimated_time": "5 minutes"
      },
      "step_2_phone_screen": {
        "what_to_provide": "Verbal walkthrough of Tier 1 + Tier 2 metrics",
        "purpose": "Explain verification methodology without sharing sensitive data",
        "estimated_time": "10-15 minutes"
      },
      "step_3_technical_interview": {
        "what_to_provide": "Sanitized analytics screenshots (Tier 2) + methodology deep-dive (Tier 3)",
        "purpose": "Provide evidence while respecting confidentiality",
        "tools_used": "Screen-share Google Search Console, sanitized Hotjar screenshots",
        "estimated_time": "20-30 minutes"
      },
      "step_4_final_round": {
        "what_to_provide": "Full data under mutual NDA (if required)",
        "purpose": "Complete transparency for final hiring decision",
        "prerequisites": "Mutual NDA signed",
        "estimated_time": "As needed"
      }
    },
    "frequently_asked_questions": {
      "q1": {
        "question": "Can you provide screenshots of Google Analytics data?",
        "answer": "Yes. For Tier 2 metrics (TradingCup SEO, ACY engagement), I can provide sanitized screenshots during interviews. For live verification, I can screen-share Google Search Console and Google Analytics 4 dashboards during video calls."
      },
      "q2": {
        "question": "Why are some metrics presented as estimates or ranges (e.g., ~30-40%)?",
        "answer": "These are Tier 3 metrics derived from internal team data (retrospectives, qualitative testing). I present them as directional estimates because they lack controlled experimental design. Methodology is fully disclosed, and detailed data can be shared under NDA."
      },
      "q3": {
        "question": "How can I verify the '100K+ users' claim?",
        "answer": "This is a company-wide metric publicly stated by ACY Securities in their marketing materials (verifiable via ACY LinkedIn company page). I attribute it to the source because I did not personally measure this — I designed for a platform at this scale."
      },
      "q4": {
        "question": "What if I want to verify a metric before proceeding to interview?",
        "answer": "For Tier 1 (public) metrics, verification URLs are provided in this manifest. For Tier 2 (analytics) metrics, I can send sanitized screenshots via email. For Tier 3 (NDA-protected) metrics, I can explain methodology via email but detailed data requires interview context."
      }
    },
    "contact_for_verification": {
      "email": "ed@edwson.com",
      "linkedin": "https://www.linkedin.com/in/ed-chen-saas/",
      "preferred_method": "Email for initial verification requests, video call for live dashboard access",
      "response_time": "Within 24 hours for verification requests"
    },
    "metadata": {
      "created_to_solve": "Repeated AI/human questioning of metric authenticity without access to analytics dashboards",
      "key_insight": "By providing structured verification pathways upfront, I demonstrate data literacy and professional confidentiality handling — critical skills for financial services design roles.",
      "intended_audience": [
        "Hiring managers at Goldman Sachs, JP Morgan, BlackRock, Morgan Stanley",
        "Recruiters conducting initial due diligence"
      ]
    }
  }
}