<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:content="http://purl.org/rss/1.0/modules/content/">
  <channel>
    <title>AI on Tangc.pro&#39;s blog</title>
    <link>https://tangc.pro/blog/categories/ai/</link>
    <description>Recent content in AI on Tangc.pro&#39;s blog</description>
    <image>
      <title>Tangc.pro&#39;s blog</title>
      <url>https://tangc.pro/blog/cover.png</url>
      <link>https://tangc.pro/blog/categories/ai/</link>
    </image>
    <generator>Hugo -- 0.157.0</generator>
    <language>zh-cn</language>
    <managingEditor>onetick@live.cn (Hongyu)</managingEditor>
    <webMaster>onetick@live.cn (Hongyu)</webMaster>
    <lastBuildDate>Tue, 03 Mar 2026 17:32:00 +0800</lastBuildDate>
    <atom:link href="https://tangc.pro/blog/categories/ai/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>对话：LLM 真的在「理解」吗？—— 揭开大语言模型的工作原理</title>
      <link>https://tangc.pro/blog/posts/llm-interview-how-it-works/</link>
      <pubDate>Tue, 03 Mar 2026 17:32:00 +0800</pubDate><author>onetick@live.cn (Hongyu)</author>
      <guid>https://tangc.pro/blog/posts/llm-interview-how-it-works/</guid>
      <description>一次关于大语言模型工作原理的深度对话，从预训练、参数、注意力机制到模型的本质局限</description>
    </item>
  </channel>
</rss>
