<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>https://parsed.com/</loc></url>
<url><loc>https://parsed.com/research</loc></url>
<url><loc>https://parsed.com/contact-us</loc></url>
<!-- NOTE(review): /404 appears to be an error/not-found page; sitemap guidelines call for only canonical, indexable pages — confirm and consider removing this entry -->
<url><loc>https://parsed.com/404</loc></url>
<url><loc>https://parsed.com/privacy-policy</loc></url>
<url><loc>https://parsed.com/trust</loc></url>
<url><loc>https://parsed.com/research/byo-swe-grep-automatically-train-blazing-fast-search-sub-agents-on-your-knowledge-base-(pt-1)</loc></url>
<url><loc>https://parsed.com/research/purpose-built-llms-for-dental-note-taking</loc></url>
<url><loc>https://parsed.com/research/lumina-building-self-improving-evaluation-through-customer-in-the-loop-refinement</loc></url>
<url><loc>https://parsed.com/research/upweight-the-strategy-not-the-tokens-faster-training-with-explicit-reasoning</loc></url>
<url><loc>https://parsed.com/research/attention-based-attribution-what-your-model-is-actually-looking-at</loc></url>
<url><loc>https://parsed.com/research/robust-sample-efficient-sft-with-prompt-mutations</loc></url>
<url><loc>https://parsed.com/research/training-loss-predicts-evaluation-performance-even-for-non-verifiable-tasks</loc></url>
<url><loc>https://parsed.com/research/building-production-ai-for-regulated-industries-with-a-leading-digital-insurer</loc></url>
<url><loc>https://parsed.com/research/iterative-sft</loc></url>
<url><loc>https://parsed.com/research/write-small-learn-forever-rank-1-lora-for-continual-learning</loc></url>
<url><loc>https://parsed.com/research/practical-lora-research</loc></url>
<url><loc>https://parsed.com/research/a-letter-to-the-c-suite-the-shifting-role-of-mles</loc></url>
<url><loc>https://parsed.com/research/fine-tuning-small-open-source-llms-to-outperform-large-closed-source-models-by-60-on-specialized-tasks</loc></url>
<url><loc>https://parsed.com/research/amnesiac-generalist-behemoths-are-not-the-future-of-language-models</loc></url>
<url><loc>https://parsed.com/research/the-bitter-lesson-of-llm-evals</loc></url>
<url><loc>https://parsed.com/research/do-transformers-notice-their-own-mistakes-finding-a-linear-hallucination-detector-inside-llms</loc></url>
<url><loc>https://parsed.com/research/resurrecting-the-salmon-seeing-clearer-inside-llms-with-domain-specific-saes</loc></url>
<url><loc>https://parsed.com/research/why-mechanistic-interpretability-needs-a-paradigm-inversion</loc></url>
</urlset>