From 7178f451a4eba4b86544880a05adcc72ad2ff255 Mon Sep 17 00:00:00 2001 From: yhirose Date: Sat, 21 Mar 2026 23:31:55 -0400 Subject: [PATCH] "Building a Desktop LLM App with cpp-httplib" (#2403) --- docs-src/pages/en/index.md | 2 +- docs-src/pages/en/llm-app/app.png | Bin 0 -> 123216 bytes docs-src/pages/en/llm-app/ch01-setup.md | 236 ++++ docs-src/pages/en/llm-app/ch02-rest-api.md | 212 +++ .../pages/en/llm-app/ch03-sse-streaming.md | 264 ++++ .../pages/en/llm-app/ch04-model-management.md | 788 +++++++++++ docs-src/pages/en/llm-app/ch05-web-ui.md | 1223 +++++++++++++++++ docs-src/pages/en/llm-app/ch06-desktop-app.md | 724 ++++++++++ .../pages/en/llm-app/ch07-code-reading.md | 154 +++ .../pages/en/llm-app/ch08-customization.md | 120 ++ docs-src/pages/en/llm-app/index.md | 29 +- docs-src/pages/en/llm-app/kv-cache.svg | 36 + docs-src/pages/en/llm-app/slots.svg | 24 + docs-src/pages/en/llm-app/webui.png | Bin 0 -> 132922 bytes docs-src/pages/ja/index.md | 2 +- docs-src/pages/ja/llm-app/ch01-setup.md | 236 ++++ docs-src/pages/ja/llm-app/ch02-rest-api.md | 212 +++ .../pages/ja/llm-app/ch03-sse-streaming.md | 264 ++++ .../pages/ja/llm-app/ch04-model-management.md | 788 +++++++++++ docs-src/pages/ja/llm-app/ch05-web-ui.md | 1223 +++++++++++++++++ docs-src/pages/ja/llm-app/ch06-desktop-app.md | 724 ++++++++++ .../pages/ja/llm-app/ch07-code-reading.md | 154 +++ .../pages/ja/llm-app/ch08-customization.md | 120 ++ docs-src/pages/ja/llm-app/index.md | 31 +- docs-src/pages/ja/llm-app/kv-cache.svg | 36 + docs-src/pages/ja/llm-app/slots.svg | 24 + docs-util/llm-app/build_desktop_app.sh | 33 + docs-util/llm-app/build_web_app.sh | 33 + docs-util/llm-app/extract_code.sh | 18 + .../llm-app/generate_desktop_app_project.sh | 38 + docs-util/llm-app/justfile | 28 + docs-util/llm-app/test_book.sh | 561 ++++++++ docs-util/llm-app/test_webui.cpp | 300 ++++ docs-util/llm-app/webdriver.h | 278 ++++ justfile | 9 +- 35 files changed, 8889 insertions(+), 35 deletions(-) create mode 100644 
docs-src/pages/en/llm-app/app.png create mode 100644 docs-src/pages/en/llm-app/ch01-setup.md create mode 100644 docs-src/pages/en/llm-app/ch02-rest-api.md create mode 100644 docs-src/pages/en/llm-app/ch03-sse-streaming.md create mode 100644 docs-src/pages/en/llm-app/ch04-model-management.md create mode 100644 docs-src/pages/en/llm-app/ch05-web-ui.md create mode 100644 docs-src/pages/en/llm-app/ch06-desktop-app.md create mode 100644 docs-src/pages/en/llm-app/ch07-code-reading.md create mode 100644 docs-src/pages/en/llm-app/ch08-customization.md create mode 100644 docs-src/pages/en/llm-app/kv-cache.svg create mode 100644 docs-src/pages/en/llm-app/slots.svg create mode 100644 docs-src/pages/en/llm-app/webui.png create mode 100644 docs-src/pages/ja/llm-app/ch01-setup.md create mode 100644 docs-src/pages/ja/llm-app/ch02-rest-api.md create mode 100644 docs-src/pages/ja/llm-app/ch03-sse-streaming.md create mode 100644 docs-src/pages/ja/llm-app/ch04-model-management.md create mode 100644 docs-src/pages/ja/llm-app/ch05-web-ui.md create mode 100644 docs-src/pages/ja/llm-app/ch06-desktop-app.md create mode 100644 docs-src/pages/ja/llm-app/ch07-code-reading.md create mode 100644 docs-src/pages/ja/llm-app/ch08-customization.md create mode 100644 docs-src/pages/ja/llm-app/kv-cache.svg create mode 100644 docs-src/pages/ja/llm-app/slots.svg create mode 100755 docs-util/llm-app/build_desktop_app.sh create mode 100755 docs-util/llm-app/build_web_app.sh create mode 100644 docs-util/llm-app/extract_code.sh create mode 100755 docs-util/llm-app/generate_desktop_app_project.sh create mode 100644 docs-util/llm-app/justfile create mode 100755 docs-util/llm-app/test_book.sh create mode 100644 docs-util/llm-app/test_webui.cpp create mode 100644 docs-util/llm-app/webdriver.h diff --git a/docs-src/pages/en/index.md b/docs-src/pages/en/index.md index 3fbfcb6..90a3ebf 100644 --- a/docs-src/pages/en/index.md +++ b/docs-src/pages/en/index.md @@ -18,8 +18,8 @@ Under the hood, it uses blocking I/O 
with a thread pool. It's not built for hand ## Documentation - [A Tour of cpp-httplib](tour/) — A step-by-step tutorial covering the basics. Start here if you're new +- [Building a Desktop LLM App](llm-app/) — A hands-on guide to building a desktop app with llama.cpp, step by step ## Stay Tuned - [Cookbook](cookbook/) — A collection of recipes organized by topic. Jump to whatever you need -- [Building a Desktop LLM App](llm-app/) — A hands-on guide to building a desktop app with llama.cpp, step by step diff --git a/docs-src/pages/en/llm-app/app.png b/docs-src/pages/en/llm-app/app.png new file mode 100644 index 0000000000000000000000000000000000000000..1499ccb3ad408ee9bf4877c0e9b93e9bae8c0489 GIT binary patch literal 123216 zcmeFY1ydaRw>3-%L4reY3+@oy-93=t7Tnzi5AN`0t^hppa#x#Z{o7-jP8;L4SmYg&c{K!pVbz zLQ=F46Z}+inQW$t~Vc7!(v^Wf5S$M|p*GzPz z2xzjA2qJya3@!dvIOwGZbfNww^dBRTU2GV_+nBMxCb~H2o(yN838#4PdR}=u?RUIp zg7-6>_Sg8{LbXLOC++2>L+!|1ny_@elosrKz&^wieuMebuc|YEIaZEGS$PvGdGq#s z_lDy;&GHC}Y?<%v>wF&NhoN^+AC=LH5qJIhLw%v4?69Owz))y~v0OPx%&J>;Xvc#; z0?_7Z9E#pI&^a`}*C(tEi`W!>f`_W+&0!va5za@wEQJ=Kn#t`k_e&6`NfthVt5VWi zLn7^s_*n19X|LFXyb#G8Lml>`5)27#*RC6N-ij8)b=}g-s3}qoP{M2F8(T6whl4DvJUb zwTld55%m$q?F~jVlXfKU4zUhr?G=$FNENCT%>+gSXrON4$MJu19o$Q`mK?okoo6-H z8Ap3fWG1*4`Sq$9hY3_ZG+NRmTZ_gt z-owc%5y^^;7-~W$Q`sR)LJy36*N3!tKAwO&5KT0KaYYt5j!1>$F?}*S)h&106;MjyY(cG>%Z@T$ZmC!cc z;tj%(ZG7rQijgKfiR-jtpn>TSqkz7Qb9f18Bl`>mR?63)q$b|{sty;+GE@9@=uHgi z_*;XIIBWE4tTDvq+LE4#~lL(btjLIghHhL6yh^Heuc&M4gI8164H|^m8y83(BQ< zRs$&XpuuoY_QtJXvk1IvYPh3FJvRO9Qq}~=V%}on;+Ws&cW~RF} z+!ryVh9C;)0S5*%YBMgV#8;Bp{ZrNJ4c~VL^}d_QPq5$bCjI>UaysmDrQY0668T`D zcyC%ST+HXecBY9z2bBuhRIGu*;?<8F41Z4UVTtNGLL<{tnMuUs*Vn6W4 z*4bzMggE*(LIi>4Eqxd7XN3Cir$1m%-aU$7n!g+9V&33ygfI7Bw!d)>*#Cjr3QK|p zLrTgX=yyO)6-sFs!%c483x^=y9fwPSCnLs+j*ut3M=BS~YRsV$W-VqJM-nGC=&-{u z6}BRNMlLXzGwAt2Y*v|Q2EI;oAm2JMAUN0I;43?3Rv30(-+^N-beZJckEjEOYkm)` zQ_-CKXWY)2PgJJiJm0J%BP0xIHDIZFu)h^*@Tf)28Zln*==V2Q>YWfN_Tq6Px_zhT zhKU(?cQ5btrX8uG*Xly@iNXi3Bj#>12Z7z6v@3f6 
z1#$(pQ`*(|i-9NdC}hR4lsjaOJnmdzcrXSS0sK}X!B7TNAg&Ua$dibbNR)^@{7#l@ z_pNG#rNT{7zjDp5h+kMU9!IoCL`U=-92{&MsCDKXRZC=b@9VrR&1TL@+P=)IHGJ9s zUHqGVCZ*(DMX1Q<%QIG#WKAA;QpFrxyIZTZgs>#Aq>UULoDZuc_Ot49zHGkoc6>h~ zZ;-9b{zUO4;^FS~+iS#YJ%#Y1Br5fUiYDP2*{j$+moXp4;5nEw=`#K@6RnoIaZ7<& zDhq83I7`I3h`I)=oC%-7PTV~tCb3S9?u9PyzJoc^BaPz==W3T>=i4LagX=@f2bArU0)ZpgMa2`8ANXC^qdBt z20u&oOLj@B-3vvnZL)Y%ssg}cw9$Xf2P;sWOa7cJ%|meu1eRbYLX0S&C9vk+?7ruDcK6gX(nUXD z)i6ITGnXKq;41HNV6!)Nx3;&s+jA*%wRkkWYjreoS9a^YVRlA+y?l5VXf!gkce2@f z;dYqpEP719^18Y}?cfS3~0ATLQBqNv9PRtz)=q{ySi5Wggw7hj92 zj!wtBabjoTrE8{pa2O3tb!RCy!mFA#avNxmzKYRCcjH^5(nDWQ7FLp0_^AL$z>RI> zt)jQ^ntCd@8iF0#+S&V{A}cyPp7=uR_hnAuP|-z!QKpS^*5l~pmsv5M{79B7fY+PX z_rZ@{RLgMbtTyIJ+J~bTOxkpIP!nK90F z>^if_)-;DfzWz(}VDWIqmCJ@?hps{Xy`)1&XYr5U8>gj#nsBC8W67hoIYwM&hndtt zB0)lc`l!8USn0;7sr`_D94ioLWY9eT~*iPvwie^uC!so(Nw?M>;Lt>-qRz zO2$6PzKC!VAIv8O-7v|p(w&C}y>(kK%B$j&?!Z)-92dj04jqGl`rcPHy<3O0Z;MUm z)L#0a=g!N>T_XAwHETT?9a>Fyo22vK>9blTi0VQ*Z0-u%T60TNb-ra8?Pu0aa^U2f znVaw%U93&4F#-n8(v^!U){;xhBwy7Y$(|@VuJ%QW6=#c@^}Pp1N$uhK)^)ly4ht=ju8l72diEU? 
zUX$B~a{C42+|o;1v2+`uz$?ozqrq-_!n^nPXk9K(6ey(Khgu`H>l`K7c+Xn$8L` zU~rpg$e1c9K+!?=;i2AySU|x*_TE4~_>d1|4*3E7*O7N*Ki>Ye5B>4a$xC6hEhs2q zC>e2)uYflvS#aNVRKe{X3a(s>*V)1!H9rPoq<{Y=(hTqap^#=ud?|+NJsCU=nXj;j zh&Y-utt{pnEVN%!8MQF*L@MxyZMK|?&pG>}*+W?^Zl-3gruD|_%uSxl7I$fUZsXoS zxA9+x^QnQ|pY+D^shCfxjFhd;TImkBcH2IgUJCKpNWw$Uq{C)QaYpkGN`z<48oR`lHUdxPpErm;rtmtTO{$=wk z7x`A$GFrUiSfc&1Cq85X)^pVpEx`O}rQ>hL&e(IQptJd}#t6gAX+hVlBY7AAeo1=g9JaENyp)VF?fx$}g&5<1E^??Sga1dciT?c@y_M&Xpp``vR+$LaQ z_JBEdT>q4${4&XBA|^*2lm&mQjadWUbxmTR(f0<}{87bO7>-Ibd;ZD4uJ zS@ZSFQX^<9f_Vv%x%C?|GV<;HcFZ#8vbB-tXBT+e#tC|xw`bmAD?5RhV1B35GzfDRZI15ox_+&Mp!95a-{$vKCZXd- z7pL)hMFoORa2fdgX?3s7y%_nRZeosLc7U##G^)DMkk?MSLv?ZWH@&B39f3g2YbUI> z;44dp;3*bCW1j)1{j}YEq5pBugen66qiQ^!tRU6tOK2uO{ z8n4UmRVSdpAq}}ybZr2t?fS#es@uAaUB|O)vwTIzOYswc*SKS*- z;`;Shcw*f63YTsDV#U*>~>9FAe5sMF89yl#)1V;hb(wS`2S z^4j1BN>0E?zSz(B@>fm}Rg29NiOt4=!CXsog>rrWY&n~20u&yr75n?sQLM_myo{T( z_4y+!wIve0rs3qqOQL2l%S-Lc-&tE&8#7hFvtHog=ke^MafWTPMa%oR8q=|1G*Ypq z3nUXf0S{;Hg1Ub^B*N|pk>a5z+Tr{cf!Aa^(#2NIOucJpq^m;x9M0aIU##4^HO8q7p28GDdM!7R>i;l^c_AiQMe->K5k?! zeWS%_8v##zuofrBjYmAvc`Hp@#V6PnNOZUSE8tf`xg5L2=*xiaT#aia0h?nhnMzw_wR zcM>UPXry13@|ne(brNHzczGu5l=(@ZS*F4YpFPJKf*?OpL5O>G!JK9|tm}1Wy>&9u z;xO3@8epf<$GD?5X|HIZCk=A+r%>}-E~2M2vSa$Cc|z6VE-hL>{hze;8Q0IMqHRv_ z>7vKEK#r-Y_vIo)2$0K6D6S(OTCjPO8$w(y^a=*ndx0_z~B zDf&K5tB}>viye{fJU!`neQ`ah8sNm-U31?jYS{{>Z~cxF)AORHZDV4V<&um{$o=Wi zdCth$%ycSeUeA+}MV@5db9coFcyky61r)f;-2ZUfbN@`xqb3cnhe*jstLS|>J3UBqkbf2_!uHSP8gXA zyf2sgLa*)~zAdNqHE&V*C(7F&Eb|}TXV7Sy${8D0SUncp5STfmB4O&t|E@z>8Nx1H z>Mx^vC(z91;1*1q_6(vHGm`n9WeY=E17^w<(kO#pF0!BG5aO+(Ic2;BBXbh_-jOq! 
zuISy4YYY0Y_^OS1Ok;ar|IAsSOk~!T)Ta<1_==ZLDQ{E!!U>iu3g1MzaZEU2csej3 zKQ%mE<*{3uwsk$hSI;fhIjk;p?sLvg1GC4O1Zgc9>-9MMT0Y#?7Z>px7BFJ|)hE$J zcSdoorILzDei18uTs`+mQ4OSEAa;k4CW?6z*b2vc8Fq>vND;0oe4j^%eeO5S?S&Ap zV|=djOUWqwKpWyPQHW-E2z{<-C`vrLiI9U`pl_`k{)k91-G(PGkLL~AwoR1u8SYnm zixN1x#w1=BA|=&fDX2;s$v-6osPYH4mjPCK9*2ct3^)uL-%@m4(?~b=s#OZ&=)JT4 zVuGzvbr}A%U=$;P3bq{q*)GF;j0(e~HVuqAZv~tFepBUk zz6Sc@LsHT8b2DtQ)1FB|66b6?Mrh6Rbn+c9E}5qDWvV^@GXA7fUpUs`bRwLca4W*$ z5@BR331Gvo(hBfyZf=f~z%zlH&@9Z=Op2oQ<>7c5w73A+jy&ES=Lp0&zLUy;j%3>z#Eg?Pv+SV_+~|RM=cZyrLF_1!Zbq%zLByn+o14wgH7>MM z=E*-%{;n&+`=p=SiZ~aCMVWoiZAuQ)9DmElARbPMevp&#qR3oAJU-xo;Bsp7nQnPs zo?>S_Zci*m*1a@JlX8<;K}`g6Z{{Aq%tEq)!u%ImG|4 zmKipe-*3^?$i~UW8mZY$N|JZj*ddwn!J?h3;{LMFalZV2io3H6bk6J3{^|F(C>KT? zWRj%gn=(>3yWil=2MC?y7jE)CtR@9SkqE8TFH$|;Pr+k+9)`YC%$&+~NmPQ60!G(bL`ofgK%LBZ|h#kP?s(smR; z!8%9L4~Q$y_V~TN2W&ZSKW-Bl%CVhih=jvyHpDa?O=Xd6xcaGV);rB^8A9UI=zXlc zei_0Iyn*ov^@1;;&ITB=%Z}T$(%@~g;_kvjGwQa1u=_T`;pMq*lOusy zR~JT00t+P#cXYTfWz(yrIzq1c$aR((T5E4@bRQEiTVa# zzS~b!e3?gkQERGVINtM0M9<^kOW5}{Pb&BF36)Cuj^pHD`=hZ-B`vsZkk`>)VP`h{ zy_1tapHC8}yDexp!njD`e+K%4c$f>=0xOpbf3eJ-+ci(S4ϔbpq?&Fq^2#OmyOo(0&-C7!jOM+g+g{J7+EDX9P}ZUxS&p%UJq*MosgjV(Ozf zmuLinsYI!Do6y1USztzaoc0RxBMEs}8ATl{yJyV~REPtmqe<_v$i(~GNb_~87g0nh zEXe8h$;GqX_tNxes8AASo;KiwD%r-ld)PK`gAX#~LK!6Re`Q{)|HG#we|(zcWSSmD zm~_j0y_J_EPx;O$YT+P}mAbsJ72`|Zw+mSciiEO)+l*e0eXP$yMwXM}>WnVC1ZSor zR7RI}(z^=Xc;J=)#%2gL)qHQA1ob6pS#mi&ZO6}IvVC+mpwR16!H+{D=ODBGWAnR< z>!Af$lVM8nDT&8m0$b?;(Tk^!*T;@bi|wKNMW7ESQ3$eN2+;e?$@Om4tvE)CdO(K6 zdWl8?AH-hA{Eq?{COj!>EXyw^^KH;jZ`7&f*XfM@_xrM}Fu$uZY0{W9E5OrI$Bfoz84!+vwiV&H< zNseS%8ZB6jcX)t=;|w6x9Y&|AS^TM-kD=5Tvw!Io6@rY=AG0q@-aD!Fmqo%b zyySlZ>?pUVtQ&cF^`Kd$0)5XvFPH(ZzK1u6wonGvR|aQT0tG)qsn{n;6;&Zh>MPR1 zuwKuo8nc^sTZs;6?%+uTBT?>Fv=Pd! 
zApVO`qeJ4h;;?By$B`XqmgDWpRxyC)SM@QCzeq#w za!MWp7q0`Ig##c`zmtYPoWzdlDu;QyA(AE+8mYK}d1*i-6rEzs84-fKxi#Qah`wdc z_>cIA`x75PASoO*Nxt*Nh!ln-k&d4uBffK({;*wkxX9uY6C(WlE1{wJsbB~% zgonqCq|6=%H5B7 zX*E5eeYw8a-y?Wx&$xri<&z5v#~6{@fH@x3sbSgtghTyPeo9%ho5GiDE42aznM#gk zeJ}dD9Q>ZCAPD0Bxt;g8ZhY_8pu`gqA0Sg9^)h|-+TrcG6WBH{or4md(utb;05M}t zdK4@Bqwe8-6p;WEOC(D8U!dv-DX1#?wWQR=0?-;_e`?$=Yrkcrs{qyd80YdR^W%9( zM-J|O?X;p?%(`@2xC5S(3tMq1e5&#?q}6Y0#E;%Y4%^TSovb5N8sR`L+8bzyEb^-y z;s^@+-pv!J+W(tE5PGwppkZZj*m?`#FKM&xeW&258oUi(2S|Von0$S?6B4>|cu?t=Cri3V!O@fgN#<`+4cVzt!mI|v_elB#$0c^AZUl6 zZN0T=>#ZZXEUt%U!n}d3h&Fiz>F40`1uK6*(paC{@odz8L1?QUEqU&@#O-AY#J&+X zB+_7=rFL^$RvhY7vXpXKRl1VT;qs`8(ao`|E33|51 z#!F?9yTf9dB3zYkr4~tRKQ=aTO=Ru3mX1`T&*Nd2EzLo@SMJ9#f592Tuy2~-?+zw= zB8)d-{6F~PbnFB9uOgqfNnYWLW!oh`6=(xK@gr5VUJhU1hb02%>C@@7Mr+3yfQ)$A znaW}_`JU{D%o>%Q<&9hKg$(4`<-Pc_VYfxdHdqdoUSFP{yBQWI7aL?AuGOcRN@ z?SI_y2a*`6!uCvGq+69Ev}n$QY!~$(rczD{5)@^TVg|t%J*dUNNr8-u#glQq^J{-v zHfvzEz%pVQsQie)x>BV6-~|AQ;WR(R|Aa}XFdWsN7_J%GElqhQH$#7Gc}qGCk>a~XHueSP5y(K6;gj69EhAz5_)##bucw} zIo5wYy4&9R%KJHt({;r`ig_VOMjs8a12Qi?;MJf@rxz5}WCH!2|1) zQY5EA(wBn+2Ta6VfzsIGg8e^w$GNwW_#l(jll98Uwt+0yg+Wj7Vs{WCo4DTiADFMy z5g+rk^`aXQ&%iP25q{t&8u)UA`SEgsQXpef0`I0EGI-hhZndnwp3{TIq2+@G1yyOl zf2<~(=t8)U?(a)>*f`U#l9;vqc9wL*L|dFq`lCve@+vsTIN{SGg=kTsOm`zCQN3-U zQUtr4$url=@*Xq626ZYaNp*qOhnd%H4F)#fULqwI%1e=~(8F8ZhRh(QvNr^$L=lq@ ztGZTqRe~cA3v}x4{m1d$0OAibn$vV_Z-+-MI&8zYx5^!JL~?e$t>>4fl9<_yPQ(66 z6v7*juD!DHTM!hUxOB5@%1CN!P!(zZT8Q&wVFa7MUR#FrC*16i5MmGWg|Vih<6B5* ztuV-=k*4)|-e0UF+-!P>eVmV-@*SbSbebrk|fzdssG!tZ7*we3WzrS_^xK&{F3n~xi&y;AWADIZZ zRmXDpr;8=h|Lzh$_%(8U?5JC0@L_5v*@*%*`=? 
zm+=&nRvnjWAh(2-*cViV6&CR=^!vc*61imLMr?zQ1;$2c&G;n<7L_(BuuF7a$4^han>ZdX%tGcW$E(uvcg1dhANr)}}+@r5d*QwT89bT=%|OCr=C z$fiudWt(|(I8E(AV?==Nm|??uEDvHAg(-i#>0HrvJAA>kyVQQ)$GimI&#?m?BRcdn zf3OIwt^J$R@*9`_(e3`kK^xGA<2#qturRu8TkA59XQ8H>L=4Be)`5#K<|)qU!hFla zr_^pSUA%Z<=R5ERss;tgNE?g?W>}wD`PSb^p;o?EPrKfAR6oCw)GsQM{xFfI4;pAI zVV|q0r%l*fjJ1hrAk`*11*SXz*QwTw8|rR{d2-BwxRPyxz1) zF76;Sbt0sN*m<`SRcpErs~;V!di+nwRMw+)zKt9DcJY~z)mWPCO%-E#0t=$qXzFHP zB?Qtn0m#*JzCwnVdlvK%6e6v!9v32&k%--sj*~+$CaZ+R<8R1o0H5SZ#=Ei7;R4sl%!gr?T1XjEMWS- zrhf<|Q48l6|2|Hq6OEc;ZpPO5io|z8(EC#PSbepp0YiO@R8wQJ&2%K$ot)uWxUlKexA0M9OZ&Dv_2l!DSe zspg^h&{s+ymuJ}lyQEo0eS>=Ax>J>wl+)(jwW70yzF2jgf8oCU95HB`&}0leW0seg z1MZgQv15E^liT6RHqT$Oc0$Xm`Bu@BP+yf*wI~P?nfx0CK7kKyf=(tTf`+@SWsKy$ zUvtJ|jc9cEvwkR+k%HBA232bJKTE7pg7AqJXxnm{+%8GAMmqyp&0Kf7*1tNN@9@sX z(3DxFMdbQFhyU$<5Y~Uf2PReK|K{XPC_j=`mjaR+|GxI$_|<1)jhmH zr{Q~p5N9G)iu8Xl&X7wG|G!K8w}bwlFaDnK|DP}Z_61$g|4%xS2ZK*Qkl(WO-&Nk+ zPgp`i0+DpsNeg&NLTW%}lyk{xGvXwbZWCCQvFaV*rIM_M>i z7|p7iH#7P?WV=vnAZNrts8lW~X6jH2rrVofZu4mSaT8!Qk3ps_rwICAPx3cLQZzli z{NvzYSR*0AZI%3GtD(YU)<56*@ex{CIoKxIcC{JIfe>_Mp!<2t9R>xsc>uAHzmry) z%G!`(n*WIAou=F7QCBrHKfnB2wdBba&=mx0Aa-9aBBf6I z2k)ST7O4y?N4IWvJGV_rN>Wo*-ECNm38tm&xOg%t+jpTIa&4>Kx}$ltaS@UWJ@L59 zDA)Mgr4SqW{XQ~OO9DzuO0KN*3OpeY`|RxOA&w>KABx1UF6_JK36>2%-|?~fQaBcU z>!;9frST=I?x_^DgIka8IoqrIh5k%ZFjll|63JQjj>UP-BSXK?zmWO z^0M0m;ZHTU4+8(>Q)gFb6H2*cT;_2I)rRr$&v^|i z1ei(KE~>Uu*StqYU4DOZXk#>r9zoDTk+K>1a(Av+iDX@uFErL*RpkM6<)JqaW-f## zz20*s?9mdi4oqxg)%pI9&gj3yV!9zv%nbms%Wio$(Xd$E{hIltaA9?dSZvFczLuuO z*89QcE(9)yO@8YIrJ!%8{o`Wp)~vmfx6e~!SuO^vpX2m0DUh=Z*)J}vk&Vvk)B=&* zkKBo*&57UxACt7}YF2?5WIe?TBJ)au;DXcU<8r)0zKhPcr)!?)W86hV|LcF#1VS;_ zI}hyzxevyY28pX`{5gB@6~^BjQ+yz)WRy6yayZ+&qpN7#uD02uf`89jLEtx?^|K$= zZC_N&V!O7gR^1mhe{LB)BnK9=al+TgsBz&;H`uE05MO+E2Ca_!2&IUJ^6$ioorF$mxnCC~G1mWn>@Dfo@xWHcszl@p*8ci1|T;YuM5a+W#hX_4&q3|Aj?5OxlcJg0d zqlKQy`7}qbUOsxjoBBSt<4?O;d5abr&RLQLw^P~|=VNif#jImEIA}I;7n}R$fkK9*>QOy~n=hDviFRCE3mG~0^^k?Zt 
z4y*ppv31OTi#2A;5R(r`;&UkoY#Vc*kJLNfw-8xAKV1Qr??KxcNnwv5W?yhb;Gn6o z^FNY{3Ue=VIWx{$?C*f%=s!WUj4<)!|NYB8KPfsXH4dq0zh)e^#mi+Z-La~!L6l;Q z^J|$IFx%%zH@mJ6AVCJPjHr06?vC&3d6atp+9M6Vl(h0&7xen+BgyN1NYL&dt~wox z$olwp*$$|c2S?&H0n|$7`2G*kg)j~!ns?RdWWE$m;RKk8U=y&`>TW{M9R+rt2b%c4 zKCYi0l~mND3f_NIuT;y)U^NN%KKER&;X7;0pS9QtzlY3e;7g@!ejckzM@Jus_QC6g zx&7X7?#U(38OHw(*Q(h>=%Cq1DSMh-xIo=WM~_k?Vb6C>Ba3Cu*u@2PeIXzem!Ck} z$;3miA$VyC&&;MV2W(6sUY>PlmeNQEtKM;3-cqf149N=Y$);+&yrh5OuM~yW#nq2 z2}S^9O+M#p-+npg091X7#7eVj1DZ7x$d!MZbSh9F4wcv$YU^UF|0-(Sls(T*XODW- zmg3?Co%QS!Y%kMns_#50aSIU{Q27|HKkad45EZ|(S*SD^i@xfi!}Rk)c)II&txIDw z+e3;T*nyxnS!|L2K&}2%94QLno<{`g%x}IgS2^CGgQFn|TzEK!KPa5Q^@JFw$Mu04 z772?(j`yt$YE2MYnEDQc$~{YwNuXNf+0UH*U8_xC@kcly^a6_Me|i&|*LCsj2@>Q- zEsgCDMT3{kUR!gH&p1Z3Al*NwGxZ&I+-US z>Gnov4eao3GsI+sv7xrCercE)!YSDG$7o4^)LwhyPT&%vcsd~UDQj5s;Iv)gUWSO* zP9TUPstiRr-j7=dzgtW)Vh>UG+(N<+fMc`ug=+Y=uEKk)A)1ovb7$9_-m9v*t_g|H zy+Jbu#+c*L%rDm^i{e65(i5AG=Ua$cs>*l><8^V>_w`v);*CT4a1TVuWA}&uCh~m( zwcl_CGP`EYFnShexn5#b$8sh?^|uwMB#kjakhjRWx#3~iKWdXw&%3i1AERJLt?)xw zHp>~B*R(9J%b}O%_^DABlCd12b)UPI^~K|g4%_pN*Y=yU)#V03HcZ}iM1iM-(4*A9 zCK2H#_>EuW$Bi%$DI+hlDC^-n!ySkY#&SmW*EX@&Mb|Ba{ak9=&$en4UEzgO_Q?(=M~O@`c{6_qiV&@x=&4p0lTUDTvOo76E{3 zdb%8mx(i1qmlRjBYLSOgTw*qbZj_=T%gbd@Bj_WMVjo$R|sZtHde7#1UG38zOO> zXF%p6aIa}pthL0Be%q(hfr*{5i&@Ux?#h@rcG+*w9V`Tis*H&S(Pe> zo;qlrGFF&In2J*DRFMAy_YacbE6mD>bw0t{ib)PKE}tjRK%wkhvDi}i&yPMYqw7R zv1lfGCXl>U{dte8V~Be{Q^bq^XtiswgTn+Kbfw7$spbMpVew$`A^595%B2CfXjZ&| zMGNG8JBY48P+)&`fk6O;TME@T+pVC$v)d)$X5ZZ{HYo-wCRFw8xXR1KXuFjXIm+|N z(0zQjookMtZz1};;X@ApUWB#7|g?ZpVb2%|LQOe-QFdy*( zTqn1;YjU1T@8I*>6_`I#FR_onp>lbIPq2mRu{-M*?klW#Muy7gN%I2Y+b)Oc57)$z zPQ{60nR*w-xYkcR31(+z?sI=|-$D92u+yZF{>x>Gx5v!%A(`9~q#55M#6~*jj(h+& zbUfRv*90?7jyH9>6-E_d)*qhw>?!NsMWE1HWIE04JztD$7`8(NTQ?sTN!kIeJ8Z6@w`yn|#kETe#*w2Oz

~kNoRV`CxSR1Z`$K4`9rU|7J*9lXDcN%8xIbfB<*?4{g#G6oOl0fx??c)vfCIDzl zQXN4C2{Q(B)h&SEtX7u7y;`DYf5{aq1<8(63uaYesZ_okWBNXb*((?QoQof3+hVH> zug;5~OwQrC>?YLf1-wt7D^a-UVy}A^kq*dof=C6`u2~V+T)WL`YIP;1iZQit-fDBV zgd5NC$Mchktt8I@B)4wx>kQAxFP+{xNhM6z|CPG`OHg}JnTv85yhqDx!$m74*~Xv+ z=ZqO}?=-`aFNxR9b0h?EgPlj_!EcQt<_TPe2vqS`aXo!9JY0U3D2KTE8+xp_#TGSW zbuJ$0g|kf|uKCy+qLW0kN*(W&$92RJ%M~aIPCA_xa*#7}xf_zZbbn4ZZ<4;#e|=Dw zyjMAGO!3K~CXTvbsN(?vi_e1@h9SWzK1I)Yf;X&wAdnN~Bu5-qPS6NsYpv-=JP^0^%|5nInCn%t>JW$Vhg+;}?(biW zz{huMadw~EolD82&#&=S_-Th7Y=mpzSSUiggT+tQv<{ft@}5sWxA>Ja|3YTyTkS56 z{a}p{cduSjk-(X}^$^YOa7nLP^p)%xlrx12Hj#GHcFQK&5oiZC`v&~gNeg~s^oe|+!Tm_UoTTS z*hiwP47qWVAzCFGqF@$<%!Bkbvj7KXi)WM3L5u04v}4(LhioF#g8bbo4#{RvvY1wQ z&%Wr0+sw*Xw#!_HdE=jkA^2E#E2c-@V}Ow(jj_h@QqC=mePhqxoO6LaGq- z$ov+GDV@b*8@U{{PhpgifNBJS&@BT>E23GEAf#fYoU`NHYg-4hn2MM!lN95~I>7VI z{G%I!NBKryDCn$H&_$7&?B1r;uu|yxP|0VEgc<{PUNj3WM!J=T7Dq2bN2u-ld(e^` z^-4GFja@%(FHH@m?6d2ymy5-XTybx1z**mP{}n{0?Z$q0X#;4u9Dnx~DJEAZI}rqV z8+;%lyb32^k}aISXCZPEP+#Xncu|~MmA=?@Rp-U+loJ@_i3^dLkKsN>zMeJLQ=5i_ zFrGu(4*sf@vNIGWV!(Sti0Q=0#oxT7*jzX6z{x5A>&m)kI@6u% z;`4q>S0*w<^jr2S$bY{Pu!bn`iH-^qcYuDj7`SzEsucvF#Wn1n8GL^YUehl1#TF*x^l_pY)Dq^rpU-Bmlu#V@Rq)eoLkIHEqnU5+VKI4wV}9XdCHXp# zwqsjO51=i%Eu!GIVqIfH{^8xWpHJ62oPr)+mt_rmtmxN|f7$|zH}t(R72v4}WD4BX zSO@SfRU|%{VI@V}=~d3FSHCf1FT`ho=02xd_pGkahq&fd9s3Vl0~Y~zEW z!S=F+DR9~U0DpK{-%y1Wh-9Zl+coWCue(HDR>yQo-|30>{8UD5-AlK6BHap5FAk50%;~(&RGw*&m8II=3H`q~C60)1eR;ZI_8??rfMQ+M}|Kt&QOohvJ zG$A5gh@#j8FzfWWt;s2r;D%wKViUhF$X0SQ=ijaMsE9SK6 z*>8sd{Pm-$aqYKWN9QkuICgpG2r<?RWFETIU<3P{SwX*@8dGXG;E{L(%AQmW)Lwd#E6A! 
zhhfD&^@(w4OV9-(S_$U=gdcL~XUb#k>Y=x+9lZQ#XnJAYpDOmXUu>5>FyB#1&TFM7 z!3cxwE#=?u`tj1e5$8l=`P8})RITFHeMP~P2y&Dr%Jfja_<`u4K`yVd-(B*_7G8TY zMQA+afykxmqi8T-Q>Wfx=pYw^L!5)3ftG~1PC4HPI6@;zcStH8uaBmZZvd(_Xq86F zO}-EbEzraFr3SrOC0XdA0^bNtJq)vb^H93RHBoTp^;LVGjN&JD76`*wQRI74Ge?R?zO z5G_7$wqQl}(~Ke`Or12mKmG0;+H;N)1;_n$5-LcY63EU@zy*^@@;240=c=vlFa*$7 z&UgH*qj?h4AmQcnsU|E|2JBR0pTF4-#gIeTGy~$KoAg+lI}*jyBh%!kdkhY?n#4mHz-IbsdU4nySqCk zlWwHLGg$lEd%3=I*5St=Gshh7JKlVr`??3I_bCzo>rwC6`Khso!nJEx`GAe*DT=dIC%9j5QYHi_ZPTfW=BpNq;Le8Bjv%dZ?M&ys`U zdrv{oaemVq+?O(e;`j$28gNPNuStsQIombZ5hv`3!W!gaG%}qN6rvAbyz`DnGW2CM zUFuN^vHb~+q3-7>*^y+@?}k(KvZ{ENd)JEKKwh*BB3DQ7M~f;zggWheQDi`qNh$<# zcuSFBl(eL^FT!GMy7PtwlBSD?`v;K6k36XKdgHHwil&dSYG*Lun zHp-VGjp%K<9Ot0gUvJ_wMr5smw6#%;*rS#kBr78gnn9XijU>l zA9BoUg_Gu6dygX0vW116<2H5IpoSFhVr$udT_L-cgCbTP>7sD@K_F{$S(X?qUTx~K z+NKl#;{6hSRzh$@9}r(UvPkblVf_ImC*`$j)t${s(&X#87L?zjos$<>I?$@U9)^>7 zIrc!MUy(EM>M9`@4xe`^?;M9x5`iJ}`Q6vZ7emAlqqM;fZ&*p|@_ucs|EZ%r6o-lB zBYdbDULY>xRFUa%K*zOL+~PCgXaN|>$6)m1&O!`=4c8=YI3m`PD5a3o?-635Yu!A0 z4UiB{xT-)p06F$?SD;D516>}Avca#M8nZc1$fd;< zo^s)R6@1KUQ6tGk1A~`^OkjrF;6OEgIFiRj{B-Ru(suB@MDy~A!}-;ndARG@vl5Tm zV1*d3-cU$!XTym%J#AKQ$Tv&<@xznG&SUwd#S8~a{KkVyg`Rh=Pl9@~v|%M7vtQ5i zLTQ9uwRemkfq2m(AWl83vQoMoKdW(TtXdh(kO;H+L6hic@h&$JY+rr(j9FR;TzCTI zyO>gYSj%siFg*~%^}V-JK{LVFjKChLnZ1~d(2E*Z$^-a_7z*Es&!X$PTIXy2_97i_ zr85&_9as}QU&Lhp>jhv&LYg~nGJcrLx9&aB@c-^=ZE@UTAxMt$7wr0T8@PMMVchcD zkafW`kQ%nw=tp0C%IjN%>587d%_fnD50>xZ4Ac52p%nkJQjsx@-hJCc_Y9Wg|olN zIci|e(}_60FXVg)Tk_2XZM&B_wQlj)@3+^gS$YNTdCevGF_%oyoO>0B6JSS)X;q$GNGw%V(71NF{_vjR+Ap`cMT0%mJAKrv@9_TveP-YB%o}){zp>4LY+WjN- z|4JeoBQ_z|<%jjTamcmwY|YMf$W-$eIdZ!u{|!sAD|NyRf{@oAi7moNn&vQ^1S*Bk zhd5h6sC8W+e-#snKvn$ZRRSKQQ40NR*4FzoRqFl8wdEZENIYK=3Cjp0wp$;Si@`*mn&bo&r9FHEU4dXQ-y_b%VW3^1j+<7<-UF$ z&H{VvdP7xcp)|i@cY-R}zPVF{@N_o4)MCH6hCjQP%ps7Vh6+HF)Y8zZ0y2Zh2id5E zP;N**5$Nw3p=$j?4>4Z5i_i~DR4bR5L_dwtcHg>dm;}}z)4~8_OprzPM#O;jId=5V z+j}++1xFns9OfQ-WRrTfyNO9<(L+X?YPjC+@m?~&rPaBcq$=SuQ2{nLdPbbF>!|5< 
z-HQFzheP`ov%JqPJK*1g29K5VfpnR)=*iR+2`|VqlN}X49>#g{Ti+M6Ct>Tg5*(ApP8OB5faj<=Hz0c9 zaZwasaXFkhn=9$DIa44opRU_LVWT2<#aBTQkr)I7b;fiHC(4@JK#RO%p4pY;F;XQN zh?>J7GcQW|`s4h(MV7UW&2cOh2&TSE`jxic)zS4sMTMS3jEIKDA5a2n-!BVh_TyaT z_qa<(EmhQd-EvWy@}-J=E`QoU*VJ1W{?ct(>=r4rwzhs~|aBqJ)H)RG8ST;2p~2=8=asrj?3KXk(( zaqSBp4h6p~O+iT%`-?Bqf?$V9V#Ker7pk1H$NOSvPE+UlBpI?O;hh z@D?>%gaQJ|)k(c{V`bN^I{60*V!^eO(1HJqq|29Po0UuqwUnl5s3H@;#>H zQ|>44a!9-DgTmq!k3y09BiDe$M#`%iNfJqFQzq#iHrcLD6pUC2{hb_ag6i5a(C6nA zcx7rU_J*Ve_?^Y?8iCLprwExyEC_sXew1Ju!bA zS!_l-00g(&7{BcmyB0jiwB1x7)%;narQ4yWKY*RIvjZQr^eh0O)0HNAk-+oUdH_xq zvPhD`{sg8&tRU->i@TjENP6o7OKZW(kuUSb(OcxXEv-_%ulp+6>*wTUhTfrXjDYL? z8gZ5AK-up$F-t7mf@Kvg$XVz&2ILd4kIliO24WxZvpV*|$3Fenk(Munjsbn-LR_@H zS*4LMseYQpCbg|<|dA1F3RYJ1!zku^uMg5a+} zE@QY-J`J@Xj_kXtfuE`;>fR8lmm|9qkVCPaNei310~F@<^RD`S4;L-Qmq5g=-ICvs z9}>lx7Ch(%GoFwFtsjSec_BB^5$ks%5#5u6P(#y+lU`qM2SMm>b{^s1&Lz_yzpg%` zc2$cFF-k`yWoE6e=lIO55A%LZQo)i-NFVNhT2?@Q#S5!%7s(BN2f>kd*A|jJDg71` zB8SuC8M_@QONch7I2BgkRc{oP(_3#0(wMLmb~~7J8SfbM)+$i0k9J@xKIg~l2yqp0 zM8fU}<<|RUX<0Bx)oTgros9P6n8hm<42s=K9CgE&WD9~e(^?^KqiAoZ^V~=?>!KWM z4m}QA$m@M^e;zak2r#Dvk)5 z6hwNk_fA3!NmBFe7F*loi?Tj8J4-HKdf6YsvN;}>f{8GcriXDU!E$yULA_+lRTHc? zVR2pB_V0q@Wv4ng$vul}7Q6F*s2W<~<} zdQRFOG{WZDTO@pnqzIuio7gR0MkLz=F`=$}v7Gpn8<}25yIHe95bVl~NZ7Ml&wbg| zm%bGPcLwCn`5IrZp8y

CB8u*U@mIi0t@6UbO4A`7)4;e{t2Ba5Jci8Xi>tUgQ&< zA)k^oRef)$iBYtn>x4XAuwJVS%myNJzg+qZuld^;LT2;A>!nSXz^xv6j_2rGA?+{v z(~sy>QjIsxyI<%L6$(k~x$?)Vyk76w9(-#}Y_fflgTrm^W{yi6#f}M_lx8c*BYE8T zr*L4)BOkjM;Hbaxoof3U7V6v?$%*9c(wg1PX;&SgYPYc#03?xFdjhWq2#!W>@IB%* zr~0TCw+TDO1r$@i6*}6pt(fl6D&d`bGqR7nbnrktWprW_j&d+~vbZ-c1dp2%GgX+f z_+I?MV&7n8w}h_sVu*6JOfYjjcBI&UK#DOElHF@?G+}ZkG$9C$EA^j`0ux*)>mvn% zzA)R!3crPLuwI=grbU(Kq%_VsIkK*+bPK|Nm(amKR$gk|BsfM$W%dK#aJq?-hKc+4 zX)?z#%$YsGd2dI@avup$JZ~jzpdf)IrFAM?`%3DaTdaoo5Ac( zH__k5VPa2HN`+Uwoszj%orOg4e+F1+xXgvxoZdutd_jum-a(r0aB3R+<(!8jJUf59 z?ZBMj8zpD=2`~#07N&QqXKT@Qxv^x0^akO!gLwA=OlMWAk|5JH81y3L_SSYS+OSly-TK@i*S97u3(oxTzs;>!WUktDE}-fQusADKxf~xXyPPu09@|N z6)Kl1FCfR=bR~zMC9K&TS#RBm?-6Dk997ZhT87pm{3;`^lDQPwB9xl7Q`ZCH(YGB) z^(?apiS5uVDx+5r*MG zO>uqGMklijya64owX^=#SEKuJ=rR02hF)jR*70ef70*icEmq=HqH-i2CW=3sZr?e0 zY_(8gG8Tt*|7gKUw6RQnyH50e9etM|yHXF$=jIE^59a72{NONKq@PhZs+K4pf0U7- zw-z?Ku$WhCi2^EtT0n)d^&-VFW`MzLWY*G5`D4wHY*=nEl?XP~4J~r|_!-*|B?Z(S zrvMA4Wn zaD8*(zX%5kR%RAB=rfTvCH=2b*nrU(Pd83EsWPa&GrC^_6iR=wDQWilEYw5`tyHKfw7)O>} z&^^A1Fr^H~3JkNIe2E*eNt8HPaFPS}xRO+v^YyW+o?pGWlui>YX7|oxl)L`USzBOI zz~>y9$D|B#i>ewXCz9%GcH9?~h7+#RlTmgMM*?1v!JT6L)AR9%XD|84!TWVLx=sXv z;~V4h-2(a=&(HFVm3Sj^*+r`I6nIb5m&hS5gRw-**RQd62mTfY0?iTh0}Vxjx>+AX z_JeiQqHLWtp`X<|izeZ{P(||op>`v~-!u7TAj)?PaEg`#<+?_40jhaAaso|tXCy{Z z!#2lngL4bYnX%gzymXX@Fr?yICAjq= z@+xMQ2$^)Ar?`H@jn;|ScKvpPhm~3J>NFAP3{Ej;DV|W-yu72S0qv zCMutojwA9WQXF$`JEX(cFI`x_OGm!pHg+%dVz7$pyXGs1N`$4lZVO>i{9_`vWQ0SC zojN?x`YP(zVh^${I(=B!BTW7aNRvWldrRHp8B9d$1pO&f$drG+U`JChJ~yZ@Tf!VR?o!6t>b-z;LUo74t6Gl~jtb-vS|Cs~gKO=n zkEh^p7&+QJqkX(xqZ+1rXUDD=|96rQ$T%m(h@A|rLsOnxgk%~?NPw{>bf~|w>czZ~ ze>$awESL?n%vf=fv0G3RFsc0ttRjm%|5~9BPPqqf` z)GG-=@lV)Q+k!+JO88qvDbO$FI$EP~v{S8e?qYo?p+n02Cj^xHE(0Nj(;!p1sx>r% zpsr8Yu~K8_0&tS)1n>e7K)uNUp_+>MM=@PU;F^{ygOG9Xd!z7n%m!2DH~SNrqp(Ln z2~qAbIGhzqvlK;)I8RDEBq;=GLQ9Z-<=PECyxhz|C_ln?#|Y@RZPyBT9ML!Ap7ymD zPrq?1Z`r7TH3tB(HkSwUyVmHU^t;ax?AVaUeeVuaF|7d&(a_xNAE)8+3U0~=gTTXa 
z%M4xxOIeP#QoH73vS{cveoBn0UGiTj_(Gr<2E2BtyKH6_NG!x>B)KzPcXa>qx@-1G zXc6M8&uB5M-)N7mm+_C%C1t`$n4EZ;4r6u6&S+_+in!;|KWXJivS7X|l^6O4?%)T> z&Wt-yc)y+ngU{Fk~b>93x?7`glo{BLLoET$zoTpC=Na3Q1eKaTw;g0N!@fDLXH=(?%@bs%{X zZ2t5AP+UyH)a#vQ=KuBj-%rQ7k>tV-jLGU*{;)>)dzAc3bh!}dAJli5T`Kf<;_?qM z=F^9>BQfJJ=DS54^!`A0{uX5X1!7bq0+_|2xSq$~Xy{+B|NSvBJSu?Zi1bYeccsOV&l?ImpV|OqQ7uQ@t zmhVTviHb?u>COM_5S}gx8Zs)G$kL3SyWajxNi~W2ds&-KbQSIYVVPlK;mAqg7%jk$ zgZcd7YPGIA5>H23Iv9pj4X_WYG)6##O+@B{3Jl1>a@};Nz`waiy`t^{3)~|U#@P|*$pG_Zl41NxDc|C~p zjQYR#`9>^YSsJN45XM zW&B^|{m;|GH^9izxS_fA|M~9Ii(}kR!zE!>RjKMfX5>E$zK{&mom+R=(7#umH3m>< zZV?N;yniiqHRIE88Da3{?*{zOY6%mYB?k&^?L7j1^RK10)CB5o5vP^oUrky8-(R56 zv)5l;lmETcrSJ@k-0o&9f6p%&L2Vl}3brfv!L^3*I;Y zQCHo;X)kHjwn2vBWQyk8FH)j~M#hlEJB8bYj-$Ym-t3EF^Bwy1Ypdqv^RdBv}b-d~05o=~L5{%3pm4*&ita)SPMZBXACr^d_w zRegqk8!%ZMLbtQ3j*fi|i}bzQw?@BMbDQwicr5y?XcX(4?Y}%S3M3S%34V~B6QH_V z6r08NYXACtR;jVQxB^=@JK{epkN-@0akSZB148L^JD{(sEe-r{Pnk`H1-b&<6?>kc z&wUMjG#O>pvax5@|L9!cV@%g_7vkf)E8%5V=TrQ9Er7>d%s|Jss^lteg~z2&ES=*= z2QCw^07aZ~=^9l1B9+~VZE0gu4d|9;w}!r$JP{7+>gul797X>BDOW}oaGegX2;PJ- zQQQIM3R4V>j89s88pWEET!A_Rrs0J@uq)V)(uN{lny0{i+_pqw>=- zrE+xl`|nC$xCd#Q`P*qPiYv%QKiiKgg4$;mEwT4&ZdQ1z4On&f++(6YUe6I~sLu`Y z5894@r{VFG`t9^e*(###gVI_~tVpFKy6}{~2NcII2M+Lc3ub9L&r)L^){xbR28 zz??9z*>aZr)%bYm3ug%+kVyzJ=AiFNIDUL{D`s9nN^o6546>4p8m*%rAxY|#ZJcG~x&=*fA$z^=QW#>!5Vpn9> zbuGXtxT>UTM1aR>Q*O~TQR$xZ8w#Y)bNA<+ne?PN9ko;n(gm>nH%!&{{PFv=6^N=G z9#oBv$azc^s6tHArF zyP?QgSHT`#F9j(n87A4n7qR>pP`~%si(8i@S=H2{>QSr!HWmjpysMR@qr&#Ft@ja1 z&ib)rQGUZKzUOG^f?g@p%sMtG#9=gnAq!(i(KCY{Vx^OHsnah>Y=ny%7fo234Iy+) z<>0(XTbsj^Qh&u+dN>*?|H8{*d*8{(xU8EY3!C#{-a5punrBb;4~u|-=fO@+?Asx0 zz~Yg|aYM?_4$#QfPHX7UghZ0+R+@%4ZxIr+!Pf%LZ!DhRPMhp|GW$uR=#@z;4FY^49=6*Wj+PsFvAJ)CG|KdCi%<1wN(`%XpN#;K zRx4ng1^$f|6l+yscguO=65YXY*PIiJ@8D=1c%ukqy=4_K@cr;;NkE@E&Tvy?@G$V3?HqC+X>i{G$RdlQ z$c}%g735bLG&q@gHgXeXd7p(dYqb@{&%fXm^DXxt!0ae3*;jSI?X9^u8gmT?+;xWl z{GsrdQ4>CnNuFZSXQw0vmT2?#&QDwb12g|?(C)_>Jy%9B%GRR7Tkiu=!@_3QW4+7U 
z+w?gXUY1S7W5sHO^R0!SI0_|>uFYsgySAOZwpLYNE!xLge2VicJ04mz=b8Y+f}48} zp5Hl|HvV47E0!)ddj!Gy=^gGia8u8Iuu^8eGO=$u4Qc-Uc=src4I9ET92_Pp?=d?) zeZz$=bkg>HFg3~d7ye2dAff(k1AtYvng%}8S)PCw$N^CF?+*{hexnRhE5KUwGKcJ; zFZ3ziNk6)jRsBqdq35OsVrGzzuad5J=yWCiZ zPKXwW_rBHlK>}-+yHy@37=Jcv`1(H3F}BjQ3eUiM{3H`{-WenM2!dpq3fr$VnZG5N zpIGP1azBZHF0&YXy9QX0GdNnSCjp|_^pEIKtbo`np_b8ZzS3%3_?7y0dh`xq(mGjE zkKEsxE5O)#*)Zxe0)QjptA-dIA>3D}#~aK9`e325@1w;#7c#9V%z4u)BhJiEC|7eG zo7rs-Z_9YTG|H+Y2UL4ZpOV^GU_q`N7!Y1Rd`0SAur2``c*Wai+81C`Vus^O(rUQ~ z5}PO$4L_xR(WCL%J05RF{Rm6=p=_iRfMDptZa-JqSALmtAl{v8I7T(b^2?hFrTxDI zHur|@II%gp%hD0U&bCl3CcN&aOBWDeNwWZAoguVeTFWY=$yvS=62Ucq{!zoAq{d?pK$A21jg zi0jKB!HUNz3^j!Xy7WsCzxg@30zft`3M`*A*f;3Vu(U8L=Ry!XCUCgYrGu-zyCW>? zE$?kSs$H6Nv#edb6$u>@f`R}%YE0zu8sc*Gaj&HP5omgJo?-fq-8fGk$76^^rKK<1 zIcLHmKqeE+zJOv^86|@P!>S1Ybc|-pPFp$xpNP0BfW|U-#6Q+4`0DgVre0UjdlZ9> zy(QKg@2tk*&;g~xro~b4BOf6>NG8bIdf!QgNUr0G%-)O;8G{Ot@Uw{K@6)8}ZU{T& zKp{7^N8o+WibmO3((qB>hFX|SH!qu?)I>ML6G%4|s~WSTcQm8EA`cU+R7oL5 zA&S?2zmI=Ir};Xs7(vcw!XFhN4H!0&%(Y1Awk;vASN9D`%#aYLA3EqJ*k>}{hDDh~ zHU>LG&dI|GNf`hm_o8K>hxN?i=JEJ)?GlP>Vv&OH{p&(qlbn)n7j7t!Ai;4wB}~v) zl(QN!d5^kDnE-q@OZ%>`>Wkyxcih$3OvWpoi=fcQbAL1~dlN56pu7gJYntuMJ9{7V zp^Dfg1=$>I3ZiAy`KU^_j+1t`fiV#tle%V`uID+nHwnQ<-KGIb=^~vXE*((1EXBIy zLEO2z%f*VNZjIX#a;fF zn{=WTKuVUAnFA-y{zu0o!Z~0TSaw*^OFEp8ON!gnnx0|P9pH7;(2G%B3lLp*yUKoh zM48U67Euwd`3D|YuF`E7#C z#Ca;qX(+K%7+Lzs6SeYW_;vD0wGTT!;aeVC5N${{Ab6NH@LT5YigF(9M1SB1_fs`d zy`bD-d!*Crd+Ppo=kk6S6oHljp^PIWHy`RRcec~*mtqK6yi2>63dXG^O+)&8y_YYI0-gN1XTJI| zfFKr$cBjTVN?ii}&7a6@#;dDrIWv(JymVw=%X&G}^Y}UbcZ1(X2zQ#=b^VwY2VMSf zq%T4Wn(`p9z47o`_Hbv?tytL7K_^=GBzv^`MecP!DRAc?o>4NH)6>FH9&5YO*pq)) zk%cC33a`WN7c*?YHri}BT%(+?bR{t~M8LVMxX@`cINY03xAvx49Ue5#)N|$|9=$ED zX$0O}KgEv1^8+tTknqm|*xi=FTwt3d;8#QrKz->HBkTl#Ow)_H1BSKHyWoY7V{CyB zfN$QUy2e1JYXLa2H!#VJY-GIYB-5z)$OiW!EyB(M1#q!Vg}4=3P?a zO5mE%Lb$(}epBGt#-K#0IMK%SI>t+|Xy~N;Y{*xa)PYZQ(S@ySqprI2j7i?@q#dyU z-S2L3^-_X5vlnU9J5=)K%R$I79~{ppqYxz~>@GA+7Bux;!ZeIbILn&o*x{(xiJ+Nn 
zob&@nh!T#wpIaAEJBC%|5S2Brhd{D>a^VyYdK3N` z$K%aHSx^2m>(@%t4<9I}M79NMFutG>tT6s{b#_F~$SG z!5V)0P)pw(HEU4gm+72>cK_JT)99p^-t6hGKJ8;us6=zX1LJ8xZTt1~?+ zbLpZtaEn$O`%<_ggv=*t{!E(3eZ(f6*DoGBW~+h1c*_&kYHLXQGVy$*?(yNawws1N z77m%sY6`d!$GSn^$(YeP0=_s+RB&i0h6Hl6`S+gB`7XjtQ%4R}Wt{zQK;O0y4gjyi zb8OIBc!vTNWSe4#ZHAE51|*e$SCwH53LAHLJG+!yF(&y1!}bM^oy5|0TIz=$zFzvp zC;!o6eKnWGo~p1?Xg5;!le^r#R_JEG))5j8Ko&(G-+ZECzg$h zdJzwL8-nZHqbhgXj=sxI&X+736{=F592kmMb!}8rT0?E6i08)Wr#q8N!=S%hbd<>R!Ie z*-hanu>{L6R+l>asa6c}^e3j9> zai~`S?#ju}PUU6OSbO4V>;Qh#6HcA5Ol3P<#XY)pea_ZG236lXl;M3`MXXykj@3I; z^D?Otw&}%OTo7b!v3J&ZSuI_aKbIYWP&-nqvq-mSYvQcQQWc=STw?U#YBQCRzDUoj za2et8wk<7^cT<8+ilth+YWv#pG2c&Ml4rr8`0#*sCeBqC?tbRns~?MZ&y_C2B_S(} z;%iP!pBAVD^SaQQErf7XGfF#ncZ0CWw~#KxlX`UU-F7ai^Q02*ja_k+Rlj@MF@e4% zQjoX}vKf=9lJ@m-S7kHC*;(wnu4C4axB*V#^qQG{mNe2<5=}i<^;-<#&)@aqcdQ8Pg6bbc9v30Ayjeb{aocS+FGop!L z2}gSjk;bE_A}m<1n6B6^Z1#R6k(hL#(kAt*(|s4gWMYt_q)!kv##l|YoHgE)K~leO zF<$DNI=@~Cy32qp{XQH&kzQ%X7fiG}=8`yh?7m`~br%S8Y(lKv#3AXa%X|CGeI{71nG|UdRfc0cVZV%s2PSyH_I7P{V3|S4ePF;p7G!lfTH=+ zUrzSjD;#O*ZroSK?Ur#;2M85XRAlX1VZyMOC>4P;giK)-MrfGWM7Zxv!fCR#EWabk z9WPb3ZnwHB%^bL!j{s)dBVH?B77s^W79&G;Rpz5B)}lo=9Y47-A36i$1rMv$2%WFS zdYrUgRK{>;W3CWgBmIuna%V#XVPIuq`+lmJ1NG9f+1|##&ZhUnB3NF~bpLg<>S}fG zni*JQHT|KlV1IwGk>7eMI2Z~y0K*YlAM2JR>9gnLS->MxCFOZx2i|$*cdd-CQ zD-R%|QkHZkcm821v9{cW_4l`6lXwkm``y~-6&snxQYlBLoeM)Mj;^>%+n-?i3j3Rq zX3ne}qJdVVIzUJMU5*N)_`Qajpqw>S<#{eXtI8)?$K@mI@@T@K9NN#SH#g|r-BB&V zDOovgys=mq-{FV9-_r^2%6nkev~t7Z+;}%U>LzNvZf!-($}5RZ1K}p+z;e_mW6Qj5 zeX!if$%`LUq&c<>q|KsNYNa9BFO+8;I5ZMpJS3orlWypJpQ3kB3N6Lhl}%YCqq&!b z)l*|Fs{ACXmlP@Uich6l8KrAr6U#D^3v2Ux+x&nFR(_-lT&98*?Rp7LuJrpU%BsT7 z^F(K4M-^Q76z}33Tq2chFEf$}5!n{ZOf%wvyRe{JdWA50SX*%)W$z8_99>maZmhE$ zp7&aDAr>iXcnG!AJQk$aBEwfbbM_DZ>{5~8*By8*{3(>}guhDt}@z zp-JF#Jx6 z&Nn<#^$1zP2OHOs&VIP9cY&N6c&$z!?sp1AfO%}S5yBYb>*{@5l|?{ycJgeaVO!}k zTKQL-+R##}&8#N>BX6mx0s&lvWp!EmSHm=89o0~0co(moMi zG{F|4angC^^b=N9#EslDDiwx@$$aKGS;X>K;*>w97#;6^-e(JM2e_XM>lv_bJ3|VhpFqM; 
zzS(<(e2GUXA&psZ6nbG=JOpQNcBAt~nbZ_+Ye|JVjtiQMDf{|||0IIhEl&+Uo|qexgaFsJCVmoJ9R$d6tPaCHwK*Th!E% zzb1&ugWr9y-ZqIZ_RndnriXDib%ooE+#a|)?;9e)Lb8dCC?GPkB_n`9J+oR=Am3m% zuK_e$bf84!Mfo9Di<_m4t*Uk$p0}ZQ7A;!kDp!M!o|{4C@&|j&aPy&|xN*~{iI$yb z`mZ_&RT8}?=ry=ul+YZx$3Pd-0KTZZRu(blv`*^+EX5JwQZz9?sGxmPubbem{<&A! z&-{~T{FVKfo!eoeSWPxAS8cU&mL`V|Z|WxmWJVT@$<22N>ar-hvRcV1yrpc_IYsgU zEHquISYz)rrT0)Qxp#?_jsxHv63OFSNPGa+2<^!>#IUX#QD_a;wSVG5Wbh2<8-LfE zA+EA6d0v z1dmF#&TLSvy+BZ2_&{~banHYc{^U8tinCpBHI8S6{t#5psUCDm9El4$u9?csKx|RI zYxc-AjCULpN2Jle-gk@PhH*?p!t0=g)q`vgJrh0DULbZx>AQDN=SZ8lzwD$7iC(4B zoZv3%9GEVW(8ccDtEXj}b%ZiAb}E;(i0IaD2NWg_6}S|lt29BuRCY5BG)SbZGFqdWoP<&}1O zmvLm4(`u5&%Tk!WrP>d?Y~06;WHEiSi9*#ugfvxn@vA*B*mM4Zgb}d*u&yYyreq2V zZ$^Y3U4#7BjARJyp<3#sPC{oGW+xBp{GVhI&6s$`pZCCZpsy|WxNBHohAZ0NXD^#i zYhIyh^;G1Gla??ha%ZS4w%BL-^!c8wjLzHScOT7ZshIFz29%7d!?viJE37~^f0jD9{mtFuP+X08gsMkX;#8?6DK1-Y{ z{B@Df_H2c6LI*+P)G)#NWEO-aohsN3#u1 z^5D;+iVPDBvp7|20*Tp(6!XX!?;_-h>MYuXut~#z{{jMrrH*xN2sk@%hwK;$?O(Oj z>p|;pm>C<}O2g_uNuJ<2gjp0*u5deskn3V=ZN`!Esw}+FLXVGWo2*P?3?`A|{r$@z z%k0MK@N2CHd4iTS|0y#FFsma^aEhjdG9R5r(d4iB7a%bpH7!Yk5I4;~ZKul=o<%BK z%T8j7cLc^oJ@eM7XF7f8q=IvLCUf?P!2=`GOS8g=g5D$B#<^6C(R4ksONZf#)fnm9 z-;okb^$>QP;cI$vZhIQt_o;E}YxmY2(@MIZA+Bxh3C$9M*rx_Q_WT7#yxR>{!Cu4OfKs1Tdnui zs_Bkd|Fo)0T-vNp#8O-*KKG7gj)-NI+{Ud4YE(o*`l{66;-dWFr+p}N{c#;^tzkXV ztRZoGvA$eB1IHgQf9Tr9*{(ABp?iYaz%F<)eiuiU(px|6oG9s6Iyey#Ya^`YyGK;NZvXK@k?c>&`$El_5AWPjAI29tYZN4}RHaRaPK$gewf%}*cb ziQIKJ;NGSKvH!1>T`yd3lvwlC>)g$31Rt3$y=ZVvlM`O>!x(l+Op=pPHd3*kX-=oY z>VGauM}okL;ktEP!qW#b2}i(m2~nVsqmu!yNAfafR|iX`dcHZtCqZ5%ha)fXLni!1 zS7=82syOO6V$Yf@^wVS|d~=p-(MX2Tjt`76=X3H0OazqZ(~Q`2L6jCFiGO>gP? 
z+{M=`P&Qy`_}+_v4$5CiPVmN&N48`5!wMhDQ774AT%J;2B?7_@9650aac!o^<(x?( zV{M+yh6fo6rau}ab%@H^TWEP zG+*N%4z#`m?u@5homl&EqCfr162*|>N@h6KQF3OQZbUOAPnK|o0zGqsj>0?U zymoH2Kz= zHP7j|E-F1);`LXqby+1<=^L!QbL!|;_ME!yF)0$q(P89N58X(n3}9`U@Y7xC_YbEc z6^yg*!~>J1&T5QB?;X0)kz1v89CrKVaoxLpyxerjsF{)yHZ;ty5E5ja+m8e5yg4;^ zRs*VM^T0RleAE;Ta&#E>;%WIP6MJ|bWyJsW6TljRTV(iS8@Cr!LxlNe%^Ma-NgGE;RUG{S^Ts|xaNJ_*=tW;{ubvgR^=9^ z9xCPYHsys$Gn*9LmM)^D)Z1)pzw2huh~yE@Xya-IB=2sCUEecn8FGB&6AEfysf!hz z-*-DbcV7@`G1^}261HlPp1p+IUWZI zG`kZ0Kzc&wcVrQonO8BO+gVW((aRsP-Pp>HV&3P-_K~cjd()@$p~9l1pcBZ3B$=yW zQ$20h8?sR@bhFR8+*Ufb3>Fh^ih`}~I6WD)F>$FjHex9sDPMa=R4-KLzqW!1cWi29 zR9=VIH)7dIM(k_|K>U)0=Cn@Wd4H@fBnWFB)DcmOSg(fycc1>}J5Xh5CwI5YvUn}Rdt*EZZ zik!vAvpwtU%WN9-R^Nfis@>BK!TLek!BmXq(uFCTbp4!_j)qkSQppHOogiDqu%-Z6 zIdfJadYxl2C^5^^y8ehE0Cy*YGI&(zjet9z|2Vr29l%A#bb zSA0?qK3nLNUx-L4VK8LZ5M1h%cp6{Ir!6ImW*Upf+)-&TY0WGMiTQn$JZ~H2Is65TKe_jH%8Qrx zTakW5r2<>sMoLZmHcj80N)iMwIUf2+?((OtJIo0E%2J?1+PtO6ytWO_bDo2X14Lfh zS}pCjMi89A&KVsWEVrh^6rjUk^r}i`0=*F3` zYR^(1b;$(PPV)Rr|Mb0BzF3HLC*#&Drm8q`oWbVJ_9RhWjmVYG@H^10VRX1A`f=p) zBELj^RoYOjHQ!7OhTZrtl`Y?;hntm?88MN-xK(;Y)|ai=O~+a4XYU7=PO`K!pfjJ* zw6|_GT4>wV(9o;EXvB*`Q6=+ogIdO<>gpcaonopfxOMTI9<5^qfJ@DWK+2*I7DrVu zkzuor^d*xz?zZ~wxpln`BpL2BDq=f}PJZPRh^L~|P#7+R9O_fK)^+yzUKQ(AdfW|M zlXY#0@cn~T#yQFS^9<0(b%dR8{?Ox>{MZNZ)*p-YpH-wW%klWjU*ay9v7kkJ^RmJ2Ul4tzVSvRBHh&d}h~~^_(2I5*(S}Ys+z5U)S-pu@&iIi?(_;7^Lb3z^fm+2Jf7+PN!~sv@y5eZp8l4f~4NGP=K4;PfTaiLtRf0+k zDNxI&lTo9{vkb0zlOZ0K&T49&N5g|&+;#eyHHYU5RnxDN4@DxBO<6WSz`W2l#r65@ z-bv>C1vb7O8QTsXZi$;5OPWO}w)nqZ0JPZ!bYk4bHs5!o8iu^L#pNa@(4!aNx>7Q8 zd`7oqtXqB3@|@nb^3;K@gF0Y5UOJX49vjCC}&c_PQg(1}!w`Qd_4q_~itO+F!A+LXe5X;ZB0Id~62TkJh-OXSZRg zVJN<^@@klz8WLjH)c331=0?6lVk`8;|}d3|3U|IMWT;<)H# z*g;isQ^tMD-CN|aYAyb1v`84FHngIb)u5AS4fj#*>DOBg8j(A9&fX84YZi&G8jipr`~Vc=T+831O`G+8wZ~4RKFt=qa3IxN}Wxs{@i9hbiowZcu6>v zGKWDa&_CCdT|5mj1=<~_;op8OQFu;gxbcwU zQK1tgwmrXX*Wo*Rp^;*HY{q!*=P=mEvUdBs%Q5Guq+wF)Ul>O#4D;+p{gQCK>Aau4 
zu#WrL?K$Ib0(0O9O$J+>u)o}&A-`mTe2_90gNa{l8rWNSX0UP^H;Wkv+pay{_Kk-8{~_xw!`h0vb?pR~;#P`N+@UnMLxJLLad#+$ zAjOIWin|prP^7fDySvi@#XU%Icl#Fm-TT|uIp+^Qa*>r}%`xYibBt%)4~vkAlySsm z=!gNlmYVocMfXi)r0O%~72_~vv) zw|M{$k%xge=BHsAB9bIDWrzBz8$~g0>q(dalwghkgbyK*?#tJ>M>G2UXm3vV7=0>y z%fZf_zO0;GAAB_OSu!pPu54mS5Q49&x4`i`hdzT={iSlcV>AgRLRK~5DCxv;!$kYj z5@x435*!nZ0Yj(uFcAbA1t&^pJr|*VFR!Y;&>&%!ZbHfQp+lG39*>*Rh%)VOTY6K4 z{I1ia35QjmVHbO@?WK!b|0aGf&#v^l6`pPHjFKf}xk3w@4oLVhx)aWF^?e_k$~QUo zS3dVeb)spPVD6SBvA7HsmT$hpRl|2;9XIBA6;>%8KkJT@!1Upx=HIBR*fn#vC!I11JTq z(l?sU{)?x5o}Oh(j_u9*DOD}`5{G1F#Z;)`H`eQv$vaHbR&5cdf*^b-QeZKX%rlQ% zs{Y}<)~K%=*hWVIom44auItn6hLKo1gZjg}_4m^K7RA~cA3?I6FMF=A0jj~A&WKDL z)Z)I|*QMIMea51j5{GBx&F8Hl%QTgcBcv}ZU7hHL8eoRCaYgbXp~bKKN<0cZ7Z zCq3}rKVI7#>aTnupE%l&d?B&|#$uSFa7~|Bcb)YZI(H&270ntU>Gvq+l?TW51CE5$ zW0e^gVUm6?LSUY{P~VA=s9p_-u8|+5g~V7?4Z@&R*^Zm{c-71VnOSWxB;5A8ITq(< zb%De0b7&8eJtbXxGVgW+je)X)9os~E?hk&YZE7u5xJ>t-VNROzSQ#5opmahgQwclR z0qkbhA?FJkQVPp@B_&a`u z_T%{)4D3v9zm(6uzI#}bE>Mw;-FyA*H`OF=T zoc5ANJm~ozvO!X^<1@HD*dr+M0>fV;<HE&^&zRzQQ;S^+{-9-Hg6j`&| z!!aIgtG%{KuA&m8g_`Xj(tCM>d3;Z_(I0M{Q)EU8BX-3TBh-?!#t>ew8|H`BJe!&x z&}`Pf(Xw~Gmn3_%29a;jXU&S?#Wn-96zs7P%=sYRaD z2i@}@AbE8`V%1=?`I3(^snek3*#iD&u`Ifbbr>LchQ{_1*_>Pm_Zr_sQ!=kdZA6NV z20iQ2w3njHqldkhT8S4+niMfF40y0dycRlKNPOd()F9e4pr|><8!iBMdIO!3%d_0e zm#AZ;G3GUev=ObRb>-I+nZHRJ%`Z z(N>$GC%m0hEoY$*A%aCojiI3{x78pt=lA4*|M*zY0&b>6nH{AN90=$+-7T()oixCMkFN5(u&Hf8>VbmA};n#>zcoe0hU0$GHZk z|IO=3h(d|O2u_qI*w0>GiC&VuP*p?#aXJCS>B1eQ`^Nwm867z@(~rk=f6V4Kx%13( zw&vHxJJaiOh8HVu*EwB}`@BplLjKbGw6hDBuXUrM;rQum^Ar<0CO^@=v1;)dGCYJZ zWSYHpk-INX(#wg-H}@DetJI2=?guOjl|k(VcFl<5F@~Xa~V)YjDmgzZA#AJoG&b>GV?d zaEWcPnAtGG!<%PgTCdSIAocb7bG=HoEXZkay)hOzeeh_#+E+Kvp7Ciw&a`JE*>-|L zSFlW%)IqO-t+?@G=V-Xi*U@9K$hcf`_V;A#>e8KKd!J2@2$QkJyXfjxyw7y?~AvVKzpv9~v<7{U@@#;cz!iQ-(&F6hS1RI6eXiw_W-7%-2ggF!Jx!+tk(BlI z=U8aHjYfUyGa!6=nQbGmLqm&pXu5Y&Z_KQkM<Jvf}I~T)i+B2=y z&ucgjFQQ-SV)qtjMBZ~31%Dvt8>;A(CcX%nmcO-YLS9Q@RcGRsawDU6;%amaE4U>Ou8ZWE^4k4^_ 
zl8tMwLz2Me^E(tXa$Z%kVo_=9>r$DOa$0uwLRws0Qp+Tys1P;xR zZ@s)WuGQG@*>Y{Pv{TWp$W_3W`ocBAoJkL&4Wf|qL^ zUpFQSsOoLL+BpXirZbsP^01o~IKypnA*8>O+<KSwOs?B?%5Gq>YK@W=;cxg8fuj{0Skz(3o4F9X7@{Cgp z$y~63TMP_Udu_Vqz%}1(&L_GW(-9A~4*iFBnr_yqHySC?Lw}6+QKGt9e_SzdXkD1) z&pHOGzeelo7^w||FFW9a-rqnNVRyFQLt+)33NG$*3msbB4H}#n?wXP=9W2pDsV?B*GQ3kA~_cZ|AyqM}}-~_HV0nVUYaG%W{tz zmGAXSRv8whr{P?WKJUcP6TOd8Z^EUN=iEUp5N&{Cp_TwQo_7VOrlR? z8K8GZoawD-$VxqBy>mU+tdlEd=Op$KugQ#`bKh-JtdSf0csF--Ws+R_uA3viJlqjD z?LzCK@Mi9&j@zS>`}!OzHSW%RE^Xsh+BuY_o9g7 zm}onGzmlQJ|I#WLE~B1=jL>;URO%e+$rhZ2mr>^1D=)v22Zw23xBukf>=77s-4C7D z;X?f2@Cg?^2@Dlg(vCl=V)?R_jA}Mb&!mcnha#9bM^|pQ=G;U~3*s0=Dzf=Pe6NwM zyh~4tXkp9y@W-fE$6r`u%Ip}Bd`3GPM};Gj57;+tEjZ2mVtn?@CtTO1N)=hD5As>CK=?nhE1Us*Xydrjhu{bPHLs&`Cd@J{-U2OGH@H=(Ziy9#PU*t&g^e4|RNyu8>8?%s1BJeB(>;w*{hL*LWB&%4E z{pr6EpTt?)Dup+soM3%0h()E=yT2A8X3=^Pv;M7$whdAq=5753sU`fR*g#}(MD*k3 z_PFr#hM}?^R0e$w?C5v)#Fp6hMviSGK?R~4ta9YeP0@UvHVGxJFIqXhjFt-ES6ZH^ zQ%N2pA`UW@v|Kv~Pv?_e4bQEOx`K=J2V3)mXYoI@Vn{^$eS5=ZDR1NVr*@vly}%&+ zP6C1QlX)u>(J=R-f4(;#eY~X+Ags3WA)3PkIwL7VN8Uu$_MGD5 zFgOqyA%Y?Z#{#At-kn$-BnG)zSA<^0)Zd(jpk^Fux2n{Sf9BIh@+;Y)#EIvw8Vx7p zOh>yaq-QJp1N$eTDeHGyYIY`nV@Y+ct7S-K(=>v0l;JXpNprV?{{9_*RJYLaIwyni zd7(DRvh6|AA{ng0VfCI!VPWmv&4uHr*XaQ>bXmf)f*w-n^|&ydlgC@%{Iz1H&vW&* zdRd<&tD^p=*QHLQUKJoEnf_eZz2)+f&h>^|`Z6g}UtmBuXJfAu|ZCJCf;ORJfmv7sn zlSl90M=pL)1x$gb1T~xeXqR_@K=MGS6>CTh zr5~wMtZx_0f*8DN4#Pl6m|5srBA;>;w=&UZ!ctZt)ju0yJ4u3CN%dAq>+O+po$DZ( zrG{-VQplVbny6@7n?{l>11(;9uH#T$Q){KDVQofjW0 zoQ(j*13x_NESx_Nb>otMFkp0WD@A|DHlF({XTsf2_*Uzcg>J4?U1D_Q@J>IWKuvB5 zDzW8jU0G*ihXV~Rocs$L!YG@=*KvuZcJDZSW6TM{YLySba^yM$--AkWn8dfX`zk^c z;fB>;8W|`zdzd14_n+zJb$u);ZNcW-4XDeLnMl{q{ti90wdrGtP#FL1=ZwwSupryO zNDO8CW}x+U?$>J@kxz5Fz;^S?ccCWtgtv$*0 zP5oxCK=)q(lV5(H?zUr$Nbx=&eOc?$w#Uz^o(;cR6|0TVZhhch6Uk~-dos%H;a;-6 zz3OT@B=qx*F+`LdH0Xxbh;oJjwMl9Gy1Vi_u?Ea_heeGSlW5;Y?U4!x0Kb zL|P$K6`;UsO!;G65&h0^`7H@8zl0-F%g~WB{@sEe_cWJhdaM(L>(y?Bb zt*KB=Xl!LpV#s+?CxIp-XQ^#&F+Cn>r4AcK);$5Y47h`pX4eQd5_$eJ|L+?X+b9KT 
ze}f*^9o`%~`nqyl7=6PUnKKyi=465_`_s;Fybn7O}Rrqo_KrH#0{rF4_>i1`~ zIiN>X%bV^c3Vin8-X#YU^my^Ta~*Nw3&G_F7YI(f$&5RJz@Q(`0#6w7myu*9yhAs3 z7`g=_N$T!Rdqi)x>4Va3f9x(9k^8|456%B>_bn20_S2OnproSP#ic@VFsSkAIF^Lb-mwC`k_MzNo zY>6_fP;!6%6G5uXQm^1jx6h%&M#FK^Gzxe|XGgLOzS6=i!S1ke^R$=Ph`GcZ2=CIA>?tJ2a3b4b@xf)2OqV{6|#kW;m zG#ZL-Wj6Jm9@OaH^}M7Z)*hg8G%RSfXlflKzw`kuTmjs}$3|)}<2-(Zpd=6J`J}%$ zEq4GR#xIq9LL1pDeBCmV<-nS-Yb7myXra;|s28Fe@fD6d*crvA9Z=0yEkh6gK$D6{gnp5L~15dzD#E%FF#KHvF=u;Q%_=1hI9;GU{2C_f{*oxFucJKN4n z%4fYo#V(eHsDaxJTQ9+6Mjn zr7^{_m^xm1zKQCZvTY|0_j&TTJ#ydNM;gCgBd10_m^??`-BS$Rs;PC#=oJMt2MNvJ z2Tm>+=Q%xT$K6kUxJV{9N@3$f*3}oWp{vl+#xCv>h32t?8hOg~uQlZ*(fN(31r3#H z(skO1Y@=zV`V&4pFQA3VqAlVfMKg%Xaiizwlh8ZOz7T=zoQRSSiMDqh4% z-}zD@Fcwm;R_b^Vj{ZywPX}Kh#9UKoq;m z-O3VHc_l&FdkH`8jVfgM#M550yPl}-f7V=pRgo4|0n)3thLH+zJGjf3+URcafhayE zD2wQ_r4kFfk&UlBa%+f_+eNa=BbLS`LJ|IcirLi%U-CD*OUN1>myPX zG~N5?-Ry2`wI~EN8JS4}U3fqC{40na6~Em>eZ%Lve>6QmDyMnAxePUyT&KO&bn?2G zzKP#YqQtjldU%leC5*l=kI22E7$Bi_hm9DM2hatu>0uiK?QF_gvBtJbjtmdaYvZ{ZL%nVc7nR99VeL^GnWgKj!S^GXBVBJ+QeyYs}5w|^Glf_<_ zwjN;UN@Il@T+ezSUS~ycH}ypy&(^hSiXxD2dxu&t@s3G#g?%p))p~O;8m#YDjhdL; z_go&<<9$kt>9|3m3+`nehz-AgOPMts^2X9u#v5lHMw^1gj>KN_=oYekm#_6Eq5;8?RS zUv=Z$wQJo>;zq4V0sF6!tcsXREV$iR#~PI+&PJ=D`N2=UiRVWhH|S>OWEL{(7Co9A zC7d7(G4RwWrw1thX65B~tDR7{6y*Ve-vs$Yl}vU_6sN33xU~897G7TBnG(zkzAa=4 z`5U1NV|AjqN6`!GLpu}imL-rYbIL+oEDpUnw@)L5`;d6+it`NKpU@R~uJGVBS!^I1 zea6EbG>NuK=+EHk{&}+V6Y2YD^BtP{aF^5{wUcx7U=E?SyFh(Vp7k;XHo z(UKq$f)#)uJ|7-zh_>8>4Jc>%Pq0|#4mFZ_K${p>>tLklRLelW|4MLRn z?N02*r5}E^P+|sf#pYqap|*DwW0l?wr;g&du%bCmyIooyk>fa%6(qc(_$XHuE?z0U z`LhvQ4T5$0uQv}mK_iMkfI~9>vi|5tR`>Dx(W?Og=JxEmOF`~C%GVH=M?hs{J7*$^ z1RKp!ZG$x&H#!c)j6KsgxM(b2{RY7U%2Qk%`hL0P_Sg5Fl~D(z5BILjfn$`HaYx07 zM~;OpxJdBdp%^T(hqb2FJ?%D@g~#CM4)vxu;WOpFI;1J9ufGOu+`#^Z>jyz4&hyL7 zn)XF%tf5N(X=Sma@FSm?os1coP6=VA(txrtBzXBqC*E$0kLqzG65~__}2tGh8K|V%*q6t)CQ0V1hCtLxBdqjFkE%@&%I5 zua8UTcKodA69OD^Hu%qVXZiiZY!M9e)o8lGINx<%#n3|@%$h4z+$?%ttS_~g;B+$i zA!SC4F#9wVM_3xZxLERn`8CwJOnq7bc@Le*kS8mDfH=pjsiKj8L186Dvh(Uy&`m^} 
z!k(m~^(Yp;I{4S*or^@SpX|TaC9DT>oKuf;WkjGIX1My$$Vz?7j~9T(|bVwtR*w!doPy`>5elTXPYrr-pXN-GO0w1dZmTxh}c(5GvJ>`}h{ASF3ycdV7SxYdm>w%eVe!SdIW|h zsRa~v(A+wczUpthMEV3elY}cBp*W^J&|%-?4O{g&wnN~*GC+;Hq`G-$6 z#?Zx&{HwcaI5^+?RFk#im^@oaSWK-USIBhb@v&A8tf3)ycyi7~wf>xbaDF9~zB?T?zas0iQ+EY>Sto$+SjTzV~Bm~KHK zcx~6`FFUC&{64OY;3Rv$TDbbrh7Ght1Un9G6n$Ac#E!Vj)m%rz^LCtB#W})Zpu?GWuj?A$XkTRDjYZ zn8}`HDp5yfG@Lkx!{+p_uBO0b6{(X+?ENOf64+Ejd1zu@Rh#{Cl5EmsE+J0iS1u!= zVGu(q<+)3pZ4o0g$v4Z#q0Qs95X7Z?F396(veG_7IrFYmnVq`jy-wA`TU{v!LM=mI zMIDz_*VS;%^pL)0i_3Dezvrt9Sz?d{+s7~A!bvdiUB`*d-H^X}utg3GLc4yFp|eM+ z7(9`-9i0ZZF$X#={*Qv>|8;pd0+jUaZX}I|bOmOZ=Sm*9?lVRhnAZvp48%r`>hhPm zpa`)$O}7Td>vD0}b%+Nc0oVrwxYc3LJv9j7uq+)y956mc(P7~y6xK4bPP83b!?0Ac6`MHEWbZ=NsV4=UkQ z?67){;Hq%zXa5<%RzcuQb$Tw&1RVj%st$Gl z;sb}7Jx77x-cImu=$OlMW?IF6r64|fV z3h?d<`aj|aQ3po~*)MDPb=8x1airLpf zrw#sRZTAk)FT?|i+xL8(XJAHI4`5!p zIO`TFyYkS2z*yw&o8?RW`j8f9v17M4YFWeuv%!G?4!e$Lm$}H-IhcaX;gk-SImFnih~aKnfd%( zbMaGQz;xQs9fw`37-P)mz{Ps1ysmkJa}_}Fe7*wy2v-R=eW>=nJ~?^ONOzC6Q3#Q#C)x$4}+%OrDX;}`V$DOi=LIr7~YJ8Qm zVrwz>JHQUt>#Z};z#8OtKkWy7E54Ba5|@@z3_(7okfyNA!M}*i$%&icu@)9|+X;Yn zb{Vc7?w23$$3KULPvt1`V+Razw>8uE*mH@k5yW6To=QAU=**TlxE2A>I*~xH1q8LC zx;Jn)4bmR~=O<^6lY}e-ueO6;Gz$CJrB_ILssc+M=7NCBRM%6tjN@NT?TxyL!`p=| zaWH#GVXfG{ty;QAg;Wy&6I|AuTHK&dnssm4HAMCy|GaN!Oks3As9np}>0xJb2X$~s zN4?fL138+Pbb5IVn^PwQE}>$&cC_7IHgAcou6)5OeG4(55G-d9GFeu7j{vN7TH4*K z0iXDgw!5J=<%qSy!*IU2L~YIKM!wqPJ;2Y(+XFZhzGrz>AqY%_k;0jGjaJS3cJ@^# zKLaa{M3%2rf3^OgskK38m<8-P)8a4O@QR)j#}B*qm(`0@je?6o9J>)IFTrad0?+`6 zK%O8QdH-e~=11|co%O&fdYU@+42VWU6C+gq`zLT#5Rj>$1N!r`F#Jvh{D`P8%0r^q zoHY)+j9)HspRVv@AY)QO#q(2U?JP@aW3}djAywxBE$& zEVu$k9v4u!d790pkN1~b)p1v_OS22$)k_v+|9p~ZN%{lSOo<2gb-OTNHB`!tj{Q~jxUq|ie;yfennkI0+ zOAd~{@8W$2PCGOw&nSN)Bp+HP^ScNvT_`VL)9?GZxc zV|Vo^DeVCs#2d}$mRHTtK8P_jT%F9U^wk^yf86_2D1VcQM-zCA;x}B=U-G2pMY=kA z1{S4BE|0ze0$#x3!s56822)Z_+f@&DC-AtKf}hW|-OslLu>%ZdZ(vC@0?D3?^6a&5 zBn1iVqi0i`hs>|lOdC;@@8v{`!-;Q5vLpWh)9ggcr7Zu@dDy??&ONzulXL%Rlb&ZX 
z1Aq(@a;n~v+D};B2%_NITKI^@Wc8=r<#YS|V%$9sjU3R?Jcoke^u{?WmSVbJe?ZNj zJwFRpf0{o;_yeamyRFJs-hbrC@|8evXn(;=zh#Rz$eNY;W!R|VaBeL>AHra{;!EX2h;59mL5>ONfJ%CKRf}bw^=+o> z4WQJ`TKu(NfHNu;Hqq!E4Z(<@#8@>aq$S+tdUQh0e8x1UROh~0c$|=R$DlG9<9n8? zlr*Ar`D?Rk!$!!(lss7~5JOEz!X!16CmeupL^Z-Ze~FM;{lR~Akrsqy3-j64XO4w? zNb7tZ7tp~sZRwmj&LL}I=^^Tgp9(F1=DJTCQ_%Ryb=qeMvh_p;VI@!ELE&UJQr8|N zo6$VP+C}o{fJOm{Cav$z4`Bkn5>CK|4CyZ@b+>9EPPar7IVU#FZoJRa@H)F#R7 zG0?t1S4;a_V8&(invZexHbErw#=1NoO09%yd9$96ymj+lI?YT5HP^N5L&2F9DZwdO zGsM9Xp*=f5BtIU-08y(rkjOQ9)n+YmJ3D|K1*nOX3{-4inV>_5FyWO^9SWIaj}J|4 zw@t)p=tJd-|8QU)GDsZ7&mxs|UY1+IryuZ4p2vQ<@~O{x>GkQmw|BpvJFDH&c#i1n zm`w4xVChr!9-2kKRA*w2x(BCpt}18Dd-K)6b2Ljg)qZuq+d98=sKj*Zu8LOcI_!t&z503e9v~}tI@c>U}do8 ztoJ{^$4kG2wQuSe-Mz*1S3`AZ9yU(QLY2KMF}rRS2GtUqff|9<>!QeU59APlov0C0 zHMI;gQW>2^5X~eh`j28(#CAn}4=)d8xiS-_Q)fqo(XSa9bn4yMyenZ-pbk*3)Nszta29tFm}V>VDnbE*wZ;OA-q zCK{Kdg5+D$mruK~1kE7s1i2UcZ|G249`6tF)I0~@#|JUAg^}sYbL8R2(;5tdrhdrj zn1dYGWjXRnB`v66e~@xJ`T|gPi9@dsxF6QI$D@5aG<>wq1t6cR1~KmSsidu`-c1 z)E$0=e*&Qv=MU>e38po}Hpw@M6T4m47x8w-BO9ajU+uOkVyL62;i_r2|7wG64bK|` zPfw||lX@b{-L;=CBu^a`ZkB-5^t28W^~>eo z?p3M%ddQ)BFKjxzGWHL6;@ed}KhsKiSU8Cht{wUE)zonKb1xQ1@rD=SPtJmG^>5U@ z)y15sUB582!96WA+%@T36RI0&7ud|xNtjjNq_b37RGkM`5XV5{tY*RQR0aCri&GXj z(Z2f&Vt4x%(@%&y$MkIT_qbJX)yNHz^wK!9weJ&*L8=_wK7(Q=(I^#EeBBf3}WU z$_5UXluqi!cLK}RVH&Mio8#Hv=+e91H`@hu;hg;Dwa@e_ymEWTth<+6 zj+VXnp{;g@mF#MYc-anzD4<~R0884pFvzd7$^5ZvZ_TYh7SK`1l^b|!cj_8`*gc2# z4E@8F=kB=HU(4xOjbGEDTkG@f$c{7+4Ly|?tb!x#4J=@SwqcXJ(FagC!p@qcYe#+8e0leJP zd;Cp~jmhxv*k8`?eCrXQu_USZE!Twiz2aw@Z0eD2-aDoUkAT_)x5OX#oOP+2hX9>d z(Mws(;EUf;Q0=O8m}K9|epn1a-P_PD$Qq?KW^NPE>iF`@>bQfMsFZjq2K3mn;;OEF zS#w`rhmIQOm~EB9uGw0CL+aF@#&C|Nk47vOM|#>+OA-eqC%MX;yOhj;oIpe`OF4q=@_fXrnuPhp@$Yr z{dVm!!aG0V6iZJFae)(AkxH@PEj@~*fnWK0`SI73IA}md%tsj-^UHeke?MzCF2UIU zawF}AU3L0jmGWxi@jJG=fXK`e+52T;d$><+Zv>4g`XKAJW?*T1kn~==yx5M$wgAG2 zM$&>!`SDz+mQIBJ75(ke*0NAI&$>40-=}k4ymiHA{stV+fa65(e%9wQCPC4|bHD?H zJb})_C%wtrXkE4#XJidOb*fz`%OP_ttjHSVH}AHVZ!#99+!HLAfPe5~ldk>Ndv?eu 
z$-)3xir1mSMFhz=%@o&C!mLMLA!Y_L{ ziV>;F|R_kpKXca+5`HRixs{+&s8`sz}42R0kzOaEl`KM z_@jQ^X8b^G%VHc+kGfrL6N#kucDtDK#W2Dp^EBgU&{TkTE?56f0n2+DO{lZ&LBuC# z?P3*X|8j)_yzE5PW6B)TgSZR5`FoAd59>m^3UhPxSxgBmE4&{;ETtQkwjG9SZ>NO5 z+WLr(=3{bg6y4B85_f)jpI(&_zHj^FQm)=7So)Z}gj@aO$)CqbpuBQd9{nKzxx zHr8!r%3x9F#JZv`vLEWc{0!hN%PMg*WoR8fG>a-3Q8IcYpAZW?e2QBxkl<`apluzn#?-S_F zN|bpyksyhkm_uy~)!!gzjw5E26VpqP!*q*tn($zBbhoHL9BGMIleyaP>KEqjHq{L$ zL@p*of1e80knd&hL1W8Yyt0?a9QiYbo;#?Ye0cdJF3Z^~G*NYqG_ z8)Jw53hm+)9;ctKVYX~J)q9?=h$PKy!SowbjoGSAV;XZ87&CIq=hLSy2LSy#yH$e> zv<_-MJt_26vil19qhYDFsGWV%-`Q4Z;HN^0MzFf@o{aXL$c~(CiQVpZiJ?Kdw_e)m zy>p1=mYTjx&%vL_OC`^ukCNqdRiW^H+vwL*nWZMgNMF&7NY>}W-}V6t@po&%G|K(( zm0qRRueQI7;R?M9UZlF>Kjcj#YpvIH^3P7jL~&sRy}%IN-K?2SyTelZsG4q{%aq`# z$m>#ZsI!sdo#djEqzxJ*AO4rf@P8t+ea#E};e=n+Q@rh48PuHOt$W$;q#o2iwu!%yZwhN7=b0{;#e zNN~p2=qGBbB)Y16tP_pP;pk?*NpLlk-DY<`W>KnrOK!aYG?YVHK}*@qFOogf9u_OE zUn9dlHgvuvl60@t8K}8hJYAUsR}e!Of^y zwBP;F+9fOVTHK|Vyav)FMg~=0lq3lfs}PA8bh`gx0r-|y-1c)uC;|mTCBjf|03~nS z%roi|zwO{;TngTXL`ZshVrivYhdGb29&9CPa5Qpq%eRy8r|RhCM7_JK9{1rGm{a~d zg(1v}(fU^dHPNia>T*An#99?d>h?u^s_w!lNyq}Rd|Bp5+;B_HM2{UF+{ExmNC04J zoJls$hG9BdbQa*eS+Qu^yEitJHSgVSPDwK}=3S$q9N$6r95HL1B(7$b*3&|4Jf)g6 zMRPGcoZ5RB7(uRuwo(Kv&MTxKodbG}iu|16yO*7BABr1});Q@EhXT7z!mP5Kb%?jl zQ@%3Id8J+ye8MOZO)rb_GQ?!pHAY(zBCQQCBO{q$*1aWHmlj4jE$#NYNJ7U5(h1=8 zH&K!IS#qYYlio&>ovTt0R=%8Qda^>rE)Th#v>cOdn?6X`*0l|JyiiE@^J^W;4tt4n zh+jf_K0w8p6Ew_ewc%U?hvovaBIfl}HZUvF-XLDdlfKS;!wK@4)7WzU&iOaU5~Wii ziyxad-l<)G(`)yWgb}Ni1|m1AOJ8EyRCE6quvRxCA{Tw4fVKKZt~4s~eK0T8Z}8Hf z94DXK=G#|6jy7x#-`b}CtCR{g?iXB+F!Lwz3GqePW}JBY`nA?ZLPaN@GKj>YGH4KK zIujSvNCDt^#SPGWQQF~^2_oZUvFrGMA>Rc%Ho|A52^_ck-fAmKJ{2o=u+B2 z*1d)V94MS=<4;o3V@pV)Z7oC^8IG1n8m#;L(W!MrvF-k59MxA^e~obV^8#q|{-c zdC(k7{21Vw-0!oLb(~NUFOG9Gl$LTF?CY>hZT^v_TW@rUbJ<$FcP=gEHB$;mFfhNs z@Kho4cIwS0a%9S$fc(-e;HaW~qR?F)bnbHSbysRuSHcch$m3Dy@6D+28JWunc*r_q zyw_j$M2rSup3L{jF&}k;iArpv>iaq?-xh&AwnYffUFqg5u8a6FL4|J1(ep_%d zP*jczZ_0|j!LYD_FEaHWTkt{}9oCQ-dUVYGFYQ~qUHkOR5}uLc8C#yG%S$Jrjk_!` 
z2^w!)QQf750Nl$Gwoa9433jB$0`C*Kmm%|^%st$!wmvL)<$N?2&m-@0FiX_5&7`X@ zkbY=l%Imz)e}S@B$ggtFH>>X}>0by#!)AwVL?re9WXBJSuq$zC#vrySrBybS zW!O#8xX#Dv-|yO1TK=FD6gw4Jat!n_E+;{@szCJDz55iB3McnO;L!t3-IZJGqh=`L zBKjOT0?ICdhxl{8cw0N;IbYFK#yh!FBJ#OTyH$sp$pdM-x@gF zI_RWgC%S*rvLbFTN$i6>Ev44lY-)ciG3r6aSN6%={=)R#^ReCj)7qG)7;cD9al5X_ zW#cD|(C;O_dGoc8aLNtxquPmCOSGBZ=PSAjRav8vF`FbqWgz-L-S;LN{J4Mc?&~6- z1>ZVTNb_1A5|C4of1)fzqAxLkdrmR;%lhCu3z5FpYW35?J23kfON7Rf1k-Y%!vYl% z!RrU@SYX!a%i;#ey;R{OnTa^9+()X`n9^o|{JJ$(7|qe!{&&xKlr- z%J;}FLRVb-KcNVQ4)cX2ZnFO@iI2^V zR?jBgetdifk^yv!UQ=kOe+*;)8mOhJdvJ}9le^@+*Aue&9M#;jNEbqBmP|%@eN^H- z_{WMopPxDr%u-Zk&^=7k-Z}RUb)njDIO&D@sW~Mr;fP8?`6{PP`3az03E){Ft8Z}Q zMW;UviWVcxZa9v3`67<3^KaKUuY$YlFO#_Fsfe~8TbFaIY`ZgswX?5Le{s+K<22iE zr2^BGa2iegMGZv}y`;k^wvRUQ3dGl8Ub*Q(aiGrh{{IMj>!3KIFa3LPcL~8QxCeKK zK=1&Ay9EXf7Tklo6I_D(puyeU26sYm*ZhX>*4A6K^=@t5e`mU<@0>n;@9Fb=o}92V z;P?WL0(vI?r%W^oCGp*g0pgW)!YE0B__q2BdzVWz@#9QTj4Kr-APCf6#T4cqUX!P! z^R;udJ^J;I#b3T6x$*=U<<(;_)TYagqL|$TB0O%1lp}vCJ!!i;8J~p2yya5Qmj?&Q zFH8J0_(n_2w9|!HUnc)6>`EPM9+V+x2wP-D>{W)hKkJr;3CcT!6&LrLd7&N)84(X` z+T6Ho{T{)vr7^+|PehQ`6uGu`9EoHcfK5egjp`T*B_uo9bGVw~vf-Jx7x6KJo+XAT zr-q~F91u(A5m@)0nSMLTELe>`?6HntE`6+?%0-oq&3`Cc(~rUViiwTeB4^xGIelu(9k>iNf_-cs1nC%CTP)#&5F|DCmsY(0eYriGi(`0YVQmbYJ|t zh>{#ipXmMh6Y8un%r)bf>n)*8_nn=f-k>X7%WJA;IUXYb_7@V{c% zq_F7^SGio37c)M0Y_>PK*!g4sr|iKvSHY2A!^3Uuu=~-t^4<^*A8<@Mnt7jVtrswD zDL9;RN9^0lwHErd_3+qNWY)mF17MNnIp_jT2eZ{f)iP+jhezeLiBX_&NCRoRF0an-qyG4%KKmGu^g$O=CZ4( z=buTQ>IZQ%fo%3U#z}TSSA(@*#Te4CQJImbCzIXc*^_$R zc-3HN>|C{UU!E)f3hFOEct=I=e0kO}E-T~d>)Z6c?TJ*Xe1&6zhtO<*;&EL3hV)Qz z=x=$}V@k@|{TV_WlvL)X{MW2{3@obFnC{O$InyXmRcS`DO4ks6(G>=$;|qu~yW6Vg zBdp;j6?qHNm865?gVm%DLm+|NuQtEc7k0n44dP6)c&+O{9L{dt7#|_yz?pDH)GY*Y zrxgD|jzghbrhgdEP&FTq0PwwhjIm7*`|RBXgM@D}o!Z)`f`or{a=tEL;JFnb|0r4q zIPDS5E|~8Z8dRuJ)VsR%h%{ITKJOXo)TVIAoDy6>T$xA8wR2q{b0TXJO!soB&S>=p zRXy=A6FLKFOp)RIU~a5xIinp)nq>FVo$@HcHWM#ckP?nZJ{EkqjMhDV;_AZX7=d-@ zQbgaO8lV#%?ULuf1lZQrV%Ma;@B_=03jV#Bfln*B8Url{v?}zvBx`&tSETyHu1vGw 
z8#PjTUAee$*eqeuFQ&b24M{SfWH=G(oH!pduS zYW}mq3oaY&=dl%gcO6f#c(5leZUfRNOlLU#l!lDU9l4=NKeLLT>WNfGgp5lRY3oMK zsuY);{lY@#S)KjoPa!%AsYFV*=)=-MgEpaVsqjcrh=pKkXX9@(bdgyftQoD;}*eq7EFAPD|>g zJT|I&8a#@c#1>P8Q+1N{fEaBJ0%N{nRa25U3JmCcufrwegjTw80-;cP#X+6~eP_=B%FxQur5xZiiaV z8pr5s;tRHanlb@(p|fxN2i>?mB?X$)#oYU?l*Vsgh;q02EU4NRqdzquywBi_5?C>} zW~>ORh{076xRt2ji`wO~EzuC6rr~t(`kFB8)A~%B4jceTXcZiJS&e`B;CJwF&#E|= z3;fs5%7{Gc#mM&AQF0@d-az&FAs3rbLhH#Ut5$TdiB=`ezdhFnH4yOX-#!(BANpa+ z{mm_bUx~U@b0(`?qxJDKU10iMjacQuZnYE*52aY zP|`WPDCIxCN-Vn4CN}ns(yr%iUKhnR4^`SS0nYac5YU2`G3+L$EG*Z_w_UXXjncE| zKYp;9aCKxpO*TqAbv-yJ@X~~gL`Ip93|Cy2AMcTYsJ8HzV`PLapk7kv6 z<}HRvRAzQ3a&r}+;-|*7TWLwfzK8)caiJxn2gUhlu}k_&DrpgO(W3FecF*I2#-y9=d4*_-UF0uY-n&K#j?wOGkyp%bPrlBz7!r zZd9#8qg2s3?{NwZ^;n*0jMC;(jI0Fel5J=oBQh5Y>LYcR(X06lDS!}Q2eVXIiBZ?e zToylXAJ--smn`E1s#R7NJSxopYU6_Eud9DQ8;*ou)U71ElSw3l`jLz2(-d1!fTxs+ zylnALEm$$cTY*sCM(rwjn;2O&`tl##x7JDvYs&g-2UAR!~ zY;0QkeGh;69~X}J*q>T;VnIXVvUTdUvj}oJStYivTr_opSBVu7GtIsCK|MV5)mbJL zmwjo`?R~HsLbHfQ??&i!BoJFWR6?2mGdfeD_5E|2yd`*%KHwakyk z^AL+8c^@haxz`1vo|mx>A*4}9wn*Ci;t-SITyN7_V-l$o%Y_4kSBxfPnQkCma)OXH z50-Tx;Py)yTjq;TrGb9mha1@{S-EmB;B-M@#1x>nv z7|9m(6ryiH^K>K<#fwL{gdYKnEV}|>yOKosdLMp5{G2~Ab7!N+Aio13gwoY>&_j#=5MF3E%q_!&C^>D(;$A6*9dM9I}d=A~mS z0)P+%G&PIwGyQ1`R|z4_6?D4NoiP3y*!+%E@?-m>K@ru=j z#lN^GKS^AE#-x(=?zg#}aS8`24qp4S!=9@Mm(g-7im1nEOO-|AC%=LBRpU^zP8Ya*@FE}rwf5F5nqs% zai|i*zkSGj2Unchg~$)ET!3s5q84MZbOV-KF!!LMGV8pEc0gZw=DpGGRXi-1O{zU- z;_m6n#WlGG zFy)GU-xa5gX~G12a6fq@yS!7J!b&1^Q2_1V)8kB*g9qIEpU?I;wAIyo_&#k*^6U@1 z#q|JsQ>V}EGUP7OP*C$;q!^4Re7vtW^Kvk#0mZKnhIl29(B zik!F*+5@zjMI_oO2719&x3kt1JIDO6#9Ad|CQ7gwPUNe6&`z3HD zAr$crF+}X^!P@oI{Rqj9wuzDoBXa}B+w<$R3Ha;pKIMt9qA_<-bZFp##oAgIh8X{c#aK@&J|jA{Kk@cexWA)hggKLX3F zBZ9r+7)F#4Z)k1kbSO8dUc2~8JjPU0+4Zy#3*h^{W|SCik4metqH?TGj>Lf0R`+yk zMG&LsajMPS%p}5Ms{)w=)3By$4pH3Z*#vXW(m&ZuOQUVmj3ls{Pz$>A3pr`{;XdrX z2ZOh7gJ0}}Qy16tl{fXeW8_RdhW8U=JIinhf8SQ6gO6=0HJt+5kI6fWIbPv}q9z(M z)BzL*0adp-9G4PaVTEJZ1ys(GVT+{W5IEvPa~Wr 
zY>01&3?YGQwh>NmXzwqCiAP6l-mIN6#SLoL8iZ%b%{Zt03g$X1jH8-E*x{9p}wD45KDu~IYTysW_&KO`Oe zF!(2}f4b|nxV3#RY|)KxZ)=@pFhdQr)#0}MS&J2!{~E-7^oIO^n3D-%5KCz=n7p3} z^#j%sVSnmB_u~wZXGfnQM~FI5{dBHHVqWOQG6768{nXAIM*&wN68hNR;8y-QMY$OJ zx}6&#r;|6U+KaSH7hY!~sV>;n7(Kn%qI-n{%xUDs>^D$G*Ca)#XX#Ufft4WiHD^IJ z@me4MK~}Ul)1bZ}_6ogORgNV|jy1&z(#M5p2A4#;1Elya@Ff#4&r>q*>Tl6W3AH&7 z(%U=EKu%W?I-)CQ8dVg_RRQwjnoBP5(3x#HLub^MvL3dvJ&`<~N3CI&4Y>x7(uNZ` z(g?r|_=Mj9k|c_nRn|A~5tDnEkx^SOQeJrP-^vgY9Y^t$0zy`^OsPM2N)fLUdFMA{$SVeIR%KY3y@g*+KI^FoR(m*y+Gu!^+<8ICPyYN& zezQJe3$VPiRjaBMjDGVX+sa(oi#na!d+FE2T9ctl-6WB&4tpz5Q4fTaQ(VYE_N%NS z*$@4?s^xaTneUh)r@JIPO$Ayx@>Ku!3(WmPNI3iC<@YPg6J3kc61Qt(n$5EQu)+dQ z#rG8w;|KxVuL9ohx#tEq&88Z{eZOLH%62Pdq;f`N@<;leq2p_E#(95OcTIlf8K>;% zSg$%*$QhE$y%NO69W{jSw!uzBsoKhUy)IF-r|rN}DOeXB*@ax7)s}IIY

6DI8Zn zG}IH>daprSH^Ao?LD=fvfTJ?uL_@E)J~Dhp1`U?bvtL|3`lK^#bjZW}TRR}C>zZzr zHHr+Ydi%T+fgi(-o+Yidkh=q$m^@(LxO zvY=S)Nu2_+8BUP{a9$>@u(#V0L`8-p*P(sFU@q?70bAfE4D1>Mx|x zx(iSb?xaK>U+`1Xx;}75fmI}d+v$yh&gq~>fluk4hR~v%vc6C>uni#jIPO+MhtPF- z23sz8zdPv8Y_>}Cf=k)6&$?=;gJ$12ueC$CtSD;QU+WZIsp7?WtrBt`LLS`LmFsY<$P0i~<;`d3W0YYr$dMc1mSpz8#*O6bP>W)>lwp`VT$F9s z>(r?*a1b>8wk;tq%&a1?m*nrQ)G1-3O#IJB*R$Og{oN03NNQ~$5?Xxu%e~2sn?PL$ z{5#lltJ55US{MRE<4r#QxZsq_%>prpS-OQB;h?=8pU;YUQ^g`ag*r3hwB0q~2q-vL z(+xr7g`?@x&xCjYn=H-4cxDH_ zH`!fsZgK%6Kj#xBMomlv@1uyC7?}6JkA7fqV(};CZ zlYEg`AQPCl zPlvO?e8-7DJj2V`DARN#t_)02JfANH@3aWcVtAN*Mv--FbyEtp!o*BNhVS6)H>1C( zq8sDS4PZCEdeV|jQXKV$>CzC zNnwrGw~h*JgNWtoZyhHwcSij$O6bc(now{Q_Ku$*XM|d|hAVakz%{w+5@zh)9U(RZDXA+V|_#EorF>UM$R^=3q%w2&B1ri0# z?C&0DME(WcnVedlC2@8WggA2kU~SHpATAN`WkoelQ-fJotFFaqr>iq0g_lfI>Qj7f}dtYI~z=B zVS)2um0(Z47iykIXv2?FqrGbN_mX?GR(9d5j1c_&gCz@pDy2^0-a{iwcObWJ-#Z)b z3%2HsTUeLy;Ea5hmd`T%wB=(}*l^Q#A6MLSl~bEq6+J6pfp9R!=X8<|-f`n~EAk|X@X|8CyBG|Yc8JEaYuu-lK` zNrnXF2s|E_ljNauNWE3QY%522Pb&7+J)eSXzQ!bYVTu>UA?rG7Ye)W6I?hxL! 
z{|(P7={I(&W?$D00F5Ma<$GZ2D8F;AA-q;2DFvz>{W6G-6fi)$U{J02=C?{QWTKlT z8(^%akQCofiG?D}I&1+eqPsVamKtJ;f3JJ)U#mKRhBh!JYy{a5#yV^NN=6eoH3VD@ z#b?T!lYNX~iC8f=h(q)}IKkm>63;e@bBaP6ayp-T9bm|BVo{Hhd#r2>5uHu>2O^Au*lH(1yoKSs z{!0~0XhFpY4S(z{iM0q_y;~}E>6aA1;?N3cq3`>O2kYX=7m;JYI)$zL0xj3$k9MSf z*J#uN?8a27_`yr2_MwVQ!?J3q7<3D@^UL@lg%6MUH>G(-^Nin{dmJ3R@%jgZyEt`5 znKHVUGuvs(l$GL1jT&FZQipcGk_FKKV%@$~vO3IRU~Q_AM_!QL7osCLvd$~&_IFmW zW>1_zhT#!@(4kddvqTJPwdTNa;s2FLFYbpg-!5?!prUT14X(0sM+gZA#p&h=VPaQyG*jOVT(YS*TTI{pN5f$`9I; zih&F^V|(*BnEaatei#N&B9l5V6K$#Qwk zOi3~jv5yX@{h!%jJ1IH=A6`7pJcUq(Y`u>zPgzxb|8lwAafa^S9CRMNThQj`TF=F3_bH|wMj_EZ#LSLFGuzZBl@M%b!>aguZUCyS{5Yb{%bG8;NomV& zd)qhY-f{I4K`gC_gVGpN9OXG&#hV(2-`^8eB_RpqyBZj~yVdqbl0l}C#^q1vEn9_{ zdwO}spQL$%q-5ZQofg(+H`IF_g2Hcs3BOs##Sbex@>E`mQsi~XA=HGj~tGh?Sz{y%BP#FywHa_I#pI#fZHv$fLGVf zJiajp$3L6uoLfp3A(ZV;HQ+j_F>wTmU|&i4-Q@Mo;F}ukCSdx?jl#I8KmvaiB z=D{X0u@>U^ry*NAMvdZ$#(qy*r5P^Oc^ufh)zC<++|&2Xn9T`r@JSFTimEGw4qQft z9$_%VzWduTJ$1V1Wfvj1yS1?_`hll`TQg`&7$#mAZ)Ho(N2lr#<-XSHFRHY9tm*f{ zPb(w8gs7XjroA$tkqL-I__S8<_;Xm>p61ml;z~`N7R8El;U2{yNro15Nmw9UkR#l3 z_Re3ay|h*PW_+qx&1WtZL3Jd*fK>_Zhxu3o?=V>{NJCwa`z0l{z{t*X{k9f(_IO$j zT~jTs4HD_jBl<-P0`?;il;M;mM9gK9)nMUWnGxt^FxI*Rs$3_9Nt0ud#rt!lae zpE)k~_I$q1oi$q6Y1nN-@eApDy0(99T<5CtWPFq4WwV=nqCf>BaK;Vph`;t&OMh*g zYLcnp-pV*@+Eigf)k7C1b%RGnwThi+^gTjcbB$9r@YlqmScf?|NU*c{a(4 z;WS_!E82&U**2BJwmW)v;Q9VFTWoT6*qE?k$6mIf7SFPCr<4aj}Q zRNDrpo4RwJxc{@`dR=Ymg))x9()?uW>}zJf*=fi!_U8EB-~BxsRS@c5--3K4MkD$c ze$kJ}>1n5LWaxVeIi9@w2J*uznHt2sa-x7FQEe4mV{p6@KiodJv~AoRj-DSFj2!(Z zy~cbcan1VvrUvGdndpK6E4~Mi-sNn$az*m?SvxHEcG5nxXZ1Sq<g}qLTblZFC}w0AhpnRZ>@;F>TG~YMGg%oW!+v;8KRwR-^6yAg_m9eaX%;`} zXsnukracvKdU=fU`V?i!s~_KQ`JLBP7x%6?qFTm`SlPNnyqdbIv)x9)r<9G^>G9Ai z`wfZIC)ARwRR!{1`7}|q#9)zQ@z>?C6x}4^G5)NDuHSOyeS0(K7Rs@4=Q4&F!v@~5 z(e~Li)+6PJ3E5V7$7gh}Z9Mg}hH$4~CD9lAo4+kxX+!%8&Y!Z~@Me}Y8eu4o{kALX zEX#X7v~ov1JG&|i&FhJ76L86%8zP{Vj7Ej^Y0)WL5pZT>8zs1I6q1YqA1cb@ul6qR zMQ>|ki^g@Kme^J>blqwVTit7GL2YeFcbgJ$5`k?Yx!r+nwy-Fe-6+|ck304#@b2zu 
zo$GW)8nY0ywt8oGgGm>@`j3@7tn)^i+D}r97xV&2hj^BMr{kI~NJg9V3Bpr%4x>8V#WeTqeu3|uWo(+o-=|)yagwB)} zi?7ag#)9?oG8?N8rl)a!o>Hc#m!p<)rXeEd4|1nIoUJe7@E5M&M$VgFMzKT+^OOLL zlBEWOS8T0AF-tBnx})_Ph(*OwrL`Pj1p_ugXE@YXPBj?2{%bYB*9C1NW_P6ggQM}- z((ee)q!x_4cxw9|V%^_%sC}5iN3>#ZjR0e`Whx)TA?kf$zImJ`k>f z`#Hyshx#i(X|^dc`z72gqF7?j=Z>3nm(4n`p*#q=?0!S(mN)gPICfSl{Eb;~di>1+ zQVc0FU^IpyDKNw?fn|*1TU!1?kg@tq-^~r3# zhCv!eUqC!$cKNL%GFDW?(|Vos8Ab&}FAr!;ZL#caMtYc>;6M&5U_ougs4Tz=`-{~) zp2u%Fe_~X*yAFngNtwr6@qZ3IJBBPLg>a0njCglzgL}EMd?%KqI-b*oni|QGkB&cW zz&4wWXOJHr_wJp(KY)q18LWT+&NYJ4^xGM;p)#6Xdw>BXk4FZoxiMo}t^&j}|NHi# zwo~`ikkk}rJ!n8L? zi3V9Ll-UNr>NI=}X?vJMBOLr&x5O4aPqevH7exz3&EeQxvPd<6I#Kd%|u*oW!&cFdL2N^Q->Tjmp}qNY2a??4^2l9f@KPVuKmhl$T+g%~R3kF&Q; zYD}v8&y~Gh-TyX>*)S`JgR{!xAj0Y})J+daZ1c>d4FAVig2zm%T+TcvPTEUZIY^TI`;~ie+y;*m-?#@(b4hH2vJx9)c>>-?xp` zYt)TEWzG#KPC+}?qmQO%qX{Zz+B2`CDt(2@z<)v2ggL>TP;tKziF%oO)c<$Im6HJ= z7vT6Cz2XmWP*BSi3R&Inc%z`#G5kzb|Fm#X;uXVFd zL&#nCN;-J)jo>deN3wB00PVWCYyT4__H5$%CL?L>#;{jigQ76f=$E4-ub`-o#z)A; zE8&QYn9$>NQ`pb3H{tppD9FRtHS}Xvo*-m-0_>LVo=nkEb^PZI{&fQ-OH>PPp{M{2 zuJhj+=-$5Rw_SJR<+SL9mWRtSS9j?M%x>L^G#786M|V{nIjI|E%foELC=IoedFd=*n(ge{j@T zGdd4^hrY-Ea-?GPvWX;?#7?&% z{i0@6<3VKHMQyB4#R?RUSpM$!q0Ic7^uC!A=(SY=un=Aju2vmU<*)G(c%y`r3r>eC z6tO5AMvwDkBV7T>%`hU-W`ip_i1 zpZQtk`@{GaY&I34a4ehI&_cd8i^tOA?Jm7oi~DqaR035qoM_$VBQ(s_M4(z#eOX?T zpL7xEglh~sRZw@HP?>S)+uFN7T&XRefpJEc390CjXRg}$QS`^Ju*akS;3)3JLmIjK ztORG%11*N9W}f%?I#IN6`Vd%5{xW{ykK5_ujB$#J=x6o=Bw$Yhk$>Jclwe%Jo6(qk za3Q=NR`IR--p_1^bK3SAUu@3fc9L~dVReEkdMC7B_vU{(56va7=k5h0YYf0M6SCm# zSW(A+Pa9%m?x1u^ZFM;}X3{f{!E$9Nq{8Z7yC&trchHuyV<28|LydY*&o^dVi+7d1 z`2>nw(@xO;3t;JYC`D1m=2jaSG6?;iP*0Z6{Z<$=@Qk2;2hHfd2XOzUvp&P_bLY4p5(;Y+`|`Y_=hlTu0Xawu2+mWLF}Z{MEqal9`#(9n9Cxm*qo z);7=<18-)SYc|;UgtfG*d}?A^w=3}m?=IMJmwTPISlMBJ)?MtlZ44TI+JnL|POg32 z>A zf>3FK)J2tZa(OU_J~Aa)hI>KubA>?9yM;Gv98@LV6jVDnW^NIrlnYP^``)8EyzujU zwJ!l$%^$2I@5Z6;6NMIY9M8C?ky-}*QU{;Jn zLeJ#EB##~uIflRUNhuQLgjS|!DAE4O)`m5r5@v#=jf`zMyT;x!T?EJt6kJ5VSqhi~ 
zJASVUsr9Yi9tn)|*1_V4J&LsgV&-@CRMYDX!Dx#3ypcs9KWlm6Nlz0byw~i#AP~fs zxsv4dE7nbtlfyTl$>aBH2P3!SEAIm#BN;O5RBy#+D3W=@H>lh2tOpfgk`8-Ag4V*{ z_lbGu@g0;?@dr1xxi7b);k5NTcGzoY;t#MZ6wgu(%VPEFGhc3~DRSS9Wp4o^3-jvL zh&a|Bmo>!$eN6#Ha{CE1P;@;z_&q^PqK0Ujren1X7OTDYnB$@9lN6=U<3B=UemO6H zt(d*3J|5H?TD%tej1&EZt9a{wg%{!tlh9e@U+z@&Vx z{h$%7(`-a}=-Fb=ifL>SXwmofoI=$`wOa!sQC)XHb3{2gG>EkQ6W)w37S*C8Jh^b{ zEQlr3dV4HzB!6^HxK_;qsA>cIU~#G9KtzC?&#lNW!0EWNAoA;6nL1XdkECgj5X-tH ztyrM9*wH5yRJ|IoX1{{P!Uzmj;PYi_XT_3dk7;n{sKKeT#B}r?MZ`yFD=3tdfWX|9 zn0;ONd(X0#SvMsMmH&@~{764h?9B()twINX4P*qo$WP!OB`H=6yX(-KJIvGnA^qX; z-|N{wrqwiYhMtvGidwR)$_{Kqm^ZXWAa!~RTuse*Lg;1fru4&@G6F#Q<$^?2P5bnd z;-=Y9#lZ%YbCmObD)~%7_c@vhFp#%NeVXVjy#~OlU4Z%FggZQkUI}iaV|sF zPTjh>-jSO{NwoU2{{40iu#9q?i&mM{^Df>hzE|>u*}0%mc%l{?8OW*T^Ty7jlvVC7 z`}v5SbE`noYek33Mkey z;j&f-V~&Hm0jEB8(SRYaWkdwYh3v%@O3~3g52-pn6ncAa-s>&bHil!y>biUOG3Vj4 z>bF{7NrCX=cF7%qnhyc;@H99SqGYQ#Rr+Xei^*6x;P&v{f}mnpaZ2Y;Sv$@-Sab=@ zZuXm}z#6|y$SNHWrkkQ75SyWmkWVN&wy5KH3!s25P6$%cS7B7B${d0rM6pBK9ic!n zK1UiV5+jR%`E?Cl%ZV`P1B#{J_)E;M>fa~5*6}E|*qs^t0Up$?Qz2}HkOgM(ccPbX zJ{E|Z^*ozu>5GAFiet(stSyiLhIpfmIVEhqM_gWRzT21XtPC%{qI z_3u#l8Y=p!-e#A(r^4rScORc;Ad)dy2t%zOO1?A=&!iEjy_bHvUTE?k^nw4CWE*DF zt~pOo{-sNerHJ9(P5w#V682UkVFluUkazwpMi0O&QPEizjf_0 z)CRhzhd^HD>ZeKaK5q_s~92D!R)yx(Hz`t%{ z8OPB7K|K$MhkdrhJf{9L4!$i#?n(&?Y;Vj_fxz{>J|x|(^sJ>6BOQeLdxDP8J=f>8 z<5=%WB)$LDXYH|c|Dz~T!E%ZeZ!Irl7UgTh6bZ%x@G!zS{=zCg<9*=EB@oBoq%uQz zA^Cu%U=JXE6m+I}!7GTB$ci zzc*H%x;OWd5m(P)U8J&fUB2PT&Q;o>u!T?_ILJlYK)UN4*$T60vcQr;@P>EAziPL} za!pc9+Rh;c&vQHsM-+w$dzckIf8H&%zYG;KOpErIcJC>1E5o@*(}0m2TXM#DRFs?? 
zT+NDrU3VsaMqzD4u?O+4qg&5OFT;P|i|#Isx+QS=9R^nLFs_Bs*yvz`CBVPC#GYMq zOS_^x81h7{R86Mk1P`D~>p*`wL0_TSscUckvHiCO-~qWpXcNN3Jl-pD+LzppvrjtH z3scHzl%;%t*#$E zKei_ae4;|7Iw=NrG&aPdO1ejY1sQ4u-QOH#NKaqS@KO?rfCv%I9&t}%Y5|v$q<6mU z$IN>2T`yOsn7|5GP*W&9OVqL`x>~VJxWf=X=GhjWZ#J8AeB>BU2bJ*VSrn`OTnKBD z)Qdr7{iMdW=3Gwx9mCH~dHlYqoNl4>Fhc9{5y2?(Id6Tg^VU)B;zI zxsNW6w0}fei=E<+2}K)Z)3Aq4@jU{P^ofOD-lKi*K-hriK=h1xz)uhG8gSZ|s2Zyb zK8URi?#B+^pl8{OcevO1eoY~c*8?6G0yvKi4=IctJ#R&R{`Lm)8=}qX8(R%y3Ee|J zfi?AFB_yyD=^xp)cW={c{P+6A{5Y2UpbCmiWr9d2jk^mKCP17aB-__|zewKOSZ*Xr^*?;G;AmcBa&-Px!gs`QK|E+0} zv}w*(Ph`)WlRb_kl>>+_3D~uRDIy;VYrq+9G8?TcYb_&}8b#sHXcO>g` zjwNF5F!^MkeSR8ng%dYR|+sps?X=~(825}X-1CjcI|M9uV))E}@v8t|k9W`vZ zqM#~@#_UH2V>`SBrVn6Bc?r#)LDjz&lVDtWP{q3gb3f=P%xoTWnM=s<-l+UIcc z)tIzoqS=m$#h#OL*J_2w#WOE}v?jC(JKe^0e4}pI`*KR^`#(EH1%`-+%pWF7OUEqP zKgZtpuJm!>EvUZSuLtK0JXN_z`-_O7fBOYuRva5#oTuAb3GR#wL`mn23iz=6MQN&{ z(rga#SAUJD$RA|C$04xU0$q*OEXP(6_(W=%HuSs?ki60rlm=EBX?#fig~1Uy>7D-K z1G#Gm$mOl6<>_F>KYk-ZE_3xN?!m-T&7Low8&1AJ4Xb{C1qEsFR&rvES8IR(w!Bz+ z@(8|Z96yyVQ<~Z>qOAPYK#>tE4Nr|2*B31#cCZE@Wr!_aOlr0kFWg<6Xrc48mhfIj zkWE(I$rK?`BdhRNJXvqtpfeB*_Na;80`UbpY2=w5iR6yKV=w$Kq7)}4IkTq(o$r0z z{pG8T)pi{$=fklmbn=gjiodPeFNAMgIV~i7pd=CA>wo%GGZ%f}^*Kbrg=?%F%)4uC zz59cN@X3X>1HuPzWYmex2A6*YFRLTQKM@M0@OPg5<4a)3kM>P0vDnP~GTs2rlw_wW zkTAU-T(Bwdj}<`O)@DCl$WBcobBg$iBSf0lXN2KZ-4ZUoe(P`WKG6gr)iOu9H~)W1 zr&7oE3eD#yAg7Pq3LvzSAuqTv#6b;lxJz5ONz2EC4zEQ5fgyn62Te$zKuU;5ls0I0 zP!{e=_uvkE|0=Lk4`MIJAo_(tgj|Poxs>ns=Iw5~FFzt@5a0BUs(z^y!~BOZ7flc) z1qt?|`~nyp_7LMK`r!(WaEAn-N8zyg*)h-go(0FGVm-q$VCL$An_%D0q*=@2fLWnv1n^*@4H@_#+bhJ zC%Y#6`F7^D(IzX(jUY8H+%X^SThQ8QkbZWEwC^W;O&{{nLgm0)IEAk`F2=$}FO^!1 z!EXVgmz<)bJs<(+WV@hm*-!M^_pvHU`&wy#sfsrWJ;v^6$JrqisS#%AV}+5`2InG9 zZPq4P@nJe6#ZthZ1Y04*P`(sM4yChL&;Bt7^z#{p6_ruPWKTWqk8KzO9Qina+7|KvpHIb zyD}vkKu!}Le_1a>NxYbWqR6TI-pmpxm~smcQ&nRpbweD_&9Wy`KUu0v=U-gyp~;(bS<^ja~HA9!eBJO<+Y1G zjx{YPS)C1~A&QSMrHJ)+n|zKwncI6SnGCi51!sxHwvX2 zDaM*}l%MhIVfiL}#xW61V>0BRzN#b0m30qK1WFCfIml=?H1@gl2t`!gfK 
zz7pJF&*|@L;~Z|ohZm!_HC4i}0sSDgbq!HKn#XU|9A)@_mHYdzR{rf!{_i?~N@om^ zMn(A%J55H0=nD>iJ9gp%YB!eL4z!~(fGLtV*m%a&)Z7*ajt9c!nIlLrINgjy^$D*N z8Cl8>4Z$1l-vxbuSk_#(r!Mb{tg+-T%k2=3ez`Qu(T~BM2=&)!iTHR;$X%Zz$G`lN z;lH7zMU!4Z(5t|f{Phuz`VO55`6cb3iS>3f7EDU{47UUj?9u+i)*a}`k8Bla3yCSe zEYSw@cwi?uM6;uU%PymJR<~WkFB2%x?7=EvoQ)_CnB9NTmC=O~orSR+x)wbQZ#*uxda3Zba4vQ77@k4IR>9B*tHajac(%DqxbP2CTDs( zbbO4Y+ULMTp7n^KkTb8O8NhGH{F}3v zch`&$-q(`{zpWx$!Tyt(h$Bd3($d7>UK+c8t?vvuoAg&Omza7dPK^M7Gjt)%KIE0~ zQH}DP-joUjFqYN#g3Y1g^ga_NiF?tbMbqRt`JB3c`V3sYLhouerXg}aP83>>xkBv! zRr!5FG1}0F5g54deDlc;-J~jF64NNnvK`Oe4kI*tQoG^8f$H&AL~Q4S8389p*D7Ey zOay9Zh3_KDx4aQ{LvbVS&Oc^|VQ#nysb8s@fg{e3FWfOK$eTY2=)!qRc~(mZfs&a6 zw>_<&s~mlWtxg6gD9vNy9O6n2I4OsVn*LaoS1@lV^fevR)S&N6L|H;XXSL0kCYXJc zFq_|zE6dJumn_GE`FF>e%fBdNrkZ+(Erm{aB-4nyJK{#AQSMK|O7HV0ds+rPN^&RL z?59cA?Ddr4zJBf+lD*MuT>)P*Z3{M8n!Q%EI`%h+eZc!-3mQK+!2sC8^Kf^@r(nQG z-#yoa*Meod(XG&4(@%sEZ~N}-=;ep$CRW7Ti3a>bWU=vXcZ52V*Zc!kg7@xY+Le~9 z`{rZ6>5+B1m}})XKKMo05IX`h&qeX(f}aO?QNWvN{Y)R@ z#7FM=RvV7iww_iwk@y+{-#q}7(mhI(kp!#S*)=K-l8R-k^5Px#@~036vRZchwWGLz z)@ozwFkZkKQ70{&nCswad{7hXN#5j(uzih1ZIVPakK8--4Xhk$E<6vm+aATr8E2}y80%d8&6@H@U&m*~7m6E` zsRCVQPS&4gpJXw?c`JqsrY8u6dL9UE=+s9~bak9ZRR(5?`;Kuqg2DY>JCjKe6ED6S z3(OEZZTX7SliY}T4h|tvzFa2Jv$k3?70zt3=BanyTX)6gu@6TiY|a*I+(}vn+R39V zS!O@un}pxj5N}{?1;Ox(KBoL0^ht8R>*u|Z8S=l+a81z3kKU#i=2I=*Cjk-Yx*Ei% z?&|uN59IUursq@n2CV|;_zv#BY0tYmR_480h3a-7U#zz&bl>Q_OP7%TJeg!_Akoy{ z2A!G$h#W%$9VSrG4%lz9mIw)rKtw{5;SM+GBP77l(;~n5C3j@io?j~d#P)?Enp7`E zrjBdc0c14oPzC&z=Ocp`NpnM`%gGG}N}8U)n{fKe00`|Pu@}-->XcjT)FW0*bJ+A? 
z3rjZzGBkBuA01o67SHt;B)(4&gq;KRBWk9$^kS&Eug_z|Dio%%OM+#Hb>)yTs3(rw zkd?eoz4D^>yllJ(v~U{&*fO{(tGplAr@=LW@+ils8>fZy3cZzAcXf|5?XITGQOJ>4 zV3S{G=b55CIZcS&l9;0=ew2YThix-_pbOH#c)BpsT^6$1T~3OhoB|c(ZQ!AJ`w69P zgR}38#zz&wHK7*`M?Mp&Jil{H?Nny_de-&IAMY|3+;;#L|d*;kN_q*p#{>U&x z81l&4``LS~-@-Kn^OYqOv2WwuVn^VCH(gW;Tx)`ZOkkD2Yi9;-$5WcUh6K#d8t`=v zcXj|a_-z#Qm0#-$?KHL$ZV+^v+uRz1n_q79+{j&+O{}oe8+2{FLzQhxgKUg^kvmP& z_x-(-qr6h*8{+QI((}eUveIPUvMEU%3NIUFtzf{rW4u2=p^S+#2Z zQz`kxg9=MA!wF}9UHcR%(z9|j_P=mCIvAnUZzQ!8#@rWWCwm6?#MwO>As#qx@d9IH zBWbLc<(D(|6BY2HWV1Beh9^&A4?cc!Tg~hXP171(1;qHnj3O5_reS)Z?)c}I$R+GM zHlGc6;a;|i#<0KME8R3*5{-mM5UCZojCS>(li%50BbsNtrI&+3MD!UN&9_dltL@69JasQRb z=l#sf{RY3w)oxTM+!bt6#yffe?DWJ#n(EK!Lb>!82UWXzCsj=fhLJZt_qxqef%r*6 z*EAJap2ZE84Kyy>i0hpeahpzJT4%_7pKBFxVPYrRGP$+)*8|Ek9~O=SXGZM+@Fue{%|vBHy^-2y+nE3mcsG!ONVA0R z)~q_l1H2NFy5PJ`;>BgW_>I*SyWqDDL17_y9QIsj%OLT7Q)XZ%jtI{P%dZF9oM6xv zQgOK>=ni?WEP+e4B;oa39r)vh;2w#dl^>lC0GjpHl`i}YAb#CS~Krznyn=_%>rC$$D z@3&3(%YE0W>Uav(P&7UWaIcYblJ9FBejfg$CE`6^!;pBl)66*uBAUMB;{yCpdUe2L z3rx-L-@t$T@pg>_g2P9AgW^q3U`cY2<~eBCtmJ^~oo%pnl%}=Y8ZIH|a?vEYIXQW<7G%KnmP2tMtgFv%nv3Jp0Ygu(7E8ulh7vb`Byeu2Jg7L*49 z9~ELl*kB&$_$oGe^PaOLE3L&0V6g+E#&-o~$Rz$1B{+^u64M|ad$&)5my{k#$XMw_J#FY1b!S(?~o>y!%5ZEJ2U1FdqxOh#X}%1+FyEbVJYYfU|8Oj z^0OS;3^*mKixIOnexR7F8vz&IYynI3GWuK*TD>sze=I*~^-(wMBw10z#+{-By)3(7 z=w|!^ygeT3&E^a`_>@RWV`la94OZmeZ+EE~rp7G(+FQVI%9no|5rU9s`k^kS#Nd6)@k`Dwsz)zbPc~*%1O2 z#gz>5reL3AC)K9yvCM4o8XlD0xP^ArT5QAoW9bQE8-nQ%=M(2_!}CY5w3WWwcuGh5lM6BZ-#W{`Qf_ zt+st6$)bI$TezFKn>@8eiPqP4VZ?PhXIbb)TKtYwe{?cz3qQotsWhk&*+Q4p5>Uft zisp55$_`oDqxJv0W-;zrbG{wzl(dPRB94_<7gL)kzZQERg91RyxnJX;rN@%NyfI+` zqLgojC=YTD8fm7|6%GcI4N8D@Nt;h!W4|jG$5S+?`?7UrNSwQ|DtR4~ixc@$a*weS z7~GPF6FuY)OoDF4+!E`xR#bD|)R#@QkIQK<7wc_No^=r0r_@wZB(c`<4Y4{=Sz*DO zNe_^QtQADu_Td%vpqvK=X&|`1({^H3Y^zJO^Hmc9T9U}jDj0TBNhf;TO2aV7P#^#6{g)W{4BkV--8#U5-x{o&C7Ko z0TQcSC9X~2 zi&|>HIgwcSeyB*1;TF&Q2{l-bfp&wIMNddYP6^J&R$FF*8fhz6%xyMi1Hl$2mSP0M zQfu}5#^ZDELA6+sYC|6D^?4Ivi89gvb*?{R2c#&dXpNiQ9XwO(9h_>1H12ZWHYYyr 
z`+o{eKLY-gz2&+FNY@pXbk#*p5Ed?1w;fV;@~?s(=AW~s+tdvOtlxkNxzS|Xgr^`! zB|w5uYvM#c)~;ciXI35%HRsp&-X$8F5dz1U9{->M77=7gpHlft#KTL#tXu(CL-k!W@`cx9@r1Vw_4zU`awMr?_CY$>N@K=Bt*}T}` zrr5QQb)z;F>mMGVd)ZfwhliHrh82*!59(|)xGb&bgpe+hUQvwo3=`PE?rYX_Dl4j~ z@i^BJINC36ZbmsxQYU`wf4hz6wRxXbU9ZwJc2wPz-0Q#Pl`*BbHhR%TO)d)}nNS}D zItadY#q^6`eQlpnxQ<^1CvQY&AsaZWFgwzUp zzONRXcQZi7Nt=Y=bv9rbV8835kt;($1t4`*I}1i+RwL0$d&I32s@5R;@5HGbR+>C1 zwO^)r9uAk>C?~9A7F0h)-CFu!hL!Cv8%ai-puA^n3sRUVAVy)AGI|zC^hUF($=}zb z5JyQ4Z$CZ6rth=fGOOv8ezarKwb1|^LON`N!Ph=&7N4t8a$a+qrNq9L#chjk_2Xj} zE!(iqKj9*p_dtoUeR8wz3b6bcxdAmC3Dh+QO%gslzMfsOGKRn_Y_XIsa$T$<U~x!F=JpYKoi`hCI=7`G5=hA0Bz^flVm7N{~l@PvZ_|ms;jE zn>=VV6bmT-T+*&*kx5zM`geuMl^JeDk<7H7iIu>coE$Llb~th%M|<8jbHBCsCTixenYE=7(5Cq0Cp9`?s^W-#u%N-Y>gq;h`In3XWTRbAM-HqRKFvXT*~u$4i;$paC5L4C$!gMI{L z>>#mQOdrq4%6Ek&AS(gI8V+!}ytoqUkdNm#sUA5_g;!<zB=zl)z9G5G zY5CSkl$9YWznVs@g7^ce&+78m(`TsH!=hJnp<`<{cwe>sXaimb(J8R8G@&J3HX<9C z;YZ_H0xi*2ORgb=;_Ya@pjw}wXb<1^qk~_@?Hc-)1#P|SgW3{!%o-Aizf^m)vciY< z8xETGiTy0QS1#r`e=b{#TteIw{5FuX61XXrWURnSs!|+Z+ z(Jh~EorB&Rvp~``3eFH=QBG|NV&vA_`KqI`Ot=9Grx^Uh8(p8GbjzjcKjIg-L$(os ztm{@wRe~_GP<%=bSn!?uT|mV0bBpJZFmum=3#-=CPxEHr7b|c_*N5i6@B;5#%rZ30 z=j@?*^R9Q-IOz(|;c8gXdBE>BxTKvi+@I5;oKm*zk7~^E&jJB8320cLiD5^9QurRf zyx@Z>xa8_5Oqpgou5CB4y23mtH{0Fm2rd}ddaf?SMSs$Qc%< zBYUJGn?B!Z-3v$MGy+|0WtgXn|5wo01k`|l#a_VM}f>1)&W6ZN_m+%;0hp-gND6U zYZB%ybd0Q0M~Q8gjv;S{iQp36;CX3?ou;HVg6@l)&n?bAPw=p|fy7y-jY%hF@H&d& zMddymxhViDByV|(xeMU(+5T2)Sju6S;C{suUW!K6K(V#~w;u~{J z2q3R5PEWa!xSxdme&zzWUD_&Ef0$Ovr5u^&aK9@Qsq>lRpuI3y^wxSM@K`Kni;IU> zA_7%x=UQ7+Xswd15`{8@TW^KW1_gG$2>q*iy!&I)fdbjO3PbNzC99*iz_`&79O-hw zj+LE~;jU?RmhTfnRa2QJKc%uFI=FwzX(XrSewAY+8KZ#8(wL3uGo>LAg7OM)c2Gr- zP*8er@XinJAROQ`mH1U=GSHMR{*K4|EZ>FMDs5%AdDDn$#g;HsF{fV6 z>RzvN*yoe5=Y(5Vui)DvBm0vOnBMc&WnRDE=Q2tUOPvSb1~2)g^2-n6UkB&c8cP2` zHCQrUNtcs--bWt#R$2?MdkTzE>ZwLXUD=G^6cTgh^Qba!Z6KO4$}Tz& z;8=ah7Y+@EUAPRD(`fPngFp?-frvNIAn0EX*)AFB+viZqXcq9KjH(Kq!Bs_dg@yoY z@aA3WLN-nvsWq+;Z;$8{8D`YS=0Sq%Z~>>qS0d)s+2u7qNI!)pRtjD`JZR8dN6db< 
zHRz0*$c)3y0~++J4TLkCT!A^z0FF3|tf;Hmaw=RSaTAZxF6ul0@d+~)E)WqO7A zC93WWbxREU9^fZMg4Qz7yw?%GwIRq3*(NS%-(*E1Qn+GIk{I1p7n1HW9^!7u*|2SO zIDMPHW3I2pDKDlGY9PD$@WYK?-ErtqgoWp&NXyK9Rk9ONU(uWEhUxWsBLr#h68bIq z1hmvitU;{;qD|75<;wSbqEYIk1Isw>%It^|XcCl3Dp>}Rw}NDMJ1L)q=us7t^}PuO zX;E2KHS}LS71cRXVNxDF;O~ny0WH@iQERT@EDuW(_SX$^Uq!(5ooAwNKT)3*b9Lf= zIO)IN6I##e-b;zQV$l-qy*am`Z{v?MWy%eqc=X%|I_kJ`4!>~dRc&IgKBmmyiFC0A zx#3KTQY(lQ4t@8sRCd&DIE-3jd)_6z_+rr~7@42^MP#iXBkHQ`BP6^@XJcK``M=h$ zF#?s_%c^`%S9)OOcLsu2EUEn;oazM5I*&rQQ>_D@uTu3u7^9BT092b6J=VaQL`q*?hYkoVV`ZM??$FcTM|%}=w^EQSAYCiKX0N9}v(?iq++N8U(*j^ALaG!8bf z@7rENSpcf8a;(OV6I`uI3hbHm``Jo&%9HYJ$<#00&ulv1Vm&{^g(5fY4H8|2k%K+; zRseDrdKt6%kZvp?C_cAYN~cuNUC_i4QYRq(c^&B zILbE>3%9D%i7)+Ts~KJQTm1N->fbs4mX}DUu9pD=Syz6R>CH_ovtQL0%azvNV~qD- zSDKOk!fNN7W233ry&xis^sGK*)Uo03vl{iEL|xBMcLjD;3|ilCfdcKbZ0WVf zM!|Dgm1Cz;oe^DT`wVkULE|>`Zy~SH;)KS_U74f@&pLx?YU3R+I% z>z0RX{=Ux%vnN_u$46FLfdk2kzdjQvBT#@UPuZ*$qoeR;JTntw6ue8Y!pwP&*+$xH zcJygxL~WSrMzFhVONjd=r0g=zHB}gmkwRJ5qIz4<@hu~2A)40l+Mig)aMXWzP#X32(%`2?QYC(5>Joct#jqCpw?qIcSs2eiP5q`aIfDnh++=D^`$x!z>Z$J zUlvNG3g^f5!U5rr^Fo+zs!vJ?e*`rGlX4bz*zSI~NDeF<4t-eJ5LJ`CG6iGeSLWLSQd}ky0_G zGXx@dfQsJ+@AekPZptJ*W3`=Q6c#`!EMf&KY&lj#?klvf-a<&H8Ki9+bQz*>!b6R2 z+=O@T_~{zdus5Wi>2JOTf3KG1O~}5>oTx1U{ct2zvLaON*kNd>c1SIdSeTp6z5)J- zzjq@TF;|lrszt{#8 zzP4?eO|RJ)5nP7ysRTotKQsmih;|OMzToF{3*_ChPn?$GsS$6ual;oY2kFn0zq!e0 ztCh+U_EwJQObe2{74coTw4<{-^-l|FCzl)=E*l#fMy>48>5DX#OJbdob-=%UY}Bk7 zF5_xUfL)GIP9)rfi(Xl`a5+kT;M#sI2HwYcne{b z+sF(0p^v67Aj>C{J?j-)ew17-O%j|IsO0Nzhx4|*Jm4{*DbGmhw(uk$7Qr}JQyS^m z2HH-ivtBp(^A?%6V**O@$~O3Hwdi$F==t(_6twWwrd+ljC<#+zdFkWen5CnMP3Yht z%)VbjD`>^)4Sn)u4qh!L00$0JEQ3|y3S)L1IRUiTeYwf32m@+q1$F*Xs;b~0pURLF z-$6kvm@XmE?H@3Ql7Y-MYh-e|6~6;oU0(}O*pS5cS*tQq zPT=o_EupP}#w9jBy6iN{aRqKOAT;I-UnUhm(w@awFWJLwZp~UPkEk>QT9%;0>VG{_ z{l}WkJ~&<3n9hf4-J3QZlmP_6{f5~Xk*FW)!hWoif0IE1bg6dHEef8eq)Tu?ng)`& z01aXIP;b!mI}QeqZy(CG@kx2NGx&orHVMv1guO-B@GpKR=`9GO6usLLpLEJG00g!R z%N*6l=VtspPA&u+ESuID_ncB4Pr$Oqhk@G#+yFKBb8IrY;H)b{yh&BI!-s&J^yiBa 
z;+8lJT|i=NE~arND_l1{~5 zj5?i)*YyT#LT$mwB$q)nNPFTeY@M%*&%7x7Mevy-g>9(rrxM6kY6j%y*z|&h*Go>@ zT=TEY>Hf1by&a2ruOHTcSzY~Vz`lY5i6@qu%djNAmwZ2Y((`TDH6*KS(?IC=w-~ z>6U!!>w5DuA?Sk%Y242C+>32RI{A|nN|2=)j#yUm&0E}8V&**>EmsRR2^946Lj%33 zEpnbOePt7V9ZXZZcg45D(y$Y?Z740dBsJey8)W&s^#62y`8RyZV~Oe1NTuRX(&3S< zo}B4=VkY6DVF;6adbhdq;Y--wMooLlDVut5+he{83^50Okq3SKTQvPVUvA8VxtcPv zB%~gigf7fo5F}`nrE@pfzbP3_DG^ikVM>p3mxR9CB}A9V@!~YW zDim4LLZmWJ?SauVR^C?(K6)T2Vqqq&TsmFtE4zX^?Rrs6sgUhDabCzPX5)1MbxPwj zMyIcGu*CDg(^6xiI-eiPVvO-=83W2$)*bxRenAlbkkhZ4Llaa=vGdzR*_FapfdY#NRLZ+8g?XW zNVLg@<^U`ciTkgT>SvFIo870l12O+JxT48`3`WW~m)pM$Ezu6oRF2APjjfB0$f_M^ zA4T2eTX6~bQbaHT7?Hr$xeNQw1M55hZENpoJU=xvnTPDXVL0GOYr${z$9w|X27&co zJLfa~$`X$)PK%a7Ms}qcMc=hUbTF=YuWYJWtmJV2vkmT;g{=XbLJ4}mO6DBc{PC&hb z_T?QF;ij&O8O6&J!I@V&WwYrK`O^`)JWJ86OO4Av=h}u%+`9YfHmlxDxwCOYWw_AZ ztYK7r@rn10g5!Kinqmxm?SO<9k5AJ3Hy1C|NtzF$yErVoX5|b>v7nJU1WWG**L01# z7v%GN=F8NnCGxd2I-}q&<*z4*15<8upNVcGcm!YxyCxh6Mo30|@n89Oc0H2#g$EB} z+o|Ck_J1b((o)86BO zPV#Tu%|FJ&Uj~dg$GR$R@Q+2|ymE7g!<(@Vu|?nUYC)BhuK}NK z?(e0g#t(WOUnJfu_nhZ4zbCR=E7?^HoanF#lg2w-bbC)O0fDMf!z6+ z|KWesmkf*0lL!_KTkVg~h{P%ze!PLgof>b4-)# z!R)-FTE;UP)z8x$=KFRd%GSO*VQtAKi})!HGlD*a%*rT ztjr4MVh8R3FT=%V4QVE|+{C6D=E#ZU+{1KXuTcNf@inuoMgUg_(;lEE2GmXZA{$)x z6=x;a-TNnUM|i8@_0d>%oxpyq)(cP@M+2$@cfNZ1+6S>)C{{cPt1?yM56aGi0ph?Y zx7gDk7=N&1h*;0F=!#t#X}1J_u;mUqOmvZ{`?1JuP;DUltyOY!f-*R!|4fFPT!yLt z@o15Vzxhutmq4R*PV+%Fvvp=)>)>CQl)*zlev8elovB_B@<{5GHoWT?XI8T-)Eh9x z=NHYE9f03AYKX*VqnRcqscDPrnA-9^{}mQsPii3t6I8t#(NwW_+W6o!_(3B<7|#JIku+EB}OyLUUD%B?2*{E?=eY>!~i5AAPhe{=83n>Mh znAI<}W6DeIE0rc}0$pN{fi)sW4t%Yck8Y|9LlY0cp5iBPU}ERv;7woy+c&YV`LZ4uQ|#sCyW?m6MxLdbi1gFwPAd~>r9jYicXPOG ztm?VZ97ACJ&!B_~#EZpx{j;wPG_a{{p zf?sy(G4si<=>|<~)pdLfw)=QvZ^~Zea+(Mhe^Fa}zMg$5wLF=&5T2B-J#Tt)o&Gk; zB&(tf9sou+4RNhBS^L^Tmbw4p+Vecm^x-zY&b%bQGLF2!TLKxn57_A4%SE%_{PV*5 z08Bclo4YH+mZCPPXP>6bxoYiYO3f7hFig9K(o?Lp1}4MsvXMie!+b8AUj;lV&0!;lD&aqLmsEp!2zW zDGnbHgVrCgVZv>QhY3!!Pjg62?DQQ3A=tGe0n-MJ(%pU3MPeAWFVq|2E1e?rc^1Y5 
z5gpIFr4e6=q4`;WtsdAG^rp4y2^}i*0f0MK(Rj%LP6!$pWM8V$8rC6xt_sxzEGuGt zj4(pehn~T31AcC@#L0S^MG)g{(W3q^eYfLgiF^b($!|IHv3UO^rPO{Dht4g+_( zNI!VAUH-p$4@3rNJ6y(a&uN9Sy6i2CS-@BhX0%^b_Tp?Sbc+dYFX?0X0mabGYFs$I z7A|1Gn)kC-0+ox-5fYX*(XMZvhsMDxnh)BR`L>Q0JM#`3TXe$&aNW{)KOwddLTuFCw6sqHm7us8NPmIx7S;WCSD(q@mex4#+ z3$r!)Uu2qE1u-qWibV#xR^5jGuIK#EyCo(@nfpr_JqZQ4X)iJIi2TnNR|W_fXzUy> z-Me&Ncl^IT_rIew-C|nM;VGQ|TUPnseomhVL3zoOH^7VpQ2y^%RZj@gPB@!=;t8s> z7v8}C=Pz&%AwTHcqN)Gv|BLYO;bH_KyCdm8xh=|nSwvf2d!EEtNYSo}s^c-I*}1MF z$pY02q;%2pWR}2UHzlXECSHSk;NkVX?&_BWalh|1-O`qPqepVT` zizhuLxqZ?L)}UuE)<^Np(c*5YYWD#NZB#u~DeAI{kG%d?VY=#W8~DfD_rk-WV~X!r2}tfN%$IG9j%6`WO5+GF$ALi?0fbbxV<#fn~Wz zVXTW}2b$CGTabC~;fHc;?1d-FBQE;9X}|h=TsS!6*xaj%F{WYMSlOs<13_W;Hx!*v{p1{ zm1J*!9+y9t#Zdf<`BV|!fdp?lI4YHST|=;S3vOr1CFC{#?ICFpU*&UT^f&MfeV%KX zjH8S_@Dq8;N9WGXFY7b(!a*~w#~*o3pZpihU;s)Z6YTOI@@jC1Te)0Eo~{2!M$wZ- z`ZVvb_49eVLu!zm4@n{6O8n)hPv2Ep@Uhbvom-@&S1kC%Em|kHSOR$tEdr6Whee^t z#^*QxFr@#;0pH0kj%bhqMozvuy~PP^gH7?v1U>;Cf9%=v%1UaGPReW_?PcxWTyv+V zCo(bX1H8n4`YB&f%j}Ol*)zx<(vop>xhAso5OW#5~ZcAjH4v<(tVKjTH%SlqXf4@zydV)jXau{P8;TJ4IU zwTzRuqHRVCA}u@l{`$z&-?nd0!b#EJn6lrbbgMg0y#_8q@B<2apC#WJAZyJ3W1j$Y z0ndQ9TbN*%=HFfw$R1d_K+d6OQ6HYbzq$QoZ?HoPP6thoA3c)sV#^HW(93k5-7~m_O4?1@v ze7Ka_5l;x>`GB_Bg;bt?w5>vY}wW zw&Z0DzP?Y`-Wtg?e!f8MtHTjwPya9Z6)6}PqQl^F}gJA<90oOa_qBaZ%m74MPb`W`M z<5|vL#JL366xkXYR-#1Oh6MFdE%OxD65WMEfF&IiA; z6Iscmf7Q0~&^!0{DQ~3**=dJ|nHbmb<%uXS@Krps1t|;zpMh>-h>ob48|M{H@spF^ z8fOq7R{ypLH)vb1;Hw65KN@3I7+u-4+}2N9HRpZkZxf3n;C+Bs}F9WOAaSQ-S9kKj}Q5UYR(E zaY=ubRRKL%nY~)+fsosvNiFFnmkQI-A0P$*{{*XyP+E+sEMY4kE?}wfpl9j!Hy^7JtMfa;PHhonY+MRgZ z6kSiCb%l%xzT_q&IsEvsd!c0QmYz?$WUa`MX_0qc!fK{i2Q#HLc@}Bws?>IwIV8pM zf$x5)Bz#Ukh5xw+8Nn{0V{k#4QBX|=$fk4GPqp>_7dCJDe;s= zoHIGbp(nINcTMLEJ_r6&68nd7J<;sK)ZL_x#<-XsgU0>g_rAF(waUj{;3{fjG=Jg# zUByIY7X|B|e@~1T=-;upc(|5RWNoWCvounJd4E3Qk+*N!Q^0T#&%g2yxHK)(iNps! 
z$+VF`U;I0;UHp5qKh!M}V^0?}_V6^ql2ebWYC=Dhi;p|>GIh#ESD1PKp2aZwV-AVF z^m)?JiW+Yj1@%|4Q`}l1k5fK!E&s7$#6hC0Y&Z9!e?}cy+gILzU(li2FWsxlbw0nn zW1r%2_^)YE>{fT`24bOI^}092&JB46?c87`h#?@95&SvV)4;;C8=aKK23*l8u2evf=E`)5wy)tn+Fu6cVSKno!7ALLILJM?t_%Z|0G8-GIS~+G z+@kwqB9gZBLr!+6mw_;=^R;=65(WaIL`O{zZoc$(z2EJS_saQBpUYi%!y2cZDI9pS zSst?$ucw~kBpLLjG?m{#C?$O|WEL6Z0zbXNUL!wxd3~|Z6c8_iyi05TL#uE$bRI*F zPOLIR?=xD)?3O}B^KC9ZQK7r&{l=_ikOjO^6yHj^LP+q^f1)!SM29M-YK~RKCYVPJ ziVhkRsJadkaP?8#0Rmh+D@;Dten$oSx(`V%^RxNs9#(T6Krf+F@nPx1bXy_!NK}M( zx)x_8`?{;2@(t>>s_e0T*5Bb$wMOwj$l=)W;{~^~el4-=koZK=AIZnoE$A?NSEftl zV-Q?Q#rVIUF8=$yVh8NZFVaZN#h31orYOFQoCoP$*PRz)TK3q`-7ORiZ-!#=WN_**g$SQmUO#neiuT88=@hspx5?~~Ux_K=gYz}hmY zyT!@neSCb&ZDd)6=|V5A3jn)4?YZU30FtYjfQ$lO8on)y5CO;!!$(1NPHE!@v>?O8 zN!6~8844qfd1O$a^_@5djofj_L5&|q7l5aB=>T*gO&~tICRwC4A`8%6*-4&1VO>e* zhaRckoQz<(=rB^RqL}a-GzklFE36znk={L1S*}p9S$XkQY16R3Mh2PN4{b8(eWS@| z_HqDAH=;AE2pZDv@@labZ|H${%cxl>iPvG^iXe=}=6Cxpxa3Gy+_Vpr6om7qqAM8x z_%CC08OJ{-8v4QfplaudB~ zkf!KEzhCN2QDO^V@x59%-Lvv7xweHl6arsVl)UV6@AjQ#I)()FNlhp7l%1of9HO`! 
zjW8Q$wOipezRbzbdWBA>2fZ&Me0|9HqedqQX<8x2WdY#|Y?jvjL(iTw1)IJ6y$}s3 z4N%!Aic8cdgJhG*yIIE(p!P6HusjEZ0Xijrnwla>g#*ubQj?-)N;Cwu1an_L|KpL` zF$qBWk@)T1y16c&z!K@(Yl_(z0Yg|~D;N;&2lR15`L`1RvT?HKqBq^h>WO3q-RVn$ z)i=EsL;*HGI%i6G(mL_FvUFc#%R?a^m9gEN|6VjIE!nA@Td}<8{i(F@R?aUVqx!MH zak^yOD~>l{4STH-d}^E2qaUVtokg{xo1oVyd^+X5mhUn@K-F7#V|(`u72iE&zx@Ee zM^oN$MGRScip|g^{W;D%))!Nxkw09dGI6^uxA@Rz$o#}N>!7RZK;V^*%b=Q23uVD} z*60_1I?twk*mo|e+ROF3=LL|Y)D(s7rFTh3Qd`%FIcKY90^9c@=BYb5<3LgsL*?4S z;tuezlasxjn@DP{IqFOKx#EAxgHrvy+UY9PM8zLBq#Pj4V#XHNemx!`Zr@|F_q0g1 zd7y*TRuG?Z$=~e5@?6VxT54FeyI#$0W{FK%)9$2ION?2A2e(zNMChU@&<04qADR(8 zGZ=vPYL5~6M!Vo|k?WahmZqyDSlozpZ4y^JP4<=u$`l-c5A1RAo6ZT1T|L_{soNsL zUZ*~;0NSejw*|1#SACX$Gv2HXem{#%h5}$3t`on(5YKgqPIKJzuC37++Zd**%t`9H zvGMn#o71+;2O9)_Il0D?89i&#(^k15mr-{OVe)ME^1niB{ z+5PCKb~4H+rSU9SDqmlaz`;tuc>#}G3&8)`4ldc|ES>hYYfqAzh+0YwQSf|$XpphQ z8Cc;l$*1U=5t(l6khNM8n#;P6O7y*fiBnq7=En>Pa#6V{GCZz>o|h7Apre*CnTGQ# z8a`i`0Af&UJ?XG(LT+=^l{6<*QKj!pq!o%Kw+e*6d>j_?77ngpqzX@5=q*t1XE2#g z?6>#JXv@xW{hTSn%$@Z6-SJp!!V-U+a>7F8LhGTWP#` zljYO&lRSrd`FxSOA^X$;4GEY25oG0gPW5LzSYm=_!)tEcf$%;j|GJT?poKnKeS~FZ zh5LSkuTN1w&WB2M*D7^z^wv3~FT8T8QM?KGI$B_UHeg~#gwOtrUKS+fn8RhgR95;8 z@{_oa>O0NK4b@U5+!5pjkq-%`IYK_9&JVh>kPq&(^5YsqC@aWcIk-2Cd=~-KiCC%i zP|Y{0=eVRS-P6ye4wd(HU{zl{S*Od7zlOUXhT~lPI-+533%w!W_mvqpn|havoRBP$ z7c$gHv?ItvPNe)%hD`|UMuIAVnYi9Btv!b=|0o` zubJMRCW0q`Z_DWO#I4uySoco`y@^lS5~h%~pP19L)mWK>!gRL6TPKsAF41AxE_t7Q zyUtQKKsNf$6FGhple58a`~R%o3c{QN1!fn{$*IbVpJ+3|gPfZYWZw)sehwO)bs<|) zkdps~=D3AP?i{-O#dZZ?OqlZd-&8m0!}e?|`Pe=&0p7)^-~2Q>f97;xaWDaX(Zyns zZAYl5$QSmm&rTm>g-g`6gFq2`Px5OMP_c>KT|s87Mam|Q(VW-)Acgfy;>~2N@8a&_ zqD1vZ5nb0>4>ONj)KX>soOWu<`Bg;2A=vQ4IW^(N4~IeX+WnaOF7g5zQp9foy1pxR>i2ggJedmc zo=6=Ty%OZbkg@zLtkw3m%27XdO%s{XaPaT4xvN7&A#SO=WlopEBe5^g^l;Y%V#6^> zv4LC}v7bRVM0fg(TcF)qd6GEtDd5V~Dr;Zr(8{F29)5M`-6c8Km#W)h`@WWB1+Q5p z2d+OztHF#YiZY8V;~4%Szey@|h9WFvEv!2|H%1&1MF&)}YOQ{IYC`o!Cgl|Z#Nv-{ zn`v_lD_m>25scd6+xZb!Tc($A)CyO&FFD(OM?p^uAnR5$E~`R)c{$5qcAfqDS}kz; 
zR`58!$QRLU^e}5LiCFX-Rk#^*uM^>1;va~%qJWMYSBcUI+PFa1wiTELR*(u#K`qy!Gxhlb8+1^b5d?n!mtBnKlUZtu7KS7a_4c(5M!&2ZE;OIdj z+dW@Q#ga{9TOWk61%fArvb6oC0NbEpDT{S?P7KD}-Z|G)O)@$WNJfAIE%&qL3W*i@ z)JO}K!>+Rwsc2l{LQ`n;G9jGYYUueiL9t?NioHgQ-Gndn?q z4aP5^nAc}%TQvl(Ncg3}%&ET=B1OJ>sorkR&w8h_peu9W!_kSEO+v=9WzrE`IQG$FKH!db zLpp?o4L){~`mb0W(?jBpf|2#ffx47le7a)=!k|~Y!zM5gGoZ+(Hp_G{R^c0+hn{nu zWd!{I-|o_QIhJch~rJGBzMkB42t~X{7Ygg~rMVT=`81li=MK%bJOuBq5hd zjxtFdLfx5a7fI(SA)B++UQL7k%W({q{kr<3Y7kVhVpmgX!(Q9<75?E&c7Hay&=5$N z^E*$>oQBxc)t6sSKctmP>N8-`<*)gz=xZ7 zjleu&N~*p4P2508C(xiV7SmTv1Lv1o^%*;RdFzzofZZdwl zN-xErRgn~LX=l^YECX?E+Rfp4l2!fiL1VX;WLi6U$=Gy~(N|bg?e=bI$rv8B@0qDr zvIdw`fQ|9nXEuX-jpNn~>MQ>)ZQe*^RTWohlQyQVQ(PPnU0*vt+wWS_J#e7_m%vBU zWGv$w906}7$DLCZCyH&3IF`Cx+P*d4%zTBI=#Li@jI!cK5`A*Bq@U8i|fMsrR{e1?R($OaVfX|%6?Fz zY4;5693eK?I;NCI zW>y$ZvB`R2jVpL5%d7PTFwYt4bEumwhV=5pQ3Ic8D-A9(Kr!iGIO3P8=kH!;k79@` zxdFBuf)!rdK$b{{bA%QzfWV1&?SrSuw``j1jqmRKKK+_i+8*NlG|8TGyRP|=@B3y; zDa6ubrhLNs>u%j0iUJ6^LyrIQ4XZSUmVMk;SzFW&!Fk-e%0O_*%?gf(PE_hQ(2oftvu3{YT@g)#`5W<>Wq&4T zFu=t!jVi3);`TsyGmh+&p|s8F$GT4~LXnD=xrJ4ApHTa06}DAg1aV5{`K!$=*TNuD zNSXGk$Z#YHBA#IP=Z&HD@n`;&%K=KiiYeXt-!K5sLC#83rL0 zX^NkUTCX9FLv68OEQ5>la+5TI*YJTc32&YI=b!GNy9#*k+%pF`HcsrHGsdc+7)Rx( zjQj|D#37^5`O`F{<)e!;{VU17PzA`qz2ewFa#1_LP}|%TXylGcu+vu>D#sHa{K|}^ zU_g}eODX}+eRoXvGX`5W9*6)lx>E;oIubXAd88)g=OW`w8A!pPd<4K-3{59`u&^Q! 
zn&xBA?6VDrrVs^crW2JY84YR(Jl%?|cy!+>XP$W!rPFwRYPY(;~;SZ+^*Opw(=1FZ^@}$hTZtTjXLQiFr zq4Q$&fyWJ*t@u)GD9qWbRn|mjZ2=J2ORb|Sb3Ue9iRvJdCVmAqry8{`ed)7fOanF z7qY!RS}MDLqD>3^tG6Y&j3?+ZEV;3+cl&BT7sil-wBtGORLp)U*A3hwQz~8JqKPtp zV=X5q2-OOV##K1TCzswqmAN)`MF4X$^iyFL4*9no$kpLNp&?3dwz#j=V`t0bgN8Rg z-`u&9<1xxgly`D&T-LU@x*oCvkSp}_Lq|P4WZHBDufOM#6cu>rC^EXaGM9>^9i|G% zrvG$>o^+_bj%R?13V>-{KuN>%DS7cUqkzZ)xS$NzRii&wX;M^%oZ%|4ACIAiO?)#^ z;cf93T&)`RNOyuSL#D#>sw1|27c`+`DHz2xcZQNK{_^o`*?O6?ng24Prc( zCj>cXq~%>XC>XOZu=Oxq@l>T;rTy4qB8o}x1WKt(ULkm`TUfIf(*A`WxNuEG`ZUubD_H{|`-P z9oOXdK72x2N-04=LFw)ekrD}|TRH~_(lL^o!z21~i4!^{_W1Hc#D$RtN{jPX|sF-|bxk3x0DD(%l=k9hjiJURXPKLa7;?+@L; z{l7+=vRkhwK-B=<22=r;>4E)x!OjN8=O3I@hZ@mTbXehyE=qSwaL;TXzuk;8imWr3 zS4Y&2CSDq<@E}U;vQQal)>U+wHY5%>`7*FZ3`JsVP&+EtYt~q#A7C>ej(_V>P`Czi zm%np{^m`!kHv7McPkL(zNZZu7**;&B)+KQASLvhgV+2!Rd{~ghBlJ|$OJ_*97Aj~m z@`fw0xU^wWDd3EJuQlma*N@R~sqx6m%Cl^ko~LnmHPv-mLLd;wL8yV&`IB2@lWWkS zmV^bd1D`(6aVo3B>2S5$No(ZT$Fzq{rS3y$(_aj$LBQ1BH~)e0sj}}V zq?Qt8EQh$a?a|FdH!!>1wto&qkg9!JxehnBVXc`vISqp{pG}bbA-pqYkc(1ScwfrL z_{7-5qr-8FBb8f&x$22~Q^(*c)zKi87v-Ng^c`}%Lij9<(?7&4-m6#Wl=ts-xUVSj zH;a!&V!Tf7r0~lD#@Oda{6{FX+{MqLf~{zGC`UcwO@L+iQ2OV8H%X#J_s zX}z0KJiW};cYJ52%V8FKq79YNBBevvwb#&`KuUTwKZ_EeNS61yrEdDtf>^3hS%V+; zYM-YNRsUV7v&i70Oj~LN;Zh~D9rUB_q9uKg>rg9R_K7Ea^xv=ust`X>EDlAci7=@T z(lrRuw-I>5NqFSE*Qn7DYhL5g5E9%D0AE%1X=3qTPwYSazD|A;uorMI8=SAbel2Rh z1csxi#s%(9q}q6cTKLw){a*v!x^r$?{_6m&y+jtb>suT0bNXeJqDFsq@Mm4s%s_I6Qz zE#fMPv$p)<5>~|*nWe|o-iH2r_TOX=ms@MUnm(GkrIHbI{^U-XE|+*mzv@kzy7N-U z7N?P3itv+mWFLovISFve)h`uN6YB0abVc=PV9na!IBi`>LDrfF?N2RdF?nqkq6inB zH60;utxBP$qfKKTvove@){;++ML1VOb|Xnb1aG)AzIwZ)NG6R*Dix@V$A zM*|a%qZ)n%ndYm~nJNiWy7eb}zPX z?gUvc?HL#nt#*F%do|jDyhN@&yLCS#ca6&_lTqR5v!buc~#@);>NT!dtNz z=mZ6lPsCC_%vb;c9uwWnTTxK4_E=l~d3E0D_?T_=qd z@cQ+6z)JazPQ({g7~Qjo>rN*xCMckBnfk*YJFIj2iDw&>>5*zvyc;nz;~g>IXddi8 z(~{^kQ%rEEeKg6Ptt$n!8>5wEnEo9r@NZFKt{scGe1V?nBR4bX7q3&V$rRX*7bRjs zbrSf;PLO>%fb<>iW&t~TA*z&pFLYo~dOtjOs2k%gT@a?mBZ`-#`-1Q3r3s$PaCiw@ 
z-AYtd^6Yr>@|>P!TI{k0al3q3lgxs}A4>04*Cd4cO)LbX-ScAlt=)~-?c+4XeE12V zyK&yC5eTDL&}lvP)85dud3#u>AL)tBn^RBtK5wTuR%G1MpD735&fRU&-Ii-7 zI8)nNl!BiZZcTn>7c+7S=3_suR{HR2hf?;;_e$ih5hL z#%(wLO}k{Dd^CeIf6C#ZM(PjNytKg$tXAfM`A&{^-{m`mD0$ZK+^Y0XwXD7M(vkw6 z6J2-xoM&gai02*kv(dF?rLiQsb_}S@KP+%DvJ+2CsKKqRtS}pu^>XVVL+4$|cse0b zcpAYdi;s`$1?fkH>IRl<|0%8QAtn34#MMdTKy$W5*Y#Y6WZS1xo|t-pVi~`%3h+dP z{i2?c^xH@JfxK!+c_8J=DCS4#SgnSxms9xAW-Zlul()RA7+Q#KGx4Wym>6&QG3U}Q z9IbRJYg8-CZB4up(Jp@85*k)IzK_~X{i`^bZw4x07!&jXuP9-2>v$U;gsbrJ_ z%isQOMw9w)Bwdt+=-hX@YJ?ZV`PKNSpmP4e`S&kyu`tLu5obAC-Kc8vA3;!5&^CdP zgq(i2SY_$9Vy4)E=OdG|U+Kp+8&|Vp)S^y=v!TV>J*j^QoiPW1Q zO|x*5wAC4sJxgl}c*H)AQ^x}HZ!z2}aABwl^9_%blT87Z2~?;7%iLLEhSyzYW2w-} z=BE+Xjq~uc3fV?ZXDEFnT3l?E4M4X(8l1#{KNF~BfHmxPR9TlvMHl$~2nEBF~2lpO2F&pr!8OW?^PC7Uh z4U}j6#2$gP$7yY_##h=ySLkHpVARijGmkEFD+(6OBX8$Pd7lIi#Y`PqYGN}-If=C1 z80$3)G@rJgcsLUEXBOwCWGAaXkZoCFwVmn8$gyh26;`{VdPTkPG;mf!yHl{ZJLE%Z zP)+MoFOT_b@(QY(r(!g~P-SG-F*;JBHu}QR zZ!ML+&>UA?LCR?pspv2n%!5!V_u7b7SN(DDP?9?vHYXpIo-V|W7}^$i55+dH8!60$ z+$2ivT4{PH6?`<_i$ps!FfBHlf}`F2%jmMRitLJS<8u-wKd8cUEAvUvjW9nK({kS` ziq%`Vk@DF_i62(?nBP#vPB|ZuTN-%w;vR)HPvpGy=uagOySfB6Qu}&!3oD=9nGQTH z-#C}KFFmbzz!%qY$> zXjc?heg`$|sug2=^r%Op7KOba{rL4=W>=6hErLFMcIn$f+z-FrpHveHn^L8Ff=)AL;Ajc?Uk)&Ns-l#%BOAPhO;%ldyt4v{~T8RXU7X=W(FqS5RZ|y z8oF;EF8+2a&e_>AlIpy1z9H6n5zm@?h()p8g# zyem$JT9v)!^-7V2?aL^prh*lYQg73cDU4x~d0+|IXvC24{H=^w zJ3E1=hk~~!aUc*?*Fp>+w-F9n5;IfYO=6NeWqkha9lj1&zxdm0yIQIgzUbCP>H&!; zd9r8MA(4@<7wsDO=YAiXCTp35)nPt%gB=4E)VGlmfkJ_j1HjGLKa{=HXmpVQecwAx zq_b6Y&g=@W=*V_4RHnS{nLzL(C#iV1E^Nt{6Q5S!A{;dS7``D1dv>e=babE~ya~Y1 zFtKC51?$;@qa)8*<8Ug^qspCO1$*d1q4~hoF-I2kPG1XLux;_dBe>qSxZXXGWnVt$ z{geEY5{?x%Rj`pkLsHO=t4*)J6He6i_#L<%i~BowUiH690uMv+g%EfR9!2~M84kYW zEK|}%lZu1;tE8XI(esxy6ES`~0bOsf-V5klFp&MMHzm{D{LhSXht$={MyqhH*Nx{mHyi z&$q5x-5w{-Rhn)@J1@B?$P6OP5TlnMsqW@+H36bP=)_F{_o!8?4ag=Sd5dm{N!*3^ z_R>%xTS~Rhjw99*(kCv1MsYIyE|5GZ6wN=t&|SAZ@W%kI9=cD*2*+ncV0-mN!gbUE zT8N_|cBdgzo>4>3?++9sIgU6f7xc+Uzj5AYERlC4b7FctMQ=C(VTd$n*~kTIP?qb$ 
zV+34qQ;HNGXY;6ztTw^=rY+`kR7LmZ3(Weq?IZWd<874;7@$^0{syhlYWqP0NXlZbd>-7Eu z-rL6Ki8Pe@tafS}-ZbOY$$bO(=9%Xzk%(z}A7uZFz8`(Itnwn1!8bHKUlhnc&9{_(8*yT+lui?`cDf2NLN;3Y zK6OfEcHn#@uaR1=&w9ZIW2?f7cs&51So(bVdB)?9%`xTwO2qP5E!acEwmU85c);HV z;mIkd^V~z*+DKEy6RUWvI7kJxj#k#jT#A8pMx3PCPKDY|)Egi&RDzww3Z+ zcAr01dm_;g7|biQbd_(3{MIWZKq1F5P>n*1=Qja#@8vJzkT!CHW&U=cgSIQ1;uBn{ zp<1#o2CO;1yR_Pei-ryD7?t*zN8d(&W|S&^e_sm)8tMx@+?3re@b!h~wPtya(WeLC z3De^oi-(@d!_KM)(2YJk!ifIr$R`Gk)G$ye)c0$Xh6gMt=kbbjJg=EDWI8dfc}+%G zWAsH26c|_9O=$Ydpws*U7dN_rh`Zo)f30YZu%klri* z!fBFygQ!$!dio92?koQx`O#m)Ai`DV-2od|I#T3iPfO^8TO!|UTuigq>%_+eIW8WZ zs6;KSIzY<^h!ycySac(N&i?V?w=Z!_@iz-yKj;4zwlNc~16LS2v~w6^=-*1CG$MgQaXZV(r@_4$``+`y%WUp?LfoxF8C|ti0Nm>$Bi!16RpWw+ z&lK|%;le$N%gzOnC{fqPk z+#ep8`dc7tp-)=w#_lKs-1zVPhO7;IrFp#&iBe1j-aj5L{cTVVc#?^CBNfiVP_JJh z7JneF)M$0t0@067bev}$N$tpQy?c$KTy@zj@ggU4^lyGLOa#Xd59Cw^>Iw6D%3gnX z-9jkAN$D0{L8M>eR-L`A-0jorMD1Ayr{XM-Dnb8+hTFWkS$b-!Beudf7yQ-##tc$; zL0Majqj5&eYcv$;8@5Cs@(gz@`A(GqIED^*C13ZH|2WdZm-J+Q3fY zn=)qf2asAoo!s+bg;3FP@3EI;FI&(4-bVmUNR#VU!r2~az6{B^0r^x-)yEpMRwg)p5ZQ5zw!)2lHbfxn#)5)V6RX5zX)#z{D7;WJK->$%M z4QeYtUzf;E1X5v6ZXeV;hBRa<_zYc!2j&YAqa8kwd$&)upUBp;oo?))(mW^$EA!%Y zmqJ@cjb=Eg9sF~m>-!GPBBU?IF(f8@n2KJkHp5$RJX1PB$-9~d2Q6Cfb)Em`H?#TAxf!xkA^{HtpLbrOv;+82s~st~$Gi=l%QJ z>WbhSDaS2}8oHMOosE41Soa~E(|+US>C(o1FQgywuR4E6Cd{B;_eEtq=8k|bf`*FO zf4$?O`|@jJS5&u;(WB=>#oh6L1hEZjTcXJ;bWvMr%zZ>Meu$4U?J{wN&Z|8~1HKlA zA1rlz{}rele0zhoHvSoCQ`?|M3eO0gS+e3cMdPJC@x~1sHPg!DB%ZvA;Ah% z)1Y1LRw&+DgJ_;p2!BrUiKK85A`(eR1!&E<>Ys3KjuBf3q&RS3F7?!z6jkmEVByEz+Bz&$6kk2UO>;!TaE%Q>e!uJ?7cya@7HRKDVsehqN~&XN|q` zfOZJ=&1B4|a=>%B#m~R(Ltgc?DEv*P*Nkbitcki*Z*E6iM)xyCDA1+PzL->ubEetG z0h2bC8^4_B-&%Rr0IeTwzytHrZtFlZi5&=fud_3F|Cd6dkEIE1ptsn$7kK3RKcvTu z`)sQJm|J$;WRa~ecIM|SV@63${C)|-zxJ&e`#2Z;9gYp0xE3bowA&y0cC<+>_-pG} zF=(u0oMeQIDR>_jJl0Vxn`l2koVaqbH3!LbgZ%#I^eAoXNqQk zMtD6vo;OVJN;%q7^Wob{arCaQtDxL}X%~}PH=O*m*A8KyqMNIW>=^sr&BD-jsGs+u z?OaEB`svouIjb;8*r*9VH8nt5Y(p&kzTQ@dx_wO3XMY(Z7>}?oA9Z2s+YmjN%5z|b 
zO)aOIUbh?n&9t10;Fadj+{LF`nPm18*P(^*J!%#w>d{l{>C@oSoH0l^d_1$tz=p?n z6r=DrUjM`2)RP3%j-tn*eQ%F*agOi?Fjcz~RqPJUSJY73y{)d*v#2qnj(~Fi9<-SZ zn&ytu!kM1(B(D)79DDxT99mx8$!n^3@t?Zix&keUnqEcWGo_*~S?>%%nb8l#Yoim7 zsWCW)I_MvX9*Pc1dQwA?WHOr?CddAxK+}6`KTQhE=hvSv!OI;dF<0Ag763(ChpNMz zQn-AKKFe&;MH#y4=+|28&$=1rr^Dz6*}_lbEB>8}U*_)Up8y`hEg5R~r6C$}+zhmP zEVN|LN=?W$sv?Y^@b$QJSISM~HM+unVQI*wpT9^?40(b3u7FwQ!)=kX$Ig>q_}%NA zEXkb_QE?jwDdMWF%*-@z>upK5F^RK`;%n~&blDExBV}tY>Cy5J^6i`1&<6bH_9p%| z-0j!v1G5UhjB*qqeoLB=rMpd^o^LVjh5>iy##6mIT_Jv5lbfE5a3LmX_RG7u#T|nt zzztV72qG_{l5v$J_OzsVkO?_7c0*iWLyDW?%A@1&q6vU0VqzqvS(P)+Rj2zAXQnq{ zJ5C0_D_Wj080VNnHW1koicTS;1Cq+lFSRD3olg~tGLcG(_fiF~4pKu(CFk#5ZesjQ zOKXh_PfKUoo2?z=4mP-^2@B73m$X@&zxQGn)nBaQ{e+_s zi$qvS-K%97n)si{Q89*XgPgy@x%in7sZ$2w0olRwIb^zn8Xbfw8giy^w{71HZZ^|h z)!%y20r#1`*}C;-XXn`+&oU2EdhodUg~zs0DzTcrxP#i5QxY*O!Du+sND7LR#H!6T z-D9acEGX4M_Dngsh3to?Di^G7=;!b*b-t^KEp_`mWb2kM%BXJQr}s79t<#Wz(F&o< z8nfqhc{_c;ve}zjNp`OHB&E5jra5fb5S?EqWn||wJGUdVKO=)j&2HqTnN-tfvPapP zqRO*Uu0;0f4ABwU$>+a??27JwX#ZwQJg3==RczOtrukZBKtOU36!VN(X z?^dqfV67Nj;F-3a++E%;(m{vcVD+mqP)mwehELBEn>P5sJxD2w>=9=wC-rgG{jb4X zjjkE*tI{j)&?>PL_q_QOR+X=u89Wnt?ulRZzN<1Z`0w=qhOSJ!%afJ!3L;zEQ*V+l zav>% z+{LO`05`XLL!aTDRt{fCzM^Cx*eYE8(v+DE+ETey3pv{}H)075-Ev7gZgXg+lP)>V zAt(6QEM>)DEb-8KW8xmX-ps>NuPiT(1O+J}Je#W6SPBY1WdU!EXM+n|9tN+P-!|Xw zm%E+)PSt$fWp?Aes&3&HdgpKz9ZMC3J-(DQo<=dy1g<0j*mSdO`L(o1KCJYfOTVUan zug-ZFNiWwk1&0T`k}Tm&)?yzdd`C7vcRGBnD_Oy+Ro+L4O9k1U0)SP8SAEdRPc;pq zi&0{X^|~iKs`&|T_%rCKXQ^3Gl{o&iL4g&F^wG=R*uH8^)RnZ<8EubIVhg_SYv3R- z%zfXed{p=jYd?WgWa&*wAxFA5-R#G6SS8e;kZC?jkVv24~U#D7BiJxGc6Gth-#nPZQkoZbUV z>b}*7vvILhjY@t?`|K;Tw_csf4Qq9A?nmF8+b!|8J>AjxJZ5L(erI>%WCF9bH~2wi zK9$Vqp%~&)#BKYL1;w$0-3j=^&4W|19@ih$h1bpSfgwu+7Hb8ck;9@zSrFpewX2=f z>@f-zd#=qL?Y!!?($@sbetWDCZoS)!JKcFv;5}JO8*DvgTGKX5SWMk-#b?&D?}G3} z>7{{zw(q>Dwv#@mCd=MnB9}{m-M+PbF;mvxaeViCTfE@g{&-x9>)Vd5Y;Xr=97 zAXuL;Z$!>!+)}=@0bmA#UY@*mF-6Vf8t*J;%*z8a^yi_xADVHO28dC z)!|41l=`s|Rn_+C9oCmKECX$oqB?uCT?O_Vn#yFH=0VQlFck<^d1e|2_028r(YIZH6ZFc 
zZ3Ysh9(nOG(Leco0{5{k2O9W9)*~3#p&y&UY%bI~A0{q$(Q|3OG>`6+D=9UXXorHn zi&0Jw{T$MO57vjbL$Q;3>X)-fHNJW)Y>o1diR5b+l1dJ+Fh}Hv*RyThy20Gu2MS%kVpB)r5$dVo_%?7cP}= z8``vwYbA&*nY7JIR?rz-ZWb^NJ_ZH1I}}Q6e#a#0HD>W5UdA;3-b-Fe%rx|UO7P9% z>%}T=zgf9DKlrOGVY3v8LXhF;nD*F+E`hXB)ll*6BU|bUsj~5}n+sO#LeTs3-_^oJ zpOO$|AsZ9WEWhGW#4FYEWp+*rS<3qV#J{^U+c-U|=c8~IEH7^(i5)6JXJ1Mf)5#ZX zNayX57T67^FHy(>hh_Pu#8-m94zCDvOKz-N`~@_)3a%f3f!8C{h(ys zaCZ^oqcBKhHl*EjRM2?9pL`Q`c!d4-C{T3Kt}Umk zxWiuHlXQCX$kO!D^*yUi2b=rjUZ!X0z;&zZDC4>tBonb);>~^mgMER`zdTDS#nyKV zJ-TlEl$F_%0Vi5ax_p$fTu!Z+zppspuJ99i}g{}%zU^Sb-H5L1p8!^3R%CJ~40a`z1lbs9&<(nv+cN55@_<-{B9X8z5 z@?I+9>%7yZArnxTt;DXZ>jP*`U*b4u$WF>mT43Qfw!~zcqNpGbojlP`1=7&MpfJH4Wb)v z@8GV3<3`_vRy-&(e|0pn%@2Qt9nb}2_zoCa8;4tBR=SU4gI*AxyU? zs~l{?_AVrZyOVK)SkW8kp7lgmE}cfJMFq2aEt|J8=Rj4H%V-k zI&3INH^C^kagRYnLK)mj2Z zABgz$I!BCN5WnJJn7h4nuII&j-8&v4OV-^Y8GBacATixxKB7-ea~#|Di1dJ$pS*F6 zK0Qi$(_z_NRLgXk>n1eyc*^HjwoqT6_!yN_w46_=s2CKNchSZMR&?f#5n=@kmVp)I zjs|kolxcLkKl||HqGe*t7mQI(^iaPy_Ec_D%>8O5juE%KnK>~nCHekXZm6Z5RaN@& z-7}J1gP>7SlD%}tG@7>p?XSK@^A?I9HT4+5^NoEq^u#eFjdEK|&)BsGDzu~|?_KI{ za1%LJ(}mORA3e1I# zDcbxMW^X->V}1-dm0iRbB(Ri{TPW5?>?hKiy~lksv4xSu2AlnHlkK=ole<4yCRMTD zYUWBi8TPa(iGljv4g+6kUs!3BO)GeSdA^ZIRw=fN9RoqKnhqxtppgB>|3YI_?ZI8k z)xT$JRP4FT?JF${-O=tkCU;Z0=z8$4Eb@?<^x1VaAnJi-a$9goA`aIXPj>TT`}~Kl zM>S++M&JJEQU8F3{;c_fOhdytjEw1`YwNCxP#VdFH1%>%;xwusiji^|77~=~1~_^M z@|!#?a|*c_M-qzJOViX&U#@MAAdS^wLjF@7j?+Ve;DbQ{tfA-7VLsFrhlyBPg3&O@ zNmr>&NEP^GQOo#5p#=?aNom>pPxE{-f&aNB@>{9@h zf&+l9<0kYS4U$5|b97nKBSbWUyQ!Dkl!gx$UD}|V-@JfGw-QW zSw7od?^GFGPH*8xACN3#xlg*-YQ37?;Z5s&NrC?C7l8{Q_fe15zZcwyf4)~K-WTvT z5Df$OJRrz0ht@%kh9Z$HB(X13@zKgXI2?Lh1?)DV`C|1TJi9Q}A!VIlBV^b!;Nxo* zpzZV#_Q5AEqk1#xm5%8$+&&p3B-nqcoOKhs?EX_w6}Y0%-Y^CnVMiUvzXjesYYJnQ z`KMamRHvatYw0t}M*i>L$_hb`v%`SF+nUQ^AV)P$w!Ss@m>ytQ7c|~dPEb5r9W-89 z9p_dKm^}0rdUT_&Ay>e02)s@9yNG)8s1Hy+D{hOnXwXV{RHFl_$7E5j^#1F5h?%b9<=8Z>|;HSbX^}^1N#oCq5was68Dhfe; z$q$GCnwCFv)<;s^3RgldmpK31UJyQ>3t<(>yTyOUCiyu~#*$e4nMK88`x@VdG6 
zKgpW^r#b|~<7s8TosoKNLUQuHYLAZ?sot%!UW-N6ITKg~ED1Sf; zI%4MWzy0BRKB)-kr}eD=? z5Q(t6zdkv6jG+~uOXfCRfqAApsE|%Xr;Y(1_LwT#_D+Yng|iGd`l9PlMEg6$XxNsR zY%YHQX1b6dOx1I&b{P1W98J~Q1!}USgjvi3*4tn<9TtGj-`vpOd3Ofzi)qJzTG18A-zU%)n*IH`FS-D5k ziT$v>_Z$_R*LtUJb&4v8UI0Gs0HM8Tf}ndNm9t`M?iL|CJ-pwMTQLr5nehY2c8- zasSSYQ8k&a-NqFk_7!BIN($KFX}&_=Li)Okc%4RM`L90;LR0sz`EsV#HpRN1JMogn zWxuqzPdE`8pP(+a=+v|pRx7-Zc+Y;==vi5-3AebMoW62>_y}GY>SJ;!sRFk*_#2H5 zkX|k<1FLKDmvHBUaTZzTQ#KBUg%(~2Ph8|~ht3Ijwu=Kj>dN*#vxg-m0T96pICRBG z-$GSY6|@a#0JsE?Jr7tHbXb^yCnHyjAXYl0E^!tm`FOnnc3JCg@+h*ma6-{oDz{6|rMruE z9`3HH^e$s1FH;{-Or6LxO_`f174AyCm1V(v6f7a7Xv6pE;i^BQr04-PA3XzASL<<- z6H{p4x$mZbpuMBx@9!_~xrd@l85p5hmQtuKeMcBRAze)&VJYl=_b7!YgCu!FvM3(I z=o=)Hf>*M)h+^K5Rhgqzg8Y?pvq}PyM%B$jm#|W3jL5gtO8x5s`IgH@a*+PT?~{Ydr{-R; zxxeMCf0^U%oB0_ZP5Mac9jFS(V{>OrY~aYtGNujryTx(-FUkDHqN5bdO(i` zcnlKq%BVaH^`)!50e<|`lJ>dlG^XU}X)`=g!=phC zk>~Kqc92lBx*?z;2?%pA)yABnH|$6#*R2x5YPis4dDx(Ts!mW7E1lT%@CPB4^dPwl z-#M+YjCxeQnA81J=vR0sh3?;_96c1$w% zoi@C@FOY_{GbS6(c-C{8G2adzd*uvGSk%Sd?U|AKQ-C6Q@ zZEWUBbF$V$i6c$Glru?4QoK zIYjCK$GFw&`+f0M z8Gyd%A9}_Li{a^_3Ydl=^GO_0uen5AFs^>{glaB6dlDEeM>ERX43_fI$WI62vrv56 z7hh}ypFDgi1E<>?@8_wvpLY{*FLdG1JMJ60hSM%}8ow_>jVy#|Z5aDImfdA>8s)SS z5)z)6QYj@u)a^<0{AasLBBLMZ!33?Wc^6r|Oh zb($@I9a-oaV^`zXZ6_D@9Pi0)5ZIWm*Dc)?mJ;}vHr|>Vbzct8+Si z;dds}^rbrlAq`gDSBS;B%6>xk$US6M{iu$dBf^;ix^Kn3@Y~pk|3DR#T;P!0Rs!km zd;Z$eFxH{wE#rP=)q$+}15;gtj@4{~KVYXh{4f+{yw%+KL&IXzNo;fu?7uoi2pMP_ zOB;50%sy3iIS!O?Zgec+w4x54i&CQ+iBjiLPrm?BKTS~hHPc_CFVxw|{<7ESHWQre z)#W@?0BJf&;xSapYD#btq z&QW_y<#1Htmz1e%AQ~}RV2*XF$%m(0<$>@?MS9w&n^$BJeu#$o&;2IA+s2s2=8iAA z8uno0y+jEq>X4Eb|2cTQCmX~>S#G7Z=4VPxKY`QPS3kPdYuapJX1X71lUi&LxfkBqNImn%@ zlxf$lIEzA9RkvDWbV8fGb|%~p3i_aY6N*0Y;cU;17Dtr*8{d6Q z#@+4^jTyvZ|1J{Zw~S>wV9q*@VDkRwlq{>~D&jEATFR@x zy&&_1*4uqQ3q;jKi604wng)%X9hBzL3&>@O-;LE@q$%+EUCiokdX+Y-&*6So(e z`ocs>pX?t6yiYf;)q_aW?CafT->?Xx3_`b)o073zZJB9R6z12oW+tg$uV0(dv5fJ@ ztkQ%~xpLZAv7a5vR)j#A)<$Dj4vfx}X-kWj-|d#Rr4R45#1W 
zTuZ@QxBNVia@-5jIn>|8FM)mg_8!$G0X2f$hDR7NoG60++#zzD&Cdna%j9y9t@p7>=n&By6996WO{x~-i>2!b+I%}h{N z#u6Rt>70`3p=jTy$RX%fEtA(mVVkMY?9r#{boS{E_{4oxW6xct+@y|zPD*1X ztJ-@U`k`iH+8RGEwQ$+NPoPNDZ5JFQ_;=fjFpk0`HC-Q)KHeT2{+L!3mghn%RICL& zF0GQs(Qxv(`afc$w`PMGu}>3NZy1G@6j!#E zHE#wkaLC5@)3rCZC&h-92LW?JsF<=;N)Ftbk|%O$dO^g2vLqVRb_pFIKZlO&U|6Eo zsNH267>~YOu-5P3()E4gntx3`(lcJHOSw5Jp5yxg&IX(>C>`}f*|rXwKfNlq&%9ec zX#D8HqWs9EH)RcRl}Z(Lv#x)7yxTDjpWFH=Jeeu#_RHG)Y*d(XNo{KlqjM4^!kRp> zAdx;0p8BTPRP5%iYL9N#)2Vt2dO|b%DI2*Hp02#)XqTpI?`x@cj`(I zlG!dyEOq_OY4JEZ`QnUyoM+E>t9b9O=p4oD6m=EGgN#4?ptOxy+Kr7P_!+KW?~T08 zkLpH(Oiik6vfOnx(?7`H(w^iTh=7F$p;)ioyl*5f%8-XBFe()!oLIgt<)B~cvS#37 zK1rBf_m0_Bb(4Y8<>=V28!>)GvueM&3H|~z`fsMm2XGE~^5JUU$MICqHT5w5aba`x z>!{;`W|C1jhf4CgCE<@E5cu`{I+|HQ{qf|l?LKX>!kLq}XuA7g{~t(4(>nux#G&mn z`;NSW>7z@1Kwtl#B4O@d6wy2g2lyfV{>hVfx>raf5uCcqe6RklCfyB1X2Hm-Q z^Y*UW6&i=DY|Fq!Wo8C^R$*imwj%rwa_M#h2`@SE)t#>16&#a1w09?*yU34M-ZDiV zVdRq!8*J(3RMAMbnBRbf^ZzwaLlEB|gu%iYwJDmSaLL!vOPKuwSpJNZQ87lmvM?WUEx*hNgs{zt9QV!C{j1ee-1hj z8Kp_7uUZ6OX}!yfG}{Y`AxGQNM`XAE+SfAlwR%3~bCo4)wHGkY_)u3G$R~aBnW|)- zY}e6Ctr9cx?CNPs-Yif~IHZAJD~O0*nW&co&-3Yf1uq}(I-XGYOkjSxmOVKC9{)We zfdC`@vSz{upR;@%h(h+J6TH~6{vK-EayrEB_B5A)Vr%*qjk_~9MZtM#ruuW^hV+E&Qzn+3zNjHX3TL%IF(CLFDRAcL38IexBwOQ)O_Zl9)+9Zvyn=<9xUiRWqQ zfA+Xdzm8I6IhT2tZ2!2K0zS+G$t4?~P{_C5to|I|BghQ}_d(r1Oy)L5Io7zlzfF$c z>0pyWT?=V7ic0DaIMt0dd!Q>V^2#enXGc*9)z{$zsE{(FGh+JFmZmMu{f-&uz;Rp7 zO7?tnbGygjjiJ8#u*ThnCsPaT5fjl+ECdDJje()*y>tEc72xKf3tT&Jm*BjKbQ2Ui zdp+H_%DFzd$e3{aJLCK(=}(8OB6c9#Dm1m2%B388a8^WGfOFWKC$hD3NFMimf^J+ z3|_~jXcKg_HIOBEAHdC0Xv{eKTuhR3w70cXyHPw&=1i?)|5V+Y2liCiQ09=kw7HP> zCGCT3g`Ki7vxd)87y3_g{*f_|qHtx-@0O)}-Kc1TUbU4<(`Qj+kHK$RvvI-J%>1s_ ze}?Zk`7SEgEqH<({CnT;t|UK(PtaKFt@+bB8_6n{vE9(pk;7>4cfG) z?2~SJgK1rAZG~l9JWbbO2k#20@+~ z9m2b6H8Vc_wk-Jjg_GGr1+o8_!=z+KT021LJu?&EcI^!(zVd$JqA8c zlTGMUE2|p1o0Pot5$qzK{uedUkyR3WIVDtOUt`3aK441~j4~YJO7z4$b8^G|1*LXX&q(gu+WCI0FK*@SJLXf4G1c z3LC_kU%=#g+rE1r7adbtu1~E#%{%GcXgm2wN0|TQ?~BJ`!2HdIy-{C(_iTJjM)#i4 
z&7L|iU3^P-NrG-Lu808|@P7wkhMPse>7554LZ`3)Vrh7F;C0e7%5U&+;5Z=-`+ z7j(tO;eWA!U;-f>Vq4c`$)>AlP7v_`b>vbG%{$obOhsdIACqB&U9RNA?eJxlqDS>2 zd!L~@{87kj742Z>XN=P--~e8{NL@x#*>0bI0a&!W@Gq~ZYH22Voh!8mR)<*@F$T)u z;Oz>F`7M+=vCdpX(1nF6kAmIX)c;?5-x=1_(zXjIpdf-Iq&&-ejW28*IL)C z%sew|GV_%CxrfGJ!!O@04}H%bGvIZQ$}S3^VDL+pkej?Y_SGv()5RgN>b}Lfk^OBs ziq`&BpSAme1DEPN>vV^e2ScpXZ1gfbq72#Y2)y-(JIbFzx7W)tA!)}JX>t<2!Y(%t zUKJ@?aS(^$+F$PVhw?eqaBuDBtfz3_T)gEouYIC0pObSL?Eu$t6SuPj@LG*;!rKi%#(R1#ShlO*yeaUzN%om^Ag<;~XqH;#>CM5g48x|@m z54>72OwD%L@PiZf4gCj|6*WgRpjSJ_UN6QXRsqU(dm^)8r*LBq|J{iPyGVBbn@D6l zz4O;O_lBCU!d3R|*Bz464&W(NxIm%6hd&C6WnUp z95l}`iM;-K&MdM<>Urv?J*OuPE2c>+_8vL>ivb8k!#9V<<-LYira?C)GY~|9Xbn4V zYeS986(S>EAbBl*w$^CV%o~y!^uG7@9$wWSf*7yT^RX3l%)a-m7TAg%h}DGI&dP2| z+q5Sbl!Yznr(5=I3z-_MdcKNp8^2Hvb6yVVEwt_RTJT@`P_ zZ_S|Jv8~+UL-_aq=p|L8T?CW&1$)a!wW1!@kjcHZ``yoyz5c>m?^M&@aMnp%tCi0$ zH$Psk9;k1db(SPu=`lMAAeYb(x#cHM8k$4KSLQ1OjUKu&+RhN?rSq2X)$r3bv#yLY zBaRV%Z`i6RELXfg6YqA^8TLC$T4=6J3SIwv?AcTu5|=qB{JcHEAQ5kZ>EYbAxKklI zs|!2tF}pnI10+VVwBtL!tz?-xOV#@)873k%z!=7!v1+)t@_W(Ib0lP%X*<5gfPt@Q z0!t7?%)$;4UEq4<0~fCfU{%e7T#%YB6!&uLKf?~_C3 zLf6#_VN2myHmEhPCI`hwPV{go49*{)9um=sGIPBpr7;nRHVB3#3$^VmqKjTTjPltT zFq|CRet1vj$-SQgT#VYA;;G~@akts?0>JVwq4m#alah9m5f8Nriv{PdAtHF~| zr;1sG2ihPd-3vYN_@;diCmT}-ynutkx6kaF=fX?oSV!CUe)|B{p~sN^2Sz&>VSvSIGBtvNh` z_g}K{BA>S#ep%LRbmXb=eNESUG<@I8GH-eN75p%yWGhP_SE0ms(dB#vkFCjB9t(y| z%!{jlL$2CX!A6|e<<016?DUf6f_~J~L@~&y?znz_T5e9mBf{mogM7qH!wNA}@JhUK zLO#Fm8xJnsPD#GRx8hS8Z~P|?E6hmc0<&nRl+OZ=p4m4M?Tznqic>0$c@q#25~fbg z#s$l(cb#q9^-s}`w?1UyjCEiVknH8CPIjq>DVd5)q9nR1i!;wDF%+ zygsI>kU|ylp={%a$WdOi4o^As`$CJPW=Bu3NWfhcuW0P^zi;zl@z*mK+Koe}Fh?T_=9tx;72?MnIZ_5` z+c&aQt_pixDV$1D;oqRFH059R&u&!9(sdz<8^Bwy+2ikqU!lIdQoV$Eig%UY2u8Rs ziixxh39#q!fo6&M&(-Nh1bN@(Y5}gkLOCQmippm z(CU}mTXaGK@y_b&@9P2T;i_ojNul9dNy*+isr~O|Gx*(u{JzEH8|LzX#<^FbOX!f6 z@!yMg{VMBxi_VGKtcC{D9oj9F^?oag?IMan@j09lQEAKjm3`iYJw9T8j-2P~5JaSrZ&r>%9^x=@ovSrj^(lr2$Z7U!7tH&bDsrgFXyrl5P1Tdy zO{I8SgUJOuOKsMp0^Y6jQ5z8X!V&Q63;qO`AjC_kP=^0f=eE()6YThINtcpt<1vOG 
z4w92jqq36yEAEt}!}GMj8AN2KDo%Dk^m0Z;v!u$`KjNS|cQkzxUl*N$v z3m)mHa|{iUowg&hR=uLYtq|rPCbw2`6>t z1Cj=smDA`crG{;ST-d_D%{hCYp4mOphFKQK+}#`>f8LvSA`d){Mq*oTmlO9q z95pr>Ld^n9MoGqwM+xRTb@cmnLUv}-m_-?!4&C6nfC^AOtY8n)?rpdv=yeDFaQ7j! zpO9xosoj~)$g1D!dU$&_+_yFz{zw9-oBw@X&V6r>sg>&}dNp)-KHE>sd8H!!3w;wO&rQ*53ounwjaQn1WyR|TsaZD_{cO^ z#M8g9_f?s=WKVp%m7uU`7%5yk20Eotb0K+O{FbTVI{bp8rWXAXmmC+Pyx3+Pq8yDZ zp`qw|3>UgqF3FdAm`nZ6zh%WZ1E(1Ho}UG!Argk}fdf(Up**=w#tLE6ljl3Kx zTsm(J-rE>tw>kO;>y(C6^YoqRQaSm*r%GRlc|huhv14k}Zr?=3wqJ6R%WX3dBo1rX zxDbvXMF=>iv(hX#&NgjVr}ToI{PdpQQ8zNgdxo{#`xao#a1JnzO;i}4Bpj?CLNjdI zR6CO68jY_`@9QNod_#IQ435h`!F9@7Xrmw2e?)wfx4kYgV3fEo=P;F$Wm15xi>~ZX zuUge~FvdkWJ*aBqpA1OYlzNisB>BUh&BBz8mKX5BEH3Wh4WN05>tx3<=S>OU&rGTN z1XHw8?N~(Av$6RRPDi5|no*yri+*3bCPzy00{ak>g%zNv4#yr=>&DJ6022@a=ljs-kI^$9PdIx>+^} zb1?xXqigkRq~a}8x1dZ{yxU}CXHOG_BXW0sLQ#$J67e0s1Mf}YvfTzojq#+{Y1&wA zxVoI{W=8Tp$HZz_OU0Of<8D<%BuDM>>LI}xygqW;Vu@Z(Y z?}K&P%VL^HrF1wWoXAos-ECC829ewKM8`*PUD?S?B&Tj@6o5>p*EME=xrjsW^N5 zrj1n6H25X#M!7)v5@UCXl}|Wncb1G!qR&fC)h^j>vY%#nv?G_6N7x2oGAfq0Mn{RZ zZ*4$VIDq|t!N5sGB0u+&C#$J7Sp_umuw??Kq$=t`zAN#7G-B*`*wy4TM>t<#IdpAF z{v#2OLE})A@Iqgmy3ObNejZ?4q_fS;>)QhOd?)gMbh2TSD^Q~3F z@JpiGis_Pc7M^*HYU$w5@SG8fMe{a+Wi-M&pNETKXgX?qzGqG)Y@_Tw9))~S1j^0T z;kbGaF3cYq*nWQrQZJz{bd`0u=gN=?#Y8N4XgKd4LqE0tyx>#Z|%^VN@kO42X;SbRFme4-vUEWFEX z;(Z%je;uz99ztWJpH2UVsJ~}j6c1>~SVE-Y5Ck2VGBz%XMitmwn{)o@Rka-02nNo| z${TIJhgo`&WShFxdgzR`cG}M7!B#8M7ou{bHx~?75q1s~^yI=)Ji~RzrMEOggk6w? 
zPnY_r+p~SqjiM5oK`p3*Zm%{G#L$|NWtk$~!(3E+KqT%S}y=vJ993CCGxu(N7)xW21 zkfLs5Nqjcg)UOxkf z<Ge4;2^iJAu)pw{R)wwFuk3cQRrY}al}Ks8NtQZn74FnQa_3U zqj(aoSCyBR!<(~*72Z@#+hZU6;LNdW)L~DdN-YnJ!A<+2yMiKO)FMp*Y?~C-gpngT zm$w?Qu0sApj(M<81n2}A{vre5rGYN#vX zZ*&{^xx+wESX;}O>p2_UU7;J9=a^(}@?}Wm6>2LI(qjnX`VEp9FE$fBfFShlu%0=i z{K48&Zlc^N2f1TV&F9UQ3$}mk*`8hx^GVu&h^2tiD`F*@@cWGvj<;i;^MvL0_(FKk zZ1b7odTw(4s8Z40P@`^Fr)h&{4St){H=#1Tmy-7Hn^k23_|v;w^&8 z%v%>u@Ti6CD@(e=w&rBMGuDft3S+!PBPO575`Kh^LWDBzj!yPwXnO~bEzap~jp)ma z<|f4&m=9XP%?5FF$_9pppR%nhD)Cn$&x}rm#Ci{LN6H>!89c#~oWjP^pIK#}zAFrv zJ@XQQb5E`ABEB%S??eOH=-)V&8zMQDu~JbuFL*rt{ny3k{LCKPnYz7osv#?n&Y0Pa z%oj$N9iE6B`Xb3{OzIyT8VJoBDae~#aKL46ryiVB$gKQu%*HvkI$Hr4cBV{v8qm@< z*B^z~?m*!jXjZ$OJlYC-GMTljWQTdVjQj2$d@$&xDwikZNBAH5z`2cLb0INw9|b?; zDOwO(N-@@SOc9JY@|S9raRB4_M1zf*3cjo?UzcF$vkU1`crr;|@pcbAk9>7}1mUn< z{2nSm5OgD!8xpXfVg77gRTZfX?50y+v*tXZ$0fA`W23yNRT*WoR^Mf!TvY}2JG{B01Mn*pUhb*x+b-i_+rxm2gI$|pbG z3B|x3wYz?`0N(~McJ4;#IfoKY`;T_AW`mPIT6i-2sM0zzTT|)9YHx6J>0?2l9bNp@ zP*>Nt#Z8{nAK`k%M$|ob7XWCV6R6a!(lN_M`ind0IbCQb?BS}5%6g$T_I~Z^abQ8B zEDW3z2Y-x9sfzT^B#Akq@-TFeP}{aRzAD+C&S?DhB3eR&E;W3?6o0VZL2sLv%0GT; z0orgs!^4{CH4|oe{Y3AM&%x5cE@|e@K|S%#$Rq(O#FODYf`-NAN0h)W+SOT_;J-U9 z^^13I9#u{x;pINs%5HO`?S~qvc_fVzm-aQYEe9brBe5JTGN=m5P?Tf$ww&{-+_IPi zY?M#*f{_h*jHP6OMY42D-|j(wW2n<^8hGpbc>nSbHIidPs>|pIm;twp=fN_XtvkpZ zZfFm)Dyfzaf4bD9GW2<_1n0Kt%6Z$@Ds{mhPz&CQF%69Ld&+B0gX)H`H%xQ;R-vML zOE1>62RRelAWNDj#0`XyxEjpDF$^K?eY?#xh5UPrfd7R;w-uCPl!|T2{?)!_1 znQ?&QEh3950A+-Jac(*Yw64RZyV!T1u}tPVQVLh?y-O?#NHlrhKE&Z#qNmpIBx)T~ zUpy8I3dBqsQ5zlO;pdI0WQCa62Z9LUX4;qH)xuNU_Cz#A&AaO1W!xg!bHgb1+0^s< z{)CNticxO+#XmZ{x6pJkmdo`;@MQ<5kZRbAcOhCgpFUW`!MLOY0u-32K}1^fp<0!< zE&H)W(A!W&b#3n^ynVUc+Y^i5)W}ow+KPX&Flm5cce8oFfhEV8c?3XpTKt|c9@eW$ zJS=m>syc07bxerogG>VpL+$1!i}KY`g4p12(V|j^0LvTVE0K%&%KgkchCLeC2X!t_ z^{0~=RX+qw1=D^9j-MpO`+i>vqCD{h!vm4Ir`Dn`DI)FdSxaP0eh9ZTXXGDA^|*C@ zK$*}v286JB7K4L;T;i7} zBAA7PRFk1F*{)(z{kpf`-cs1MNr8`)K~M4^g#V-qbsHYIhX$g`kAea0f2yvPK)_6QqodDoadu0fC? 
zNFFXJH(OAa;Nxk!`QafPd0l}M_cUgqJ7B9!uT;>78y-lg$Uj0u*3uoGwnYZ9|3uJT z(&>-ix=YPxHpVj48?JpKbp9Qh;ypqIWih)SPGImI;E=GFpi@SLIgK z<3^o1C7U*bMX1G@nXf@~)J+WSwB)yjtli=Kf(m(f*GENgy%;qRJ)z2v)f%JTWd+c6{dyDW$;1sZSmH!~XH?g>wFYH?LZtI$*Ck>#(IsghD-|^UMzE)_ z+vDuQo1GlaOGMu@@p$=a-Rk|7nwe;pO2a7#r5v$c=lsE=TmFTrpvz4Vq<`ZbQr#dp zZ8cKAAM3Oyx73W3T_~*59v#U3%&+O+Iv_mMhcGx>VaPMtu}2_|a?{FNVLPvz-6++* z7*@cgV+W_HsRxLHInY=H<#ngEnpN!Fd$AGp(S=9eV`YSgsrbgrl6{?c0Un2zjBRdC zEb+ddHL`q)v-VRx?Jb9HeDRrMG>XD_)6S$k zwIieqTn`Ue!^K`v#TjQ|hWFrlyIPDx8xzsxq7hU)p?VSW~sn2nA#s_#mwCO`;@v|r<9I9fNemVjF_cpj5Qwgq8ih?mGAY~ zSPy-cOCI#YK<~UI!_9nJ69Bqyawq>Cch15a`}(FxH|YT(QvMx7m|ihNgf#)eszvh< zYM>{du0E1?e4^-&P}4x!ffKMqWVr~PrmdP#bpGWaA?f@;ed^?wNEnSdOdMlzQ>QyrfA%|1ALye6j{ugOT=1?CI-%eRkCYBgLVhpUBCu+SY) zT$wIc#~V^8hO-kVNZL;zsq_Y6)Qvs6{6Q@(QpSmVM`4 zS@OH2mkQQqN++%dWq-(}ic_cCyB3o#FsM#PgHk|!cqTuAYD+BYzC^sxBatLyg-b`F zyLnlxHp9%9eqfKjM4dX!3rg*(2eMRXGmJ4jYHySAGtSh_B&R%QUjj3z;NGxMnbtvM zml`N>o!VzI(q<_7{p7{BH30s6^+4XFZZTJ@Gu5MiIv#dsURzSYGP6&6Rj_FB3MAMY z)Yi8&BaSV(Mpu-<_BNcQoPh8AESCR=l1ZnsE3}f;?&ajocg_Qq%c2&fbY+~ z&&kZi$vY;^C-?4hoP|uu4?4VGKTXmE9OIRSQj!st0WP9x4j_>-$8lKMxop;9XWE1Q zTv^`;kPC-JzreB+u~)}$yEooczu}(q83YW?t>*%#8K^Rjtv+ec61bEsj9t){cMubK z+JVBsimyaVO`N^Gr#Tvs%=HBd-=ZxVIba=}KGM)0`Sh&Q{03=siFDe39ucc6`B$5YlWmVK6Wz76^;D+(xc1x?@44Gut~vCG@3S^sXz{^b&e_?fpd>m+x>rbEHi^P@ zl7k7+{{cw5DOWncSFp&}s^~)r-BXl3J3jzji3WC=jHn8wRT3Zd;-h!Gm2}f>Ikr%k zx(oo=4WSNPeQ(pW-B#OZKfUkvtzQC}SzwU>W4apuIu*8n?7@66%*tDYoh|Ywgvqdb zK)B_?M>+7@{Y`_ezB>;|VMk{t^T6~ZF}sIc?!*TKa3ZX2Uc~^!Ll6maR!(T-D4LdQ zIj?b$0h3=m&>8%rEhw!^X&8opWinSk@7q>v(Wno_Z>hO$(qT{%1$+A&;F&lQM2IKQ zf1ONo?ddYPJb-~IVfButvk5-1UV2k))JmtU&}Hn=fT_di%AfsPcfNQ@KK;y(UeR|U z<$kx)=awEdkmRw#T1P&>?jsC0iB${D_=(DiIeE}HcjdH^-&g!6+oot-zZ^+lW;?FK zO|f-zNpdJ=@7{n;O)t4C`qhcrX|FR}e>NKszdMvkY8CH^7!MZEeVaMT7H*1NFIg^}FIUQ`yU!MSlJ9_K_y zwfH8fufQ}|hXJ2Y27Hd1CK>G?)l<@Ok&Qt}}OG^`-p(QG0!;pcr0On_)+4^Mul^Rbsl6&aDS5 zLJ$Q=mIfSOE_tdaeN!M=H%lL^2#JCAXm;w8sNctRcy0$SsbQo)7+lf=?N-AwJkkLC 
zH(t8S7lFxLOVslHQWyXyY}}U=YYXxo*(1C%9bHg;z46U9OW~n$z*~^nD0)s7_OS47 zjWc)qxVIA;mM|Zoyq|jJyVn5zky$j({99^V7uH`=XIdI?>!jVT%w`)s*_ZqswEMur8G%2P zQg2U7nUJ<%OOrh<o`{Mhro_%dpO2 zOKdy{n{I6*SZIY5VM}g9c+`HIOmc394R#MTN*a!ST&AbxkSs89@MIvbi*`qz+PE4F zkPQeWYV$4oCtnvTLM9YKkntDxIIWOZUl z@J4QIULry7E~B+j3Sk>(Y&(Ccj@v^}AvZF2mwVCHs0bE=!ai%C5t3ind(WHo6RouK zTJ$J?xDKJXRE?w@_FypF)oIaTOxgS(bMZlu>^^#wkAY)@yc$>!4l*3PLCsl&--GE( zQncl;o~1+B$~H@Dvm6j``Ot`vOaQ!8-zDUNe3eIAsy^bvwImGWbFj)L-2>cp(&j&L zuw2i+`+E&Mqo7I+4TAAUFd9O#FI|+974M14t3J1eH75kdAqrxpRV|v!hrRYawMQr; z<*XQbP0@v@GU>su)p&d}P1V5C(!i@SpQ3aa7&zY0^R$%X<3y(dGv`~Z~Ycr zLp_o#nQx!I|Na0heZUBNSt46W*TZnIFb#%vzJW-IUUq7VC?8tcujaKT(x6@P%SN)u zKwu*f?FPEUDIx19Cif(?vw+*BC2+<=W^FBdtP+&C#P1X3&@S)QY9LW*9+-O6=YZ=! zN6Rwq%zqh3XFT7dBs3{5=!!koHeL- z3Pf`my6ASO_|4+ky+)&!3IiHjmTSe#ooIPHHSdMSTpl9zggVaU4mv&=zsOnEQcwGF z-iq5G8o+%6VA%?mDUJyzMh4CZ;NByj0(4m>A^9`lToa(b==y9!=QU^R z!kG0ueT(=Pyo>sK8S~x(G{Mlmmh;fpPtNTV~9B$_$4ph$HPnEHWm)(1s z4Zb~J=k<}6u{_`ejmP%5#s1Iif2N`D02hIaS7aKO3NSycD5+x8PW^0D)_7`dMijds zHg0PmJ1j9|%2~Q8=e2gYTBaCEyERX?ngI;cWBq&?NOt$zKWBS%9OA_RvW+6vPX8Q2 z#`8l@w)MwYm!bhuh`%aT3vzg>Ep6<0%!CkQ7SsSM<%~P=SJ_`({n_82UpYYHye6g9 z&HrBd2$;z!<1rWi>cvk<{`JLs51c5u`Q=d|{mApy|)bq2lU%&331H_{G_~^fvwu&n7-mJMK^-oj3G_UXi5Q~|Y z|2V|)W7Zo|4$Zb&!+T93JPXEHDyJ}&o%t_qW$ak`+L#;y=ebdz5caP{noDk zH5h>2`&+yI)u8!pK>f0T0Cw(g1L|M4;cpxHUxNWy(7$cuzs=*n938*iAirq+zis4S zciZ1K^8W`mGV?RjjGNlMZBQum$Hui6FS=Tk|LJw`ipW%lJKjIp#sSVP*DdT*BK*&O zt1x(aXFW}9(q zbzJ_($nkg^B-XvVZWbY8l&A2u`3(C|ref%qPszeYaeSOV!(o4|-}6?K=INx_8sXF# z(wdKL+e@4OxSOlBJ|uth-xY02`UtwrdFzn{0Bg0Wt(1TPX}LzFR!0BnjOZUaQeA8} z;k%+8xjT2RU9r`D^iOG~1GSK;iX$YdRWrCXB7%lf$}I<3xAQsWmecYG*6jMnk^i*V z{)f&kc=mSUHwZ?5SJdI%t2fY?*}sc}F)Z^zH@~2S45w;4mIs~~d4axQA_JW7E$h7Jv<}`AB?3ni!pfi2e;hy+_ z)8?c4wK&?phrh>%ipx}`RzJ8kqk4Q7GGGR?w=a>Lc-jQ;!X|*O1Gx%+JBb0dD`MEi zTehz@7qV$?C2Ay`yKbkZ^>?cZcJz?SS5LuFqrt<8L#J7cTcSAanj#p=Sgsp=7o4mS4Ct3keA8~j0+PI=2)rj$S?ZiaAKvHji7ug_=topy9hy;U2{X=)qgcMIv~ z#AyOhcK_Xle_z!pPwBFSnlAfYIZ_fE7W6u1$osX?pSRV?AEHLmgvi~XZf939(%7Uc 
zIrZDhjlIbT$L#198z*6ln2UdUHV@t{aJJE2gXCOtsSkX`|w5By@8r! z`xTdveSnPf-%|dk38Ry9)#2W5!L6I^AGgohA8j{Kuaw$=xnKL;zBWj=GSNb=iz4N zc}QLJ%B(pLdV%|W>-7=+l-KcIq*}yZHT_fNUl&TJ(rmj8IAVD^>n78m3DXlJ-X_>} zM!|k*{qNtLVY=PD=jC#~)U{GU{oC2Nyg9^w zlRo%4%eA^jat+RrUXjBhbdplkMix4+R9; z?s3GpG#!4A<%sP}%@veNogqHrGty#>4}T<=qR~EBu_X0g@t>dl=i-Ar>oxPL0JX(k psZHzkm@&SJHZUT(C`FY! +```cmake +cmake_minimum_required(VERSION 3.20) +project(translate-server CXX) +set(CMAKE_CXX_STANDARD 20) + +include(FetchContent) + +# llama.cpp (LLM inference engine) +FetchContent_Declare(llama + GIT_REPOSITORY https://github.com/ggml-org/llama.cpp + GIT_TAG master + GIT_SHALLOW TRUE +) +FetchContent_MakeAvailable(llama) + +# cpp-httplib (HTTP server/client) +FetchContent_Declare(httplib + GIT_REPOSITORY https://github.com/yhirose/cpp-httplib + GIT_TAG master +) +FetchContent_MakeAvailable(httplib) + +# nlohmann/json (JSON parser) +FetchContent_Declare(json + URL https://github.com/nlohmann/json/releases/download/v3.11.3/json.tar.xz +) +FetchContent_MakeAvailable(json) + +# cpp-llamalib (header-only llama.cpp wrapper) +FetchContent_Declare(cpp_llamalib + GIT_REPOSITORY https://github.com/yhirose/cpp-llamalib + GIT_TAG main +) +FetchContent_MakeAvailable(cpp_llamalib) + +add_executable(translate-server src/main.cpp) + +target_link_libraries(translate-server PRIVATE + httplib::httplib + nlohmann_json::nlohmann_json + cpp-llamalib +) +``` + +`FetchContent_Declare` tells CMake where to find each library, and `FetchContent_MakeAvailable` fetches and builds them. The first `cmake -B build` will take some time because it downloads all libraries and builds llama.cpp, but subsequent runs will use the cache. + +Just link with `target_link_libraries`, and each library's CMake configuration sets up include paths and build settings for you. + +## 1.5 Creating the Skeleton Code + +We'll use this skeleton code as a base and add functionality chapter by chapter. 
+ + +```cpp +// src/main.cpp +#include +#include + +#include +#include + +using json = nlohmann::json; + +httplib::Server svr; + +// Graceful shutdown on `Ctrl+C` +void signal_handler(int sig) { + if (sig == SIGINT || sig == SIGTERM) { + std::cout << "\nReceived signal, shutting down gracefully...\n"; + svr.stop(); + } +} + +int main() { + // Log requests and responses + svr.set_logger([](const auto &req, const auto &res) { + std::cout << req.method << " " << req.path << " -> " << res.status + << std::endl; + }); + + // Health check + svr.Get("/health", [](const auto &, auto &res) { + res.set_content(json{{"status", "ok"}}.dump(), "application/json"); + }); + + // Stub implementations for each endpoint (replaced with real ones in later chapters) + svr.Post("/translate", + [](const auto &req, auto &res) { + res.set_content(json{{"translation", "TODO"}}.dump(), "application/json"); + }); + + svr.Post("/translate/stream", + [](const auto &req, auto &res) { + res.set_content("data: \"TODO\"\n\ndata: [DONE]\n\n", "text/event-stream"); + }); + + svr.Get("/models", + [](const auto &req, auto &res) { + res.set_content(json{{"models", json::array()}}.dump(), "application/json"); + }); + + svr.Post("/models/select", + [](const auto &req, auto &res) { + res.set_content(json{{"status", "TODO"}}.dump(), "application/json"); + }); + + // Allow the server to be stopped with `Ctrl+C` (`SIGINT`) or `kill` (`SIGTERM`) + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); + + // Start the server + std::cout << "Listening on http://127.0.0.1:8080" << std::endl; + svr.listen("127.0.0.1", 8080); +} +``` + +## 1.6 Building and Verifying + +Build the project, start the server, and verify that requests work with curl. + +```bash +cmake -B build +cmake --build build -j +./build/translate-server +``` + +From another terminal, try it with curl. + +```bash +curl http://localhost:8080/health +# => {"status":"ok"} +``` + +If you see JSON come back, the setup is complete. 
+ +## Next Chapter + +Now that the environment is set up, in the next chapter we'll implement the translation REST API on top of this skeleton. We'll run inference with llama.cpp and expose it as an HTTP endpoint with cpp-httplib. + +**Next:** [Integrating llama.cpp to Build a REST API](../ch02-rest-api) diff --git a/docs-src/pages/en/llm-app/ch02-rest-api.md b/docs-src/pages/en/llm-app/ch02-rest-api.md new file mode 100644 index 0000000..6204573 --- /dev/null +++ b/docs-src/pages/en/llm-app/ch02-rest-api.md @@ -0,0 +1,212 @@ +--- +title: "2. Integrating llama.cpp to Build a REST API" +order: 2 + +--- + +In the skeleton from Chapter 1, `/translate` simply returned `"TODO"`. In this chapter we integrate llama.cpp inference and turn it into an API that actually returns translation results. + +Calling the llama.cpp API directly makes the code quite long, so we use a thin wrapper library called [cpp-llamalib](https://github.com/yhirose/cpp-llamalib). It lets you load a model and run inference in just a few lines, keeping the focus on cpp-httplib. + +## 2.1 Initializing the LLM + +Simply pass the path to a model file to `llamalib::Llama`, and model loading, context creation, and sampler configuration are all taken care of. If you downloaded a different model in Chapter 1, adjust the path accordingly. + +```cpp +#include + +int main() { + auto llm = llamalib::Llama{"models/gemma-2-2b-it-Q4_K_M.gguf"}; + + // LLM inference takes time, so set a longer timeout (default is 5 seconds) + svr.set_read_timeout(300); + svr.set_write_timeout(300); + + // ... Build and start the HTTP server ... +} +``` + +If you want to change the number of GPU layers, context length, or other settings, you can specify them via `llamalib::Options`. 
+ +```cpp +auto llm = llamalib::Llama{"models/gemma-2-2b-it-Q4_K_M.gguf", { + .n_gpu_layers = 0, // CPU only + .n_ctx = 4096, +}}; +``` + +## 2.2 The `/translate` Handler + +We replace the handler that returned dummy JSON in Chapter 1 with actual inference. + +```cpp +svr.Post("/translate", + [&](const httplib::Request &req, httplib::Response &res) { + // Parse JSON (3rd arg `false`: don't throw on failure, check with `is_discarded()`) + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + // Validate required fields + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); // Default is Japanese + + // Build the prompt and run inference + auto prompt = "Translate the following text to " + target_lang + + ". Output only the translation, nothing else.\n\n" + text; + + try { + auto translation = llm.chat(prompt); + res.set_content(json{{"translation", translation}}.dump(), + "application/json"); + } catch (const std::exception &e) { + res.status = 500; + res.set_content(json{{"error", e.what()}}.dump(), "application/json"); + } +}); +``` + +`llm.chat()` can throw exceptions during inference (for example, when the context length is exceeded). By catching them with `try/catch` and returning the error as JSON, we prevent the server from crashing. + +## 2.3 Complete Code + +Here is the finished code with all the changes so far. + +

+Complete code (main.cpp) + +```cpp +#include +#include +#include + +#include +#include + +using json = nlohmann::json; + +httplib::Server svr; + +// Graceful shutdown on `Ctrl+C` +void signal_handler(int sig) { + if (sig == SIGINT || sig == SIGTERM) { + std::cout << "\nReceived signal, shutting down gracefully...\n"; + svr.stop(); + } +} + +int main() { + // Load the model downloaded in Chapter 1 + auto llm = llamalib::Llama{"models/gemma-2-2b-it-Q4_K_M.gguf"}; + + // LLM inference takes time, so set a longer timeout (default is 5 seconds) + svr.set_read_timeout(300); + svr.set_write_timeout(300); + + // Log requests and responses + svr.set_logger([](const auto &req, const auto &res) { + std::cout << req.method << " " << req.path << " -> " << res.status + << std::endl; + }); + + svr.Get("/health", [](const httplib::Request &, httplib::Response &res) { + res.set_content(json{{"status", "ok"}}.dump(), "application/json"); + }); + + svr.Post("/translate", + [&](const httplib::Request &req, httplib::Response &res) { + // Parse JSON (3rd arg `false`: don't throw on failure, check with `is_discarded()`) + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + // Validate required fields + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); // Default is Japanese + + // Build the prompt and run inference + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + try { + auto translation = llm.chat(prompt); + res.set_content(json{{"translation", translation}}.dump(), + "application/json"); + } catch (const std::exception &e) { + res.status = 500; + res.set_content(json{{"error", e.what()}}.dump(), "application/json"); + } + }); + + // Dummy implementations to be replaced with real ones in later chapters + svr.Get("/models", + [](const httplib::Request &, httplib::Response &res) { + res.set_content(json{{"models", json::array()}}.dump(), "application/json"); + }); + + svr.Post("/models/select", + [](const httplib::Request &, httplib::Response &res) { + res.set_content(json{{"status", "TODO"}}.dump(), "application/json"); + }); + + // Allow the server to be stopped with `Ctrl+C` (`SIGINT`) or `kill` (`SIGTERM`) + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); + + // Start the server (blocks until `stop()` is called) + std::cout << "Listening on http://127.0.0.1:8080" << std::endl; + svr.listen("127.0.0.1", 8080); +} +``` + +
+ +## 2.4 Testing It Out + +Rebuild and start the server, then verify that it now returns actual translation results. + +```bash +cmake --build build -j +./build/translate-server +``` + +```bash +curl -X POST http://localhost:8080/translate \ + -H "Content-Type: application/json" \ + -d '{"text": "I had a great time visiting Tokyo last spring. The cherry blossoms were beautiful.", "target_lang": "ja"}' +# => {"translation":"去年の春に東京を訪れた。桜が綺麗だった。"} +``` + +In Chapter 1 the response was `"TODO"`, but now you get an actual translation back. + +## Next Chapter + +The REST API we built in this chapter waits for the entire translation to complete before sending the response, so for long texts the user has to wait with no indication of progress. + +In the next chapter, we use SSE (Server-Sent Events) to stream tokens back in real time as they are generated. + +**Next:** [Adding Token Streaming with SSE](../ch03-sse-streaming) diff --git a/docs-src/pages/en/llm-app/ch03-sse-streaming.md b/docs-src/pages/en/llm-app/ch03-sse-streaming.md new file mode 100644 index 0000000..656d9b4 --- /dev/null +++ b/docs-src/pages/en/llm-app/ch03-sse-streaming.md @@ -0,0 +1,264 @@ +--- +title: "3. Adding Token Streaming with SSE" +order: 3 + +--- + +The `/translate` endpoint from Chapter 2 returned the entire translation at once after completion. This is fine for short sentences, but for longer text the user has to wait several seconds with nothing displayed. + +In this chapter, we add a `/translate/stream` endpoint that uses SSE (Server-Sent Events) to return tokens in real time as they are generated. This is the same approach used by the ChatGPT and Claude APIs. + +## 3.1 What is SSE? + +SSE is a way to send HTTP responses as a stream. When a client sends a request, the server keeps the connection open and gradually returns events. The format is simple text. 
+ +```text +data: "去年の" +data: "春に" +data: "東京を" +data: [DONE] +``` + +Each line starts with `data:` and events are separated by blank lines. The Content-Type is `text/event-stream`. Tokens are sent as escaped JSON strings, so they appear enclosed in double quotes (we implement this in Section 3.3). + +## 3.2 Streaming with cpp-httplib + +In cpp-httplib, you can use `set_chunked_content_provider` to send responses incrementally. Each time you write to `sink.os` inside the callback, data is sent to the client. + +```cpp +res.set_chunked_content_provider( + "text/event-stream", + [](size_t offset, httplib::DataSink &sink) { + sink.os << "data: hello\n\n"; + sink.done(); + return true; + }); +``` + +Calling `sink.done()` ends the stream. If the client disconnects mid-stream, writing to `sink.os` will fail and `sink.os.fail()` will return `true`. You can use this to detect disconnection and abort unnecessary inference. + +## 3.3 The `/translate/stream` Handler + +JSON parsing and validation are the same as the `/translate` endpoint from Chapter 2. The only difference is how the response is returned. We combine the streaming callback of `llm.chat()` with `set_chunked_content_provider`. + +```cpp +svr.Post("/translate/stream", + [&](const httplib::Request &req, httplib::Response &res) { + // ... JSON parsing and validation same as /translate ... 
+ + res.set_chunked_content_provider( + "text/event-stream", + [&, prompt](size_t, httplib::DataSink &sink) { + try { + llm.chat(prompt, [&](std::string_view token) { + sink.os << "data: " + << json(std::string(token)).dump( + -1, ' ', false, json::error_handler_t::replace) + << "\n\n"; + return sink.os.good(); // Abort inference on disconnect + }); + sink.os << "data: [DONE]\n\n"; + } catch (const std::exception &e) { + sink.os << "data: " << json({{"error", e.what()}}).dump() << "\n\n"; + } + sink.done(); + return true; + }); +}); +``` + +A few key points: + +- When you pass a callback to `llm.chat()`, it is called each time a token is generated. If the callback returns `false`, generation is aborted +- After writing to `sink.os`, you can check whether the client is still connected with `sink.os.good()`. If the client has disconnected, it returns `false` to stop inference +- Each token is escaped as a JSON string using `json(token).dump()` before sending. This is safe even for tokens containing newlines or quotes +- The first three arguments of `dump(-1, ' ', false, ...)` are the defaults. What matters is the fourth argument, `json::error_handler_t::replace`. Since the LLM returns tokens at the subword level, multi-byte characters (such as Japanese) can be split mid-character across tokens. Passing an incomplete UTF-8 byte sequence directly to `dump()` would throw an exception, so `replace` safely substitutes them. The browser reassembles the bytes on its end, so everything displays correctly +- The entire lambda is wrapped in `try/catch`. `llm.chat()` can throw exceptions for reasons such as exceeding the context window. If an exception goes uncaught inside the lambda, the server will crash, so we return the error as an SSE event instead +- `data: [DONE]` follows the OpenAI API convention to signal the end of the stream to the client + +## 3.4 Complete Code + +Here is the complete code with the `/translate/stream` endpoint added to the code from Chapter 2. + +
+Complete code (main.cpp) + +```cpp +#include +#include +#include + +#include +#include + +using json = nlohmann::json; + +httplib::Server svr; + +// Graceful shutdown on `Ctrl+C` +void signal_handler(int sig) { + if (sig == SIGINT || sig == SIGTERM) { + std::cout << "\nReceived signal, shutting down gracefully...\n"; + svr.stop(); + } +} + +int main() { + // Load the GGUF model + auto llm = llamalib::Llama{"models/gemma-2-2b-it-Q4_K_M.gguf"}; + + // LLM inference takes time, so set a longer timeout (default is 5 seconds) + svr.set_read_timeout(300); + svr.set_write_timeout(300); + + // Log requests and responses + svr.set_logger([](const auto &req, const auto &res) { + std::cout << req.method << " " << req.path << " -> " << res.status + << std::endl; + }); + + svr.Get("/health", [](const httplib::Request &, httplib::Response &res) { + res.set_content(json{{"status", "ok"}}.dump(), "application/json"); + }); + + // Standard translation endpoint from Chapter 2 + svr.Post("/translate", + [&](const httplib::Request &req, httplib::Response &res) { + // JSON parsing and validation (see Chapter 2 for details) + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + try { + auto translation = llm.chat(prompt); + res.set_content(json{{"translation", translation}}.dump(), + "application/json"); + } catch (const std::exception &e) { + res.status = 500; + res.set_content(json{{"error", e.what()}}.dump(), "application/json"); + } + }); + + // SSE streaming translation endpoint + svr.Post("/translate/stream", + [&](const httplib::Request &req, httplib::Response &res) { + // JSON parsing and validation (same as /translate) + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + res.set_chunked_content_provider( + "text/event-stream", + [&, prompt](size_t, httplib::DataSink &sink) { + try { + llm.chat(prompt, [&](std::string_view token) { + sink.os << "data: " + << json(std::string(token)).dump( + -1, ' ', false, json::error_handler_t::replace) + << "\n\n"; + return sink.os.good(); // Abort inference on disconnect + }); + sink.os << "data: [DONE]\n\n"; + } catch (const std::exception &e) { + sink.os << "data: " << json({{"error", e.what()}}).dump() << "\n\n"; + } + sink.done(); + return true; + }); + }); + + // Dummy implementations to be replaced in later chapters + svr.Get("/models", + [](const httplib::Request &, httplib::Response &res) { + res.set_content(json{{"models", json::array()}}.dump(), "application/json"); + }); + + svr.Post("/models/select", + [](const httplib::Request &, httplib::Response &res) { + res.set_content(json{{"status", "TODO"}}.dump(), "application/json"); + }); + + // Allow the server to be stopped with `Ctrl+C` (`SIGINT`) or `kill` (`SIGTERM`) + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); + + // Start the server (blocks until `stop()` is called) + std::cout << "Listening on http://127.0.0.1:8080" << std::endl; + svr.listen("127.0.0.1", 8080); +} +``` + +
+ +## 3.5 Testing It Out + +Build and start the server. + +```bash +cmake --build build -j +./build/translate-server +``` + +Using curl's `-N` option to disable buffering, you can see tokens displayed in real time as they arrive. + +```bash +curl -N -X POST http://localhost:8080/translate/stream \ + -H "Content-Type: application/json" \ + -d '{"text": "I had a great time visiting Tokyo last spring. The cherry blossoms were beautiful.", "target_lang": "ja"}' +``` + +```text +data: "去年の" +data: "春に" +data: "東京を" +data: "訪れた" +data: "。" +data: "桜が" +data: "綺麗だった" +data: "。" +data: [DONE] +``` + +You should see tokens streaming in one by one. The `/translate` endpoint from Chapter 2 continues to work as well. + +## Next Chapter + +The server's translation functionality is now complete. In the next chapter, we use cpp-httplib's client functionality to add the ability to fetch and manage models from Hugging Face. + +**Next:** [Adding Model Download and Management](../ch04-model-management) diff --git a/docs-src/pages/en/llm-app/ch04-model-management.md b/docs-src/pages/en/llm-app/ch04-model-management.md new file mode 100644 index 0000000..0eb5ef6 --- /dev/null +++ b/docs-src/pages/en/llm-app/ch04-model-management.md @@ -0,0 +1,788 @@ +--- +title: "4. Adding Model Download and Management" +order: 4 + +--- + +By the end of Chapter 3, the server's translation functionality was fully in place. However, the only model file available is the one we manually downloaded in Chapter 1. In this chapter, we'll use cpp-httplib's **client functionality** to enable downloading and switching Hugging Face models from within the app. 
+ +Once complete, you'll be able to manage models with requests like these: + +```bash +# Get the list of available models +curl http://localhost:8080/models +``` + +```json +{ + "models": [ + {"name": "gemma-2-2b-it", "params": "2B", "size": "1.6 GB", "downloaded": true, "selected": true}, + {"name": "gemma-2-9b-it", "params": "9B", "size": "5.8 GB", "downloaded": false, "selected": false}, + {"name": "Llama-3.1-8B-Instruct", "params": "8B", "size": "4.9 GB", "downloaded": false, "selected": false} + ] +} +``` + +```bash +# Select a different model (automatically downloads if not yet available) +curl -N -X POST http://localhost:8080/models/select \ + -H "Content-Type: application/json" \ + -d '{"model": "gemma-2-9b-it"}' +``` + +```text +data: {"status":"downloading","progress":0} +data: {"status":"downloading","progress":12} +... +data: {"status":"downloading","progress":100} +data: {"status":"loading"} +data: {"status":"ready"} +``` + +## 4.1 httplib::Client Basics + +So far we've only used `httplib::Server`, but cpp-httplib also provides client functionality. Since Hugging Face uses HTTPS, we need a TLS-capable client. + +```cpp +#include + +// Including the URL scheme automatically uses SSLClient +httplib::Client cli("https://huggingface.co"); + +// Automatically follow redirects (Hugging Face redirects to a CDN) +cli.set_follow_location(true); + +auto res = cli.Get("/api/models"); +if (res && res->status == 200) { + std::cout << res->body << std::endl; +} +``` + +To use HTTPS, you need to enable OpenSSL at build time. 
Add the following to your `CMakeLists.txt`: + +```cmake +find_package(OpenSSL REQUIRED) + +target_link_libraries(translate-server PRIVATE OpenSSL::SSL OpenSSL::Crypto) +target_compile_definitions(translate-server PRIVATE CPPHTTPLIB_OPENSSL_SUPPORT) + +# macOS: required for loading system certificates +if(APPLE) + target_link_libraries(translate-server PRIVATE "-framework CoreFoundation" "-framework Security") +endif() +``` + +Defining `CPPHTTPLIB_OPENSSL_SUPPORT` enables `httplib::Client("https://...")` to make TLS connections. On macOS, you also need to link the CoreFoundation and Security frameworks to access the system certificate store. See Section 4.8 for the complete `CMakeLists.txt`. + +## 4.2 Defining the Model List + +Let's define the list of models that the app can handle. Here are four models we've verified for translation tasks. + +```cpp +struct ModelInfo { + std::string name; // Display name + std::string params; // Parameter count + std::string size; // GGUF Q4 size + std::string repo; // Hugging Face repository + std::string filename; // GGUF filename +}; + +const std::vector MODELS = { + { + .name = "gemma-2-2b-it", + .params = "2B", + .size = "1.6 GB", + .repo = "bartowski/gemma-2-2b-it-GGUF", + .filename = "gemma-2-2b-it-Q4_K_M.gguf", + }, + { + .name = "gemma-2-9b-it", + .params = "9B", + .size = "5.8 GB", + .repo = "bartowski/gemma-2-9b-it-GGUF", + .filename = "gemma-2-9b-it-Q4_K_M.gguf", + }, + { + .name = "Llama-3.1-8B-Instruct", + .params = "8B", + .size = "4.9 GB", + .repo = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF", + .filename = "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + }, +}; +``` + +## 4.3 Model Storage Location + +Up through Chapter 3, we stored models in the `models/` directory within the project. However, when managing multiple models, a dedicated app directory makes more sense. On macOS/Linux we use `~/.translate-app/models/`, and on Windows we use `%APPDATA%\translate-app\models\`. 
+ +```cpp +std::filesystem::path get_models_dir() { +#ifdef _WIN32 + auto env = std::getenv("APPDATA"); + auto base = env ? std::filesystem::path(env) : std::filesystem::path("."); + return base / "translate-app" / "models"; +#else + auto env = std::getenv("HOME"); + auto base = env ? std::filesystem::path(env) : std::filesystem::path("."); + return base / ".translate-app" / "models"; +#endif +} +``` + +If the environment variable isn't set, it falls back to the current directory. The app creates this directory at startup (`create_directories` won't error even if it already exists). + +## 4.4 Rewriting Model Initialization + +We rewrite the model initialization at the beginning of `main()`. In Chapter 1 we hardcoded the path, but from here on we support model switching. We track the currently loaded filename in `selected_model` and load the first entry in `MODELS` at startup. The `GET /models` and `POST /models/select` handlers reference and update this variable. + +Since cpp-httplib runs handlers concurrently on a thread pool, reassigning `llm` while another thread is calling `llm.chat()` would crash. We add a `std::mutex` to protect against this. + +```cpp +int main() { + auto models_dir = get_models_dir(); + std::filesystem::create_directories(models_dir); + + std::string selected_model = MODELS[0].filename; + auto path = models_dir / selected_model; + + // Automatically download the default model if not yet present + if (!std::filesystem::exists(path)) { + std::cout << "Downloading " << selected_model << "..." << std::endl; + if (!download_model(MODELS[0], [](int pct) { + std::cout << "\r" << pct << "%" << std::flush; + return true; + })) { + std::cerr << "\nFailed to download model." << std::endl; + return 1; + } + std::cout << std::endl; + } + auto llm = llamalib::Llama{path}; + std::mutex llm_mutex; // Protect access during model switching + // ... +} +``` + +This ensures that users don't need to manually download models with curl on first launch. 
It uses the `download_model` function from Section 4.6 and displays progress on the console. + +## 4.5 The `GET /models` Handler + +This returns the model list with information about whether each model has been downloaded and whether it's currently selected. + +```cpp +svr.Get("/models", + [&](const httplib::Request &, httplib::Response &res) { + auto arr = json::array(); + for (const auto &m : MODELS) { + auto path = get_models_dir() / m.filename; + arr.push_back({ + {"name", m.name}, + {"params", m.params}, + {"size", m.size}, + {"downloaded", std::filesystem::exists(path)}, + {"selected", m.filename == selected_model}, + }); + } + res.set_content(json{{"models", arr}}.dump(), "application/json"); +}); +``` + +## 4.6 Downloading Large Files + +GGUF models are several gigabytes, so we can't load the entire file into memory. By passing callbacks to `httplib::Client::Get`, we can receive data chunk by chunk. + +```cpp +// content_receiver: callback that receives data chunks +// progress: download progress callback +cli.Get(url, + [&](const char *data, size_t len) { // content_receiver + ofs.write(data, len); + return true; // returning false aborts the download + }, + [&](size_t current, size_t total) { // progress + int pct = total ? (int)(current * 100 / total) : 0; + std::cout << pct << "%" << std::endl; + return true; // returning false aborts the download + }); +``` + +Let's use this to create a function that downloads models from Hugging Face. + +```cpp +#include +#include + +// Download a model and report progress via progress_cb. +// If progress_cb returns false, the download is aborted. 
+bool download_model(const ModelInfo &model, + std::function progress_cb) { + httplib::Client cli("https://huggingface.co"); + cli.set_follow_location(true); + cli.set_read_timeout(std::chrono::hours(1)); + + auto url = "/" + model.repo + "/resolve/main/" + model.filename; + auto path = get_models_dir() / model.filename; + auto tmp_path = std::filesystem::path(path).concat(".tmp"); + + std::ofstream ofs(tmp_path, std::ios::binary); + if (!ofs) { return false; } + + auto res = cli.Get(url, + [&](const char *data, size_t len) { + ofs.write(data, len); + return ofs.good(); + }, + [&](size_t current, size_t total) { + return progress_cb(total ? (int)(current * 100 / total) : 0); + }); + + ofs.close(); + + if (!res || res->status != 200) { + std::filesystem::remove(tmp_path); + return false; + } + + // Write to .tmp first, then rename, so that an incomplete file + // is never mistaken for a usable model if the download is interrupted + std::filesystem::rename(tmp_path, path); + return true; +} +``` + +## 4.7 The `/models/select` Handler + +This handles model selection requests. We always respond with SSE, reporting status in sequence: download progress, loading, and ready. 
+ +```cpp +svr.Post("/models/select", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded() || !input.contains("model")) { + res.status = 400; + res.set_content(json{{"error", "'model' is required"}}.dump(), + "application/json"); + return; + } + + auto name = input["model"].get(); + + // Find the model in the list + auto it = std::find_if(MODELS.begin(), MODELS.end(), + [&](const ModelInfo &m) { return m.name == name; }); + + if (it == MODELS.end()) { + res.status = 404; + res.set_content(json{{"error", "Unknown model"}}.dump(), + "application/json"); + return; + } + + const auto &model = *it; + + // Always respond with SSE (same format whether already downloaded or not) + res.set_chunked_content_provider( + "text/event-stream", + [&, model](size_t, httplib::DataSink &sink) { + // SSE event sending helper + auto send = [&](const json &event) { + sink.os << "data: " << event.dump() << "\n\n"; + }; + + // Download if not yet present (report progress via SSE) + auto path = get_models_dir() / model.filename; + if (!std::filesystem::exists(path)) { + bool ok = download_model(model, [&](int pct) { + send({{"status", "downloading"}, {"progress", pct}}); + return sink.os.good(); // Abort download on client disconnect + }); + if (!ok) { + send({{"status", "error"}, {"message", "Download failed"}}); + sink.done(); + return true; + } + } + + // Load and switch to the model + send({{"status", "loading"}}); + { + std::lock_guard lock(llm_mutex); + llm = llamalib::Llama{path}; + selected_model = model.filename; + } + + send({{"status", "ready"}}); + sink.done(); + return true; + }); +}); +``` + +A few notes: + +- We send SSE events directly from the `download_model` progress callback. This is an application of `set_chunked_content_provider` + `sink.os` from Chapter 3 +- Since the callback returns `sink.os.good()`, the download stops if the client disconnects. 
The cancel button we add in Chapter 5 uses this +- When we update `selected_model`, it's reflected in the `selected` flag of `GET /models` +- The `llm` reassignment is protected by `llm_mutex`. The `/translate` and `/translate/stream` handlers also lock the same mutex, so inference can't run during a model switch (see the complete code) + +## 4.8 Complete Code + +Here is the complete code with model management added to the Chapter 3 code. + +
+Complete code (CMakeLists.txt) + +```cmake +cmake_minimum_required(VERSION 3.20) +project(translate-server CXX) +set(CMAKE_CXX_STANDARD 20) + +include(FetchContent) + +# llama.cpp +FetchContent_Declare(llama + GIT_REPOSITORY https://github.com/ggml-org/llama.cpp + GIT_TAG master + GIT_SHALLOW TRUE +) +FetchContent_MakeAvailable(llama) + +# cpp-httplib +FetchContent_Declare(httplib + GIT_REPOSITORY https://github.com/yhirose/cpp-httplib + GIT_TAG master +) +FetchContent_MakeAvailable(httplib) + +# nlohmann/json +FetchContent_Declare(json + URL https://github.com/nlohmann/json/releases/download/v3.11.3/json.tar.xz +) +FetchContent_MakeAvailable(json) + +# cpp-llamalib +FetchContent_Declare(cpp_llamalib + GIT_REPOSITORY https://github.com/yhirose/cpp-llamalib + GIT_TAG main +) +FetchContent_MakeAvailable(cpp_llamalib) + +find_package(OpenSSL REQUIRED) + +add_executable(translate-server src/main.cpp) + +target_link_libraries(translate-server PRIVATE + httplib::httplib + nlohmann_json::nlohmann_json + cpp-llamalib + OpenSSL::SSL OpenSSL::Crypto +) + +target_compile_definitions(translate-server PRIVATE CPPHTTPLIB_OPENSSL_SUPPORT) + +if(APPLE) + target_link_libraries(translate-server PRIVATE + "-framework CoreFoundation" + "-framework Security" + ) +endif() +``` + +
+ +
+Complete code (main.cpp) + +```cpp +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +using json = nlohmann::json; + +// ------------------------------------------------------------------------- +// Model definitions +// ------------------------------------------------------------------------- + +struct ModelInfo { + std::string name; + std::string params; + std::string size; + std::string repo; + std::string filename; +}; + +const std::vector MODELS = { + { + .name = "gemma-2-2b-it", + .params = "2B", + .size = "1.6 GB", + .repo = "bartowski/gemma-2-2b-it-GGUF", + .filename = "gemma-2-2b-it-Q4_K_M.gguf", + }, + { + .name = "gemma-2-9b-it", + .params = "9B", + .size = "5.8 GB", + .repo = "bartowski/gemma-2-9b-it-GGUF", + .filename = "gemma-2-9b-it-Q4_K_M.gguf", + }, + { + .name = "Llama-3.1-8B-Instruct", + .params = "8B", + .size = "4.9 GB", + .repo = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF", + .filename = "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + }, +}; + +// ------------------------------------------------------------------------- +// Model storage directory +// ------------------------------------------------------------------------- + +std::filesystem::path get_models_dir() { +#ifdef _WIN32 + auto env = std::getenv("APPDATA"); + auto base = env ? std::filesystem::path(env) : std::filesystem::path("."); + return base / "translate-app" / "models"; +#else + auto env = std::getenv("HOME"); + auto base = env ? 
std::filesystem::path(env) : std::filesystem::path("."); + return base / ".translate-app" / "models"; +#endif +} + +// ------------------------------------------------------------------------- +// Model download +// ------------------------------------------------------------------------- + +// If progress_cb returns false, the download is aborted +bool download_model(const ModelInfo &model, + std::function progress_cb) { + httplib::Client cli("https://huggingface.co"); + cli.set_follow_location(true); // Hugging Face redirects to a CDN + cli.set_read_timeout(std::chrono::hours(1)); // Set a long timeout for large models + + auto url = "/" + model.repo + "/resolve/main/" + model.filename; + auto path = get_models_dir() / model.filename; + auto tmp_path = std::filesystem::path(path).concat(".tmp"); + + std::ofstream ofs(tmp_path, std::ios::binary); + if (!ofs) { return false; } + + auto res = cli.Get(url, + // content_receiver: receive data chunk by chunk and write to file + [&](const char *data, size_t len) { + ofs.write(data, len); + return ofs.good(); + }, + // progress: report download progress (returning false aborts) + [&, last_pct = -1](size_t current, size_t total) mutable { + int pct = total ? 
(int)(current * 100 / total) : 0; + if (pct == last_pct) return true; // Skip if same value + last_pct = pct; + return progress_cb(pct); + }); + + ofs.close(); + + if (!res || res->status != 200) { + std::filesystem::remove(tmp_path); + return false; + } + + // Rename after download completes + std::filesystem::rename(tmp_path, path); + return true; +} + +// ------------------------------------------------------------------------- +// Server +// ------------------------------------------------------------------------- + +httplib::Server svr; + +void signal_handler(int sig) { + if (sig == SIGINT || sig == SIGTERM) { + std::cout << "\nReceived signal, shutting down gracefully...\n"; + svr.stop(); + } +} + +int main() { + // Create the model storage directory + auto models_dir = get_models_dir(); + std::filesystem::create_directories(models_dir); + + // Automatically download the default model if not yet present + std::string selected_model = MODELS[0].filename; + auto path = models_dir / selected_model; + if (!std::filesystem::exists(path)) { + std::cout << "Downloading " << selected_model << "..." << std::endl; + if (!download_model(MODELS[0], [](int pct) { + std::cout << "\r" << pct << "%" << std::flush; + return true; + })) { + std::cerr << "\nFailed to download model." 
<< std::endl; + return 1; + } + std::cout << std::endl; + } + auto llm = llamalib::Llama{path}; + std::mutex llm_mutex; // Protect access during model switching + + // Set a long timeout since LLM inference takes time (default is 5 seconds) + svr.set_read_timeout(300); + svr.set_write_timeout(300); + + svr.set_logger([](const auto &req, const auto &res) { + std::cout << req.method << " " << req.path << " -> " << res.status + << std::endl; + }); + + svr.Get("/health", [](const httplib::Request &, httplib::Response &res) { + res.set_content(json{{"status", "ok"}}.dump(), "application/json"); + }); + + // --- Translation endpoint (Chapter 2) ------------------------------------ + + svr.Post("/translate", + [&](const httplib::Request &req, httplib::Response &res) { + // JSON parsing and validation (see Chapter 2 for details) + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + try { + std::lock_guard lock(llm_mutex); + auto translation = llm.chat(prompt); + res.set_content(json{{"translation", translation}}.dump(), + "application/json"); + } catch (const std::exception &e) { + res.status = 500; + res.set_content(json{{"error", e.what()}}.dump(), "application/json"); + } + }); + + // --- SSE streaming translation (Chapter 3) ------------------------------- + + svr.Post("/translate/stream", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + res.set_chunked_content_provider( + "text/event-stream", + [&, prompt](size_t, httplib::DataSink &sink) { + std::lock_guard lock(llm_mutex); + try { + llm.chat(prompt, [&](std::string_view token) { + sink.os << "data: " + << json(std::string(token)).dump( + -1, ' ', false, json::error_handler_t::replace) + << "\n\n"; + return sink.os.good(); // Abort inference on disconnect + }); + sink.os << "data: [DONE]\n\n"; + } catch (const std::exception &e) { + sink.os << "data: " << json({{"error", e.what()}}).dump() << "\n\n"; + } + sink.done(); + return true; + }); + }); + + // --- Model list (Chapter 4) ---------------------------------------------- + + svr.Get("/models", + [&](const httplib::Request &, httplib::Response &res) { + auto models_dir = get_models_dir(); + auto arr = json::array(); + for (const auto &m : MODELS) { + auto path = models_dir / m.filename; + arr.push_back({ + {"name", m.name}, + {"params", m.params}, + {"size", m.size}, + {"downloaded", std::filesystem::exists(path)}, + {"selected", m.filename == selected_model}, + }); + } + res.set_content(json{{"models", arr}}.dump(), "application/json"); + }); + + // --- Model selection (Chapter 4) ----------------------------------------- + + svr.Post("/models/select", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded() || !input.contains("model")) { + res.status = 400; + res.set_content(json{{"error", "'model' is required"}}.dump(), + "application/json"); + return; + } + + auto name = input["model"].get(); + + auto it = std::find_if(MODELS.begin(), MODELS.end(), + [&](const ModelInfo &m) { return m.name == name; }); + + if (it == MODELS.end()) { + res.status = 404; + res.set_content(json{{"error", "Unknown model"}}.dump(), + "application/json"); + return; + } + + const auto &model = *it; + + // Always respond with SSE (same format whether already downloaded or 
not) + res.set_chunked_content_provider( + "text/event-stream", + [&, model](size_t, httplib::DataSink &sink) { + // SSE event sending helper + auto send = [&](const json &event) { + sink.os << "data: " << event.dump() << "\n\n"; + }; + + // Download if not yet present (report progress via SSE) + auto path = get_models_dir() / model.filename; + if (!std::filesystem::exists(path)) { + bool ok = download_model(model, [&](int pct) { + send({{"status", "downloading"}, {"progress", pct}}); + return sink.os.good(); // Abort download on client disconnect + }); + if (!ok) { + send({{"status", "error"}, {"message", "Download failed"}}); + sink.done(); + return true; + } + } + + // Load and switch to the model + send({{"status", "loading"}}); + { + std::lock_guard lock(llm_mutex); + llm = llamalib::Llama{path}; + selected_model = model.filename; + } + + send({{"status", "ready"}}); + sink.done(); + return true; + }); + }); + + // Allow the server to be stopped with `Ctrl+C` (`SIGINT`) or `kill` (`SIGTERM`) + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); + + std::cout << "Listening on http://127.0.0.1:8080" << std::endl; + svr.listen("127.0.0.1", 8080); +} +``` + +
+ +## 4.9 Testing + +Since we added OpenSSL configuration to CMakeLists.txt, we need to re-run CMake before building. + +```bash +cmake -B build +cmake --build build -j +./build/translate-server +``` + +### Checking the Model List + +```bash +curl http://localhost:8080/models +``` + +The gemma-2-2b-it model downloaded in Chapter 1 should show `downloaded: true` and `selected: true`. + +### Switching to a Different Model + +```bash +curl -N -X POST http://localhost:8080/models/select \ + -H "Content-Type: application/json" \ + -d '{"model": "gemma-2-9b-it"}' +``` + +Download progress streams via SSE, and `"ready"` appears when it's done. + +### Comparing Translations Across Models + +Let's translate the same sentence with different models. + +```bash +# Translate with gemma-2-9b-it (the model we just switched to) +curl -X POST http://localhost:8080/translate \ + -H "Content-Type: application/json" \ + -d '{"text": "The quick brown fox jumps over the lazy dog.", "target_lang": "ja"}' + +# Switch back to gemma-2-2b-it +curl -N -X POST http://localhost:8080/models/select \ + -H "Content-Type: application/json" \ + -d '{"model": "gemma-2-2b-it"}' + +# Translate the same sentence +curl -X POST http://localhost:8080/translate \ + -H "Content-Type: application/json" \ + -d '{"text": "The quick brown fox jumps over the lazy dog.", "target_lang": "ja"}' +``` + +Translation results vary depending on the model, even with the same code and the same prompt. Since cpp-llamalib automatically applies the appropriate chat template for each model, no code changes are needed. + +## Next Chapter + +The server's main features are now complete: REST API, SSE streaming, and model download and switching. In the next chapter, we'll add static file serving and build a Web UI you can use from a browser. 
+ +**Next:** [Adding a Web UI](../ch05-web-ui) diff --git a/docs-src/pages/en/llm-app/ch05-web-ui.md b/docs-src/pages/en/llm-app/ch05-web-ui.md new file mode 100644 index 0000000..fb4e90d --- /dev/null +++ b/docs-src/pages/en/llm-app/ch05-web-ui.md @@ -0,0 +1,1223 @@ +--- +title: "5. Adding a Web UI" +order: 5 + +--- + +By the end of Chapter 4, we've built out all the server features: the translation API, SSE streaming, and model management. But so far, the only way to interact with it is through curl. In this chapter, we'll add a Web UI so you can translate from the browser. + +Here's what the finished screen looks like. + +![Web UI](../webui.png#large-center) + +- As you type text, tokens appear one by one (with debounce) +- You can switch models and languages from the header dropdowns +- Selecting an undownloaded model starts a download with a progress bar (cancellable) + +The HTML, CSS, and JavaScript code is minimal. We won't use any CSS framework -- just plain CSS (about 100 lines) for the layout. Since this is a C++ book, we won't go into detailed frontend explanations. We'll just show you "write this, and it does that." + +## 5.1 File Structure + +These are the files we'll add in this chapter. We'll place HTML, CSS, and JavaScript in the `public/` directory and serve them from the server. + +```ascii +translate-app/ +├── public/ +│ ├── index.html +│ ├── style.css +│ └── script.js +└── src/ + └── main.cpp # Add set_mount_point +``` + +## 5.2 Setting Up Static File Serving + +Using cpp-httplib's `set_mount_point`, you can serve a directory directly over HTTP. Create a `public/` directory and place an empty `index.html` in it. + +```bash +mkdir public +``` + +```html + + + + + Translate App + + +

Hello!

+ + +``` + +Add one line of `set_mount_point` to the server code and rebuild. + +```cpp +// Add inside `main()`, before `svr.listen()` +svr.set_mount_point("/", "./public"); +``` + +Start the server and open `http://127.0.0.1:8080` in your browser -- you should see "Hello!" displayed. Since these are static files, just reload the browser after editing `index.html` to see the changes. No server restart needed. + +## 5.3 Building the Layout + +Replace `index.html` with the final layout. + +```html + + + + + + Translate App + + + + + + +
+ Translate App +
+ + + +
+
+ + +
+ + +
+ + + +

Downloading model...

+ +

+ +
+ + + + +``` + +Key points about the HTML. + +- The favicon uses an inline SVG emoji, so no image file is needed +- `` shows download progress. It's a standard HTML element you can display as a modal with `showModal()` +- `` is for displaying translation results. It's an element that semantically represents "computed output" +- There's no translate button. Translation starts automatically when you type text (implemented in Section 5.4) + +Write the CSS to `public/style.css`. We won't use any CSS framework -- just plain CSS for the layout. + +```css +:root { + --gap: 0.5rem; + --color-border: #ccc; + --font: system-ui, sans-serif; +} + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +html, body { + height: 100%; + font-family: var(--font); +} + +body { + display: flex; + flex-direction: column; + padding: var(--gap); + gap: var(--gap); +} + +/* Header: title + dropdowns */ +header { + display: flex; + align-items: center; + justify-content: space-between; +} + +header div { + display: flex; + gap: var(--gap); +} + +/* Main: two-column layout */ +main { + flex: 1; + display: grid; + grid-template-columns: 1fr 1fr; + gap: var(--gap); + min-height: 0; +} + +#input-text { + resize: none; + padding: 0.75rem; + font-family: var(--font); + font-size: 1rem; + border: 1px solid var(--color-border); + border-radius: 4px; +} + +textarea:focus, +select:focus { + outline: 1px solid #4a9eff; + outline-offset: -1px; +} + +#output-text { + display: block; + padding: 0.75rem; + font-size: 1rem; + border: 1px solid var(--color-border); + border-radius: 4px; + white-space: pre-wrap; + overflow-y: auto; +} + +/* Download modal */ +dialog { + border: 1px solid var(--color-border); + border-radius: 8px; + padding: 1.5rem; + max-width: 400px; + width: 90%; + margin: auto; +} + +dialog::backdrop { + background: rgba(0, 0, 0, 0.4); +} + +dialog h3 { + margin-bottom: 0.75rem; +} + +dialog progress { + width: 100%; + height: 1.25rem; +} + +dialog p { + margin-top: 0.5rem; + 
text-align: center; + color: #666; +} + +dialog button { + display: block; + margin: 0.75rem auto 0; + padding: 0.4rem 1.5rem; + cursor: pointer; +} + +/* Block the entire UI during translation or model switching */ +body.busy { + cursor: wait; +} + +body.busy select, +body.busy textarea { + pointer-events: none; + opacity: 0.6; +} +``` + +Key points about the layout. + +- `body` uses Flexbox for vertical layout, and `main` takes up the remaining height with `flex: 1`. The input and output areas extend to the bottom of the window +- `main` uses CSS Grid's `1fr 1fr` to split into two columns +- The `--gap` variable unifies all spacing. The top of the header, the space between the header and boxes, and the bottom of the boxes all have the same width +- The `body.busy` class blocks the UI during translation or model switching. JavaScript toggles it on and off + +Reload the browser and you should see the input and output areas side by side. Nothing happens when you type yet, but the layout is complete. + +## 5.4 Connecting the Translation Feature + +Now it's time to call the server's API from JavaScript. Create `public/script.js`. + +### Reading the SSE Stream + +The `/translate/stream` endpoint we built in Chapter 3 is a POST endpoint. Since the browser's `EventSource` only supports GET, we'll read SSE using `fetch()` + `ReadableStream`. The basic pattern is: + +1. Send a POST request with `fetch()` +2. Get a stream with `res.body.getReader()` +3. Process lines starting with `data:` as we read chunks + +Chunks can be split in the middle of an SSE line, so we need to buffer them and process line by line. + +### Auto-translation with Debounce + +Instead of a translate button, we trigger translation automatically on text input or language change. We add a 300ms debounce to prevent requests from firing on every keystroke. + +To cancel the previous translation while typing, we use `AbortController`. 
When new input arrives, `abort()` cancels the previous `fetch` and starts a new translation. Since we need to pass a cancellation `signal` to `fetch`, the SSE reading is written inline. + +```js +const inputText = document.getElementById("input-text"); +const outputText = document.getElementById("output-text"); +const targetLang = document.getElementById("target-lang"); + +let debounceTimer = null; +let abortController = null; + +async function translate() { + const text = inputText.value.trim(); + if (!text) { + outputText.textContent = ""; + return; + } + + // Cancel any in-progress translation + if (abortController) abortController.abort(); + abortController = new AbortController(); + const { signal } = abortController; + + outputText.textContent = ""; + document.body.classList.add("busy"); + + try { + const res = await fetch("/translate/stream", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ text, target_lang: targetLang.value }), + signal, + }); + + if (!res.ok) { + const err = await res.json(); + throw new Error(err.error || `HTTP ${res.status}`); + } + + const reader = res.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop(); + + for (const line of lines) { + if (line.startsWith("data: ")) { + const data = line.slice(6); + if (data === "[DONE]") return; + const parsed = JSON.parse(data); + if (parsed && parsed.error) { + outputText.textContent = "Error: " + parsed.error; + return; + } + outputText.textContent += parsed; + } + } + } + } catch (e) { + if (e.name === "AbortError") return; // Cancelled by new input + outputText.textContent = "Error: " + e.message; + } finally { + document.body.classList.remove("busy"); + } +} + +function scheduleTranslation() { + clearTimeout(debounceTimer); + 
debounceTimer = setTimeout(translate, 300); +} + +inputText.addEventListener("input", scheduleTranslation); +targetLang.addEventListener("change", scheduleTranslation); +``` + +We use `fetch` directly because we need to pass the `AbortController`'s `signal`. Since the server can return errors as JSON objects (from the `try/catch` we added in Chapter 3), we also check for `parsed.error`. + +Reload the browser and try typing some text. After 300ms, tokens should appear one by one. If you change the input, the previous translation is cancelled and a new one begins. + +## 5.5 Connecting Model Selection + +### Loading the Model List + +When the page loads, we call `GET /models` to initialize the dropdown. + +```js +const modelSelect = document.getElementById("model-select"); + +// Fetch model list from `GET /models` and build the dropdown +async function loadModels() { + const res = await fetch("/models"); + const { models } = await res.json(); + + modelSelect.innerHTML = ""; // Clear existing options + for (const m of models) { + const opt = document.createElement("option"); + opt.value = m.name; + // Mark undownloaded models with a ⬇ icon to distinguish them + opt.textContent = m.downloaded + ? `${m.name} (${m.params})` + : `${m.name} (${m.params}) ⬇`; + opt.selected = m.selected; // Select the current model using the `selected` flag from the server + modelSelect.appendChild(opt); + } +} + +loadModels(); // Run on page load +``` + +Undownloaded models are marked with a `⬇` icon to distinguish them. + +### Switching Models + +Changing the dropdown calls `POST /models/select`. If a download is needed, a `` with a progress bar appears. The cancel button can abort the download. + +As with translation, we use `AbortController`. Clicking the cancel button calls `abort()` to disconnect. The server detects the disconnection and aborts the download (thanks to `download_model` returning `sink.os.good()` from Chapter 4). 
+ +```js +const dialog = document.getElementById("download-dialog"); +const progressBar = document.getElementById("download-progress"); +const downloadStatus = document.getElementById("download-status"); +const downloadCancel = document.getElementById("download-cancel"); + +let modelAbort = null; + +downloadCancel.addEventListener("click", () => { + if (modelAbort) modelAbort.abort(); +}); + +modelSelect.addEventListener("change", async () => { + const name = modelSelect.value; + document.body.classList.add("busy"); + + modelAbort = new AbortController(); + const { signal } = modelAbort; + + try { + const res = await fetch("/models/select", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ model: name }), + signal, + }); + + if (!res.ok) { + const err = await res.json(); + throw new Error(err.error || `HTTP ${res.status}`); + } + + const reader = res.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop(); + + for (const line of lines) { + if (line.startsWith("data: ")) { + const data = line.slice(6); + if (data === "[DONE]") return; + const event = JSON.parse(data); + + switch (event.status) { + case "downloading": + if (!dialog.open) dialog.showModal(); // Show the modal + progressBar.value = event.progress; // Update the progress bar + downloadStatus.textContent = `${event.progress}%`; + break; + case "loading": + // Removing the `value` attribute puts `` into animated (indeterminate) state + progressBar.removeAttribute("value"); + downloadStatus.textContent = "Loading model..."; + break; + case "ready": + if (dialog.open) dialog.close(); + break; + case "error": + if (dialog.open) dialog.close(); + alert("Download failed: " + event.message); + break; + } + } + } + } + + await loadModels(); // Refresh 
the list since the `selected` flag changed + scheduleTranslation(); // Re-translate with the new model + } catch (e) { + if (e.name === "AbortError") { + // Cancelled -- revert to the original model + await loadModels(); + } else { + alert("Error: " + e.message); + } + } finally { + document.body.classList.remove("busy"); + if (dialog.open) dialog.close(); + modelAbort = null; + } +}); +``` + +`progressBar.removeAttribute("value")` puts the `` element into an indeterminate (animated) state. We use this while loading the model after the download completes. + +## 5.6 Complete Code + +
+Complete code (index.html) + +```html + + + + + + Translate App + + + + + + +
+ Translate App +
+ + + +
+
+ + +
+ + +
+ + + +

Downloading model...

+ +

+ +
+ + + + +``` + +
+ +
+Complete code (style.css) + +```css +:root { + --gap: 0.5rem; + --color-border: #ccc; + --font: system-ui, sans-serif; +} + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +html, body { + height: 100%; + font-family: var(--font); +} + +body { + display: flex; + flex-direction: column; + padding: var(--gap); + gap: var(--gap); +} + +/* Header: title + dropdowns */ +header { + display: flex; + align-items: center; + justify-content: space-between; +} + +header div { + display: flex; + gap: var(--gap); +} + +/* Main: two-column layout */ +main { + flex: 1; + display: grid; + grid-template-columns: 1fr 1fr; + gap: var(--gap); + min-height: 0; +} + +#input-text { + resize: none; + padding: 0.75rem; + font-family: var(--font); + font-size: 1rem; + border: 1px solid var(--color-border); + border-radius: 4px; +} + +textarea:focus, +select:focus { + outline: 1px solid #4a9eff; + outline-offset: -1px; +} + +#output-text { + display: block; + padding: 0.75rem; + font-size: 1rem; + border: 1px solid var(--color-border); + border-radius: 4px; + white-space: pre-wrap; + overflow-y: auto; +} + +/* Download modal */ +dialog { + border: 1px solid var(--color-border); + border-radius: 8px; + padding: 1.5rem; + max-width: 400px; + width: 90%; + margin: auto; +} + +dialog::backdrop { + background: rgba(0, 0, 0, 0.4); +} + +dialog h3 { + margin-bottom: 0.75rem; +} + +dialog progress { + width: 100%; + height: 1.25rem; +} + +dialog p { + margin-top: 0.5rem; + text-align: center; + color: #666; +} + +dialog button { + display: block; + margin: 0.75rem auto 0; + padding: 0.4rem 1.5rem; + cursor: pointer; +} + +/* Block the entire UI during translation or model switching */ +body.busy { + cursor: wait; +} + +body.busy select, +body.busy textarea { + pointer-events: none; + opacity: 0.6; +} +``` + +
+ +
+Complete code (script.js) + +```js +// --- DOM Elements --- + +const inputText = document.getElementById("input-text"); +const outputText = document.getElementById("output-text"); +const targetLang = document.getElementById("target-lang"); +const modelSelect = document.getElementById("model-select"); +const dialog = document.getElementById("download-dialog"); +const progressBar = document.getElementById("download-progress"); +const downloadStatus = document.getElementById("download-status"); +const downloadCancel = document.getElementById("download-cancel"); + +// --- Model List --- + +// Fetch model list from `GET /models` and build the dropdown +async function loadModels() { + const res = await fetch("/models"); + const { models } = await res.json(); + + modelSelect.innerHTML = ""; // Clear existing options + for (const m of models) { + const opt = document.createElement("option"); + opt.value = m.name; + // Mark undownloaded models with a ⬇ icon to distinguish them + opt.textContent = m.downloaded + ? 
`${m.name} (${m.params})` + : `${m.name} (${m.params}) ⬇`; + opt.selected = m.selected; // Select the current model using the `selected` flag from the server + modelSelect.appendChild(opt); + } +} + +loadModels(); // Run on page load + +// --- Translation (auto-translation with debounce) --- + +let debounceTimer = null; +let abortController = null; + +async function translate() { + const text = inputText.value.trim(); + if (!text) { + outputText.textContent = ""; + return; + } + + // Cancel any in-progress translation + if (abortController) abortController.abort(); + abortController = new AbortController(); + const { signal } = abortController; + + outputText.textContent = ""; + document.body.classList.add("busy"); + + try { + const res = await fetch("/translate/stream", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ text, target_lang: targetLang.value }), + signal, + }); + + if (!res.ok) { + const err = await res.json(); + throw new Error(err.error || `HTTP ${res.status}`); + } + + const reader = res.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop(); + + for (const line of lines) { + if (line.startsWith("data: ")) { + const data = line.slice(6); + if (data === "[DONE]") return; + const parsed = JSON.parse(data); + if (parsed && parsed.error) { + outputText.textContent = "Error: " + parsed.error; + return; + } + outputText.textContent += parsed; + } + } + } + } catch (e) { + if (e.name === "AbortError") return; // Cancelled by new input + outputText.textContent = "Error: " + e.message; + } finally { + document.body.classList.remove("busy"); + } +} + +function scheduleTranslation() { + clearTimeout(debounceTimer); + debounceTimer = setTimeout(translate, 300); +} + 
+inputText.addEventListener("input", scheduleTranslation); +targetLang.addEventListener("change", scheduleTranslation); + +// --- Model Selection --- + +let modelAbort = null; + +downloadCancel.addEventListener("click", () => { + if (modelAbort) modelAbort.abort(); +}); + +modelSelect.addEventListener("change", async () => { + const name = modelSelect.value; + document.body.classList.add("busy"); + + modelAbort = new AbortController(); + const { signal } = modelAbort; + + try { + const res = await fetch("/models/select", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ model: name }), + signal, + }); + + if (!res.ok) { + const err = await res.json(); + throw new Error(err.error || `HTTP ${res.status}`); + } + + const reader = res.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop(); + + for (const line of lines) { + if (line.startsWith("data: ")) { + const data = line.slice(6); + if (data === "[DONE]") return; + const event = JSON.parse(data); + + switch (event.status) { + case "downloading": + if (!dialog.open) dialog.showModal(); + progressBar.value = event.progress; + downloadStatus.textContent = `${event.progress}%`; + break; + case "loading": + progressBar.removeAttribute("value"); + downloadStatus.textContent = "Loading model..."; + break; + case "ready": + if (dialog.open) dialog.close(); + break; + case "error": + if (dialog.open) dialog.close(); + alert("Download failed: " + event.message); + break; + } + } + } + } + + await loadModels(); + scheduleTranslation(); // Re-translate with the new model + } catch (e) { + if (e.name === "AbortError") { + // Cancelled -- revert to the original model + await loadModels(); + } else { + alert("Error: " + e.message); + } + } finally { + 
document.body.classList.remove("busy"); + if (dialog.open) dialog.close(); + modelAbort = null; + } +}); +``` + +
+ +
+Complete code (main.cpp) + +The only server-side change is the single `set_mount_point` line. Add it before `svr.listen()` in the complete code from Chapter 4. + +```cpp +#include +#include +#include + +#include +#include +#include +#include +#include + +using json = nlohmann::json; + +// ------------------------------------------------------------------------- +// Model definitions +// ------------------------------------------------------------------------- + +struct ModelInfo { + std::string name; + std::string params; + std::string size; + std::string repo; + std::string filename; +}; + +const std::vector MODELS = { + { + .name = "gemma-2-2b-it", + .params = "2B", + .size = "1.6 GB", + .repo = "bartowski/gemma-2-2b-it-GGUF", + .filename = "gemma-2-2b-it-Q4_K_M.gguf", + }, + { + .name = "gemma-2-9b-it", + .params = "9B", + .size = "5.8 GB", + .repo = "bartowski/gemma-2-9b-it-GGUF", + .filename = "gemma-2-9b-it-Q4_K_M.gguf", + }, + { + .name = "Llama-3.1-8B-Instruct", + .params = "8B", + .size = "4.9 GB", + .repo = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF", + .filename = "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + }, +}; + +// ------------------------------------------------------------------------- +// Model storage directory +// ------------------------------------------------------------------------- + +std::filesystem::path get_models_dir() { +#ifdef _WIN32 + auto env = std::getenv("APPDATA"); + auto base = env ? std::filesystem::path(env) : std::filesystem::path("."); + return base / "translate-app" / "models"; +#else + auto env = std::getenv("HOME"); + auto base = env ? 
std::filesystem::path(env) : std::filesystem::path("."); + return base / ".translate-app" / "models"; +#endif +} + +// ------------------------------------------------------------------------- +// Model download +// ------------------------------------------------------------------------- + +// Abort the download if progress_cb returns false +bool download_model(const ModelInfo &model, + std::function progress_cb) { + httplib::Client cli("https://huggingface.co"); + cli.set_follow_location(true); // Hugging Face redirects to CDN + cli.set_read_timeout(std::chrono::hours(1)); // Long timeout for large models + + auto url = "/" + model.repo + "/resolve/main/" + model.filename; + auto path = get_models_dir() / model.filename; + auto tmp_path = std::filesystem::path(path).concat(".tmp"); + + std::ofstream ofs(tmp_path, std::ios::binary); + if (!ofs) { return false; } + + auto res = cli.Get(url, + // content_receiver: receive chunks and write to file + [&](const char *data, size_t len) { + ofs.write(data, len); + return ofs.good(); + }, + // progress: report download progress (return false to abort) + [&, last_pct = -1](size_t current, size_t total) mutable { + int pct = total ? 
(int)(current * 100 / total) : 0; + if (pct == last_pct) return true; // Skip if same value + last_pct = pct; + return progress_cb(pct); + }); + + ofs.close(); + + if (!res || res->status != 200) { + std::filesystem::remove(tmp_path); + return false; + } + + // Rename after download completes + std::filesystem::rename(tmp_path, path); + return true; +} + +// ------------------------------------------------------------------------- +// Server +// ------------------------------------------------------------------------- + +httplib::Server svr; + +void signal_handler(int sig) { + if (sig == SIGINT || sig == SIGTERM) { + std::cout << "\nReceived signal, shutting down gracefully...\n"; + svr.stop(); + } +} + +int main() { + // Create model storage directory + auto models_dir = get_models_dir(); + std::filesystem::create_directories(models_dir); + + // Auto-download default model if not present + std::string selected_model = MODELS[0].filename; + auto path = models_dir / selected_model; + if (!std::filesystem::exists(path)) { + std::cout << "Downloading " << selected_model << "..." << std::endl; + if (!download_model(MODELS[0], [](int pct) { + std::cout << "\r" << pct << "%" << std::flush; + return true; + })) { + std::cerr << "\nFailed to download model." 
<< std::endl; + return 1; + } + std::cout << std::endl; + } + auto llm = llamalib::Llama{path}; + + // LLM inference takes time, so set a longer timeout (default is 5 seconds) + svr.set_read_timeout(300); + svr.set_write_timeout(300); + + svr.set_logger([](const auto &req, const auto &res) { + std::cout << req.method << " " << req.path << " -> " << res.status + << std::endl; + }); + + svr.Get("/health", [](const httplib::Request &, httplib::Response &res) { + res.set_content(json{{"status", "ok"}}.dump(), "application/json"); + }); + + // --- Translation endpoint (Chapter 2) ------------------------------------ + + svr.Post("/translate", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + try { + auto translation = llm.chat(prompt); + res.set_content(json{{"translation", translation}}.dump(), + "application/json"); + } catch (const std::exception &e) { + res.status = 500; + res.set_content(json{{"error", e.what()}}.dump(), "application/json"); + } + }); + + // --- SSE streaming translation (Chapter 3) -------------------------------- + + svr.Post("/translate/stream", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + res.set_chunked_content_provider( + "text/event-stream", + [&, prompt](size_t, httplib::DataSink &sink) { + try { + llm.chat(prompt, [&](std::string_view token) { + sink.os << "data: " + << json(std::string(token)).dump( + -1, ' ', false, json::error_handler_t::replace) + << "\n\n"; + return sink.os.good(); // Abort inference on disconnect + }); + sink.os << "data: [DONE]\n\n"; + } catch (const std::exception &e) { + sink.os << "data: " << json({{"error", e.what()}}).dump() << "\n\n"; + } + sink.done(); + return true; + }); + }); + + // --- Model list (Chapter 4) ----------------------------------------------- + + svr.Get("/models", + [&](const httplib::Request &, httplib::Response &res) { + auto models_dir = get_models_dir(); + auto arr = json::array(); + for (const auto &m : MODELS) { + auto path = models_dir / m.filename; + arr.push_back({ + {"name", m.name}, + {"params", m.params}, + {"size", m.size}, + {"downloaded", std::filesystem::exists(path)}, + {"selected", m.filename == selected_model}, + }); + } + res.set_content(json{{"models", arr}}.dump(), "application/json"); + }); + + // --- Model selection (Chapter 4) ------------------------------------------ + + svr.Post("/models/select", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded() || !input.contains("model")) { + res.status = 400; + res.set_content(json{{"error", "'model' is required"}}.dump(), + "application/json"); + return; + } + + auto name = input["model"].get(); + + auto it = std::find_if(MODELS.begin(), MODELS.end(), + [&](const ModelInfo &m) { return m.name == name; }); + + if (it == MODELS.end()) { + res.status = 404; + res.set_content(json{{"error", "Unknown model"}}.dump(), + "application/json"); + return; + } + + const auto &model = *it; + + // Always respond with SSE (same format whether downloaded or not) + res.set_chunked_content_provider( 
+ "text/event-stream", + [&, model](size_t, httplib::DataSink &sink) { + // SSE event send helper + auto send = [&](const json &event) { + sink.os << "data: " << event.dump() << "\n\n"; + }; + + // Download if not yet downloaded (report progress via SSE) + auto path = get_models_dir() / model.filename; + if (!std::filesystem::exists(path)) { + bool ok = download_model(model, [&](int pct) { + send({{"status", "downloading"}, {"progress", pct}}); + return sink.os.good(); // Abort download on client disconnect + }); + if (!ok) { + send({{"status", "error"}, {"message", "Download failed"}}); + sink.done(); + return true; + } + } + + // Load and switch to the model + send({{"status", "loading"}}); + llm = llamalib::Llama{path}; + selected_model = model.filename; + + send({{"status", "ready"}}); + sink.done(); + return true; + }); + }); + + // --- Static file serving (Chapter 5) -------------------------------------- + + svr.set_mount_point("/", "./public"); + + // Allow graceful shutdown via `Ctrl+C` (`SIGINT`) or `kill` (`SIGTERM`) + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); + + std::cout << "Listening on http://127.0.0.1:8080" << std::endl; + svr.listen("127.0.0.1", 8080); +} +``` + +
+ +## 5.7 Testing + +Rebuild and start the server. + +```bash +cmake --build build -j +./build/translate-server +``` + +Open `http://127.0.0.1:8080` in your browser. + +1. Type some text -- after 300ms, tokens appear incrementally +2. Change the input -- the previous translation is cancelled and a new one starts +3. Change the language dropdown -- automatic re-translation +4. Change the model dropdown -- switches immediately if already downloaded +5. Select an undownloaded model -- a progress bar appears, and Cancel can abort it + +Everything we did with curl in Chapter 4 can now be done from the browser. + +## Next Chapter + +The server and Web UI are complete. In the next chapter, we'll wrap this app with webview/webview to make it a desktop application that runs without a browser. We'll embed the static files into the binary so the distributable is a single executable. + +**Next:** [Turning It into a Desktop App with WebView](../ch06-desktop-app) diff --git a/docs-src/pages/en/llm-app/ch06-desktop-app.md b/docs-src/pages/en/llm-app/ch06-desktop-app.md new file mode 100644 index 0000000..304382d --- /dev/null +++ b/docs-src/pages/en/llm-app/ch06-desktop-app.md @@ -0,0 +1,724 @@ +--- +title: "6. Turning It into a Desktop App with WebView" +order: 6 + +--- + +In Chapter 5, we completed a translation app you can use from a browser. But every time, you have to start the server, open the URL in a browser... Wouldn't it be nice to just double-click and start using it, like a normal app? + +In this chapter, we'll do two things: + +1. **WebView integration** — Use [webview/webview](https://github.com/webview/webview) to turn it into a desktop app that runs without a browser +2. **Single binary packaging** — Use [cpp-embedlib](https://github.com/yhirose/cpp-embedlib) to embed HTML/CSS/JS into the binary, making the distributable a single file + +When finished, you'll be able to just run `./translate-app` to open a window and start translating. 
+ +![Desktop App](../app.png#large-center) + +The model downloads automatically on first launch, so the only thing you need to give users is the single binary. + +## 6.1 Introducing webview/webview + +[webview/webview](https://github.com/webview/webview) is a library that lets you use the OS's native WebView component (WKWebView on macOS, WebKitGTK on Linux, WebView2 on Windows) from C/C++. Unlike Electron, it doesn't bundle its own browser, so the impact on binary size is negligible. + +We'll fetch it with CMake. Add the following to your `CMakeLists.txt`: + +```cmake +# webview/webview +FetchContent_Declare(webview + GIT_REPOSITORY https://github.com/webview/webview + GIT_TAG master +) +FetchContent_MakeAvailable(webview) +``` + +This makes the `webview::core` CMake target available. When you link it with `target_link_libraries`, it automatically sets up include paths and platform-specific frameworks. + +> **macOS**: No additional dependencies are needed. WKWebView is built into the system. +> +> **Linux**: WebKitGTK is required. Install it with `sudo apt install libwebkit2gtk-4.1-dev`. +> +> **Windows**: The WebView2 runtime is required. It comes pre-installed on Windows 11. For Windows 10, download it from the [official Microsoft website](https://developer.microsoft.com/en-us/microsoft-edge/webview2/). + +## 6.2 Running the Server on a Background Thread + +Up through Chapter 5, the server's `listen()` was blocking the main thread. To use WebView, we need to run the server on a separate thread and run the WebView event loop on the main thread. + +```cpp +#include "webview/webview.h" +#include + +int main() { + // ... (server setup is the same as Chapter 5) ... 
+ + // Start the server on a background thread + auto port = svr.bind_to_any_port("127.0.0.1"); + std::thread server_thread([&]() { svr.listen_after_bind(); }); + + std::cout << "Listening on http://127.0.0.1:" << port << std::endl; + + // Display the UI with WebView + webview::webview w(false, nullptr); + w.set_title("Translate App"); + w.set_size(1024, 768, WEBVIEW_HINT_NONE); + w.navigate("http://127.0.0.1:" + std::to_string(port)); + w.run(); // Block until the window is closed + + // Stop the server when the window is closed + svr.stop(); + server_thread.join(); +} +``` + +Let's look at the key points: + +- **`bind_to_any_port`** — Instead of `listen("127.0.0.1", 8080)`, we let the OS choose an available port. Since desktop apps can be launched multiple times, using a fixed port would cause conflicts +- **`listen_after_bind`** — Starts accepting requests on the port reserved by `bind_to_any_port`. While `listen()` does bind and listen in one call, we need to know the port number first, so we split the operations +- **Shutdown order** — When the WebView window is closed, we stop the server with `svr.stop()` and wait for the thread to finish with `server_thread.join()`. If we reversed the order, WebView would lose access to the server + +The `signal_handler` from Chapter 5 is no longer needed. In a desktop app, closing the window means terminating the application. + +## 6.3 Embedding Static Files with cpp-embedlib + +In Chapter 5, we served files from the `public/` directory, so you'd need to distribute `public/` alongside the binary. With [cpp-embedlib](https://github.com/yhirose/cpp-embedlib), you can embed HTML, CSS, and JavaScript into the binary, packaging the distributable into a single file. 
+ +### CMakeLists.txt + +Fetch cpp-embedlib and embed `public/`: + +```cmake +# cpp-embedlib +FetchContent_Declare(cpp-embedlib + GIT_REPOSITORY https://github.com/yhirose/cpp-embedlib + GIT_TAG main +) +FetchContent_MakeAvailable(cpp-embedlib) + +# Embed the public/ directory into the binary +cpp_embedlib_add(WebAssets + FOLDER ${CMAKE_CURRENT_SOURCE_DIR}/public + NAMESPACE Web +) + +target_link_libraries(translate-app PRIVATE + WebAssets # Embedded files + cpp-embedlib-httplib # cpp-httplib integration +) +``` + +`cpp_embedlib_add` converts the files under `public/` into binary data at compile time and creates a static library called `WebAssets`. When linked, you can access the embedded files through a `Web::FS` object. `cpp-embedlib-httplib` is a helper library that provides the `httplib::mount()` function. + +### Replacing set_mount_point with httplib::mount + +Simply replace Chapter 5's `set_mount_point` with cpp-embedlib's `httplib::mount`: + +```cpp +#include +#include "WebAssets.h" + +// Chapter 5: +// svr.set_mount_point("/", "./public"); + +// Chapter 6: +httplib::mount(svr, Web::FS); +``` + +`httplib::mount` registers handlers that serve the files embedded in `Web::FS` over HTTP. MIME types are automatically determined from file extensions, so there's no need to manually set `Content-Type`. + +The file contents are directly mapped to the binary's data segment, so no memory copies or heap allocations occur. + +## 6.4 macOS: Adding the Edit Menu + +If you try to paste text into the input field with `Cmd+V`, you'll find it doesn't work. On macOS, keyboard shortcuts like `Cmd+V` (paste) and `Cmd+C` (copy) are routed through the application's menu bar. Since webview/webview doesn't create one, these shortcuts never reach the WebView. 
We need to add a macOS Edit menu using the Objective-C runtime: + +```cpp +#ifdef __APPLE__ +#include + +void setup_macos_edit_menu() { + auto cls = [](const char *n) { return (id)objc_getClass(n); }; + auto sel = sel_registerName; + auto msg = reinterpret_cast(objc_msgSend); + auto msg_s = reinterpret_cast(objc_msgSend); + auto msg_id = reinterpret_cast(objc_msgSend); + auto msg_v = reinterpret_cast(objc_msgSend); + auto msg_mi = reinterpret_cast(objc_msgSend); + + auto str = [&](const char *s) { + return msg_s(cls("NSString"), sel("stringWithUTF8String:"), s); + }; + + id app = msg(cls("NSApplication"), sel("sharedApplication")); + id mainMenu = msg(msg(cls("NSMenu"), sel("alloc")), sel("init")); + id editItem = msg(msg(cls("NSMenuItem"), sel("alloc")), sel("init")); + id editMenu = msg_id(msg(cls("NSMenu"), sel("alloc")), + sel("initWithTitle:"), str("Edit")); + + struct { const char *title; const char *action; const char *key; } items[] = { + {"Undo", "undo:", "z"}, + {"Redo", "redo:", "Z"}, + {"Cut", "cut:", "x"}, + {"Copy", "copy:", "c"}, + {"Paste", "paste:", "v"}, + {"Select All", "selectAll:", "a"}, + }; + + for (auto &[title, action, key] : items) { + id mi = msg_mi(msg(cls("NSMenuItem"), sel("alloc")), + sel("initWithTitle:action:keyEquivalent:"), + str(title), sel(action), str(key)); + msg_v(editMenu, sel("addItem:"), mi); + } + + msg_v(editItem, sel("setSubmenu:"), editMenu); + msg_v(mainMenu, sel("addItem:"), editItem); + msg_v(app, sel("setMainMenu:"), mainMenu); +} +#endif +``` + +Call this before `w.run()`: + +```cpp +#ifdef __APPLE__ + setup_macos_edit_menu(); +#endif + w.run(); +``` + +On Windows and Linux, keyboard shortcuts are delivered directly to the focused control without going through the menu bar, so this workaround is macOS-specific. + +## 6.5 Complete Code + +
+Complete code (CMakeLists.txt) + +```cmake +cmake_minimum_required(VERSION 3.20) +project(translate-app CXX) +set(CMAKE_CXX_STANDARD 20) + +include(FetchContent) + +# llama.cpp +FetchContent_Declare(llama + GIT_REPOSITORY https://github.com/ggml-org/llama.cpp + GIT_TAG master + GIT_SHALLOW TRUE +) +FetchContent_MakeAvailable(llama) + +# cpp-httplib +FetchContent_Declare(httplib + GIT_REPOSITORY https://github.com/yhirose/cpp-httplib + GIT_TAG master +) +FetchContent_MakeAvailable(httplib) + +# nlohmann/json +FetchContent_Declare(json + URL https://github.com/nlohmann/json/releases/download/v3.11.3/json.tar.xz +) +FetchContent_MakeAvailable(json) + +# cpp-llamalib +FetchContent_Declare(cpp_llamalib + GIT_REPOSITORY https://github.com/yhirose/cpp-llamalib + GIT_TAG main +) +FetchContent_MakeAvailable(cpp_llamalib) + +# webview/webview +FetchContent_Declare(webview + GIT_REPOSITORY https://github.com/webview/webview + GIT_TAG master +) +FetchContent_MakeAvailable(webview) + +# cpp-embedlib +FetchContent_Declare(cpp-embedlib + GIT_REPOSITORY https://github.com/yhirose/cpp-embedlib + GIT_TAG main +) +FetchContent_MakeAvailable(cpp-embedlib) + +# Embed the public/ directory into the binary +cpp_embedlib_add(WebAssets + FOLDER ${CMAKE_CURRENT_SOURCE_DIR}/public + NAMESPACE Web +) + +find_package(OpenSSL REQUIRED) + +add_executable(translate-app src/main.cpp) + +target_link_libraries(translate-app PRIVATE + httplib::httplib + nlohmann_json::nlohmann_json + cpp-llamalib + OpenSSL::SSL OpenSSL::Crypto + WebAssets + cpp-embedlib-httplib + webview::core +) + +if(APPLE) + target_link_libraries(translate-app PRIVATE + "-framework CoreFoundation" + "-framework Security" + ) +endif() + +target_compile_definitions(translate-app PRIVATE + CPPHTTPLIB_OPENSSL_SUPPORT +) +``` + +
+ +
+Complete code (main.cpp) + +```cpp +#include +#include +#include +#include +#include "WebAssets.h" +#include "webview/webview.h" + +#ifdef __APPLE__ +#include +#endif + +#include +#include +#include +#include +#include +#include + +using json = nlohmann::json; + +// ------------------------------------------------------------------------- +// macOS Edit menu (Cmd+C/V/X/A require an Edit menu on macOS) +// ------------------------------------------------------------------------- + +#ifdef __APPLE__ +void setup_macos_edit_menu() { + auto cls = [](const char *n) { return (id)objc_getClass(n); }; + auto sel = sel_registerName; + auto msg = reinterpret_cast(objc_msgSend); + auto msg_s = reinterpret_cast(objc_msgSend); + auto msg_id = reinterpret_cast(objc_msgSend); + auto msg_v = reinterpret_cast(objc_msgSend); + auto msg_mi = reinterpret_cast(objc_msgSend); + + auto str = [&](const char *s) { + return msg_s(cls("NSString"), sel("stringWithUTF8String:"), s); + }; + + id app = msg(cls("NSApplication"), sel("sharedApplication")); + id mainMenu = msg(msg(cls("NSMenu"), sel("alloc")), sel("init")); + id editItem = msg(msg(cls("NSMenuItem"), sel("alloc")), sel("init")); + id editMenu = msg_id(msg(cls("NSMenu"), sel("alloc")), + sel("initWithTitle:"), str("Edit")); + + struct { const char *title; const char *action; const char *key; } items[] = { + {"Undo", "undo:", "z"}, + {"Redo", "redo:", "Z"}, + {"Cut", "cut:", "x"}, + {"Copy", "copy:", "c"}, + {"Paste", "paste:", "v"}, + {"Select All", "selectAll:", "a"}, + }; + + for (auto &[title, action, key] : items) { + id mi = msg_mi(msg(cls("NSMenuItem"), sel("alloc")), + sel("initWithTitle:action:keyEquivalent:"), + str(title), sel(action), str(key)); + msg_v(editMenu, sel("addItem:"), mi); + } + + msg_v(editItem, sel("setSubmenu:"), editMenu); + msg_v(mainMenu, sel("addItem:"), editItem); + msg_v(app, sel("setMainMenu:"), mainMenu); +} +#endif + +// ------------------------------------------------------------------------- +// 
Model definitions +// ------------------------------------------------------------------------- + +struct ModelInfo { + std::string name; + std::string params; + std::string size; + std::string repo; + std::string filename; +}; + +const std::vector MODELS = { + { + .name = "gemma-2-2b-it", + .params = "2B", + .size = "1.6 GB", + .repo = "bartowski/gemma-2-2b-it-GGUF", + .filename = "gemma-2-2b-it-Q4_K_M.gguf", + }, + { + .name = "gemma-2-9b-it", + .params = "9B", + .size = "5.8 GB", + .repo = "bartowski/gemma-2-9b-it-GGUF", + .filename = "gemma-2-9b-it-Q4_K_M.gguf", + }, + { + .name = "Llama-3.1-8B-Instruct", + .params = "8B", + .size = "4.9 GB", + .repo = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF", + .filename = "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + }, +}; + +// ------------------------------------------------------------------------- +// Model storage directory +// ------------------------------------------------------------------------- + +std::filesystem::path get_models_dir() { +#ifdef _WIN32 + auto env = std::getenv("APPDATA"); + auto base = env ? std::filesystem::path(env) : std::filesystem::path("."); + return base / "translate-app" / "models"; +#else + auto env = std::getenv("HOME"); + auto base = env ? 
std::filesystem::path(env) : std::filesystem::path("."); + return base / ".translate-app" / "models"; +#endif +} + +// ------------------------------------------------------------------------- +// Model download +// ------------------------------------------------------------------------- + +// Abort the download if progress_cb returns false +bool download_model(const ModelInfo &model, + std::function progress_cb) { + httplib::Client cli("https://huggingface.co"); + cli.set_follow_location(true); // Hugging Face redirects to a CDN + cli.set_read_timeout(std::chrono::hours(1)); // Long timeout for large models + + auto url = "/" + model.repo + "/resolve/main/" + model.filename; + auto path = get_models_dir() / model.filename; + auto tmp_path = std::filesystem::path(path).concat(".tmp"); + + std::ofstream ofs(tmp_path, std::ios::binary); + if (!ofs) { return false; } + + auto res = cli.Get(url, + // content_receiver: Receive data chunk by chunk and write to file + [&](const char *data, size_t len) { + ofs.write(data, len); + return ofs.good(); + }, + // progress: Report download progress (return false to abort) + [&, last_pct = -1](size_t current, size_t total) mutable { + int pct = total ? 
(int)(current * 100 / total) : 0; + if (pct == last_pct) return true; // Skip if the value hasn't changed + last_pct = pct; + return progress_cb(pct); + }); + + ofs.close(); + + if (!res || res->status != 200) { + std::filesystem::remove(tmp_path); + return false; + } + + // Rename after download completes + std::filesystem::rename(tmp_path, path); + return true; +} + +// ------------------------------------------------------------------------- +// Server +// ------------------------------------------------------------------------- + +int main() { + httplib::Server svr; + // Create the model storage directory + auto models_dir = get_models_dir(); + std::filesystem::create_directories(models_dir); + + // Auto-download the default model if not already present + std::string selected_model = MODELS[0].filename; + auto path = models_dir / selected_model; + if (!std::filesystem::exists(path)) { + std::cout << "Downloading " << selected_model << "..." << std::endl; + if (!download_model(MODELS[0], [](int pct) { + std::cout << "\r" << pct << "%" << std::flush; + return true; + })) { + std::cerr << "\nFailed to download model." 
<< std::endl; + return 1; + } + std::cout << std::endl; + } + auto llm = llamalib::Llama{path}; + std::mutex llm_mutex; // Protect access during model switching + + // Set a long timeout since LLM inference takes time (default is 5 seconds) + svr.set_read_timeout(300); + svr.set_write_timeout(300); + + svr.set_logger([](const auto &req, const auto &res) { + std::cout << req.method << " " << req.path << " -> " << res.status + << std::endl; + }); + + svr.Get("/health", [](const httplib::Request &, httplib::Response &res) { + res.set_content(json{{"status", "ok"}}.dump(), "application/json"); + }); + + // --- Translation endpoint (Chapter 2) ------------------------------------ + + svr.Post("/translate", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + try { + std::lock_guard lock(llm_mutex); + auto translation = llm.chat(prompt); + res.set_content(json{{"translation", translation}}.dump(), + "application/json"); + } catch (const std::exception &e) { + res.status = 500; + res.set_content(json{{"error", e.what()}}.dump(), "application/json"); + } + }); + + // --- SSE streaming translation (Chapter 3) ------------------------------- + + svr.Post("/translate/stream", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + res.set_chunked_content_provider( + "text/event-stream", + [&, prompt](size_t, httplib::DataSink &sink) { + std::lock_guard lock(llm_mutex); + try { + llm.chat(prompt, [&](std::string_view token) { + sink.os << "data: " + << json(std::string(token)).dump( + -1, ' ', false, json::error_handler_t::replace) + << "\n\n"; + return sink.os.good(); // Abort inference on disconnect + }); + sink.os << "data: [DONE]\n\n"; + } catch (const std::exception &e) { + sink.os << "data: " << json({{"error", e.what()}}).dump() << "\n\n"; + } + sink.done(); + return true; + }); + }); + + // --- Model list (Chapter 4) ---------------------------------------------- + + svr.Get("/models", + [&](const httplib::Request &, httplib::Response &res) { + auto models_dir = get_models_dir(); + auto arr = json::array(); + for (const auto &m : MODELS) { + auto path = models_dir / m.filename; + arr.push_back({ + {"name", m.name}, + {"params", m.params}, + {"size", m.size}, + {"downloaded", std::filesystem::exists(path)}, + {"selected", m.filename == selected_model}, + }); + } + res.set_content(json{{"models", arr}}.dump(), "application/json"); + }); + + // --- Model selection (Chapter 4) ----------------------------------------- + + svr.Post("/models/select", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded() || !input.contains("model")) { + res.status = 400; + res.set_content(json{{"error", "'model' is required"}}.dump(), + "application/json"); + return; + } + + auto name = input["model"].get(); + + auto it = std::find_if(MODELS.begin(), MODELS.end(), + [&](const ModelInfo &m) { return m.name == name; }); + + if (it == MODELS.end()) { + res.status = 404; + res.set_content(json{{"error", "Unknown model"}}.dump(), + "application/json"); + return; + } + + const auto &model = *it; + + // Always respond with SSE (same format whether downloaded or not) + 
res.set_chunked_content_provider( + "text/event-stream", + [&, model](size_t, httplib::DataSink &sink) { + // SSE event sending helper + auto send = [&](const json &event) { + sink.os << "data: " << event.dump() << "\n\n"; + }; + + // Download if not yet downloaded (report progress via SSE) + auto path = get_models_dir() / model.filename; + if (!std::filesystem::exists(path)) { + bool ok = download_model(model, [&](int pct) { + send({{"status", "downloading"}, {"progress", pct}}); + return sink.os.good(); // Abort download on client disconnect + }); + if (!ok) { + send({{"status", "error"}, {"message", "Download failed"}}); + sink.done(); + return true; + } + } + + // Load and switch to the model + send({{"status", "loading"}}); + { + std::lock_guard lock(llm_mutex); + llm = llamalib::Llama{path}; + selected_model = model.filename; + } + + send({{"status", "ready"}}); + sink.done(); + return true; + }); + }); + + // --- Embedded file serving (Chapter 6) ------------------------------------ + // Chapter 5: svr.set_mount_point("/", "./public"); + httplib::mount(svr, Web::FS); + + // Start the server on a background thread + auto port = svr.bind_to_any_port("127.0.0.1"); + std::thread server_thread([&]() { svr.listen_after_bind(); }); + + std::cout << "Listening on http://127.0.0.1:" << port << std::endl; + + // Display the UI with WebView + webview::webview w(false, nullptr); + w.set_title("Translate App"); + w.set_size(1024, 768, WEBVIEW_HINT_NONE); + w.navigate("http://127.0.0.1:" + std::to_string(port)); + +#ifdef __APPLE__ + setup_macos_edit_menu(); +#endif + w.run(); // Block until the window is closed + + // Stop the server when the window is closed + svr.stop(); + server_thread.join(); +} +``` + +
+ +To summarize the changes from Chapter 5: + +- `#include ` replaced with `#include `, ``, `"WebAssets.h"`, `"webview/webview.h"` +- Removed the `signal_handler` function +- `svr.set_mount_point("/", "./public")` replaced with `httplib::mount(svr, Web::FS)` +- `svr.listen("127.0.0.1", 8080)` replaced with `bind_to_any_port` + `listen_after_bind` + WebView event loop + +Not a single line of handler code has changed. The REST API, SSE streaming, and model management built through Chapter 5 all work as-is. + +## 6.6 Building and Testing + +```bash +cmake -B build +cmake --build build -j +``` + +Launch the app: + +```bash +./build/translate-app +``` + +No browser is needed. A window opens automatically. The same UI from Chapter 5 appears as-is, and translation and model switching all work just the same. + +When you close the window, the server shuts down automatically. There's no need for `Ctrl+C`. + +### What Needs to Be Distributed + +You only need to distribute: + +- The single `translate-app` binary + +That's it. You don't need the `public/` directory. HTML, CSS, and JavaScript are embedded in the binary. Model files download automatically on first launch, so there's no need to ask users to prepare anything in advance. + +## Next Chapter + +Congratulations! 🎉 + +In Chapter 1, `/health` just returned `{"status":"ok"}`. Now we have a desktop app where you type text and translations stream in real time, pick a different model from a dropdown and it downloads automatically, and closing the window cleanly shuts everything down — all in a single distributable binary. + +What we changed in this chapter was just the static file serving and the server startup. Not a single line of handler code changed. The REST API, SSE streaming, and model management we built through Chapter 5 all work as a desktop app, as-is. + +In the next chapter, we'll shift perspective and read through the code of llama.cpp's own `llama-server`. 
Let's compare our simple server with a production-quality one and see what design decisions differ and why. + +**Next:** [Reading the llama.cpp Server Source Code](../ch07-code-reading) diff --git a/docs-src/pages/en/llm-app/ch07-code-reading.md b/docs-src/pages/en/llm-app/ch07-code-reading.md new file mode 100644 index 0000000..a28a723 --- /dev/null +++ b/docs-src/pages/en/llm-app/ch07-code-reading.md @@ -0,0 +1,154 @@ +--- +title: "7. Reading the llama.cpp Server Source Code" +order: 7 + +--- + +Over the course of six chapters, we built a translation desktop app from scratch. We have a working product, but it's ultimately a "learning-oriented" implementation. So how does "production-quality" code differ? Let's read the source code of `llama-server`, the official server bundled with llama.cpp, and compare. + +`llama-server` is located at `llama.cpp/tools/server/`. It uses the same cpp-httplib, so you can read the code the same way as in the previous chapters. + +## 7.1 Source Code Location + +```ascii +llama.cpp/tools/server/ +├── server.cpp # Main server implementation +├── httplib.h # cpp-httplib (bundled version) +└── ... +``` + +The code is contained in a single `server.cpp`. It runs to several thousand lines, but once you understand the structure, you can narrow down the parts worth reading. + +## 7.2 OpenAI-Compatible API + +The biggest difference between the server we built and `llama-server` is the API design. + +**Our API:** + +```text +POST /translate → {"translation": "..."} +POST /translate/stream → SSE: data: "token" +``` + +**llama-server's API:** + +```text +POST /v1/chat/completions → OpenAI-compatible JSON +POST /v1/completions → OpenAI-compatible JSON +POST /v1/embeddings → Text embedding vectors +``` + +`llama-server` conforms to [OpenAI's API specification](https://platform.openai.com/docs/api-reference). This means OpenAI's official client libraries (such as the Python `openai` package) work out of the box. 
+ +```python +# Example of connecting to llama-server with the OpenAI client +from openai import OpenAI +client = OpenAI(base_url="http://localhost:8080/v1", api_key="dummy") + +response = client.chat.completions.create( + model="local-model", + messages=[{"role": "user", "content": "Hello!"}] +) +``` + +Compatibility with existing tools and libraries is a big design decision. We designed a simple translation-specific API, but if you're building a general-purpose server, OpenAI compatibility has become the de facto standard. + +## 7.3 Concurrent Request Handling + +Our server processes requests one at a time. If another request arrives while a translation is in progress, it waits until the previous inference finishes. This is fine for a desktop app used by one person, but it becomes a problem for a server shared by multiple users. + +`llama-server` handles concurrent requests through a mechanism called **slots**. + +![llama-server's slot management](../slots.svg#half) + +The key point is that tokens from each slot are not inferred **one by one in sequence**, but rather **all at once in a single batch**. GPUs excel at parallel processing, so processing two users simultaneously takes almost the same time as processing one. This is called "continuous batching." + +In our server, cpp-httplib's thread pool assigns one thread per request, but the inference itself runs single-threaded inside `llm.chat()`. `llama-server` consolidates this inference step into a shared batch processing loop. + +## 7.4 Differences in SSE Format + +The streaming mechanism itself is the same (`set_chunked_content_provider` + SSE), but the data format differs. 
+ +**Our format:** + +```text +data: "去年の" +data: "春に" +data: [DONE] +``` + +**llama-server (OpenAI-compatible):** + +```text +data: {"id":"chatcmpl-xxx","object":"chat.completion.chunk","choices":[{"delta":{"content":"去年の"}}]} +data: {"id":"chatcmpl-xxx","object":"chat.completion.chunk","choices":[{"delta":{"content":"春に"}}]} +data: [DONE] +``` + +Our format simply sends the tokens. Because `llama-server` follows the OpenAI specification, even a single token comes wrapped in JSON. It may look verbose, but it includes useful information for clients, like an `id` to identify the request and a `finish_reason` to indicate why generation stopped. + +## 7.5 KV Cache Reuse + +In our server, we process the entire prompt from scratch on every request. Our translation app's prompt is short ("Translate the following text to ja..." + input text), so this isn't a problem. + +`llama-server` reuses the KV cache for the prefix portion when a request shares a common prompt prefix with a previous request. + +![KV cache reuse](../kv-cache.svg#half) + +For chatbots that send a long system prompt and few-shot examples with every request, this alone dramatically reduces response time. The difference is night and day: processing several thousand tokens of system prompt every time versus reading them from cache in an instant. + +For our translation app, where the system prompt is just a single sentence, the benefit is limited. However, it's an optimization worth keeping in mind when applying this to your own applications. + +## 7.6 Structured Output + +Since our translation API returns plain text, there was no need to constrain the output format. But what if you want the LLM to respond in JSON? + +```text +Prompt: Analyze the sentiment of the following text and return it as JSON. +LLM output (expected): {"sentiment": "positive", "score": 0.8} +LLM output (reality): Here are the results of the sentiment analysis. {"sentiment": ... 
+``` + +LLMs sometimes ignore instructions and add extraneous text. `llama-server` solves this problem with **grammar constraints**. + +```bash +curl http://localhost:8080/v1/chat/completions \ + -d '{ + "messages": [{"role": "user", "content": "Analyze sentiment..."}], + "json_schema": { + "type": "object", + "properties": { + "sentiment": {"type": "string", "enum": ["positive", "negative", "neutral"]}, + "score": {"type": "number"} + }, + "required": ["sentiment", "score"] + } + }' +``` + +When you specify `json_schema`, tokens that don't conform to the grammar are excluded during token generation. This guarantees that the output is always valid JSON, so there's no need to worry about `json::parse` failing. + +When embedding LLMs into applications, whether you can reliably parse the output directly impacts reliability. Grammar constraints are unnecessary for free-text output like translation, but they're essential for use cases where you need to return structured data as an API response. + +## 7.7 Summary + +Let's organize the differences we've covered. + +| Aspect | Our Server | llama-server | +|------|-------------|--------------| +| API design | Translation-specific | OpenAI-compatible | +| Concurrent requests | Sequential processing | Slots + continuous batching | +| SSE format | Tokens only | OpenAI-compatible JSON | +| KV cache | Cleared each time | Prefix reuse | +| Structured output | None | JSON Schema / grammar constraints | +| Code size | ~200 lines | Several thousand lines | + +Our code is simple because of the assumption that "one person uses it as a desktop app." If you're building a server for multiple users or one that integrates with the existing ecosystem, `llama-server`'s design serves as a valuable reference. + +Conversely, even 200 lines of code is enough to make a fully functional translation app. I hope this code reading exercise has also conveyed the value of "building only what you need." 
+ +## Next Chapter + +In the next chapter, we'll cover the key points for swapping in your own library and customizing the app to make it truly yours. + +**Next:** [Making It Your Own](../ch08-customization) diff --git a/docs-src/pages/en/llm-app/ch08-customization.md b/docs-src/pages/en/llm-app/ch08-customization.md new file mode 100644 index 0000000..7154ce5 --- /dev/null +++ b/docs-src/pages/en/llm-app/ch08-customization.md @@ -0,0 +1,120 @@ +--- +title: "8. Making It Your Own" +order: 8 + +--- + +Through Chapter 7, we've built a translation desktop app and studied how production-quality code differs. In this chapter, let's go over the key points for **turning this app into something entirely your own**. + +The translation app was just a vehicle. Replace llama.cpp with your own library, and the same architecture works for any application. + +## 8.1 Swapping Out the Build Configuration + +First, replace the llama.cpp-related `FetchContent` entries in `CMakeLists.txt` with your own library. + +```cmake +# Remove: llama.cpp and cpp-llamalib FetchContent + +# Add: your own library +FetchContent_Declare(my_lib + GIT_REPOSITORY https://github.com/yourname/my-lib + GIT_TAG main +) +FetchContent_MakeAvailable(my_lib) + +target_link_libraries(my-app PRIVATE + httplib::httplib + nlohmann_json::nlohmann_json + my_lib # Your library instead of cpp-llamalib + # ... +) +``` + +If your library doesn't support CMake, you can place the header and source files directly in `src/` and add them to `add_executable`. Keep cpp-httplib, nlohmann/json, and webview as they are. + +## 8.2 Adapting the API to Your Task + +Change the translation API's endpoints and parameters to match your task. 
+ +| Translation app | Your app (e.g., image processing) | +|---|---| +| `POST /translate` | `POST /process` | +| `{"text": "...", "target_lang": "ja"}` | `{"image": "base64...", "filter": "blur"}` | +| `POST /translate/stream` | `POST /process/stream` | +| `GET /models` | `GET /filters` or `GET /presets` | + +Then update each handler's implementation. For example, just replace the `llm.chat()` calls with your own library's API. + +```cpp +// Before: LLM translation +auto translation = llm.chat(prompt); +res.set_content(json{{"translation", translation}}.dump(), "application/json"); + +// After: e.g., an image processing library +auto result = my_lib::process(input_image, options); +res.set_content(json{{"result", result}}.dump(), "application/json"); +``` + +The same goes for SSE streaming. If your library has a function that reports progress via a callback, you can use the exact same pattern from Chapter 3 to send incremental responses. SSE isn't limited to LLMs — it's useful for any time-consuming task: image processing progress, data conversion steps, long-running computations. + +## 8.3 Design Considerations + +### Libraries with Expensive Initialization + +In this book, we load the LLM model at the top of `main()` and keep it in a variable. This is intentional. Loading the model on every request would take several seconds, so we load it once at startup and reuse it. If your library has expensive initialization (loading large data files, acquiring GPU resources, etc.), the same approach works well. + +### Thread Safety + +cpp-httplib processes requests concurrently using a thread pool. In Chapter 4 we protected the `llm` object with a `std::mutex` to prevent crashes during model switching. The same pattern applies when integrating your own library. If your library isn't thread-safe or you need to swap objects at runtime, protect access with a `std::mutex`. + +## 8.4 Customizing the UI + +Edit the three files in `public/`. 
+ +- **`index.html`** — Change the input form layout. Swap ` + + + + + +
+  <dialog id="download-dialog">
+    <h3>Downloading model...</h3>
+    <progress id="download-progress" max="100"></progress>
+    <p id="download-status"></p>
+    <button id="download-cancel">Cancel</button>
+  </dialog>
+ + + + +``` + +HTMLのポイントです。 + +- FaviconはインラインSVG絵文字なので、画像ファイルは不要です +- ``はモデルダウンロード中の進捗表示に使います。HTML標準の要素で、`showModal()`でモーダルとして表示できます +- ``は翻訳結果の表示用です。意味的に「計算結果の出力」を表す要素です +- 翻訳ボタンはありません。テキストを入力すると自動で翻訳が始まります(5.4節で実装) + +CSSを`public/style.css`に書きます。CSSフレームワークは使わず、素のCSSだけでレイアウトします。 + +```css +:root { + --gap: 0.5rem; + --color-border: #ccc; + --font: system-ui, sans-serif; +} + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +html, body { + height: 100%; + font-family: var(--font); +} + +body { + display: flex; + flex-direction: column; + padding: var(--gap); + gap: var(--gap); +} + +/* ヘッダー: タイトル + ドロップダウン */ +header { + display: flex; + align-items: center; + justify-content: space-between; +} + +header div { + display: flex; + gap: var(--gap); +} + +/* メイン: 左右2カラム */ +main { + flex: 1; + display: grid; + grid-template-columns: 1fr 1fr; + gap: var(--gap); + min-height: 0; +} + +#input-text { + resize: none; + padding: 0.75rem; + font-family: var(--font); + font-size: 1rem; + border: 1px solid var(--color-border); + border-radius: 4px; +} + +textarea:focus, +select:focus { + outline: 1px solid #4a9eff; + outline-offset: -1px; +} + +#output-text { + display: block; + padding: 0.75rem; + font-size: 1rem; + border: 1px solid var(--color-border); + border-radius: 4px; + white-space: pre-wrap; + overflow-y: auto; +} + +/* ダウンロードモーダル */ +dialog { + border: 1px solid var(--color-border); + border-radius: 8px; + padding: 1.5rem; + max-width: 400px; + width: 90%; + margin: auto; +} + +dialog::backdrop { + background: rgba(0, 0, 0, 0.4); +} + +dialog h3 { + margin-bottom: 0.75rem; +} + +dialog progress { + width: 100%; + height: 1.25rem; +} + +dialog p { + margin-top: 0.5rem; + text-align: center; + color: #666; +} + +dialog button { + display: block; + margin: 0.75rem auto 0; + padding: 0.4rem 1.5rem; + cursor: pointer; +} + +/* 翻訳中・モデル切替中にUI全体をブロックする */ +body.busy { + cursor: wait; +} + +body.busy select, +body.busy textarea { + pointer-events: none; + 
opacity: 0.6; +} +``` + +レイアウトのポイントです。 + +- `body`をFlexboxで縦並びにし、`main`が`flex: 1`で残りの高さを占めます。入力欄と出力欄がウィンドウ下端まで伸びます +- `main`はCSS Gridの`1fr 1fr`で左右2カラムに分割しています +- `--gap`変数で全てのスペーシングを統一しています。ヘッダー上端、ヘッダーとBox間、Box下端が全て同じ幅です +- `body.busy`クラスは、翻訳中やモデル切り替え中にUIをブロックするために使います。JavaScriptから付け外しします + +ブラウザをリロードすると、入力欄と出力欄が横に並んだ画面が表示されるはずです。まだ何も入力しても何も起きませんが、レイアウトは完成です。 + +## 5.4 翻訳機能をつなぐ + +いよいよJavaScriptでサーバーのAPIを呼び出します。`public/script.js`を作ります。 + +### SSEストリームの読み方 + +3章で作った`/translate/stream`はPOSTエンドポイントです。ブラウザの`EventSource`はGETしか使えないので、`fetch()` + `ReadableStream`でSSEを読みます。基本パターンはこうです。 + +1. `fetch()`でPOSTリクエストを送る +2. `res.body.getReader()`でストリームを取得 +3. チャンクを読みながら`data:`で始まる行を処理する + +チャンクはSSEの行の途中で切れることがあるので、バッファに溜めて行単位で処理する必要があります。 + +### debounce付き自動翻訳 + +翻訳ボタンの代わりに、テキスト入力や言語変更をトリガーにして自動で翻訳を開始します。300msのdebounceを入れて、タイピング中に毎回リクエストが飛ばないようにします。 + +入力中に前の翻訳を中断するため、`AbortController`を使います。新しい入力があると`abort()`で前の`fetch`をキャンセルし、新しい翻訳を開始します。`fetch`にキャンセル用の`signal`を渡す必要があるので、SSEの読み取りはインラインで書いています。 + +```js +const inputText = document.getElementById("input-text"); +const outputText = document.getElementById("output-text"); +const targetLang = document.getElementById("target-lang"); + +let debounceTimer = null; +let abortController = null; + +async function translate() { + const text = inputText.value.trim(); + if (!text) { + outputText.textContent = ""; + return; + } + + // 進行中の翻訳があればキャンセル + if (abortController) abortController.abort(); + abortController = new AbortController(); + const { signal } = abortController; + + outputText.textContent = ""; + document.body.classList.add("busy"); + + try { + const res = await fetch("/translate/stream", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ text, target_lang: targetLang.value }), + signal, + }); + + if (!res.ok) { + const err = await res.json(); + throw new Error(err.error || `HTTP ${res.status}`); + } + + const reader = res.body.getReader(); + const decoder = new TextDecoder(); + let 
buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop(); + + for (const line of lines) { + if (line.startsWith("data: ")) { + const data = line.slice(6); + if (data === "[DONE]") return; + const parsed = JSON.parse(data); + if (parsed && parsed.error) { + outputText.textContent = "Error: " + parsed.error; + return; + } + outputText.textContent += parsed; + } + } + } + } catch (e) { + if (e.name === "AbortError") return; // 新しい入力でキャンセルされた + outputText.textContent = "Error: " + e.message; + } finally { + document.body.classList.remove("busy"); + } +} + +function scheduleTranslation() { + clearTimeout(debounceTimer); + debounceTimer = setTimeout(translate, 300); +} + +inputText.addEventListener("input", scheduleTranslation); +targetLang.addEventListener("change", scheduleTranslation); +``` + +`AbortController`の`signal`を渡す必要があるため、`fetch`を直接使っています。サーバーからエラーがJSONオブジェクトで返ってくることがあるので(3章で追加した`try/catch`)、`parsed.error`のチェックも入れています。 + +ブラウザをリロードして、テキストを入力してみましょう。300ms後にトークンが1つずつ表示されるはずです。入力を変えると前の翻訳が中断され、新しい翻訳が始まります。 + +## 5.5 モデル選択をつなぐ + +### モデル一覧の読み込み + +ページを開いた時に`GET /models`を呼んで、ドロップダウンを初期化します。 + +```js +const modelSelect = document.getElementById("model-select"); + +// `GET /models`からモデル一覧を取得し、ドロップダウンを構築する +async function loadModels() { + const res = await fetch("/models"); + const { models } = await res.json(); + + modelSelect.innerHTML = ""; // 既存の選択肢をクリア + for (const m of models) { + const opt = document.createElement("option"); + opt.value = m.name; + // 未ダウンロードのモデルには ⬇ マークを付けて区別する + opt.textContent = m.downloaded + ? 
`${m.name} (${m.params})` + : `${m.name} (${m.params}) ⬇`; + opt.selected = m.selected; // サーバーが返す`selected`フラグで現在のモデルを選択状態に + modelSelect.appendChild(opt); + } +} + +loadModels(); // ページ読み込み時に実行 +``` + +未ダウンロードのモデルには`⬇`マークを付けて区別します。 + +### モデルの切り替え + +ドロップダウンを変更すると`POST /models/select`を呼びます。ダウンロードが必要な場合は``で進捗バーを表示します。キャンセルボタンで中断もできます。 + +翻訳と同様に`AbortController`を使います。キャンセルボタンが押されたら`abort()`で接続を切断します。サーバー側は切断を検知してダウンロードを中断します(4章の`download_model`で`sink.os.good()`を返しているおかげです)。 + +```js +const dialog = document.getElementById("download-dialog"); +const progressBar = document.getElementById("download-progress"); +const downloadStatus = document.getElementById("download-status"); +const downloadCancel = document.getElementById("download-cancel"); + +let modelAbort = null; + +downloadCancel.addEventListener("click", () => { + if (modelAbort) modelAbort.abort(); +}); + +modelSelect.addEventListener("change", async () => { + const name = modelSelect.value; + document.body.classList.add("busy"); + + modelAbort = new AbortController(); + const { signal } = modelAbort; + + try { + const res = await fetch("/models/select", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ model: name }), + signal, + }); + + if (!res.ok) { + const err = await res.json(); + throw new Error(err.error || `HTTP ${res.status}`); + } + + const reader = res.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop(); + + for (const line of lines) { + if (line.startsWith("data: ")) { + const data = line.slice(6); + if (data === "[DONE]") return; + const event = JSON.parse(data); + + switch (event.status) { + case "downloading": + if (!dialog.open) dialog.showModal(); // モーダルを表示 + progressBar.value = event.progress; // 進捗バーを更新 + 
downloadStatus.textContent = `${event.progress}%`; + break; + case "loading": + // `value`属性を消すと``がアニメーション(不確定)状態になる + progressBar.removeAttribute("value"); + downloadStatus.textContent = "Loading model..."; + break; + case "ready": + if (dialog.open) dialog.close(); + break; + case "error": + if (dialog.open) dialog.close(); + alert("Download failed: " + event.message); + break; + } + } + } + } + + await loadModels(); // `selected`フラグが変わったので一覧を再取得 + scheduleTranslation(); // 新しいモデルで再翻訳 + } catch (e) { + if (e.name === "AbortError") { + // キャンセルされた — 元のモデルに戻す + await loadModels(); + } else { + alert("Error: " + e.message); + } + } finally { + document.body.classList.remove("busy"); + if (dialog.open) dialog.close(); + modelAbort = null; + } +}); +``` + +`progressBar.removeAttribute("value")`で``をindeterminate(アニメーション)状態にしています。ダウンロード完了後のモデルロード中に使います。 + +## 5.6 全体のコード + +
+全体のコード(index.html) + +```html + + + + + + Translate App + + + + + + +
+ Translate App +
+ + + +
+
+ + +
+ + +
+ + + +
+  <dialog id="download-dialog">
+    <h3>Downloading model...</h3>
+    <progress id="download-progress" max="100"></progress>
+    <p id="download-status"></p>
+    <button id="download-cancel">Cancel</button>
+  </dialog>
+ + + + +``` + +
+ +
+全体のコード(style.css) + +```css +:root { + --gap: 0.5rem; + --color-border: #ccc; + --font: system-ui, sans-serif; +} + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +html, body { + height: 100%; + font-family: var(--font); +} + +body { + display: flex; + flex-direction: column; + padding: var(--gap); + gap: var(--gap); +} + +/* ヘッダー: タイトル + ドロップダウン */ +header { + display: flex; + align-items: center; + justify-content: space-between; +} + +header div { + display: flex; + gap: var(--gap); +} + +/* メイン: 左右2カラム */ +main { + flex: 1; + display: grid; + grid-template-columns: 1fr 1fr; + gap: var(--gap); + min-height: 0; +} + +#input-text { + resize: none; + padding: 0.75rem; + font-family: var(--font); + font-size: 1rem; + border: 1px solid var(--color-border); + border-radius: 4px; +} + +textarea:focus, +select:focus { + outline: 1px solid #4a9eff; + outline-offset: -1px; +} + +#output-text { + display: block; + padding: 0.75rem; + font-size: 1rem; + border: 1px solid var(--color-border); + border-radius: 4px; + white-space: pre-wrap; + overflow-y: auto; +} + +/* ダウンロードモーダル */ +dialog { + border: 1px solid var(--color-border); + border-radius: 8px; + padding: 1.5rem; + max-width: 400px; + width: 90%; + margin: auto; +} + +dialog::backdrop { + background: rgba(0, 0, 0, 0.4); +} + +dialog h3 { + margin-bottom: 0.75rem; +} + +dialog progress { + width: 100%; + height: 1.25rem; +} + +dialog p { + margin-top: 0.5rem; + text-align: center; + color: #666; +} + +dialog button { + display: block; + margin: 0.75rem auto 0; + padding: 0.4rem 1.5rem; + cursor: pointer; +} + +/* 翻訳中・モデル切替中にUI全体をブロックする */ +body.busy { + cursor: wait; +} + +body.busy select, +body.busy textarea { + pointer-events: none; + opacity: 0.6; +} +``` + +
+ +
+全体のコード(script.js) + +```js +// --- DOM要素 --- + +const inputText = document.getElementById("input-text"); +const outputText = document.getElementById("output-text"); +const targetLang = document.getElementById("target-lang"); +const modelSelect = document.getElementById("model-select"); +const dialog = document.getElementById("download-dialog"); +const progressBar = document.getElementById("download-progress"); +const downloadStatus = document.getElementById("download-status"); +const downloadCancel = document.getElementById("download-cancel"); + +// --- モデル一覧 --- + +// `GET /models`からモデル一覧を取得し、ドロップダウンを構築する +async function loadModels() { + const res = await fetch("/models"); + const { models } = await res.json(); + + modelSelect.innerHTML = ""; // 既存の選択肢をクリア + for (const m of models) { + const opt = document.createElement("option"); + opt.value = m.name; + // 未ダウンロードのモデルには ⬇ マークを付けて区別する + opt.textContent = m.downloaded + ? `${m.name} (${m.params})` + : `${m.name} (${m.params}) ⬇`; + opt.selected = m.selected; // サーバーが返す`selected`フラグで現在のモデルを選択状態に + modelSelect.appendChild(opt); + } +} + +loadModels(); // ページ読み込み時に実行 + +// --- 翻訳(debounce付き自動翻訳) --- + +let debounceTimer = null; +let abortController = null; + +async function translate() { + const text = inputText.value.trim(); + if (!text) { + outputText.textContent = ""; + return; + } + + // 進行中の翻訳があればキャンセル + if (abortController) abortController.abort(); + abortController = new AbortController(); + const { signal } = abortController; + + outputText.textContent = ""; + document.body.classList.add("busy"); + + try { + const res = await fetch("/translate/stream", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ text, target_lang: targetLang.value }), + signal, + }); + + if (!res.ok) { + const err = await res.json(); + throw new Error(err.error || `HTTP ${res.status}`); + } + + const reader = res.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + 
while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop(); + + for (const line of lines) { + if (line.startsWith("data: ")) { + const data = line.slice(6); + if (data === "[DONE]") return; + const parsed = JSON.parse(data); + if (parsed && parsed.error) { + outputText.textContent = "Error: " + parsed.error; + return; + } + outputText.textContent += parsed; + } + } + } + } catch (e) { + if (e.name === "AbortError") return; // 新しい入力でキャンセルされた + outputText.textContent = "Error: " + e.message; + } finally { + document.body.classList.remove("busy"); + } +} + +function scheduleTranslation() { + clearTimeout(debounceTimer); + debounceTimer = setTimeout(translate, 300); +} + +inputText.addEventListener("input", scheduleTranslation); +targetLang.addEventListener("change", scheduleTranslation); + +// --- モデル選択 --- + +let modelAbort = null; + +downloadCancel.addEventListener("click", () => { + if (modelAbort) modelAbort.abort(); +}); + +modelSelect.addEventListener("change", async () => { + const name = modelSelect.value; + document.body.classList.add("busy"); + + modelAbort = new AbortController(); + const { signal } = modelAbort; + + try { + const res = await fetch("/models/select", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ model: name }), + signal, + }); + + if (!res.ok) { + const err = await res.json(); + throw new Error(err.error || `HTTP ${res.status}`); + } + + const reader = res.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop(); + + for (const line of lines) { + if (line.startsWith("data: ")) { + const data = line.slice(6); + if (data === "[DONE]") return; + 
const event = JSON.parse(data); + + switch (event.status) { + case "downloading": + if (!dialog.open) dialog.showModal(); + progressBar.value = event.progress; + downloadStatus.textContent = `${event.progress}%`; + break; + case "loading": + progressBar.removeAttribute("value"); + downloadStatus.textContent = "Loading model..."; + break; + case "ready": + if (dialog.open) dialog.close(); + break; + case "error": + if (dialog.open) dialog.close(); + alert("Download failed: " + event.message); + break; + } + } + } + } + + await loadModels(); + scheduleTranslation(); // 新しいモデルで再翻訳 + } catch (e) { + if (e.name === "AbortError") { + // キャンセルされた — 元のモデルに戻す + await loadModels(); + } else { + alert("Error: " + e.message); + } + } finally { + document.body.classList.remove("busy"); + if (dialog.open) dialog.close(); + modelAbort = null; + } +}); +``` + +
+ +
+全体のコード(main.cpp) + +サーバー側の変更は`set_mount_point`の1行だけです。4章の全体コードの`svr.listen()`の前に追加してください。 + +```cpp +#include +#include +#include + +#include +#include +#include +#include +#include + +using json = nlohmann::json; + +// ------------------------------------------------------------------------- +// モデル定義 +// ------------------------------------------------------------------------- + +struct ModelInfo { + std::string name; + std::string params; + std::string size; + std::string repo; + std::string filename; +}; + +const std::vector MODELS = { + { + .name = "gemma-2-2b-it", + .params = "2B", + .size = "1.6 GB", + .repo = "bartowski/gemma-2-2b-it-GGUF", + .filename = "gemma-2-2b-it-Q4_K_M.gguf", + }, + { + .name = "gemma-2-9b-it", + .params = "9B", + .size = "5.8 GB", + .repo = "bartowski/gemma-2-9b-it-GGUF", + .filename = "gemma-2-9b-it-Q4_K_M.gguf", + }, + { + .name = "Llama-3.1-8B-Instruct", + .params = "8B", + .size = "4.9 GB", + .repo = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF", + .filename = "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + }, +}; + +// ------------------------------------------------------------------------- +// モデル保存ディレクトリ +// ------------------------------------------------------------------------- + +std::filesystem::path get_models_dir() { +#ifdef _WIN32 + auto env = std::getenv("APPDATA"); + auto base = env ? std::filesystem::path(env) : std::filesystem::path("."); + return base / "translate-app" / "models"; +#else + auto env = std::getenv("HOME"); + auto base = env ? 
std::filesystem::path(env) : std::filesystem::path("."); + return base / ".translate-app" / "models"; +#endif +} + +// ------------------------------------------------------------------------- +// モデルダウンロード +// ------------------------------------------------------------------------- + +// progress_cbがfalseを返したらダウンロードを中断する +bool download_model(const ModelInfo &model, + std::function progress_cb) { + httplib::Client cli("https://huggingface.co"); + cli.set_follow_location(true); // Hugging FaceはCDNにリダイレクトする + cli.set_read_timeout(std::chrono::hours(1)); // 大きなモデルに備えて長めに + + auto url = "/" + model.repo + "/resolve/main/" + model.filename; + auto path = get_models_dir() / model.filename; + auto tmp_path = std::filesystem::path(path).concat(".tmp"); + + std::ofstream ofs(tmp_path, std::ios::binary); + if (!ofs) { return false; } + + auto res = cli.Get(url, + // content_receiver: チャンクごとにデータを受け取ってファイルに書き込む + [&](const char *data, size_t len) { + ofs.write(data, len); + return ofs.good(); + }, + // progress: ダウンロード進捗を通知(falseを返すと中断) + [&, last_pct = -1](size_t current, size_t total) mutable { + int pct = total ? 
(int)(current * 100 / total) : 0; + if (pct == last_pct) return true; // 同じ値なら通知をスキップ + last_pct = pct; + return progress_cb(pct); + }); + + ofs.close(); + + if (!res || res->status != 200) { + std::filesystem::remove(tmp_path); + return false; + } + + // ダウンロード完了後にリネーム + std::filesystem::rename(tmp_path, path); + return true; +} + +// ------------------------------------------------------------------------- +// サーバー +// ------------------------------------------------------------------------- + +httplib::Server svr; + +void signal_handler(int sig) { + if (sig == SIGINT || sig == SIGTERM) { + std::cout << "\nReceived signal, shutting down gracefully...\n"; + svr.stop(); + } +} + +int main() { + // モデル保存ディレクトリを作成 + auto models_dir = get_models_dir(); + std::filesystem::create_directories(models_dir); + + // デフォルトモデルが未ダウンロードなら自動取得 + std::string selected_model = MODELS[0].filename; + auto path = models_dir / selected_model; + if (!std::filesystem::exists(path)) { + std::cout << "Downloading " << selected_model << "..." << std::endl; + if (!download_model(MODELS[0], [](int pct) { + std::cout << "\r" << pct << "%" << std::flush; + return true; + })) { + std::cerr << "\nFailed to download model." 
<< std::endl; + return 1; + } + std::cout << std::endl; + } + auto llm = llamalib::Llama{path}; + + // LLM推論は時間がかかるのでタイムアウトを長めに設定(デフォルトは5秒) + svr.set_read_timeout(300); + svr.set_write_timeout(300); + + svr.set_logger([](const auto &req, const auto &res) { + std::cout << req.method << " " << req.path << " -> " << res.status + << std::endl; + }); + + svr.Get("/health", [](const httplib::Request &, httplib::Response &res) { + res.set_content(json{{"status", "ok"}}.dump(), "application/json"); + }); + + // --- 翻訳エンドポイント(2章) ----------------------------------------- + + svr.Post("/translate", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + try { + auto translation = llm.chat(prompt); + res.set_content(json{{"translation", translation}}.dump(), + "application/json"); + } catch (const std::exception &e) { + res.status = 500; + res.set_content(json{{"error", e.what()}}.dump(), "application/json"); + } + }); + + // --- SSEストリーミング翻訳(3章)-------------------------------------- + + svr.Post("/translate/stream", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + res.set_chunked_content_provider( + "text/event-stream", + [&, prompt](size_t, httplib::DataSink &sink) { + try { + llm.chat(prompt, [&](std::string_view token) { + sink.os << "data: " + << json(std::string(token)).dump( + -1, ' ', false, json::error_handler_t::replace) + << "\n\n"; + return sink.os.good(); // 切断されたら推論を中断 + }); + sink.os << "data: [DONE]\n\n"; + } catch (const std::exception &e) { + sink.os << "data: " << json({{"error", e.what()}}).dump() << "\n\n"; + } + sink.done(); + return true; + }); + }); + + // --- モデル一覧(4章) ------------------------------------------------- + + svr.Get("/models", + [&](const httplib::Request &, httplib::Response &res) { + auto models_dir = get_models_dir(); + auto arr = json::array(); + for (const auto &m : MODELS) { + auto path = models_dir / m.filename; + arr.push_back({ + {"name", m.name}, + {"params", m.params}, + {"size", m.size}, + {"downloaded", std::filesystem::exists(path)}, + {"selected", m.filename == selected_model}, + }); + } + res.set_content(json{{"models", arr}}.dump(), "application/json"); + }); + + // --- モデル選択(4章) ------------------------------------------------- + + svr.Post("/models/select", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded() || !input.contains("model")) { + res.status = 400; + res.set_content(json{{"error", "'model' is required"}}.dump(), + "application/json"); + return; + } + + auto name = input["model"].get(); + + auto it = std::find_if(MODELS.begin(), MODELS.end(), + [&](const ModelInfo &m) { return m.name == name; }); + + if (it == MODELS.end()) { + res.status = 404; + res.set_content(json{{"error", "Unknown model"}}.dump(), + "application/json"); + return; + } + + const auto &model = *it; + + // 常にSSEで応答する(DL済みでも未DLでも同じ形式) + res.set_chunked_content_provider( + "text/event-stream", + [&, model](size_t, httplib::DataSink &sink) { + // 
SSEイベント送信ヘルパー + auto send = [&](const json &event) { + sink.os << "data: " << event.dump() << "\n\n"; + }; + + // 未ダウンロードならダウンロード(進捗をSSEで通知) + auto path = get_models_dir() / model.filename; + if (!std::filesystem::exists(path)) { + bool ok = download_model(model, [&](int pct) { + send({{"status", "downloading"}, {"progress", pct}}); + return sink.os.good(); // クライアント切断時にダウンロードを中断 + }); + if (!ok) { + send({{"status", "error"}, {"message", "Download failed"}}); + sink.done(); + return true; + } + } + + // モデルをロードして切り替え + send({{"status", "loading"}}); + llm = llamalib::Llama{path}; + selected_model = model.filename; + + send({{"status", "ready"}}); + sink.done(); + return true; + }); + }); + + // --- 静的ファイル配信(5章) ------------------------------------------- + + svr.set_mount_point("/", "./public"); + + // `Ctrl+C` (`SIGINT`)や`kill` (`SIGTERM`)でサーバーを停止できるようにする + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); + + std::cout << "Listening on http://127.0.0.1:8080" << std::endl; + svr.listen("127.0.0.1", 8080); +} +``` + +
+ +## 5.7 動作確認 + +ビルドし直してサーバーを起動します。 + +```bash +cmake --build build -j +./build/translate-server +``` + +ブラウザで`http://127.0.0.1:8080`を開きます。 + +1. テキストを入力する → 300ms後にトークンが逐次表示される +2. 入力を変更する → 前の翻訳が中断され、新しい翻訳が始まる +3. 言語のドロップダウンを変更する → 自動で再翻訳される +4. モデルのドロップダウンを変更する → ダウンロード済みならすぐ切り替わる +5. 未ダウンロードのモデルを選ぶ → 進捗バーが表示され、Cancelで中断できる + +curlで操作していた4章と同じことが、ブラウザからできるようになりました。 + +## 次の章へ + +サーバーとWeb UIが揃いました。次の章ではこのアプリをwebview/webviewで包んで、ブラウザなしで動くデスクトップアプリにします。静的ファイルをバイナリに埋め込んで、配布物をバイナリ1つにまとめます。 + +**Next:** [WebViewでデスクトップアプリ化する](../ch06-desktop-app) diff --git a/docs-src/pages/ja/llm-app/ch06-desktop-app.md b/docs-src/pages/ja/llm-app/ch06-desktop-app.md new file mode 100644 index 0000000..ebdedd6 --- /dev/null +++ b/docs-src/pages/ja/llm-app/ch06-desktop-app.md @@ -0,0 +1,724 @@ +--- +title: "6. WebViewでデスクトップアプリ化する" +order: 6 + +--- + +5章で、ブラウザから操作できる翻訳アプリが完成しました。でも使うたびに「サーバーを起動して、ブラウザでURLを開いて…」という手順が必要です。普通のアプリのように、ダブルクリックで起動してすぐ使えるようにしたいですよね。 + +この章では2つのことをやります。 + +1. **WebView化** — [webview/webview](https://github.com/webview/webview)でブラウザなしで動くデスクトップアプリにする +2. 
**シングルバイナリ化** — [cpp-embedlib](https://github.com/yhirose/cpp-embedlib)でHTML/CSS/JSをバイナリに埋め込み、配布物を1ファイルにする + +完成すると、`./translate-app`を実行するだけでウインドウが開き、翻訳が使えるようになります。 + +![Desktop App](../app.png#large-center) + +モデルは初回起動時に自動ダウンロードされるので、ユーザーに渡すのはバイナリ1つだけです。 + +## 6.1 webview/webview を導入する + +[webview/webview](https://github.com/webview/webview)は、OS標準のWebViewコンポーネント(macOSならWKWebView、LinuxならWebKitGTK、WindowsならWebView2)をC/C++から使えるようにするライブラリです。Electronのように独自ブラウザを同梱するわけではないので、バイナリサイズへの影響はほぼありません。 + +CMakeで取得します。`CMakeLists.txt`に以下を追加してください。 + +```cmake +# webview/webview +FetchContent_Declare(webview + GIT_REPOSITORY https://github.com/webview/webview + GIT_TAG master +) +FetchContent_MakeAvailable(webview) +``` + +これで`webview::core`というCMakeターゲットが使えるようになります。`target_link_libraries`でリンクすると、インクルードパスやプラットフォーム固有のフレームワークを自動で設定してくれます。 + +> **macOS**: 追加の依存は不要です。WKWebViewはシステムに組み込まれています。 +> +> **Linux**: WebKitGTKが必要です。`sudo apt install libwebkit2gtk-4.1-dev`でインストールしてください。 +> +> **Windows**: WebView2ランタイムが必要です。Windows 11には標準搭載されています。Windows 10の場合は[Microsoft公式サイト](https://developer.microsoft.com/en-us/microsoft-edge/webview2/)から入手してください。 + +## 6.2 サーバーをバックグラウンドスレッドで動かす + +5章まではサーバーの`listen()`がメインスレッドをブロックしていました。WebViewを使うには、サーバーを別スレッドで動かし、メインスレッドでWebViewのイベントループを回す必要があります。 + +```cpp +#include "webview/webview.h" +#include <thread> + +int main() { + // ... (サーバーのセットアップは5章と同じ) ...
+ + // サーバーをバックグラウンドスレッドで起動 + auto port = svr.bind_to_any_port("127.0.0.1"); + std::thread server_thread([&]() { svr.listen_after_bind(); }); + + std::cout << "Listening on http://127.0.0.1:" << port << std::endl; + + // WebViewでUIを表示 + webview::webview w(false, nullptr); + w.set_title("Translate App"); + w.set_size(1024, 768, WEBVIEW_HINT_NONE); + w.navigate("http://127.0.0.1:" + std::to_string(port)); + w.run(); // ウインドウが閉じるまでブロック + + // ウインドウが閉じたらサーバーも停止 + svr.stop(); + server_thread.join(); +} +``` + +ポイントを見ていきましょう。 + +- **`bind_to_any_port`** — `listen("127.0.0.1", 8080)`の代わりに、OSに空いているポートを選んでもらいます。デスクトップアプリは複数起動されることがあるので、ポートを固定するとぶつかります +- **`listen_after_bind`** — `bind_to_any_port`で確保したポートでリクエストの受付を開始します。`listen()`はbindとlistenを一度にやりますが、ポート番号を先に知る必要があるので分けています +- **シャットダウン順序** — WebViewのウインドウが閉じたら`svr.stop()`でサーバーを止め、`server_thread.join()`でスレッドの終了を待ちます。逆順だとWebViewがサーバーにアクセスできなくなります + +5章の`signal_handler`は不要になります。デスクトップアプリではウインドウを閉じることがアプリの終了を意味するからです。 + +## 6.3 cpp-embedlib で静的ファイルを埋め込む + +5章では`public/`ディレクトリからファイルを配信していました。これだと配布時に`public/`も一緒に渡す必要があります。[cpp-embedlib](https://github.com/yhirose/cpp-embedlib)を使うと、HTML・CSS・JavaScriptをバイナリに埋め込んで、配布物をバイナリ1つにまとめられます。 + +### CMakeLists.txt + +cpp-embedlibを取得し、`public/`を埋め込みます。 + +```cmake +# cpp-embedlib +FetchContent_Declare(cpp-embedlib + GIT_REPOSITORY https://github.com/yhirose/cpp-embedlib + GIT_TAG main +) +FetchContent_MakeAvailable(cpp-embedlib) + +# public/ ディレクトリをバイナリに埋め込む +cpp_embedlib_add(WebAssets + FOLDER ${CMAKE_CURRENT_SOURCE_DIR}/public + NAMESPACE Web +) + +target_link_libraries(translate-app PRIVATE + WebAssets # 埋め込みファイル + cpp-embedlib-httplib # cpp-httplib連携 +) +``` + +`cpp_embedlib_add`は、`public/`配下のファイルをコンパイル時にバイナリに変換し、`WebAssets`という静的ライブラリを作ります。リンクすると`Web::FS`というオブジェクトから埋め込みファイルにアクセスできます。`cpp-embedlib-httplib`は`httplib::mount()`関数を提供するヘルパーライブラリです。 + +### set_mount_point を httplib::mount に置き換える + +5章の`set_mount_point`をcpp-embedlibの`httplib::mount`に置き換えるだけです。 + +```cpp +#include +#include 
"WebAssets.h" + +// 5章: +// svr.set_mount_point("/", "./public"); + +// 6章: +httplib::mount(svr, Web::FS); +``` + +`httplib::mount`は、`Web::FS`に埋め込まれたファイルをHTTPで配信するハンドラを登録します。MIMEタイプはファイルの拡張子から自動判定するので、`Content-Type`を手動で設定する必要はありません。 + +ファイルの中身はバイナリのデータセグメントに直接マップしているので、メモリコピーもヒープ割り当ても発生しません。 + +## 6.4 macOS: Editメニューの追加 + +入力欄に`Cmd+V`でテキストをペーストしようとすると、動かないことに気づくはずです。macOSでは、`Cmd+V`(ペースト)や`Cmd+C`(コピー)などのキーボードショートカットは、アプリケーションのメニューバーを経由してWebViewに届きます。webview/webviewはメニューバーを作らないので、これらのショートカットが効きません。Objective-CランタイムAPIを使ってEditメニューを追加する必要があります。 + +```cpp +#ifdef __APPLE__ +#include + +void setup_macos_edit_menu() { + auto cls = [](const char *n) { return (id)objc_getClass(n); }; + auto sel = sel_registerName; + auto msg = reinterpret_cast(objc_msgSend); + auto msg_s = reinterpret_cast(objc_msgSend); + auto msg_id = reinterpret_cast(objc_msgSend); + auto msg_v = reinterpret_cast(objc_msgSend); + auto msg_mi = reinterpret_cast(objc_msgSend); + + auto str = [&](const char *s) { + return msg_s(cls("NSString"), sel("stringWithUTF8String:"), s); + }; + + id app = msg(cls("NSApplication"), sel("sharedApplication")); + id mainMenu = msg(msg(cls("NSMenu"), sel("alloc")), sel("init")); + id editItem = msg(msg(cls("NSMenuItem"), sel("alloc")), sel("init")); + id editMenu = msg_id(msg(cls("NSMenu"), sel("alloc")), + sel("initWithTitle:"), str("Edit")); + + struct { const char *title; const char *action; const char *key; } items[] = { + {"Undo", "undo:", "z"}, + {"Redo", "redo:", "Z"}, + {"Cut", "cut:", "x"}, + {"Copy", "copy:", "c"}, + {"Paste", "paste:", "v"}, + {"Select All", "selectAll:", "a"}, + }; + + for (auto &[title, action, key] : items) { + id mi = msg_mi(msg(cls("NSMenuItem"), sel("alloc")), + sel("initWithTitle:action:keyEquivalent:"), + str(title), sel(action), str(key)); + msg_v(editMenu, sel("addItem:"), mi); + } + + msg_v(editItem, sel("setSubmenu:"), editMenu); + msg_v(mainMenu, sel("addItem:"), editItem); + msg_v(app, sel("setMainMenu:"), mainMenu); +} +#endif 
+``` + +`w.run()`の前に呼び出します。 + +```cpp +#ifdef __APPLE__ + setup_macos_edit_menu(); +#endif + w.run(); +``` + +WindowsとLinuxでは、キーボードショートカットはメニューバーを介さずフォーカスのあるコントロールに直接届くので、この対処はmacOS固有です。 + +## 6.5 全体のコード + +
+全体のコード(CMakeLists.txt) + +```cmake +cmake_minimum_required(VERSION 3.20) +project(translate-app CXX) +set(CMAKE_CXX_STANDARD 20) + +include(FetchContent) + +# llama.cpp +FetchContent_Declare(llama + GIT_REPOSITORY https://github.com/ggml-org/llama.cpp + GIT_TAG master + GIT_SHALLOW TRUE +) +FetchContent_MakeAvailable(llama) + +# cpp-httplib +FetchContent_Declare(httplib + GIT_REPOSITORY https://github.com/yhirose/cpp-httplib + GIT_TAG master +) +FetchContent_MakeAvailable(httplib) + +# nlohmann/json +FetchContent_Declare(json + URL https://github.com/nlohmann/json/releases/download/v3.11.3/json.tar.xz +) +FetchContent_MakeAvailable(json) + +# cpp-llamalib +FetchContent_Declare(cpp_llamalib + GIT_REPOSITORY https://github.com/yhirose/cpp-llamalib + GIT_TAG main +) +FetchContent_MakeAvailable(cpp_llamalib) + +# webview/webview +FetchContent_Declare(webview + GIT_REPOSITORY https://github.com/webview/webview + GIT_TAG master +) +FetchContent_MakeAvailable(webview) + +# cpp-embedlib +FetchContent_Declare(cpp-embedlib + GIT_REPOSITORY https://github.com/yhirose/cpp-embedlib + GIT_TAG main +) +FetchContent_MakeAvailable(cpp-embedlib) + +# public/ ディレクトリをバイナリに埋め込む +cpp_embedlib_add(WebAssets + FOLDER ${CMAKE_CURRENT_SOURCE_DIR}/public + NAMESPACE Web +) + +find_package(OpenSSL REQUIRED) + +add_executable(translate-app src/main.cpp) + +target_link_libraries(translate-app PRIVATE + httplib::httplib + nlohmann_json::nlohmann_json + cpp-llamalib + OpenSSL::SSL OpenSSL::Crypto + WebAssets + cpp-embedlib-httplib + webview::core +) + +if(APPLE) + target_link_libraries(translate-app PRIVATE + "-framework CoreFoundation" + "-framework Security" + ) +endif() + +target_compile_definitions(translate-app PRIVATE + CPPHTTPLIB_OPENSSL_SUPPORT +) +``` + +
+ +
+全体のコード(main.cpp) + +```cpp +#include +#include +#include +#include +#include "WebAssets.h" +#include "webview/webview.h" + +#ifdef __APPLE__ +#include +#endif + +#include +#include +#include +#include +#include +#include + +using json = nlohmann::json; + +// ------------------------------------------------------------------------- +// macOS Editメニュー(Cmd+C/V/X/AにはEditメニューが必要) +// ------------------------------------------------------------------------- + +#ifdef __APPLE__ +void setup_macos_edit_menu() { + auto cls = [](const char *n) { return (id)objc_getClass(n); }; + auto sel = sel_registerName; + auto msg = reinterpret_cast(objc_msgSend); + auto msg_s = reinterpret_cast(objc_msgSend); + auto msg_id = reinterpret_cast(objc_msgSend); + auto msg_v = reinterpret_cast(objc_msgSend); + auto msg_mi = reinterpret_cast(objc_msgSend); + + auto str = [&](const char *s) { + return msg_s(cls("NSString"), sel("stringWithUTF8String:"), s); + }; + + id app = msg(cls("NSApplication"), sel("sharedApplication")); + id mainMenu = msg(msg(cls("NSMenu"), sel("alloc")), sel("init")); + id editItem = msg(msg(cls("NSMenuItem"), sel("alloc")), sel("init")); + id editMenu = msg_id(msg(cls("NSMenu"), sel("alloc")), + sel("initWithTitle:"), str("Edit")); + + struct { const char *title; const char *action; const char *key; } items[] = { + {"Undo", "undo:", "z"}, + {"Redo", "redo:", "Z"}, + {"Cut", "cut:", "x"}, + {"Copy", "copy:", "c"}, + {"Paste", "paste:", "v"}, + {"Select All", "selectAll:", "a"}, + }; + + for (auto &[title, action, key] : items) { + id mi = msg_mi(msg(cls("NSMenuItem"), sel("alloc")), + sel("initWithTitle:action:keyEquivalent:"), + str(title), sel(action), str(key)); + msg_v(editMenu, sel("addItem:"), mi); + } + + msg_v(editItem, sel("setSubmenu:"), editMenu); + msg_v(mainMenu, sel("addItem:"), editItem); + msg_v(app, sel("setMainMenu:"), mainMenu); +} +#endif + +// ------------------------------------------------------------------------- +// モデル定義 +// 
------------------------------------------------------------------------- + +struct ModelInfo { + std::string name; + std::string params; + std::string size; + std::string repo; + std::string filename; +}; + +const std::vector MODELS = { + { + .name = "gemma-2-2b-it", + .params = "2B", + .size = "1.6 GB", + .repo = "bartowski/gemma-2-2b-it-GGUF", + .filename = "gemma-2-2b-it-Q4_K_M.gguf", + }, + { + .name = "gemma-2-9b-it", + .params = "9B", + .size = "5.8 GB", + .repo = "bartowski/gemma-2-9b-it-GGUF", + .filename = "gemma-2-9b-it-Q4_K_M.gguf", + }, + { + .name = "Llama-3.1-8B-Instruct", + .params = "8B", + .size = "4.9 GB", + .repo = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF", + .filename = "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + }, +}; + +// ------------------------------------------------------------------------- +// モデル保存ディレクトリ +// ------------------------------------------------------------------------- + +std::filesystem::path get_models_dir() { +#ifdef _WIN32 + auto env = std::getenv("APPDATA"); + auto base = env ? std::filesystem::path(env) : std::filesystem::path("."); + return base / "translate-app" / "models"; +#else + auto env = std::getenv("HOME"); + auto base = env ? 
std::filesystem::path(env) : std::filesystem::path("."); + return base / ".translate-app" / "models"; +#endif +} + +// ------------------------------------------------------------------------- +// モデルダウンロード +// ------------------------------------------------------------------------- + +// progress_cbがfalseを返したらダウンロードを中断する +bool download_model(const ModelInfo &model, + std::function progress_cb) { + httplib::Client cli("https://huggingface.co"); + cli.set_follow_location(true); // Hugging FaceはCDNにリダイレクトする + cli.set_read_timeout(std::chrono::hours(1)); // 大きなモデルに備えて長めに + + auto url = "/" + model.repo + "/resolve/main/" + model.filename; + auto path = get_models_dir() / model.filename; + auto tmp_path = std::filesystem::path(path).concat(".tmp"); + + std::ofstream ofs(tmp_path, std::ios::binary); + if (!ofs) { return false; } + + auto res = cli.Get(url, + // content_receiver: チャンクごとにデータを受け取ってファイルに書き込む + [&](const char *data, size_t len) { + ofs.write(data, len); + return ofs.good(); + }, + // progress: ダウンロード進捗を通知(falseを返すと中断) + [&, last_pct = -1](size_t current, size_t total) mutable { + int pct = total ? (int)(current * 100 / total) : 0; + if (pct == last_pct) return true; // 同じ値なら通知をスキップ + last_pct = pct; + return progress_cb(pct); + }); + + ofs.close(); + + if (!res || res->status != 200) { + std::filesystem::remove(tmp_path); + return false; + } + + // ダウンロード完了後にリネーム + std::filesystem::rename(tmp_path, path); + return true; +} + +// ------------------------------------------------------------------------- +// サーバー +// ------------------------------------------------------------------------- + +int main() { + httplib::Server svr; + // モデル保存ディレクトリを作成 + auto models_dir = get_models_dir(); + std::filesystem::create_directories(models_dir); + + // デフォルトモデルが未ダウンロードなら自動取得 + std::string selected_model = MODELS[0].filename; + auto path = models_dir / selected_model; + if (!std::filesystem::exists(path)) { + std::cout << "Downloading " << selected_model << "..." 
<< std::endl; + if (!download_model(MODELS[0], [](int pct) { + std::cout << "\r" << pct << "%" << std::flush; + return true; + })) { + std::cerr << "\nFailed to download model." << std::endl; + return 1; + } + std::cout << std::endl; + } + auto llm = llamalib::Llama{path}; + std::mutex llm_mutex; // モデル切り替え中のアクセスを保護する + + // LLM推論は時間がかかるのでタイムアウトを長めに設定(デフォルトは5秒) + svr.set_read_timeout(300); + svr.set_write_timeout(300); + + svr.set_logger([](const auto &req, const auto &res) { + std::cout << req.method << " " << req.path << " -> " << res.status + << std::endl; + }); + + svr.Get("/health", [](const httplib::Request &, httplib::Response &res) { + res.set_content(json{{"status", "ok"}}.dump(), "application/json"); + }); + + // --- 翻訳エンドポイント(2章) ----------------------------------------- + + svr.Post("/translate", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + try { + std::lock_guard lock(llm_mutex); + auto translation = llm.chat(prompt); + res.set_content(json{{"translation", translation}}.dump(), + "application/json"); + } catch (const std::exception &e) { + res.status = 500; + res.set_content(json{{"error", e.what()}}.dump(), "application/json"); + } + }); + + // --- SSEストリーミング翻訳(3章)-------------------------------------- + + svr.Post("/translate/stream", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded()) { + res.status = 400; + res.set_content(json{{"error", "Invalid JSON"}}.dump(), + "application/json"); + return; + } + + if (!input.contains("text") || !input["text"].is_string() || + input["text"].get().empty()) { + res.status = 400; + res.set_content(json{{"error", "'text' is required"}}.dump(), + "application/json"); + return; + } + + auto text = input["text"].get(); + auto target_lang = input.value("target_lang", "ja"); + + auto prompt = "Translate the following text to " + target_lang + + ". 
Output only the translation, nothing else.\n\n" + text; + + res.set_chunked_content_provider( + "text/event-stream", + [&, prompt](size_t, httplib::DataSink &sink) { + std::lock_guard lock(llm_mutex); + try { + llm.chat(prompt, [&](std::string_view token) { + sink.os << "data: " + << json(std::string(token)).dump( + -1, ' ', false, json::error_handler_t::replace) + << "\n\n"; + return sink.os.good(); // 切断されたら推論を中断 + }); + sink.os << "data: [DONE]\n\n"; + } catch (const std::exception &e) { + sink.os << "data: " << json({{"error", e.what()}}).dump() << "\n\n"; + } + sink.done(); + return true; + }); + }); + + // --- モデル一覧(4章) ------------------------------------------------- + + svr.Get("/models", + [&](const httplib::Request &, httplib::Response &res) { + auto models_dir = get_models_dir(); + auto arr = json::array(); + for (const auto &m : MODELS) { + auto path = models_dir / m.filename; + arr.push_back({ + {"name", m.name}, + {"params", m.params}, + {"size", m.size}, + {"downloaded", std::filesystem::exists(path)}, + {"selected", m.filename == selected_model}, + }); + } + res.set_content(json{{"models", arr}}.dump(), "application/json"); + }); + + // --- モデル選択(4章) ------------------------------------------------- + + svr.Post("/models/select", + [&](const httplib::Request &req, httplib::Response &res) { + auto input = json::parse(req.body, nullptr, false); + if (input.is_discarded() || !input.contains("model")) { + res.status = 400; + res.set_content(json{{"error", "'model' is required"}}.dump(), + "application/json"); + return; + } + + auto name = input["model"].get(); + + auto it = std::find_if(MODELS.begin(), MODELS.end(), + [&](const ModelInfo &m) { return m.name == name; }); + + if (it == MODELS.end()) { + res.status = 404; + res.set_content(json{{"error", "Unknown model"}}.dump(), + "application/json"); + return; + } + + const auto &model = *it; + + // 常にSSEで応答する(DL済みでも未DLでも同じ形式) + res.set_chunked_content_provider( + "text/event-stream", + [&, 
model](size_t, httplib::DataSink &sink) { + // SSEイベント送信ヘルパー + auto send = [&](const json &event) { + sink.os << "data: " << event.dump() << "\n\n"; + }; + + // 未ダウンロードならダウンロード(進捗をSSEで通知) + auto path = get_models_dir() / model.filename; + if (!std::filesystem::exists(path)) { + bool ok = download_model(model, [&](int pct) { + send({{"status", "downloading"}, {"progress", pct}}); + return sink.os.good(); // クライアント切断時にダウンロードを中断 + }); + if (!ok) { + send({{"status", "error"}, {"message", "Download failed"}}); + sink.done(); + return true; + } + } + + // モデルをロードして切り替え + send({{"status", "loading"}}); + { + std::lock_guard lock(llm_mutex); + llm = llamalib::Llama{path}; + selected_model = model.filename; + } + + send({{"status", "ready"}}); + sink.done(); + return true; + }); + }); + + // --- 埋め込みファイル配信(6章) --------------------------------------- + // 5章: svr.set_mount_point("/", "./public"); + httplib::mount(svr, Web::FS); + + // サーバーをバックグラウンドスレッドで起動 + auto port = svr.bind_to_any_port("127.0.0.1"); + std::thread server_thread([&]() { svr.listen_after_bind(); }); + + std::cout << "Listening on http://127.0.0.1:" << port << std::endl; + + // WebViewでUIを表示 + webview::webview w(false, nullptr); + w.set_title("Translate App"); + w.set_size(1024, 768, WEBVIEW_HINT_NONE); + w.navigate("http://127.0.0.1:" + std::to_string(port)); + +#ifdef __APPLE__ + setup_macos_edit_menu(); +#endif + w.run(); // ウインドウが閉じるまでブロック + + // ウインドウが閉じたらサーバーも停止 + svr.stop(); + server_thread.join(); +} +``` + +
+ +5章からの変更点をまとめると: + +- `#include ` → `#include `, ``, `"WebAssets.h"`, `"webview/webview.h"` +- `signal_handler`関数を削除 +- `svr.set_mount_point("/", "./public")` → `httplib::mount(svr, Web::FS)` +- `svr.listen("127.0.0.1", 8080)` → `bind_to_any_port` + `listen_after_bind` + WebViewのイベントループ + +ハンドラのコードは1行も変わっていません。5章まで作ってきたREST API・SSEストリーミング・モデル管理がそのまま動きます。 + +## 6.6 ビルドと動作確認 + +```bash +cmake -B build +cmake --build build -j +``` + +起動します。 + +```bash +./build/translate-app +``` + +ブラウザは不要です。ウインドウが自動で開きます。5章と同じUIがそのまま表示され、翻訳やモデル切り替えがすべてそのまま動きます。 + +ウインドウを閉じるとサーバーも自動で終了します。`Ctrl+C`は不要です。 + +### 何が配布に必要か + +配布に必要なのは: + +- `translate-app`バイナリ1つ + +これだけです。`public/`ディレクトリは不要です。HTML・CSS・JavaScriptはバイナリに埋め込まれています。モデルファイルは初回起動時に自動ダウンロードするので、ユーザーに事前準備を求める必要もありません。 + +## 次の章へ + +お疲れさまでした!🎉 + +1章では`/health`が`{"status":"ok"}`を返すだけでした。それが今、テキストを入力すればリアルタイムで翻訳が流れ、ドロップダウンからモデルを切り替えれば自動でダウンロードが始まり、ウインドウを閉じればサーバーも一緒に終了する―そんなデスクトップアプリになりました。しかもバイナリ1つで配れます。 + +6章で変えたのは、静的ファイルの配信方法とサーバーの起動方法だけです。ハンドラのコードは1行も変わっていません。5章までに積み上げてきたREST API・SSEストリーミング・モデル管理が、そのままデスクトップアプリとして動いています。 + +次の章では視点を変えて、llama.cpp本家の`llama-server`のコードを読みます。本書のシンプルなサーバーと、プロダクション品質のサーバーを比較して、設計判断の違いとその理由を学びましょう。 + +**Next:** [llama.cpp本家のサーバー実装をコードリーディング](../ch07-code-reading) diff --git a/docs-src/pages/ja/llm-app/ch07-code-reading.md b/docs-src/pages/ja/llm-app/ch07-code-reading.md new file mode 100644 index 0000000..57e0e14 --- /dev/null +++ b/docs-src/pages/ja/llm-app/ch07-code-reading.md @@ -0,0 +1,154 @@ +--- +title: "7. llama.cpp本家のサーバー実装をコードリーディング" +order: 7 + +--- + +6章かけてゼロから翻訳デスクトップアプリを作りました。動くものは完成しましたが、あくまで「学習用」の実装です。では「プロダクション品質」のコードはどう違うのか? llama.cppに同梱されている公式サーバー`llama-server`のソースコードを読んで、比較してみましょう。 + +`llama-server`は`llama.cpp/tools/server/`にあります。同じcpp-httplibを使っているので、コードの読み方はこれまでの章と同じです。 + +## 7.1 ソースコードの場所 + +```ascii +llama.cpp/tools/server/ +├── server.cpp # メインのサーバー実装 +├── httplib.h # cpp-httplib(同梱版) +└── ... 
+``` + +ファイルは1つの`server.cpp`にまとまっています。数千行ありますが、構造を知っていれば読むべき箇所は絞れます。 + +## 7.2 OpenAI互換API + +ここまで作ってきたサーバーと`llama-server`の最も大きな違いはAPIの設計です。 + +**私たちのAPI:** + +```text +POST /translate → {"translation": "..."} +POST /translate/stream → SSE: data: "token" +``` + +**llama-serverのAPI:** + +```text +POST /v1/chat/completions → OpenAI互換のJSON +POST /v1/completions → OpenAI互換のJSON +POST /v1/embeddings → テキスト埋め込みベクトル +``` + +`llama-server`は[OpenAIのAPI仕様](https://platform.openai.com/docs/api-reference)に合わせています。つまり、OpenAIの公式クライアントライブラリ(Pythonの`openai`パッケージなど)がそのまま動きます。 + +```python +# OpenAIクライアントでllama-serverに接続する例 +from openai import OpenAI +client = OpenAI(base_url="http://localhost:8080/v1", api_key="dummy") + +response = client.chat.completions.create( + model="local-model", + messages=[{"role": "user", "content": "Hello!"}] +) +``` + +既存のツールやライブラリとの互換性を持たせるかどうかは、大きな設計判断です。私たちは翻訳専用のAPIをシンプルに設計しましたが、汎用のサーバーを作るならOpenAI互換が事実上の標準になっています。 + +## 7.3 同時リクエスト処理 + +私たちのサーバーはリクエストを1つずつ処理します。翻訳中に別のリクエストが来ると、前の推論が終わるまで待ちます。1人で使うデスクトップアプリなら問題ありませんが、複数人で共有するサーバーでは困ります。 + +`llama-server`は**スロット**という仕組みで同時リクエストを処理します。 + +![llama-serverのスロット管理](../slots.svg#half) + +ポイントは、各スロットのトークンを**1つずつ順番に**ではなく、**まとめて1回のバッチ**で推論することです。GPUは並列処理が得意なので、2人分を同時に処理しても1人分とほとんど変わらない時間で済みます。これを「連続バッチ処理(continuous batching)」と呼びます。 + +私たちのサーバーではcpp-httplibのスレッドプールが各リクエストに1スレッドを割り当てますが、推論自体は`llm.chat()`の中でシングルスレッドです。`llama-server`はこの推論部分を共有のバッチ処理ループに集約しています。 + +## 7.4 SSEフォーマットの違い + +ストリーミングの仕組み自体は同じ(`set_chunked_content_provider` + SSE)ですが、送るデータのフォーマットが違います。 + +**私たちの形式:** + +```text +data: "去年の" +data: "春に" +data: [DONE] +``` + +**llama-server(OpenAI互換):** + +```text +data: {"id":"chatcmpl-xxx","object":"chat.completion.chunk","choices":[{"delta":{"content":"去年の"}}]} +data: {"id":"chatcmpl-xxx","object":"chat.completion.chunk","choices":[{"delta":{"content":"春に"}}]} +data: [DONE] +``` + 
+私たちの形式はトークンだけを送るシンプルなものです。`llama-server`はOpenAI互換のため、1つのトークンにもJSONのラッパーが付きます。冗長に見えますが、`id`でリクエストを識別したり、`finish_reason`で停止理由を返せたりと、クライアントにとって便利な情報が含まれています。 + +## 7.5 KVキャッシュの再利用 + +私たちのサーバーでは、リクエストのたびにプロンプト全体をゼロから処理しています。翻訳アプリのプロンプトは短い("Translate the following text to ja..." + 入力テキスト)ので、これで問題ありません。 + +`llama-server`は、前のリクエストと共通するプロンプトのprefixがある場合、その部分のKVキャッシュを再利用します。 + +![KVキャッシュの再利用](../kv-cache.svg#half) + +長いシステムプロンプトやfew-shot例を毎回送るチャットボットでは、これだけで応答時間が大幅に短縮されます。数千トークンのシステムプロンプトを毎回処理するのと、キャッシュから一瞬で読むのとでは、体感が全く違います。 + +翻訳アプリではシステムプロンプトが1文だけなので効果は限定的ですが、自分のアプリに応用するときは意識したい最適化です。 + +## 7.6 構造化出力 + +翻訳APIはプレーンテキストを返すので、出力形式を制約する必要がありませんでした。でも、LLMにJSONで返させたい場合はどうでしょう? + +```text +プロンプト: 以下の文の感情を分析してJSONで返してください。 +LLMの出力(期待): {"sentiment": "positive", "score": 0.8} +LLMの出力(現実): 感情分析の結果は以下の通りです。{"sentiment": ... +``` + +LLMは指示を無視して余計なテキストを付けることがあります。`llama-server`はこの問題を**文法制約(grammar)**で解決しています。 + +```bash +curl http://localhost:8080/v1/chat/completions \ + -d '{ + "messages": [{"role": "user", "content": "Analyze sentiment..."}], + "json_schema": { + "type": "object", + "properties": { + "sentiment": {"type": "string", "enum": ["positive", "negative", "neutral"]}, + "score": {"type": "number"} + }, + "required": ["sentiment", "score"] + } + }' +``` + +`json_schema`を指定すると、LLMのトークン生成時に文法に合わないトークンを除外します。出力が必ず有効なJSONになるので、`json::parse`が失敗する心配がありません。 + +LLMをアプリに組み込むとき、出力を確実にパースできるかどうかは信頼性に直結します。翻訳のようなフリーテキスト出力では不要ですが、APIのレスポンスとして構造化データを返す用途では必須の機能です。 + +## 7.7 まとめ + +ここまでの違いを整理します。 + +| 観点 | 私たちのサーバー | llama-server | +|------|-------------|--------------| +| API設計 | 翻訳専用 | OpenAI互換 | +| 同時リクエスト | 1つずつ処理 | スロット+連続バッチ | +| SSEフォーマット | トークンのみ | OpenAI互換JSON | +| KVキャッシュ | 毎回クリア | prefixを再利用 | +| 構造化出力 | なし | JSON Schema/文法制約 | +| コード量 | 約200行 | 数千行 | + +私たちのコードがシンプルなのは、「デスクトップアプリで1人が使う」という前提があるからです。複数人に提供するサーバーや、既存のエコシステムと連携するサーバーを作るなら、`llama-server`の設計が参考になります。 + +逆に言えば、200行のコードでも翻訳アプリとしては十分に動きます。「必要な分だけ作る」ことの価値も、このコードリーディングから感じてもらえたら嬉しいです。 + +## 次の章へ + 
+次の章では、ここまで作ったアプリを自分のライブラリに差し替えてカスタマイズするためのポイントをまとめます。 + +**Next:** [自分だけのアプリにカスタマイズする](../ch08-customization) diff --git a/docs-src/pages/ja/llm-app/ch08-customization.md b/docs-src/pages/ja/llm-app/ch08-customization.md new file mode 100644 index 0000000..fffea81 --- /dev/null +++ b/docs-src/pages/ja/llm-app/ch08-customization.md @@ -0,0 +1,120 @@ +--- +title: "8. 自分だけのアプリにカスタマイズする" +order: 8 + +--- + +7章までで翻訳デスクトップアプリが完成し、プロダクション品質のコードとの違いも学びました。この章では、ここまで作ったアプリを**自分だけのアプリに作り変える**ためのポイントをまとめます。 + +翻訳アプリはあくまで題材です。llama.cppを自分のライブラリに差し替えれば、同じ構成でどんなアプリでも作れます。 + +## 8.1 ビルド設定を差し替える + +まず`CMakeLists.txt`で、llama.cpp関連の`FetchContent`を自分のライブラリに置き換えます。 + +```cmake +# 削除: llama.cpp と cpp-llamalib の FetchContent + +# 追加: 自分のライブラリ +FetchContent_Declare(my_lib + GIT_REPOSITORY https://github.com/yourname/my-lib + GIT_TAG main +) +FetchContent_MakeAvailable(my_lib) + +target_link_libraries(my-app PRIVATE + httplib::httplib + nlohmann_json::nlohmann_json + my_lib # cpp-llamalib の代わりに自分のライブラリ + # ... 
+) +``` + +ライブラリがCMakeに対応していない場合は、ヘッダーファイルとソースファイルを直接`src/`に置いて`add_executable`に追加すればOKです。cpp-httplibやnlohmann/json、webviewはそのまま残します。 + +## 8.2 APIを自分のタスクに合わせる + +翻訳APIのエンドポイントとパラメータを、自分のタスクに合わせて変更します。 + +| 翻訳アプリ | 自分のアプリ(例: 画像処理) | +|---|---| +| `POST /translate` | `POST /process` | +| `{"text": "...", "target_lang": "ja"}` | `{"image": "base64...", "filter": "blur"}` | +| `POST /translate/stream` | `POST /process/stream` | +| `GET /models` | `GET /filters`や`GET /presets` | + +個々のハンドラの中身も書き換えます。例えば`llm.chat()`を呼んでいた箇所を、自分のライブラリのAPIに差し替えるだけです。 + +```cpp +// Before: LLM翻訳 +auto translation = llm.chat(prompt); +res.set_content(json{{"translation", translation}}.dump(), "application/json"); + +// After: 例えば画像処理ライブラリの場合 +auto result = my_lib::process(input_image, options); +res.set_content(json{{"result", result}}.dump(), "application/json"); +``` + +SSEストリーミングも同じです。コールバックで進捗を返す関数があれば、3章と同じパターンで逐次レスポンスを返せます。LLMに限らず、処理に時間がかかるタスクならどれでも使えます。画像処理の進捗、データ変換のステップ、長時間の計算結果など、用途は様々です。 + +## 8.3 設計上の注意点 + +### 初期化コストが高いライブラリ + +本書ではLLMモデルを`main()`の先頭でロードし、変数に保持しています。これは意図的な設計です。リクエストのたびにモデルをロードすると数秒かかるので、起動時に1回だけロードして使い回しています。大きなデータファイルの読み込みやGPUリソースの確保など、初期化が重いライブラリでも同じアプローチが使えます。 + +### スレッド安全性 + +cpp-httplibはスレッドプールでリクエストを並行処理します。4章ではモデル切り替え時に`llm`オブジェクトが上書きされる問題を`std::mutex`で保護しました。自分のライブラリを組み込む場合も同じパターンが使えます。ライブラリがスレッドセーフでない場合や、オブジェクトの差し替えが発生する場合は`std::mutex`で保護してください。 + +## 8.4 UIをカスタマイズする + +`public/`の3ファイルを編集します。 + +- **`index.html`** — 入力フォームの構成を変えます。`