From 5c8af7b8b2440a936d8049cd93749157c6693710 Mon Sep 17 00:00:00 2001
From: Adam Sawicki
Date: Mon, 10 Dec 2018 13:34:54 +0100
Subject: [PATCH] Rebuilt binary and Doxygen documentation.

---
 bin/VmaReplay_Release_vs2017.exe        | Bin 162816 -> 208384 bytes
 bin/VulkanSample_Release_vs2017.exe     | Bin 215552 -> 273408 bytes
 docs/html/vk__mem__alloc_8h_source.html |   2 +-
 src/Tests.cpp                           |   2 +-
 4 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/bin/VmaReplay_Release_vs2017.exe b/bin/VmaReplay_Release_vs2017.exe
index 60cf6460d2c0e2fe782fc9e21dafa4e174253f25..d68dd24b7749953ce956c48156edd3d2deaf9673 100644
GIT binary patch
literal 208384
[base85-encoded binary patch data for bin/VmaReplay_Release_vs2017.exe omitted]
z=%hg9Zs2R-I;fEI73+l?L&aBulj|ErDuYkXjAP%Y_bwPxQx=d+S4gJhR>k>xyfX9` zpt0um1~TKn){9x+F6)O^oSP#olS7q`o%iC$3@KV45DZ7h5ly)-dBAAoxqM~P6j;a& zKvo5xmQ=wfbU$z#bdA|m*!_UWT@}qji8cx}YA7k-Lar4vT>km za`%>!tgZ>1g3!BT;QbAM;*)_*-W~6$s&8^r5-kD+sr*rvVA`R z>3__<=jGYooXK0|LtW88cS{K+uPqrW9xEl#$7U_@4Hbv*F@9ep@5;P$hL`alWIG1u zY>|wGqyc|m9`Ey(_yY6x1@t%BL+B)d^YF||W`zdMN5t8^_emz#vL*UmIzjd(@gmW# zduXq!((RVA|KR5Y4$ZicTpoX1?xsXceV;I+Yklb+COF->S@A#V5iK`E7g)FV0NA{D z_0hJnXZ(l2_huJsSM)F|Z_oYvR|v9eQxi&|IN~Wr|7?NopzCapQZo4qeD#i7j(!o`tMTxw?hAY zUc(&Ke+@m`_4~JU><0ZlhL0>vd#sIBV~}By?oga5x6)$A)P#)1OJXIgSygxcfsLV$ zRKKIxGkhpIj#G$pYn$q_-aQgxgLu=y(0PY~O%HLmh)+x`dQ<7*+;?g`ncir=_kLVH zislxq&^)qvIraH*GfzRUYV|Q!VZO4(@9bZXwrW1_;E*&$RIPCRAL&FCiPXnd!#0_ujcl}SwW zuClu9Dc!GgoWc0AkhHPdc`t-epGrbA`)j?6o^D~aa|WMjv3dCE6OnV)SMPX5@$JRm zEAAwzemGMHcfA>kk(w@nmbu?|;-R4#KOo7_jDOL8|4#q?ivD{sRUl1@Cz=1yfC+K_ z24Z*k$?;h8ePRc=qfXv;e(xg@Rr(=pfrQl%F9E=>!LOB*{#!pr&v5)D$NINvzu*Y$ z*e7E5*rhLmHUW}*?9$s(lYA#7$q`91F)hg#(sKDiN|F);*Rf|`NX_Nh9jUo|h9ssq z$c8)QGvi}rw*b<6M_h6&5sbZeG{}2_d9M%{W!?i4{`Qv)3gEVj|0sUm_saVkDOB$r z6++77f75*Q$-B?I4+=~c@0x$WiT~|A34f>f?=tvy%KJ|9UM26_%)9XXSf{+l44p^2 zb)LH={D|v)@)vaYpwRFCou*}rz}%M1Pv?J|DSw^Bwc0pKpQfjNtHz__1H@B4{l3uk zUg3H#bG?_i-V0ps%Ju&CkCQm#uJ;#tw^gMb(R_TX&4Bb8JD$q50hWnmKa&5P%Nfq^ z_~l0&-_V=A^hPMwV*>-3J5g`p&}G1_vv5<6%=QuF{%Z28 zZHO=Z*N;(cuov!N;=OA``V#3Uq^19I%%-nCyY#=MMY;3;^jB?q&nf9`|9s|0iT0`6 z%KoHZx68NjQ-``O+-u{c9E3GU8*rr`5dUq-^g6z5kIV}07?ily)bzStxZukLrvBe# z2CeZ|iND#w7rUcV{CE8rddxerWWljA2i}J5*mp$0t^=&fRsEd^7P$*B)LVFXB2sa$ zA1M8WQ~X=<1(U9S2k(x3JJE}dJp$=Mihd}=0zrI~8kGS2nqSW({hI!tCjGiky8ODl z&B=Jp)B2=e--HQc0vGXa(XA-9 znz=JAuYyuxoDh{K3Cl78_JCSu1WWiaxYaW9ELmU_zG%ULC4RCMmFsv;JfD;7PeG8# ztdq=W;;Yw?SzY|0rNo>Lwumb>9oY#|6H;o!9&cUAcc>Lt6LqjYOXZw~Z^%w6g*>W$% z!`X-U3r;b#x*n5X;h*;q$5Aa;k@s8jPF^z)1-f4uJX4n6z|Qq}{C2YRezq@gAp0Lk zEsta@?`b*O{90W3wf&d!J5T3VoXGD2SAO0R^PAzy??QKeC(l&ApU~tVkAEL+*Oc#7 zS+}s|dy02mJ`?o+|2;DYnoDJFtJLIi#$90D!zIT5x^-7fH2(S4onqZQ>%L?0Ja65H zt$Uw!@3QU|>*m;emRbL&tvkiKS=Mz*kUQSwf2&Q`VBKF>_j}eoY~4ZYj<@N`t?S_1 zVExxvx5m1gtb40pJ;eU2N!|Xx(krb>g3K;wKom zH?04B>wo1O6aJueope7;^S@@{o%n;+e~E>gW!#+&DM45v2&NFiaCMm@66Dy6JFPB{XV6`eb)6`muq`c;GJ|kn@qYY zC%qGK26x~IlWy`l71R~{X7igdXWn{$d&Ib1 z*4=2`2=N;>bX_u|VlIwjK-#2?>RjNfnF3hUNcx52twth>{?cU$*f>-Jgq zh;>g|w`8ZmUvAw6)~&N{t97?q_ipR%wQj$48v2Lq%w^+Btx=wjF zZERc|ZfT6R)OB=hAP|2|3(V@+Y69@DYKc}SYJPR3C0x@OZ9FCYqOSGpTf(mNHoyWV z@(FHiyuQVqj|m`tX}G1Owq;{Sc&nkq&=u;4xUv`gmY5{0WN}LP(vGm3g5lyVDVjp! zYa2Tg*$cka?HiLJ_?K?zinJy4m+r5JBL>36 zuV~z8{UMgS{4;Lsy2QV_J>1gNaeX^=*X(c7WOjvhP5C>t-W7FvmwQX#RMkMErjay8WCR@$oj;?mwYLr^1yQOaFS5OIE8ydrHQ94+ANBiQA z)|N1>raIcOaTV0*f-Kn*ZD~)kMP0po?ef6NwaqQfa4*`@ytXkR3RG=lQ&=}%QeItK zy|!k_qSaTdtqy&*`g5!B)C3n-*DtAATf3z8%9Wqf*i+$xSFEUAvZ7wcB-7Vcue>6- z!h*Z-23G{@gVmwnHA~1MbS1chSFTufDqPKy`X!6&*Dk(t<;vA{0+WXSiqMsdszYmQ zSJyAWQsBxrxMJ<1)k~KyS-Cb?TYbfnwO6dHUa>kfRt*u_O zb`_-9^h>K(hw9e?OEMk52m+TI2t#3N`%B6h>0|B+lNROpB~DUHw-ES{lb5mPLYbw!jIHR;Lot|BXu6&6aB}3x}M{%zbmy6KyWe)H*fVv zI$N6B*0;4Zn~wk4>Xj>kE3UZ2zqEsnw^?+ZR2kCQ7)Izsk!Akoj`joX#k_&dUF3=2qC5z?2po! 
zBK|fwGa_ayUPaD@sy|2H53OE7H(tvC+1}h3ZeFX^Osn)#zk&_Lvrz`M&27md{-+1lmJj^PAT#di2rGa~+<)8GSeT9~u49j+-4} zQ%bEjY5g}fB8k_TIj!$%Z;~m8LAiOINxx*}$}3mWTN~S?)}kH$&5XA#{`Gh-ahErV zPxNfjcGl=RU(etzT|XHQr`8*6{@foYZ-zscQlE(Jb?E=R&ZQR9XPI*caTCR4$7=4Xu?+HRCac6HZ= z8yef^%po#a?-4((ttCu@Z#RwK-_qW=4lZgG^v@-0>d`~ibzQ$U+|t<*HY(258yi=) zbdu7rUm4VX(-Hj1I<)n&=>}8l!FK83O;k*hwYn`@xD_3iNPnkQ9BtF3GHeZ{)OWDm zX^fVDv}C6BA#msTS8a_%TQ=%WKBMat^bWr)9jF2unnkM9%bNYyhZ{Rv(b@f9>}u(1 z(V)Y^MbY#kh*B4v9gVJUiLNz06DB5w&RokqmwY|V@P*O9Dv2Xh7TD7GU8DQ`DcnxN zozflCVAJp|?%22y0j>K^N);yHY}KPoX-Fe(Q@WC_??n4cn=Q#G8>O5^{4|>>aZ59O z_{J__d?H=4n~emjd7FS=+!aCJ#$eI0;dBX{hFT*Ho)%7fU);E1!@9<%8|VkjcW-dT z)V4$-%&H|q!nOEvMY|KOYgsSC6|wD}Ga5Z%MF*0b?upb&#@hy&;H#ctr7$(Erc6^3 zJt3W?7+-F16cdS;=hS(s&LANx4V2%1McwK|nB?GupPHU2p9@mpkP&I5tYZ3WX1)#{ zJ)x}a*rco2QFdJU=yi#S-c!sOsL1D_E~i}ZBaL1y%-#`UOhF19-w z$EoGh{BiSK*N~LRJ)xI;^?W{MM&6`NJ*E}w=C6`)Mze_cQ(+`tp2Zs)BN2avzZ^M) zs?rqwB-3R&$S3u%dLc}?F{Qi){vv-lt(}(E;y@T|4!*hm@~-waTI9yz#taLi)#{Ddl>CjTE}L4c<|GYB;7M6n2e?zUR5$-erfXOO^2qg^oY1C}%Z%U-fl2yC$ajWOaiuLH z|FyuUQJ&IHwaJlCfs@B5M`C&T{EV*a65vWlCgR0FF?_thX>+JH;?Nz9cvlkcJjt;l z4TzTLz=&&Xrz^#DdJ13)9Fx&SVl*W?XJ|@^*EZ8s2zPvv9+{-qwzZFdCl#)GlgPQ@ z@=JxQZQL>foU44NWbCFd4K5|))4-)Y=s$WQ?KCr&d>NsgCDNrXR5%FECf{nK zJJzDu!og%sy8kJ$x+E?YooSOyu~^7!!mfigcf-~>{?!o~LlUL1 zNMzR7#vF-xbt*ll=Ieq%Vq?-ZB^mR-iMAV3ZkG|y0 zmasTB>o7ATMvbMDrA^oLlxG+4vkJ%DUi(C(A85h9NPb z7g`ejOZZ5Wgddsz!*RVVe^Rpj{_Ld6R^*H~AN?&@+h3 z=rXL>u9LkVTiI)}sfE>R0gx8>zTf_GvqiksVEc02(xpn2pUJ^!bC}&QZPha zG{-S(;t99F9%Gbc_|}UVurR#UpFm;?F?%*<3!^|Uyt;8iSBvnyLR*R2BBp-Au#28+ z47QHxe>i5iXsMXvUm^xxkqK?GY$aQUu7`N45gImjGDl68mqkj=A8fV`RW)53J2hF= zVmr|deWCmn@+3~rXOlz-fy-81xkB6G{l?ysSZwo0I=aGQ4i?+GZmSqv+aj&-v}H?6 zQ&+NDq-vy%%7`zveGChN*;yzv#u4_g*wrCc9xh3?^|eCZc|3Ez5RsJwK_*YVte7Mw zbfU7cph5*nos0AmnAx+XP&F-4ZLuDy;Gwn~TIkx^=IL)-he2x=YeuqI)3l)rGbSb` zg8+tiCJ-mAfy1#f5$ROy8F?pNp;J2AupJ_Z`9T&4G=z(w)T)x2hz!HIwMnV-3C%@^ zISDe^og~$zSG4^LwIfZ!_^B#5%VO6CmRMO!q4v7;u#`lb;6bu}mdv?+4ozI?WJFrD2#|J% zg<94=rSyr`VG>J&(-k{A(jrYo5^licCw3I<{cNw9r_a8qV%9~Unsw2F*>gWN zYu-h(r#r-?E@t}|24zW1h+5p*!LAs=H2m_#97hyh;;(R!Ip!Gu*$D;87?rM`Fyr`} zyUf--E7FqOb_odtRyB*lO^EDuSOSf4QYI&m>KH7gwHad|+mGm!Ehy@^qOfbT82ZEr z76{X=&SL5JEX$Z&D>iFcuHj_SRx$4C(L~lkWgf&$qH(tGxRyk zKHX}}+3{a5$|e?WlFUvcECC_S?F^*J06)Td+pNW!`w2s)vmTT*0%}QXhCI<4?E1p6 z@iP9(mL!2&ss~9Y1j9ntENx<`gQL>C&J&%m-04`lUr1ZgUF$-<;%IaeH@quYCB&lv zQzCP<)*2DpRJ;&Hz7G3p)#5gSFSu;DkR~(a?)F$(3t8^l5Iwdt>s~MOvaqNko$H zU)QY$GLvZ_LLH6Tpsk~1mxyIm*shyvyt2_rqxCOx5>`|9EFUBl0a0{}jFg-bs*2dY zWH*6rN2^@JISsjGV`p@$E}D(Uo@Uln+5OaR=q;bocA+VKNaV1sC`ZaU_2#HTj{j68 z#Ka`)H!+;(h-7$jkdlaz$999y(w*2bv3kYw6<2)C&z{J~O62BPyBwX99Wwe$(8hSjEQ0Q!oMB#l(@M>A)B+jg# zX)sltoJs0{)!No+Mob9+hp~+saAo4PEmVgj(;JGIGAIm;?1wQscWGru(Ab(ToYLBXc(66x^N2%`e6zq zi-!i0G=__2eM&=!9As-f!f+PjommO6W$OsjK2qbY#oq;x8lPNA5QOp{JFoi>o=)9G zQ`5Ot5-dP!yv@*BI@1uEj1qr4!7wY)-c#`;w~7x>N4iBqqtd48R6N7#1*gf^@FmsK zYbZ#?o3QLTmc1~l8#@LAhHig9h2M6{IDd`x+a`7~M0i7Ifv zq*jM*mBZuHc4w#IxN)4#ds;rJ^xF325KI#sHr^5-Ei^Sg%~F@BopiWUENKbIRD2^? z&Jxh+c+;f6v1@@6`D!_8`vjcQ^^GeGIklWA+c&VhI9b|#?;5kTlvI^}R&qZt^)pO- z%6?u8qA5$mSE!=w9tZi@(3nzR% zn|v+*XC*d;pMg#XK4sImNwdV+5;p5JQU=lC|>PM+mRd_AlJPm8#V=?IupLT(YSk>jXUrK;|@lR+uvo}&VY3{+xRWU^=~!qz-wurc&Bw678(EGQseHv!p2*=wma@J6Fy+ws^vDF<=3F4v){Vg z+6}$`U%L*!st%d_)eJ-D+0rp!(^cGT%WK`iTa3Tc@^`y+2e+H>T@K%O82@hT`ffFT zf46mSGppGg=b+{DuKO+ie>ZN0E#IF1F#am*ud{B6El=GqO?-n5 z@3d~e4c~74yR17s`x~VhwEy~N<*#|Na9Z(~Tfg*=wVgc`;_dzO&vw82@Se*){J~p? 
zYuD9rfX{Y*or{z@$nVA)RTd4F-_R}}VXY;OpLx8_3xT{Qev-fB_wC~uO7fZYdWMoO zzt`aUapjTlbhvL@xPxba`vrIfUmVwuE02RuN__X9OuqNxia(C)$Cbx{9NhuVX`6IUJwpQPXRha{Y&+l^li1d_*r z6L;r}NjNEQ6>tr>@|+Itb-?jCo_H!hp!UuJi@Pg@u%l2@acT68wQ`oQEI;T;y+V)rQKJ!;GA|P^riFV&=33;pBrwctA9wkyIpYZeCyKS*5FSc z7bN{%CzIusc5p9#KdwA(eDy9kNzpJ2oTUHuyGeW^uln$B$CbyyCwzVTog|#pn-91K zTzMQgDfitjI7!im-;XPg11I_VU2uZ03V*o(@HlYdy6Z#eh|^E~xbnF9?CzHWx5owN z;FEjPNf`GZ=afPI(-6n?^|bvlXM5)JQa@Mlt<(L^Qmy@`ARwb8K=s% z;ourPXND8Lo<#ojS!x82j5EXHBj5O0!0jgAoo4|z09?QYXJ55bf87{XR?OMp@>273 zhN{_T+NBd$K=^omyNTPK7AFVn4gmgB;(nSIH-@;BM+*D@;PLs*c|z%^1Gi;VNtSii zO^?@|;*`MucLDC=Z}DfU+$Yhqo-n)~E?0r~1LHguu6GF&9L~cr!b9?2#ZU0MA9uck zq}fh9XD_)yQr-prZdd%hu6F?x9M0p;SMa(YcfNO%<|Og`X)tbhN$q`{W!QKEC78+sC#UDZV>%0aSp5-pTN7Hk$5a=`(1dYuAIk>PwLP82+xMgSNP~W zD_7O5TKk0$?-`hO-{QZ|n|QeV!@Z5%m7*43a_!=d=9X*Co*-i%PAoC6W#e1Nwd>m2 zuT5@AS=q9og_AVaZi>vUxac!;I-A$&EaZC-=bMo8dB0+DeMpX-t>S%-e3V_*+{UY_ zB5Zp@h7H@uZUy8;r_nnM+{X5@iV8JASQSd53EQIln={n?6w>&&HcL9So%ubA)||Fx z_79fnMRmqD$yUv&sAyV$JvJ!(GDn@Wcx8Rrn#I+UtRXE-0$U05<65}YysDk4aMwuK zu9UE##&>sGm;~-l3tLRsy{Tau-=37PI*o5{YM6%WO9`vja0gSvG+aMnz98iY3s2BN z7)P5?vW&lsogqe-si$e#up4i@v3VV=Qg{E>@+ur{DhpQYJa(sqU8D23*Bw@G;Pw!v z`7UYe4O|~#t#ip|i=m}_bf$Wpe2l+ri_kJYFH^lJVWtj&Mz#`Gk_?+CVY&I}YRS5u zFJW@-SIQhD=7tGu~64b!qSRQAzaRR^nDf4sHl(`cJO9m$NmChceCN-a< z&Y54V@;zf!RwsFN(8ke4Zw0T&6X|2(zK$!;=TK(F9~PySXH>D7AFaWaa8FK#%9)!{ zQ98RoP32OHsm){6)cHkf>YXKn69&fjkL~l-)sROOX-oY%YHVwcDx5z~6&}vf;dO*h zqcU=%WA)ypfApgtKWobA=iTuee&qCXKbz-Tc4-K z!qc&H#|##9<|p_%P^Q%NGtifg7`&-|$_ek?VV*3H%34G@CuFG!E5~*gHTWt#V{+7( z`g}EJZr)&SUry(=V2%pH+c8J;RCe=dwFd!r;RV<+Y#wgECqG-|FB&zF-Iv*!L4l`T zo~tfz&R3;xXQ?sI6sYX^dCG^Nyqez+b$X-|af&zvoH`|*vbf+iZNNd>fTnFhj#>b1 z#cx9!ZJ=(ZQgTyF!E;i68am;Ma0N_i9R0@y>nW7F%2$PRNB50t$f=vQG)FCsQf9$> zbhH|r#k`EeK$|sQ`l!pFD06>iLq-K<=5D>^`wK`9os=!RK2O!nQR-%XyKQ>6-;v~#YmMUzfAI#5D{hw6od42;HKGhGufKzF3o=I71(#mr>Co~jSc%+}r_o&kM3eSxWZc)LTr~FFYRt_ z?qE)TR%d2KMxE}v@VWkpu@hsDh`YNu37fsAj6XzGIf|a?QQdgbr8EPl# zB+h5k_;JNwfh$-R<5E<$%KDpdW$wKRmsX;_Z2kWWSH{a9;L>W;1GrBwH~Ibr7crtO?O;vvTJ*XZf#E>N$P`*pfv)ruxYhcm=!y zUZug8mW@(ln?H%%n61WoKB4kE84E>TQK+Kx3)K0oS!&w+vC4O6!63b>Kc|m+haRDM zf#vTe-iuld-Rp4W*^6<6~D)urM%6B${Wt_)NN^gp6Vm+_xujN zNO`iU6XcHg|HSVZ2?IsC-<_s(QK7mh`nPI!^Df%$*VRPNPL-g(&d7Wpw9loYgob|V zQ=S)br7q*R67n{#v=0@|Q1Oih_Hnc`iMs_pN!(~gOsVfzHW~jPHXFF2$+>DWV?jxM z5%ug-xreiKJ;(Q(yndxede-0Ayec%P8$rVJRb26x))lJDqf;|J(>x{Plk?BbnCh93 zf#e;a-}Q+Mm449&u6uk2-Up5HOvaTml;cXsd|aWk%En*tB?EsKt~_tx3ViHlQ;x6P zV#;yh4ok;ODMu0I@KFx0%DpeEQ}Vu>yjPM=XsrYHW!-{}>wj;z>6R6$i(7L%7tJrp znC2Oik>G!)v>ro$r{I8oc`p32NjIg(_%FuQe3*{h|q^Ihu zO8qmx_)D}28AIz&c*OrFejiF0sM7r@X{6>zqtbB+=hDJTKU1h?w2r1POjX66@rn_4 zAg^CWo9u=xTb=;rxxI!D9NXn>YTumHG-lF}MjI+_$3Qa;WK zSJTeVRp%eg&6x7c=!^;b^C;iA4F7kO`dfZ?J2bldr3(s?Js+v*^K&waJs*084;JXN?lUrqPO z>9^bTPwzGGcRXOye;@Zubgv=poL?BYn{nkSn!z5=)=|no{~YB#ky_3Q;0u3k;IG4# z$JFx&)N=;)d~(=&&L!PFww`kyH0eK!E6?|>zx3ilbph?=y!z~nsq?ck#?oezoHFue z;K5{jx&1c=&-1wQ!9?e`vLtdcO&Nxa*il19rnWL!Ef$uTXgbaB3#JOBHc(>&kK98Om6qTGik z8y@YVjKYI&TlZdD_MhU)Skq_yq8B(%+tVhk@3$r-h%0>Ch$~S4f-7mqK5xdM)wtsC z$IZr#zhKgr&dXyfPO&l9+k!zi>U z_7&@|J!;96Y*X_6Vg+$qQ0rvpExXm-g zZ}ANhU;1_u#_jiP$x&OP1*(&IZB96&!Qt7izk-)v0Iw`jnGjV6q6c&Wq2=c@7OBIBu(@q~?^>ofR|=BS4Eum|uv z7`OG9;-@vG!FVR-sfp3iY9jb2qPt8a--&Z440xts3qoE@Vcec_-#LTD1LOLNI(>Dz z9N^gF@u=(!Pa>aGzsbiPX4=qb_(3}oe$2>HGmbJxr@k99J?dV5+m2b9Q~X)b%xo|X zPTLu<2ViFy9My?EjETuyaBCX_wwTMZJ?e3bZ_wq}b{fc=mX)EhqSz6*R{9lwS+AMA z-F`jKg1?FSgNCY69(5(Z3Jag=2XPvV%N{bI?IDYD)S~^P)!ZnwViV~H*OXk3iduYb zzo#Tml{AlG9#M$QE>I=m@q=Rre4Qp7KXbei=6)qBZ8%rSkG?Z|KrBYn#%8IpZ<9x} 
zS9SV4>g)UlBoE!*$7uiW2zy<^WFD~Jr^H`g1TWYhso|a(t^G>~E0Hi{iNw48DQSq8 zd=hx^yYnucmaC>g&tz;4W5Rl7rssEq*b*JtH8DrgYQ1Tp!9$AA=&##KTrBzO5xPEYo43?*yT?PW*fGhN`7$!f#IZ}QH zhskeGT7C_#{MJk|`R&4${Ju6!euC55(*|h~NqgEeS=iGG)R_I)nrJ_J!0{5lLGqLO zb^CRnjb^I6X3?!Yrq5v0>YwUSZ}4;ajN9)So2|yK8`D|XP*9g&flPIcb#;#kmz28a zJVUpqXp}07W~-v+ELAi=Qx!2T82Q3Jh#ANhe!JL9B6PX^rR);UqYdSSbM*X{Y)m?l z(HD5sulXIc>D>PGbW(phKhpWw$g@^9^Soj5bNh`9&O-(xhmkSGE0N3NMF#UjW{yW@ ziVQX~E??w@p65&xyI(X9{*8uzxvJq49`$$p8f+eJza6s%rcK6%70p%S7z4z9sb+c9 zM1J44Y21E2C#|=0Qn7iGPSWfF{w03>l17iIqqM(Zwnx2a@kqSe?@pu3LD?KTxyDhS zqdH0d6u1V9ex^$_?Q#=ifFeD#U>W$F{2$!f~rK~u43xz{&5GPi|}>0?u=^^~EjlWxNPA~k_D z6Ylg577pasOiUF}g{qm3wTiJ?leAI9Fm7TEdBE^rc0-Q>N^AsaD zZY>)04ixt1_l@q%Z5UOTgV6GvH(H&yXzJj(1C#no`}BH4-FOLKn6DPH-tigMiRPl4 z&4wQ4xmfmzlR57?8R3y#Q*+hSXL8gztQmM&PmnbszA;iq`Ry0_q@KVd{!yFge@@uZ zZ0-LfVS~0D67Tk>q@mv2c#S+PL>?9(5A#)VxM;x0h&2F|3`v zjVvbJo-cXSjBS+PrgQsCOLJ5y?ajT`aOBYp^@W>FyZjNZZkMu-c_-_b)0yMUFHt_$ zN3n0Ajau6xeDkp0oPR5tA9XivMuD0^efZ)3BsDFzm-S1B4IVV@0GoLYv_T{C$ zi=JC_L4mpec`_Y2b1rm=-idr2WSy%|Y&m=2gOq(YI)}IvQM+QdaQr!6H~hH*SDqVj zsae)WuxB>sscFdh$@5tgT$!i!znvE69*fhlfn@D6crL`1rv_KvN=0_k_Rd9?i~gIp zQq!k#HGu17o9>H$YtnraS7?Geg@z zeKy|#o1acN!k^QGoTCo9{LGPx+CppYj4#-aJl~v7Y5;EJwdcSVyJYoi=?1>7`zra!&uTN&iJ$dH#Sa zZ)5hDa6P`SL>6!#^?6#B;bDibcwYZr- zGv(WOze!&z<%*j9D5b~>D~AV!XD1J)@=AF25$S}2vVS*p)Z@yV(PQ(FZP*=FIyRVG zwVU{-_zk{ic;WVYO0sCD=c%GPU&XXZ1!~gU=c;p#PEq5YnX0_|{VG2?O=UGtR}Fj7 zh4_{6t;IC*a{E0?MyVyprILHt%OX2L;G5RP7qep4~lwV*2 zvG^oD>8EwenB_{Bt~>R^m#_ZPl)vugDPu&o#MMtHTPHOh?WMo-!q=IcTcez-H=;1P4uU?eX8Ur>$K0X z&b@yec*cWclIlF*QQzga@B_max8GBcCG&&9oVsk=c5~E}XRvAQ_hKs?gKk@(8Xoni z4g9v-^lrb0^}BQLDIL_ZTX;H&bv5`>L|e;?j;3DnRMit6bsfJxo8Ij&otv-xt11ME;?tj?~>sxa;PUU-N}Oa1RdrX4D|5A8ZuJ=K zoKJhy`TVN*Mn~F%+h3&RKoN4lha6zO5l;3CY17+no9?uIq8nFo{;Bof{#!$bI%4G9 zj9cCF5U!tNHYUJoSU48-BX<2T4C@;V#TH=|i}Z{(4-Y&&+4AL)}Sx6giIk+C|)- z`PHF;Oas>K*Xz8jM~}ZR+5eq!aX0x1&I-$ug}5(*Q=XDs6IY#Q^8Z=B@s}>jR~OCq zu~#H#*u5eLnNLsmDfOO(&z@lLRp3fqU$OolTfb+*Xf*+Q+yv}#6WG{0VSa|1FhKj9 zRiJGd<+RT-+Goj~+CHOoO=+LI?Y#X+iNSkesmbR#T*;^O67-ORW(&l3&&Ym-d+?8(MoRQ`Y4JOA*gsxyzD3<(B=8j(mMhy$WYmB8>rN%>I* zgcuNTgaB#9&VXn5d+xb6ckbNxoR5${hjS#K zKE-dCGx?kIWM-doQ^q!({)+K-{u`id-|CNC=Pq=eY|-_1TxF+U3FYI}%Z0N>sE^iN zt_riJX4c>3@W}m8iu^0-7&%Tm|ICSY{#&87|5x+OFzZUioqxCWjvo2sZ*mvgu`gS4pob_IOsPV@7(9cSeLs%a1$rI!pJjciP3nPuW4G-IR`dYjE2}YT!n7)XaWoG)Q zn3=}N>$}dOwqj=9Odt1OW*Q@}Ycj_MGxKKpc$d;lW90RfuwBf|o9Sb_Mw$zKzb)47 z{>v$L`{zux&*S;f>YtN`tI6E&7IAMek$crE`EJbps&&7s$onyuo$oOy*Pm`^^{ewe zr}zP}K-28?C!l=trrXO|mk;N>*3T8g`sr5sooDXBdS?-1SW2E8yT4^F;~$j9dVas# zx|8aErZs_Q^M!|p(|<>+*4YkKF^6lRE-!PL{z^J^PCD66W?ub$$2URV4}9%nbs67v zhSN^+Ox@*ns14YDy^YLexd*tIXL4+Z^z!a<=6X42SkF4spEY3wzf+CmIR)f9--PK` zrpERbc8}@G4_VKWU*qq1l=~0P3;la(Dog%Wau0R&^vvgZSyv5HS1rtV;=nfx?giwT z4$ofXX@LE{vb^6sM77`MP+!5e59Ju-$g|fRYx$e_c8ZVVDeKMUbMkQA?p;Y=S;cRh zuX8V?f9sUx;n-`soWaXx9_wD$zQ^rOKe4pxLmz0Z;5#2SnP-twA9FdYaJVX@9SW!C zr!MQgwEL2-k)aVjo?pwpP%rmdhpNHMddYGNwndjWc-hP&{aMa|VcGotG*PLo{QZ&T zpZIv+xr*xGpWoQ3dOHMq<60KalV4^$zt7%tt=?3@hLnGtc!4 z*a!W0E|lIz59vO{wv$$9zpu)3WS)_YTWLK*?oYd<+M%M@i|KOZw;;ZsTfcwvT~x}H zEq;mL)*cQx)Vrh;{ut{uma~RssbTBckKAmP8@AqmbJ=kVJfr4W_^465Z^b#Bm7l}2 z*zfXpX1^LL`z^l*%^b$><3l~tK3023zmww=9mey`Tvf<3ze4&T$Euek%KGhMlqjz6 zb#{A{6Kga0BWry4+Xk)6!!NO;@{#M4umEeRwab#_^d}wTYNjjI+f1hsrt8r?Xj`&Q zwv@tuzPiYI`A7_@e9Z0i81>2f$iV*fjUs07KK}Od%r^a5c^SHu=bnb|#?eF6UVLiA zK)RFo`tY@l9Pqt9hIpT?Jm)de4KEm?>h9$^hLLVEz7*+B80l8v>m^;g;S1qw!`EZ@ z+VOScJ81Yi@pa+5(8phB*KwTe8=q3mm&qrelbS28u$O0Qj@KNnc}AD>islbBpVZu; z*`hgAZ)d4qzEN|$X11nP3I5BaP35yy=ksYE(CpBBSu>?MTIVa)vGg-x>o2#{y`h({)yuEaf8_H$ 
z&2+wR4O)I(r%$gxsF!E!bdxosny={f_vqyVdihCBr}lT~nJ<{C|jN0iSvy_~Cg zI?whW(d^XRp}9q~PIIp2b($kI&*<`dHGizRUo)<`LvxF!U(>1_|CKZ2usw?K5?;u}$Fet#MEG|`HvC1W1rjZ96U8t^b!WkyzR+6L0u zrK#EdWtp3P1}|em2co7^eHkw!{bmlKTE>(s&0)rWf10(o$voR@tLmdm{K0_QeZKTo zlCr9X`ru|yn9=0J(Rvv?{$4w+y+c+S-ixw*>4qYC^#Z>?%ss%y4e61wGj@=2?qc0y zrumU+eHcodNeGw$XPL|%VQwvIrpnwO$_TR7u1No`D4*rCW{OUisv6Bw>_qW5<)rC@J)~ z26{qhvp(iuvrEm~Z#%hB?Pp14jJwU7De0c8i&4#ZtdX}FzB?4~>j6%!sZ~^~y$!on z`j1>T-t>`6*Ka63*$2DSv^Qqw&#a?SyCmxeM)5vR1ymU+^qFQgdMC^MYIOQ~W{B#L*6p0>jRSnR&#XO?Zi4scG39tMF#ez*I||t#>sub271v z=d7~^7!HKhEsIyKSiEe;bi3p)IOcmRo0_BErW&s``-VFb^}F4lSWxL%yl|2GlS(}( zDVXx4Hd^PkCfzzG#TRqDRz65pnzYgsg{%VG-W>1p zXp5JbkD}qG&EEP(FO!8e6JKciWiWlU&DJs-R)2!Ov3<>rR*L1T*4*ToHq&MA<42U& zds9nGnM_$w&g?V6KzIfZT=255Uh33P-^kgj7AiK%KYnRryi6`C)m1cpuH^RWnWw6~ zb#?(f1=9HgE!9j}&3L)qYPvX)vgi2q&hVW_s$uQ>;&djZKCT)94b~h4Yi?S(dIj?m zE$*-RC3ac$O_bfxz$P|EtXY_9yb%?U+D4fdgZ|N|>LU>*;0maZDVoHZ*T&mFKb5z! zNoFep;Q-f4BKy2{-WvI<;Po=pG7Z_Z-Ye5PtEj4}4mL#sJVHuufxC9At&Doq*!AEF zQ6sZzk@_B0Ba?Gbvl+_EtSiA_{f#r#eTx22Yi7S`MGLv5HCJHhHVU zwHuYxYGD4f^r}GP#`*G2a7W08GAQZdBcHPChZ7>y?RZ0GHf-T zZPJ2tAu_1TS+azxRRev?t-O&yotNoUqy*+DqqpH9S19C-GRQK~0<^$>~y^dP5n0q-WRYBPSy}#B$keO%XuYqoRJE@g2AnhUh$#L>po8}nHmaPsg zH7SrD0vYPQq#;tWslKs<)-2)FtZym_PAi#KVxPu?)^n7ZY9{(;QgC`OSC5%s@HWC+ zk(i#wOfqOIGSa%adT%AJ}grst#0H1u+4{ZRyW$0I)DGQiaCGiXG*QiSrv$0aP=j|-zxdT%Y2I}lv9zu z%BEc^G-OFcFEoYqT^zHfMC4SwlXZ&%>ziw9<-W=GHAuP3oBYi|eGy;FQ_^#{#UI9F z;$5|wbG4y@!@D3NlNE_l6+@QvPocPK8D}%YmixH}9~m#TTEy}DhkWd_F`ZI16F!_BlOTF9$+Y9Cq{T0*=g zTKLFPu3P9BTzZS{8!)<@I{y+sT)V>V8$LMZR$Dva&$aG_SKP*LAEY^t5uX3#6oM&l&v>Q(B;&(l?3x4MN9CvgpJn}=@{MXb0-t`h=B%;l+6wGlJWW0rL zEJYgO&#>&*92SqFN27&fe#)^!3zuSZ(H?l{D6vE6ZrFB=a=0G0!&$xbXZ&vHIf?y- z^zfxqcA8%JA=dU=2M3F`$K-lz#a zUzfP{qu3Vw!ZTPqTD?UK1SZ=xW7#Ffy$p+yM(D-%p?z>aCh5(%cF)_iGxi*5g!f}T zXfvK&;@pp6Dg44WvFsBZUzm<>7tX^*;}_nF6{9_{?H#VqXfw85V%(2m<@kkfV(ZW; zc=s93ZFDOgF1Y+%_8o1;uS*>JKfFh|_=N@U zvtMZ8cuf7C^97DNYnS7MYd>JW@cUp5zd%n$hv1Je`Fqz5ySS5=ItwL6U9=giE^+O= zbf^}QSD24gqJ`tII<#=L_WNKueqHz=?V5QctIzsj-vnSWFKLU{a>K zCQDt9$vJ4oOG_NJ<68O~d4=nXh#NzjvC$GUeF{5@Uzk(OaYvhR&=Md0d2I9>4vTxx z#pq5r=c8Gw6zzsDYTXShugg-)@SCyD5(E8*>p6D#h3{kA(PnJ3#5~8b4*bF=ur9P2 zr)@;bFm00eZyaNyYSOMCMSC%;DWyB6x!hJ$iGN>O;q1IzCyrrkq$5Vta)RSSgx$+*EWa z@mQi^59UWRpPMSZ#n#N}roOhE{)u0$Al`lz#{zA}6HBb|cPg^f0sO*wtJxp487C~U z#gAbr{KCa+X#cky7H>gMLYuL_W~^`<>wBdRHsnRwV3Ovm^Nw^Y+!_=SJO zJZNE=pMHZDc3|RX?mzXbJLxxQ=KoX7{ioW|!k;wrcLHt3(wcF$W(=;x;{E{3CO%d8 zE35!5T(^ny4lQiJO3`MVt;FBH+(J71!jo7CZN}P4%7?f8WfHycBn@v{7_C%YpvJyl4)Tg;-o_1Kg<{s*aW-dMTGprch3r~E;?%T|%r%txgR`{Lw(q}Na zb}@IJ%H68}*1$3MaSlrwSg?&4bG{RqF|iUWyWs))18Iauu)VS^SpHeN91q;uX8%1h z<6g~}Sc#SW3D!ei;qS2&TA24a_Uj$4f$%JrhgN@^rM`m6H7y1&e$c)ST2ecVKDRGtGit+d5j}D9f zf)1j);qeaoDB6r$lz7IQ9;eQI4vRzgQXh04T$G?a(H=OtlfHpA?^t#{^D}V5e+95aQ(<44ZQIR#v?(y;f!(oJr@mcAJ4nv zXdk@)D&EUPx5Kw4lHM_d2Wtay8D{*2#9_QOiTP7VBYYP-fEMOo<4}jtLML_h=o#0jA?Cgwsm6 z7bcBxF{YR^PgsGCMw{^x5?irm3dae*@J?(lTDTeWpj$;xbts=`_)AQ_$MwRzY1ALT zumQ8~ouG3%eE`2OHk1Cv_YvVsv-p04Hscy3-r*grfcOF7S3kyk?Py`;Z0ds+-i3+Z zjA4-2hEtdyzwq00xko??Yv%EuI9k|!EWKUDCj|+h`BIADM9j zW=w&^7R*^m-AFGCVb7t3zrYToGhzi+4a5~le8KZrA8CXaS2)xN-s=}ioWMA=880Al z1T$81jlnN`9jid6;Ec7jE!uql-;52An1Qz|*$>hPM}CraMhm}z?L&9M@83>8Jw=~@ zUteeIPI%}JyREw63!l>Gk(YQ0>>zn3K_4dlst)eLY zcYaGl(8-%b5#HdNS2S&ktH{Xox zhtI8wL;?-#gPWZc&={Fl#EW&aBh__*hN?(OLw$9)DbiFEE#Wn%*;SE-DI2F1IUA}P z>uY$iagDh(igG&VM#IgK=n`Hn(4|fM3#HANXyqekBv9SV3wWFLG9K&wOJ08Qd&2b_ zdGn<<5b0lOu3p?iR(Wx8Szu!z=nTsL^NOk>yw=*ZArLNdHrFqxme+UY71dM)BY`64 z)c$hjPQCC}=1$G1)7+{3J4g1pQ`7Cn!r+5(zU5ZVCH&d36;!E<|PY~&SY`Yl`KuV 
zljTWIvNGvQ)+Iy9mSk(PE!m#jnT#d(COeb+lU>P!$?jxNvNw4mnM(F0*?1TCG13;X z+*n?$Am)q}$6T?}m^)S;^TaA+zF1u>6l;mK#@b@-v7ND4Y;UYHwm;StI~ePZ^~8E( zCt|5sUrg=F-IceiU{~oAo+m2#JxS(Aa)K7}bdjQ9SNX1%U7fp9yNY*L?q*SumEv$8@3GgB9XRkzcc zHQKSVvb$!fWr- zJTqq~{UW@yzQxKxV!h$`YtA{wlK*Y*?*MUThFv@7f-8VrOO{I6CpR2d(S@4(Dr*noqdABLX5h6jpf;TS0&(llVuZlVMst7JXL|6Q zV5nYmKrkG~e|SGf@5>wCGf=JS`3>)(OYRi6=7roXh-=%kw?jvX&h{*{Sx$$L_6CFO za|p7KXRTfW>9XINEGx&fOVRAsX1$D~QPEY+28#=#*+s+_C@ns zjbBX?S$5dH#;Lmm_Ewl|tocG5De2X+=%ZVbhwERbGDY3{_YmJlN7ueQT--+cW_H_6f<7Ybr*3V=W4~4Vw zrZQt6ynbnV)cDeNVe72cDT3%}x%DY2QV{QN!8V(h8$W(mU;`tp;@&VeIbw`BD~!Dz zQEvR?9f4)Exm|QOXV14OGwwYtu+D8q8&_==*h6jaGwyp^V3*oD#UDnp0g+bYn;!{m zVq|Q@6CY6pj%icTEu``NUJGT*B6~&l3>6J?hg^|v=_ey4`z#KiAB{|p{4oR>S(H(a z6fy*4w6pYye1kIVl(9+A*mnk+u=I<#lQMeFa7v4-fbIeGMN3yPCxjidj4-yvYizp$ zV-h~D{8WF3Eo#k#To~m z5ZIehRmRbq1Xdc|K5_VmXc*aZx;RW$j%vx91cQqj-)oeWPi5t>wr;n;K8T)d9ExX5 z`&mZkn*w{JeY~+Vo}1do7+0d9kGJn&4pIIH$s!1N~wH=z)aTmM$<-t9gLaZWmhw*!}zd#-_TT-ai6Sw zsBPpRN%kBmo<>>P(NK1K>=@&i*9G=|Y<8G-n2LNal!bKMZ8U8FpN{dy>h;3bA3L@d zjFIaF_E)DtE#EvySZ<@iz#<|otj_9d@zbkZhT;d=F9}h`4__77tMQ4()a|HId|Kq2 z$gPR?TNcrfk(E!h>I#9iO*mzI^c8{qkPvU2iD#?C?h$*p35I-Szf<|xshrf_*(NYK zu~$OBS3sWNw}kEwkk7sj?KYr2mFP7-@-jM}tygU7KA;8u3n=atNbI+qL<1JtqFOx5 z-!|iIrR`|KuY0K@p4Apy@cSKUM`UlY&FWGP$;ug73G-X_?G@O<&RyN}(GK&Pd6&>r zc{oHe7&4bE48Z8AJRHVhr2!Z{W#wTrfO6)N#Zm56>Ow zRNBhEke0GH*Jhs~#pS}H4rJEku09tr_zNHQiRx(#fenWufEISxVYuOBMF88JVG@@FtZ%5e{QvM%~g*r&UVH9zIMxN2Kydzu)ij z`L<9+%C1`H4sv?4>*dTnbK_kK$CBcNT(&5(zfjIfuS8h_;a_kczXlk_y2IhFv;^IZvj&G(R~8TN$Jz^r-&eX7QiYQ zsm?ablH{4d-?PKO=MTfIWx2t zTy}Qv+i?%TC8K2}hj2cimpr_f@lpYd);)T5?$S`s@xT0jx$IiIl94XuAk6ndpwn!z z@eP4Z?~$5(3gbtY=m8zGt>l@6v7%#V2C#1i_Io}0_AYL~?f`5wRSU_ZHfX$V56Qm_ z?`Ws6fuWaS7~*>N4ciU4_7;rkq@I0yooO8`@WHE`MX2Pt4c%N3GDFKagRyu6gOGut z4}ih0o_$-%fXm7WO#+X9dZu>n*TCcRKdE|uwWaFqL-k?;nD4<3-`p#;+XYjww52+x zx#VV9X$Dn&RjKPOkCb?>QebcN>e+dD1A0##-5O;49MDMw6Bzq@rFKqhz&uyaMTiMv z&JJL1ht(gGn%em*3^iTDH1DHE7#f+t|~A+X=*Jz%1{0lPD>F?(RH zEQ5ojoVFK66iN>L zx008@+gg>DZ+sX2-?wR2V>7%r@7vM-&2N5xjnSq2>MZkz!YDP%480SYDH#qS&{d~V z`X(?OG|U&?i)xbY@C^;6dZqg1+bLq4h- zB6(yQ^eDp$F?RHgGY$lY&-;#$c6@+xhUyi6_XB~Y_v;k%!CTZjT(%@$+accEKkd2- z%Aa7f`$cxRmuN;Rd10`*l*|P=%;>Ekf4*Ogal%_LEB&Gh2BVE@LUj!WWZd^YWE}T% zu{P_giiGOWK#aC$PchG*BQ(~RyX%xyTQt)v&vY8 z^0huUvKrOE>NU!5qqC|rSf%o{Tr2y!vk@7t4iByq44%s3+|u2oR|9+wpXo9+%~R~1 zjD)0v+tEdQrjf+s?wWu&@7P@EWpxI-RPN!t+IDBxGBSP$IkDH> z$kQy16tG1fqW>8I#-M;rHZ3QNE1KV5mbE3BO&|1vXi8=wS@w?6$dJqSTeGKSPnkAp zN_G7Y(4x;YitsP7=~-RGu}RFE)vwdGcLDJt*QG>dD)bxNH+> z5p~`RZ+mcz8)7ep_YNYz<)7D(@KnMl@L6^Qfm#4K7{E(G;29lQYyr?81nvr8Mv`fo zfJ*g1r7%HgS>OBpX_Zv#N&EOZK=!(9VPnUQm&@41n`{Y(vaN6z{j&G2B>YnC%kN6Z z(~ckhFOq!^&VRz*Q5^X+d&}ObUdq@B**z8#s`6DqXe-4u=f523wFtw`XJq(4&i zM>>COqCo;&nI66s1$Ypf_07R%xGJ=zyn1ryyqgT<2zob#vFX_!ahbrh>>>2%J|sSD z8LfrB#)@m~kWu1Nfvq1hgdUfWwUn|FkY&jkC3*xlGiL}r-pGj$^HA2USYmyglh`7+ zGn7)6wJw1Pxt7>nZ=>t%x#;(=lof@_PxI3XbC|5KFxDs6*8fG+(}@-EwQ>Qv^uDjl z_`WXV`(fN?Wuz_m?&dkYW&d6*K1j$Uf<_>^(pnFNC(_-w3NcN zIv*}EUWU(?G5y=o7gDEHzYOQqZ|VOMgz#H>(L-M)+Q*K|9+N#*_Pt?C`pWO$el`Kz zx~@jEq_{80N@!L*A`WT$Wv^#TJQg;R8Qu36Dg~92!(7Ubxyl7BMrsL=5>`Ca<@1

wox0xI}^=fs5aZ`b~MELW*=?%oPg1Aer6lrw-OjqqTS2G?UQjCXtX z4!B^v2HDa!bH2|m$Z1EN(&m%SvTC8E#-%(*h0DsWB>wnRPCO+mdu3y#>poO+ zlZ-VjWo0QP5L%JFjzU?ff*vIgBW)-dWS{J`491%FQ+ko<#p+VrVfo4-t@X=C z`AUm?U$$8jo+9oZ$~k0@M24XT?RDLIlB}SNY=noJm1VutNi>LynxSO-TuJv&t|5G+ zo%@o^u`im+^R1x1?oyt$3aq!B=kC6m%GjC8^Ts0SBc(fy*LVvr3iJ!V#RmE1j75f$ zj+uEy#aMBTOH~dpdt^Reg!10u%`2+uDJ$({-w+ZUKdIx zp^OkpF}e2ykRXFD*iO4%em*)|3fw_|o04p}5Ab{kf~=&UtPG_!OFw!^k}8Ipv6fDy zmlzC(km4yWrJ0j@6cmDTZ-TRKuNeR~ZA;u}F&O?naIUq{UyvI05kbK{eW2*Whleuw!12-Grl~3y?Ta7=S%K^+*^?2+fE1>Z0eWSw zy$F$06!%cLrGHV{Ds3AIxAY37BQf;(fWs05tFFv0X!)g}Ze5b8Q=cNNUeH8l=m|>N zpa_aI9H5t|>#4K48w}MBm@F`eF3HjvtRuwUGL;WyKEruHv$TqS@?Utg>EY z-B9a(M6K7l4<&=OPNtU|YMq<`2BUNaQb}K3;t(K`#G@bM5~FDMQV4%T+2{L%G)gBm z1v}qoFmomSxW+%FWH-IJPEwkxu;xG5tRXg4Jfz}}3hL}7vX+qG^a@>P?(3_4haEJ} z>NF80gY>pqPGzJ9lvYMBi74DWFw{-y8|r;!2sljAIdqUb!$ZlOP`95LDESs%vgGNF z9IF2o2#yh8mL4{M3d67f04&~WVG3jgY zJSP=E+8oHe1)a8)PTLzqJGX=rG3MbSFj~)q{B{xVJ_Q*aktAP{K6bZ2=Z;w@&V$#3}>Qo59SWUQ1t ze>7(cM|2sq4lsRc%UHBV@>Ih8OY^%hs~5=LMM?P{zf}GX=@{Y_IJXBUb>GM^DbB#m zBcr61l&h>*kB3WHTFUy59O5p<6evf_-u$E*jPu61>3qA!xv35Bk)~hZcw<7)gipve zp$9H~pL5z7CKBIg&3IrIa2mg|r)RANy;M%|%{nxowKx~r;}a?7Qz+D#g5}6~L35AA zY8f&S4AFO|@~KwD*AGhxaxkPhGI^4EZ_UW;?W7*rJ7pcZ`8sSC_$`CTkItB~uA~`9 z!Mdce5;09G5tv}#eG@Z-{bO1NEJ`h1=tiF?XqA{9=8*$9{NN`FY`TQPxm9UWvP z0sKqtA~rx>zI=GABT}(zWKOFDQ6;aUqsTj@2vwSo(OSwO9xOAkJin8A201pqeB`@q zNxATZe5UTj+2SZNaa3jSerGeqVvjkK3Ma~cdw=d}=rrU5UXPZ7f9mSyR78W7yD zY^2y*gJJmvT|Hn});BRM>%E>!kjLwAK}gt#3UZvip0gxs#-jC-=VLgH;1Vz_#6{P@ z*e$^^zG#czZ7J31F6eYyB+qj?UBIdkoo-deZf~I5?zfBrnb)J~RNE1k)~Ny}g{bCp zeUEK4g_V9w3{i1|k`5s&CC`-JobJEa6EhGR?MZLlCj5nVB@=-P{ug`lF}rxPC2O~i zOSWex+6Y}?>|v0tCv^wiw!^wR@3-8uimb^X2zOZWc#H;i?dHy{`*WZjz$(d8zo)|V`bXVy(BT^Zl3zFx1k*ytV4H%QhkfR$DP4c`YvQwkGbejkm8IuCw z*GirR=(S`_7R|)CDB!ka3+dK3b|y29wEcI}QEIRKBXTiyu%@P?7gnnQdvU$#n9#&@)T2`M+;sc}j}oS%o|}#dHP?;w&tT1Na4L(@ zWSajZHysnmboACuM`}~kk^cWO9gqAcLmT2n+Ga4+O^1zQ0S%_35T3gBc5g`7U^+PH zJPFWXI(%!Wm2~To&aDR@fL^}&<$ac0e6NQic|Ge0*XtqsuibL1Zbk}ZpHIhXuq2%z zR#Qu|0`{SL8$zT_P3s4en-tWN7B5oSQ^+L}pJ@n)t3C8u<_|By`hr#hA3h1kCxJU- zM=5!O4>TeR1&q^a8l1-sgs3}@uTnDTJU$y_;I2hIs8ATYj&^Xc=p5PvedhUu**oBe z%?vf?zrDyO&bNQ zaMHlOscE0f8P6?85JM{8jrPh_Ue%YNy9kEJl8?f(n(TYjMxE4e5qttWF)2g37a52U zhbf2smf3)%OlhC~&kpLCBg3>1toF0(hK2~mt?1ovsezNcQug?Z+o95a%ioW3;ub_~ z1@Xcuv0`Cc=9%2>e+rZj(ttqgxImW_fqgSMLz@b?NK>-1S=Uq+SmeSZgr;Z! 
z`z>E^KM8N@QL~uV8hcv}3UeteVrz#XhHcad^24$w`m1|Qy_OArgixkTcPZ$*v`&RTPIFg!Il)pwfG3rqu=uC za_lb_+C+CbJ6Skr%x}#DjOy)kBZ8$r3cp#4Iz%0Kj1At?Nv#{UXJu@`%tXx3K5buF z)(1`Ko)6NM6W#fPY}$0IL|ftVl1qGF@Q^2(t(e}k{(=q~BHoxT|s0?D*0+v+ExdTyBm+~f*ZpZ*4PI{1OeD_K~~o z%>1h4BL0z9?{)M+T;`ExY}T#O?ypL)b78rg2EWhr%o@1-cQ{EWcE{XEs#e3tb2{oQfiM>3XeaY9lBUeF5$% zF;pA~@a8&fh2D~SOhaA=NU&)0I;`L@nsac39deiv6(^pfbV&3ehm|0Q95#Avhf4$( zh7G!{y}KT?kQbeew)kc`Qx3{|>W#W9?DD?59GEC{xl6gD)S0rM=Q)a8-aF>YzG-Ic z2Jku*z@b{s^%jkwpU?Ca8K@=u{r+7P60Tl{V^LJ^V(4ru8dp|Y`7IBF0PEbLm>4a) zgTVVZa42>gEc=46i8`$CkYio_Z%=3PTCa!HTkxwqLkux1M zLyuiY<%};tA4=kYG1vp9lK(`69u-bPY^EJHOiU9=+K=}IrTvZEbgG8W06l4wr}JL6 zlV14xEXZanv9XxKMVJe4%g(HtgBYKQ)=Ap325u9<&8bMTcVsjK=^hZIyWjFCm%?r9 zPDjfsPzNd!fm&_tG%6oab-o!t#s+t3WVGKhh44>MQQaLdB1t%YR1PZ8?U8+v_OgX% zLU4epF64|*(!#$d8>f{*Yrgf4w37LmMEgVqa>_Az`$jz=+f5$`BAa|iB#S)|@5Vj^ zogF)-#m^87Wp1m#getQZbE5Xx-k@r3g^!MflK}%m7wTo~Xk3xZp&y}3U6CSkZ4(YT zXlI3QwK!7I9vA}-yE4=YXZw)DP92Y;#S;LPX>a8V4)9qi=Br&SMio6cSBMpd4FJVM zL(uC<;%Qp;dOfK;9m&&udD_O)xjbFK)3l^fJY#qoERmkT)ANz;^Z9a|DRT6w3l3I0 zAFOUiEe$a0EU7r9Mnf++T3*P%~rKrf!< zRKCDml8rrmOgGq>n9=%E~FdvtPZ3lnDHq5SLPd*cx!QV1NZpKiN!;6XX_oa~ky`#?)mrf&5-`-#HmNCNaQPkOA-Dc)-F2IHaI3cl^2H~*S4 z-Lhdge22xHQl;hX4i1Rppz)TxEPG3~OXE+^klbS{(Xa4yR7vBH;F*mxuFBA8bocdm zqEN?{_oy={dr7G~sHT!*kJqtohAb^UMcaCvWSnfnwr#N`S2_CL%C_w1yDaXN+=;6w zg@(CZJFLw$k+z4X2H&?va4E5aYnq@-`S`AYlr#%6Y^;*+IvpmmOLum*4beG#s7(h4Ovj4v z0&D>7Kh#cx_`bEaS|huBPlBpl5yDyItWG_tH>CZvnyI5d9-q%IFbVUebV@hJ&A6^9$+6Jaz33-G+0{|y|8lQAb zyRbK*wSSM4q}r(|!JfRuZ3Gu|IN!!Ms;QK1RB0l@!-_}obYQ+in4j*TLmaEXS}bva z73Cst`(vo9_C7KJt_5fi9GbJ1JcMH^ zrZ$^tj(5XF(Ifl_f2V5ytY_H|v}^t@m0qx&O?@CE?p}}`*B+Y$Vnq!D>$EzYI}qm0 zRxV@lVqn`I=-%OG!aq=foz8u*lUHP^+hKfSU`oWHr@tTQ;LfA$$B|t_Ej*Ln%J5c} zWrs4wl3V~EBH(=h%F0)=uSfYJoc_bGbSdR=81ROC^g^?&M5X|>h){C~wQOIQyyP!h z0MGfvp5AY>A4KK1QSE)Ez0?6Z5|+ zt*;)_0K#(Y0_7+w->dPNLSY59Dj+DAGb$#a1Zn946gy+3azOUQmS((jz5v?_544n# zDne@BSRU1xL42QbgDzKj^!x;5&hm_k$#~9N09yzF%04(xzLMdARx*-Bl!OSMfb_S;!mU{I1w}v( z7XeQa0T19@#CX?!dzY#Y5dh;NP#%e(%PEhtoG2*h=CVH7=Kh#-j zt;4nhhJB(URsz^(aKV6A2^hAB2C)r*Z34{eI1CW>hgyWNFCVhG=W;w+CzAY2`N+fR&mvh_%$=tq%-iq?PHL+XDcpdwanI|)n>w^auCiGV-tahu-;xR^ z7FKa4+|DY$<@8({S@~7Yl#w%~@pZpq@cbf;XQd0Rt#g5hj?ZH!axvSqa8zMu1eSO# z=j8`)2%RVpxJhtgE!cvV2PZ0#UKx5zlywIunW@x3G{#vc9}wQx{vi1@_Y9#AE-JD~e<57IzWc?9gJX+xGyKj^dW` zsc;AOt?9q!a+4z{r?AHKTV9$?jt!2Wcy(0oXgYd=Gbg3s7&vo+=0flc)8ni66I+KS zd*?^j6hRLqnPpj#zffnz5uD(b7*4`Xc1y9nu=S&L+d@VPpD1DWo@&RZHmPM<=V}GE z%$?9DnGh~*hKrqb2h9`3G718Jpw_{mqZg<0A!kw3qb$n1;{Q<~E8xgzrN4%2kZk4{@9&p^VYRnXV$o6_7i>v{XCq$N2KF8E_BSu3C2fj&Mu znmkVJmcfFdH0y~vs#u)vgVLOlV5XWqMrMijP*b&0o2c<|&XShazLE1a!vwZxNhk3_ z0lT;)dc<9qNm~8*&dz*V?wTHgaiI{G@+Rhp4Kmi({H#y&vaCT(kk?Rr&1Y&yHJeVD z%Wxo#nP+OeQIGE= zlE6-t#kgtnRU3W_NdOlWUu8YUAv0hKoysywXD{#D%&AmRvSzf)d%H~z?9D=1vaEdQ zxAeW2bW!WdKwdHWR@^1t87b(%KR{~E&pk!f7@hv+da(I*tZ1};8~uKG=6}e_JyxeO zEtL-2(l=QjYTYqY$r_H?zEKS|XY z9;W3W@>Q3k2UCY5n3{rm!d?Yq&x7>xxVuMZ_-$V1730+2pp* z0ER=9C<5ED{40ok?8jxv5z`1a*M@CS4t&JOB$j0EqC@Qg=qNUMd2+;0GdU`DLOE)* zk-35Tjt-Rr(DUrX@?`T|09`btam?35_A`rOoxBlUGI@p;$he>U28y@Hhv4x;J#g9( zp|50FPbRQZZ&a*4e>};}%nc_UOW9WMNZYLM8w~?CKEyPMHKO?Ejy(F_x*CtiO*f<5*~C-KM( zw)v4*_p9GE7LP*;I0k95!wuDry^OL^rwLRi#j$QfB}y-DzMYmDHSO0YA(=$U*6Sm; zry@}jx}C2jl*4pV6Xr9d83jqw`KidUrLs4qHC9oy6-g3~Kva##l6jyR$s&43IF=j) zq^!9x5{25G8Dx7aVG9gOeQ`%l=Nnt8jckTWA%WVH0oL3kENBaJm6JrClusUNs2wAU zs)V!n#8aF98O9RUcfkp+ENesiZ~vqEiR2fac!`};dfd4?4+SmRpKgZEr%l5tzvIWR zo0}w%H<_Ebm6erWvGOa%Zf3yigF)}WiwW$3xgavs{Ln)vXYflHUYUHZ`WUgT8|ErL 
ztI=FlgSo0(STOL$ET8ZY-C$1`*xQ5P=eKc{M)b7TqksKpSoH*XzCiJe%+{}z|^$?veVSH0&^Mo>YB)U(O01he-ms!6ejTu2L)nz&Nzy-<`n(Rf>fCtP*kT|a z;4paV9Cj&$om?KpPOu2c$1~muWW2*Jvt(&3hrxyC$O~DQRmoCE4*MVg%Vkpm`)(Q) zR26`QvnN+2w|x_^>+NI}JH0B|JezPE?WD?Dtxh(l>%gFyoXT(rYXB_*01~TQF_8lnW$!uy4KrMmQjV z1!zHV+oRB>Jdn!1da9?mY$9v@^swH#yY~TXm_t81kgsgh-Mtp@0r^%v&aC?_8!>Ir zLOxx6E?n%$UVSsye;tR#@AiqN)7+n?Y)^0^VGhy-r~q zb)*+7_Hy!-{cx?xu5bJhO$LYX`%eWXZR!xqLoR_Z1-!x9eyK+*AHNS=$AD|~aW-P@$cRJZ zLF;vt2Fy)o?$fQ(fy)EHU)hDVUCc88Y;^v18p+VM?Fr!ZhG_{ocIIElU1ykN?$zzk zfx*U?xK|fZszZa8=@XI6ZNyL=8Z=GMig4?qSpIVnQZwW<+NV3nvy)C>;&I^=iEdka zuvebD`G4A{j{lr#92EikB;BFgr#Qyk&nFJs0gV4;nC97`^6T8Q$A?0t|7Msbj^T!B zK$CIDYCekI)-U&&)>CQ`um7xXVTT zjeB6a`5B`{c7}kkR&cEj^8=*ZwK91yLdcK=50K{Ln8x+4)#B5Q8Vg8%VHPBR>2dbQ zx*id$`hxG0{m~6}gk`*tX0Ajv8jQvFr*QV=g&0c~T={AoAPZQDp7dG-E76wiexaTC z*v;(I7YYL_)>DXKR*^@9&6I|S4MG}fB^&?Z&>`I1+5-bA8%`tztr`x}(EfFu76#a^0OytCAc;i6^gW$qT?%+c6!SoZNFM;P-k z7Fjo6XDmkO4EVBd4$>)nGR5Td|B2SSH}9^FFsoB?HTY0!Nfy}rP8f8ynKiKQcHD4}1J^8;CO##&n` zq?J7ViGm#0iLCi6_Ww)cq~+WRsbrcm#Q*2`QKXxip9+rD&6U?*RTv0R1QTm4x>Bi7LCG2V!6*0&Q~F4k@RU@_dq?q1(t z{Af6Pas9ae(qi4_f$>t=*Z!8kCqYtX~MvJx0jSDpH( zLzCga%~~f5ebdGcZZNlQJ*=_W+6-gPHxqM8C>jlAYHLScKb*>kU)OA_!817b?m&8F zK(R?jbj8*{c|paVr!*9M^)M;6eS4R3z{v;PRk%i)X}PZ9!^kBK9|ShCwXc@3s@F%F z&oCmV~`FibKe17nayr%R3>#SQs!#pU8l^|@Lja^<+?I&LoO+^8rY=F2g{*MD;vA1 zzxhHyp`p}WKgHcQ&uogZ^qdS0PHj+V9KDVV+8nXe!Rj{M`oBBq{LOj2{+Ic{`nrkv z$kELQfI-#m&mr?MVr3Djj?9Pi2J_*#PIU!3sct@yUP-1LF59|a8pirr++ss!d?OZf z!7lkcrJ=>wk!OA_CJKAxbU6C;NzmcWCAtphBad{r6u30xS1iGhAAzq(o6MKL;Yyjs zZ^c-KV{WdV)}X&Z^jg66x96>P;7pu7FtGBClKBFvpBhFXveU-iQ_zvFny-!a67AEI?&x^90MF@vLdp$B_CG>|6 zki~=E1u+z_MS#+&)VsX7?72hH?#26&%NLqT=z>6c0;tAOIwN!_9@1(o!({K+QWv7% zI11n`S&82oY|u;PQ^HUNT~el;C><1`-=ijHo}BY5?of*$z6G-H7JRZiWu<(`u?A{3 z&?%y_`mVV62c#~yQYrf^9k_8ku1!ZRz||8*TrX!_l zui(_pBZF{Ysh&DHI&}w5{cLIjSyCfeY9kpO?9m_#EX=;cdD}fdxXrEVy;&Fcg4?0a zILPkLqRR&)&tc@|N)?B)%j%8NviImMBdg{6^u@z_q+;|gD^2KW3vhZN>jls+@f(Sf zVTaDKjx;tC9fkXBWMxtVR5%h@6vq!Z<*%o0Ulx6|kNYAz4W0AEDRnT;Kdaph_jD!B zA@=2qCpt&@+(UkUd1XoXE_`O|Py4)jGPY1$%Az$m4~SV{(Q z@%D_4kL(b78WwkQ`o`k+w_==m#StWUI3=|H_#6rC)%mEgWqk%3PC1LO1v1!!#}eE# zaXP#L*|K6;Ur7%W?jgei{Q>3Ugs6V;peyqa39hxlFx(f?h4=8B(m`F&y`BDp$eZ&^ z5MBU)(|?GIP>j9w(7TCUX6QY56z91z)8{mEWzG+qC&_dspw@l6)K za2*JU;$CPgMCa-4JcN_4_&mp73A54MP*O+76O#14PT#(K%A1W7et~2SJ_rbO9aAw_ zYn$gmQtVb@#^c=!M_{9i+v0F%G$twf8W@`^E&NqAE#T(h)0=cE+w|)$;U=ur_sCw8 zMsulR*?d^Z@GH_JMv+?dnQHb5Qh-T zvQBOz4X=0%wWiC4dIo=(7wo^F{#}%18jY!Qk1BjETJ^w zt7VJUm}MykDW7Q$<-Cg=49~G^_?As=6q|iAG1bP4&qMLD_gO;H59WGm2}fCNr*TTx;SAuS`;ypw zC%a5I(Vt|ljp9Vv%fKy`6$o=!Agm*}icC_48CX2N)k9D9g%2fno_j}yyVPsg@-lkU|sVt~jne~PINH`-l19VxzcC;Q`cd}=PDCS*4Xv<1^) z<$3(L43{66T~ZD|f@*pCEy6IhqJj<!`=W3~IU~*Rb3{8(Y>HFpLqlc>-y{%0KB@qivkYe*CEK_>!40>R_}8 zjQH106L9=f#{D;#rlB2+cRVQx$M&_D+tKs+r12gG0tXgpd87GoAoEP4tsft4z@{-e z=J2gLU|CC7kKS>tGJl;kFyoDrS)q$5# z5-4HOjU^=NCHTny)7zo31UPrWI{dd1&h+GUkOFmpKYd+G2ej0gww-wi&tSj=S{nXo zpoD`3|E&(ZgpxoBi*76dA7Fxu<=rvC5@H%l&`T#R<^8u3&h+4Qz>T&o7o@;{yNYR%$93Vbe0-;sFs zspRk?f+zed?|)?-c|B`4@lGZlSnG4%*fk3Bjv)9Uy#J@%|H^v@P#ab8^cGUZ@&Mas z@jMvHFC(gXpt051>z{RTuSpJQzvPy(Md&Q6;5K3bgxh%FAX=_&rd-Qkcoesx^=zsX zAKAU#?oqj()P(kl} z4-$d>Gn?JCfq{UV$qSAyT<-%f9}5(C#9DJ6JLou{GVm3QVwr)IoOz~rf$STGHO8^x z*)B!o_ow4?YFB1uaRhx4R<_6NQYw*MQeA_)%J*>01iDN(Lxjg%w%06~1zw9iW@o1R zNXd7y(lL@)`xXH3XK}1E<(x~|=L9dr4m~lH6OxHB@bf0rAp!0Hj$`ubVw1^$6Q@eP zl{~BUEy$5sYfD9)oysJ$Q+X)bseChKA8zysJzy}5#qU}C&f%AM5J%F}7GRmc0)Z$%k_0({yWJ{!k$fH`>WdnT2z*vew~t7I|^3 z_)-|V=VGonC5*jwF-DB;%1&IgnomIBn$YdhyhZk>?_)i`xy^WgbN=TL%y)Kyy`u$2 z7}eF$obCK3SzOtUY2PG>U*F2Yzr82f+YF_{BH_@M+*K~SctJ+Aq9)33DeoMh#dqo_ 
zwOgC9b>DUuU$?Lm-zJ*=g5}jKLUp-z(Jo#KW%jS5#GN*F%h%J5d7*mwgKeBg9hHAV ziw$Mpe{B9WMgT*;UPS`tvz59Kk@qLj9+qIVbmaXwb3#p$J*`Ob63EyLC=Z%c@ajNXigEZI% zEXEKh+i#t=Pa8CqJ^aH^F-K&_ez1nbi-LhYRy|PsMqtOPts=s{Km9OBEWR zx9TMEF9VDE(J5YP&2Ig1vbe%Pl7!^Pf%5%JK5^9~HteTLV}jqnOqcD(m#!J;uTw-k z(-Csv03+7lP{)ISF1S2Jc$b~M+~2gAmK@$`VKpyk`)*+|KNlJA_(!Lj65CXXnSgHp zd8~MH0+VXz81Mhvz{+dmjl=O=Upvp(8qfG&dW&y&U}JviVJcfkqw6YqHx7!h6~Ek) z=4(&Ajm|Vpfwj8}of$*V3^dq*2zq!OXNw~xGd@n_rr@{d1=iu$xB;0u-35HP06Hv5 zh;G;OoNhNg4mmU$`<(WIPWv2ai+)Y$^)+8$!KvhB(uE43y+fy+fk$x!%B$6B@fg$t2thJZBte|9Os=dkqgxTdC82hj(*L3z*bO?GcuCWtw=j zKU;MrUhLYQRb2VmxD;2#Zusq|4p06;y2f5%pcn9N!Mg%0hf1#Ygg|B`3N25(Tq%3C@y*!4`ZVK7AQaW7#uQ_Ta9qy(a$Z;2csA`c zw^2v(Q}$>$-|>EZYJvdI<~a7kZP5tP;vU^gR#^%Cdkt~LiO8(0Yw)K>{_JWzi|2qp zXBcn0f?4>_c=t_LNZn^^#I(o4LJF@2)~3KNA}6)itd8q4|=*ypcn8$S$Hob}9pwQY+% z7=XvMZ+@1AgM$QxG_1HXc3eOWeV-&rp@x3A@rC{LS68DMreF)pq(Fwfud#))`t_&cuU znAIez$+22sr~h`e4Ao!jRtYTrpG2u@6>gfn+s+y1kGE&C@vaTZ}9 zz5?O<&7`!~bQe8H_^CL-swEC*$Nq^ib^w0zwJhUjmkq4?pDg3kc;0nwi4ekSuDx%p z#@OHBpCry~{cbbiZb2;y5vCgV{B2NAg$M&9%l}4``RKc}@HG787(tthoVZY7e%E>) zw!-A~34QYMBi`hcEBGci5f|^__V^T>p4U$NWl(p83N~?AnEGv~kSLDLQd=|=(#4G} zDsq|$(*!l4g>cGvx5b$~_q*;x05 zK}~Ka#EP%|q(0D2=qXnGq`uZp7~bKL_W$}q<<# zd^^rgFT@Z#F|v15SmTBawq!VKoLKvYJbL9EB6f9q@j3o`(J-Xi;Li-Q z%ps}OW00V7;CQ8-uFea*1;z{w7(497{SSbb9_2ysXASC8u|l5sU5;j5^UObTl~{L33i%ax1#}fn(~Q zR$+kY(PItRPjyhU;)O-VCoraV#tYLUF}TnYFyr`$=P*79(yo4MQ2QhZS>mE1b#{W# zwYBXiQPX5+$%oZ>H9@di>aeopFy+|6kY)+!ii7G5>gNeUvT1uT%V+7=UX4f;;>Aaf zsC^TKp{DG{tS6(?<&@R-h`KRR7@TtEu+9*DV}F!jc#rx7?|8)CrKBCqNfKO2zFB+v zcQw)`Bqf)EfX<0FcEN+%xvz-{rk|6t@&FofoS@xuSiQq0bTjn`@JA)yj#Ag!P_+M$ zy4xlUH0^^Y9;jbol-jH_vQ{5blRFD`)9A*mkSKK?WpzHJKGj+1Zmc+IP!D$&MwwoM zL(l(wj?Xf=o!X@fa7qrU&Mx4b(U^73qOPQ@RtMFsU4%^239JbMR4-Z7zbI?n0kuik3`+fDe*@K}Na_zKaFJfZ!Mr3vTUbX?UwAOV4lHO#_N%{l zh1yFSsXURYGYJfD-mlI|5_*~98?*XHs_QB1%0Bg2k}$~h?!JbyPPA1claaM-pE@8J zSizLYEs?(q5EAk~O4$hJsUM&au8qgu`Z^{-^XW_q?E-?F!j+Pxb{ z@87GA>;_o|H)f4)qbihT-m7lvh5}FT339d6M5upI*2{a;4k^f*(U?^hq288)tn@wV z(iF&2Um2vjUY5dr>Q^a3jJU5-{VPRC5!>xkle!D}rlAe^mW5{Z;qF3$7+tBp+Fi&p zo!cE`WjP_KwUqVxZZ)!pkYT#NF{`hnPV9j!`)+kHWra3oeb!ptPg$SrQnemv9n(`NHEBB=va(vM7b$DUPBpX_tljLLYJM*vRouN>b@vj+nzRoZ zQBCT_UWEFA8k#C(v|ikp_1a~F>Pi*jBSr>b@HpY3J*64c#i>FUQ*Q?Goso+xm zzWQ;hkSV_LzFy$fooa4xAxSK2zQHNyDOIV5he=a8!aooP7@-;4o9}>m2moaGfi!_Tn7FQr3(AIcjhXexs+e9lUUNy zl`-8aJ?s;S-&+(5!L_&a^;WL3lQy#WIY%r64cSZl`lj}3pt_VDrO3qfnsENH5*tVx zrRwGWLZ*8M;4TG0{(H>MJwJrtLm%9VpMyVG!@u#Rzl~N%A@&K`${8GOuoXCzy~C7{ z3@n(`VM=ZWt?7_{UiLX&lad@Nxu`P2;C3h~*Osn0+!T1hXTkT=dlG%7X{Dh1Yud2p z6g_iG9t7baLH;x)K{%98c|rXIYR&gp9pHnF zfI~T9 zbNo!ztsp=F-XH^=xbQhD=t?8IoPgJV*OJz~87ZiEr21aEU<+x1235aH7rNrg8h2Pz zT?7U9gsWXLgeVIACk6`9%|ekreq8-=px_aG zE!EkX!fx?+3$@Q6;dya*xO!%gP$)iCp`OhWlEtap)WR$bmL)A!UzXq$>$d6e_utjw zU$szE2ZOFhbM@3#pUtpk9k6#`$ITV3VuuQ+kxLc{0KG)ZeB0^BJk^kUow6K zD}iU}z%LWO>G<7)AHjNIbhgD0>j^_E{JP;sFd8{wl3BzVDm3q&hRba#T}lB)nR`gI zEDVYEI}v_s=~RZ`BBUe5%@DQ2j+bia$ zoKZ!qn9w``W>K{X?gFq3OY_rTCE<(Eb9P)EmW=Zq+1>|}oQmf3K4{IwI?kUBk+HWg zWLx_jk&p28x0j_h70o~ao=fm_#^?5#*fv{CJDu2 zr;pWw$%1zr!{z613ltpKfXgPG$`qmtEOG?xmB)!sPMFh|PyQMv-?4o1HQ|yeooepC zP~HEy+G`5_KJ_7{j-MiIiuTGyX$YR@l>c;6ptyq;)@$9X_ zP(j4qJm23YIB*qRZbgqfgk7Q-olE$0-CHvS!*cu<;rA?l>+vJlmOvA^mzL1eQqlE3;iwSa3e8QYAWmpPsrt|T!js}%Zgq7L7$m#ZkBcxL%v`KK zIa}y14qUA6pDp-|%BKc(%p763)%TL#eO~z_(0vY)f2JQtJEr-|)!lQ1PmRyLXsCE< zuJDK`Zs??To`=xT<_dM%JfVyG2OO%Qj&MTz2UkBhuU}zYeIVF-u6NyTU7+`#6WZbb zBsy?C(W0Qg*8@bd@~Dk>ZLIO9AR$+TJRrD4@!PfP%?~2lv}$d|iw_DfqV=Dy$a@G5 zssHAytu_j>QBd-dlxgHGyhe(K zCu^gkg4?Zh(XoydCV1>}lFFmhvBE2urc_o`T2xlsw4^dGos;|bde6*eFVH@p-}8Nb 
z&-44|yPs$F>pkyz&wFOg%$hYb?^(07TlAt;JRfK(+29x6P|NsRg<6+@2{nv=UZ_20 zI*FUYr^*B71IsjPz|A1mEW?UeUK+oBIkNP|ll+n8TD;RfQEZu|)fL+KD7R^#SD^*l zT>7AKUZ(38hYN3Q{(>VnECao+uacr2rqp_R+M(0;o&US`30h;4t3suqOI;&!;qTzB zE*X(awgU$aP|Hg%2|jyAhWjy`vbr=bfj&E7O|RKYBRp}K+FfQ?twOvT=z~?RTI^J7 zPcd;uNGn9F8Ig-3VR(DADbdU57kr7vh(v36n?ya9=f&%t`4=lRtJ5IyD(F&IS}ts~ z3vPfRyCf|a9}TQY%Y#;w*hD0bH#j%a8#;!Ue;fu4b4i0dj}e`%16{05>_HY!ezmks zqeR!|c&y{ubH(ArgDx8$D?Ra0sL2Vy+>&IQX&3e>6(=VW78EDn>?(>Yq->P56N_;r z$w?^~8Dx~Cn-3)q#o%YJ-5dFRDQ+;dkl~i3QOGPAg-j_hjII|GnUd8A#&uwTy1q=P zTECrPg#zA6cuT}P{KzsaNBprO?WWsb6jCd(xp5!DQwMr|r>;7@?`OEM6@0avASHV6 z{aETXGw{@N-GxEQG0&_N{!NiKwEN3QvoxdR_9o#N2;V|AGyqQwi#KxLdVc##Ek0%% z2|AG2%5>CrllXedGq=C2-OxHgNVvz?`E5%zODn8A4#j+k?^&rW4QcOiEu-A>+g54Q zEyF@^gW$1*zH7*6^WrvD)N0T44&iUE(z;FSkChc1*}YCQ4RKsl9B6#~imoQZwV?`- z;#yfIeD3W4b#$nysAB+4*ltZ;@x%EQk0k{zdI$$PksQW0*dR?eq-zyv;k{OC>9_4` zO-Y!F24Nm_CIz9%%kcE0ZOH$C430<57`%;Qn^xV3z^1}gD#}-DqdMNIRJ=+9Tr9*!a_4p&4yx2@K0;zvrfP~NawGqrgJ zMU3iyR=*)13*71g7 ztsgg+XoDlaK_&|yyO}K-w6c0hBECRDWybF-(YnU&5cw~_AwU>*;C@|%*H~qvq1KL< z4=8J$`K}V}j<`4!)5_{Ik7GjN+CW!TghE{j-@wDJz1Wk1ld%eU%o=TAXP9=8)6X&n zuPTF>X;lJ*d?i19jdmop9|<%S7tQ9BO4D)^H3gGU;4d>wq*4BbB5p9n8lBLhgu+L=vPW@vJrX$PdFeO0tLOxG*t)z~1(Nn=*=T=@6BnLb_}EaE z{}5)5W$>jB8EsiSJp)?{>DaSZiR0QRYpw=UXUE{gedhddZw-1Ad|~hx@#Y+VXua0W z`CfBR(FIm#$LZ;!43%NZ%4U zC40waQPY5VoOYwrv8J7HpaAvl-t)BA%d|28LFuAN$$ASsl!hk}D$+^Yy>aZ0Jj)l; zrPgmbpgqMs@=bhWx*8k5!^N4a?vZ$Cja)(3UgxNx;t+=(V`DUB&zcHrEk3D78AILR z+lp9!8D>r?nQAG%=LXsgY(w6 zce7TMM+Po71Tm?uF;Hs|>PK1+4U55xFn(z5OSDZpO+eJgw6cbdI; zL05b6>?9Ne4pR60b3J_r78_LP(L&p2%Af|@XKd-NIZ5r(T?^63zirx~yq~+D)P}cS znn(@GRQxml>`5(I|9lybc}k1z=`BQiE}4>>UNR-mwXO<^aI|C`n0O4)o{o+<%B*A+ z@%jBvX;H&Y;(Nh3mA4EGysS@m87w`TXVGL3s)H^`yi8vu6k#YrbY3`M%9|gCaEit& zpVGQ=^JXp8iF(D?8Cn?|F zRuoUOviNag^RmG7lO*>ef%Fwl3tK#&yn#*W+}c!%ueFFt3B8Sh0?FTeZ9>Z@a__TR zC+CaelEqcLx=JKs^)3{NtiQG}hF&L)ER{wz-eGQ0)L|{esqCzjpTwp_ou0u+m3Xn& zCU*=w&002$(JHyI*CzLT3vnd3n_4Sr-!Do)TxRV`n#&P$${Me;Wu7M)lst+9716My z?Peb;GI@p5#um?`xC^f>QmY}VuN{|flj)a>7SgP$f0=+FUi!QialZx1WW#+7FE1k% zT13XS7m@Y}UI8U}`b7%9npv7lL(u%3^oX zHf@*Z^KRZ}yLQ9IgV?*U1g7&I2eob; z$`{jvtHI%38&`)Eyph8f^GOG_!H!A*Rz2dr@KgaTqb%&D5%>~e)6DpIPKCF(tsnDrl9cU2}@#r9yvnfvgraQF7jW% zBcR0v&{%^mVAw={?hu}Wt`|sIdI-KZaRmpWdXC`z&lDFHS0a~*5F}G2Si@mIi*);V z<&p;rOym|HuT=7Icz5JKeY~ZTX9n+0A1@a?$}o&!5l#f`MSy3#5L}T#*>gu8^U2-@ zc7|(3wq&38v3p3i1C|kyp4#Y*x^oVN3SosBfAU9gQ>32L!Rdm z>OGh|0_JBPIKjhjd+l zwQ5Q!avVZY8#SJ8O{qsdNW6j79+RK77Odndq|7{5@J6YMyio~$?_)XLky{n-BOlKl zS*=*re%8l|^-n+RN3c{9+|La~pOsvNbnTvHer{JE*X)_&=O&f&R}X6)bw>d|bXZFa z{03uve)X`{+d2CR>S$#(*2`)g?uRX6i+8ZeoNhL?8;uR$$yQUl5mvlUUyp>ux{o*V z{6o}+-RpBhsCt9)C>a{FXW$rP&~Ify6r=1q&pQ(#h>tpy?8t3TW7Cv47-ClEfi4k8 zY(s~$5I*+=9mkRht*%4%qzCb4M#@1*)*%d$>m@Wu@Bm77G)fPh@jx$Tr8(eWhDxpy zuMN7_Cs4E5I~Aeeh3J&g^*Mpy{S!ROd$0_0GK6`aCT2}qHkts=4o$=UX@R*4^AzIZ zh`+iJ7pO=Y*D8{Rx{9QJO_61O5@R+Wx6(&CYgSVgD&cBBS?ee36iEdrEL51!pj^^0 zz$XSQWR8+CS?SpRXQzIAKQ7JT=2QYW6`D%h%P&)LxjB_U6OWQ07CIA(NaF{68vg1nI-d32@qSj8(u1q(?m z6F5w1waK^!Y*inUQbO5v&M{L1onwI=-zsXMWK=@QaNEq1If*56lI+EEhGLb_E?VfH z^GT{wMX^Y^ZDFkTC{WHJXE=RbdIBnE)_o~%(&?#SpF{AO}7%>gF#_B9m&PP zu2#tY;9L{;>tzd|b06P@$sMf zT?(e}@%O(_^2Zp(2I(8!2-*c&QH(HXhg#iMlh4VtPnTVli#xTz>Mn zc6TpzUJN}%TrbMwVqPPjH*($_L{>)?<#{8+=kT05?H1>w#72IEmO}DiiK8h)sW5uF zBbRKT451_}58>T5;~>F{EQlOoHjnwQ*{EzFc^oeBQ36^G8b*5*$?V?B8~KBX=N@H7 z<(!+%|Nem%ue;~)m)O%k3%Q2)3; zMDOZaPkpUePFK+LGSDsqZ<0QT^tq%jOZp~AU$*r1lfE43`x?c7Z0Abf7t)spAC+i24G?a5 zh6+fdSX4dk$kAZnj#^OwDR1N%5oJXoDWE++c{jdm^ zC4`SxDZebEG^@`qODRn}5J;a@#>dHXDuOJkG+4l-LRna8cAsAqmkUo$6>gUbvV4A7 
zVrkhvzbvz~9G_p7T3W8ppCjUDrRDh;vgpzZe10lC>X4~_RB2L4fkjVIAFEKX$Z{kM zK}UCetXVa!J&*ka8#TIm8u0vWcv_^Ug3_-9(zUd!*29UDHi5$7lgG#CJB zyn%}qeZ&W#OL_Wd+Ko=}o&|dIxpC;u^WzMQvn$SJ(ZU%$J}^kb0Q74(4_-A&c7?n4 zu4o7t8el3Kf`=M;LWhQ$iu%Hnid|e07x(I>l~(xDS3G8ckq4h~M|V5ILPfjy;$Cbp z3!r%Rkbm>GpKCXvP;dTR8X>js*U#Vjk!kb0{ zLfP~$v_8$1IXeku`NOZL=!-r=Q4wBn_o^jOoUiL!|HSgAz1i|7R!YBAk3<1Rd+6sV zXHVyF+C`Q>#I$Q%%!(hi{PrvU7^W@4t{`kvJMIaOh7G}r+uVv?n2tago^}9@ z!cpGmOW3=oWbxTwYGd@UEdK8=v6Sx_!yCTD=8)l#j6NCBY6X1^++Ca8n|gBw-M%zxrJZC(o zPT~nmHlOngzVmk}>kXRTAWf;R?@|W8Do4n8dBpIU%2d~XM>t)tT%{rMD0lC79eck+ zpS4Q%SqBGxk<=AERz0bX6oUvly%m`h>l=529_@PSDvc5|UH6#jEM~IKde8{d-8j_3 z9auo5XuQJ@TDNQrs<6Ek&NapXbR>HN`zCu+ z`UYldDSfe7sWx*+N?&WbX?Oc$y*Ll8ab+G(x*?o~f&G4wRq`m=T$;On2YVe#TzHae9JC+X20T4Uc%HWfz+ zoduF>$;3*M>lZ%qymqs*0H%C87v>C|sD+6{u50aHIe$3}bI>t4fB6rTi22J|F@JfE z=2O^FVi8tCYAiA7^cn~%ssk1~-lCZvCILA2&g7rP5EHYQJE=`wc4frPMc=+C+zIOm z&1W1#4q__fA)^vKZK)Bg_&O964r8LT1BzjMBcq!JLlZq)F*`^3&c*Ruc-ACPv;sL} zcO9Wm+UANA){c9gcn9keKLj=}d?7WbE1d$B!$th8bQMD9g7?$M6X zVoD>;0I+XeTDG;5t?h4(TKBU%R8eWG(>m`L3ay*V9`xcxe0~H?k zG|crJ?FFSn&SP_-Yn9MQCfHtoVmC9^+eTt%Y>j8`FiKb5&ElDU26=aq5fX#-r$+lT zUj@2!U^Q9bsP(8Pklpw6!Qnu7sW2vvu3QYo8{WnP75T^eNpf)44$l}-_vt0Q<1lzR zyHlhQfsQM2!t^+&pNl=Zf8Z!Sy!T&=Q{e{pU(oO2KAG+UZ~74L!ibDsHKW~u2QSmz z;Jy=Iq)(;`{1n!3QH}$8A{~E~_A+E)3}t~Pym*2h^E|x=E9UOe7>FDehj4Lkm>#Jg z$Rt~J58n8THY41b;m`ky3_k5wOlv|q@HM}}JlWb1)(3Ry+0Gf*>19uP2&V<593rzU zM&w=&`Hz3qp3==7_?+Lg-u-SHC3K4JFd4K%o1g8^u6LwA#qJ0#ee!&(PW;qw+8Z>S zeBy#OG(6ZK(q<$-c|q$E{_k{uw6D^6tKYSreQ7K%Zi7W_GhH+Aq3>MW42H%&%@AjG zI=}CCZL6Ntfd^jH;=*5X`1Msf_@IlLHGziaID`;QMz*)n2AKt*OR?zzo2#<8?FG%s zpSy^ik9MVQPnus{b(&O1BYUH|DF&J4pqr`Nz3!6skfz%@aP5lL#VKbI_o9!$Ze6jJ z4BwH#IhNF4t~h6-EqPYiTapa=m^_E;4}8nZv}>#wN3|12GNR4H`V;DU&`K*hX$h`e zyoT}nD0kp4F$`a}R}I4lUUYq#oOFX6htnRv-c4oDH`RY(13;IKaqYZB4>#<4#4UR) z(=~$EoYeZDZI3Wxk27A{eb3jhZH~c%7xa4grc8H^H*?6*g%No9yII3dO(H!fXm7#8 zvC!==`aC?=eb4=;K;vZXG2&$HwmAN8Q8)}J31=}7#5~NEhwW#_G7`|Y)3?OXCaFYR zR3g}gCoJ*k49*nbZW6aM?{h^<&p3(hoZgh7dCLWKWU@fqZdD-ZuCG#(9>pOpG%=yG zwD6bY&t2tkf9`e;=dWJTqVG7h`Y>H#eFW$50p7+-RP$h>MGJSLMiSl>gMZd0s>&EZ3%R zurvSHA9y_@x+`z^LmSv#J|58Xe??j1*XBB-&x_{GI<`caiJbD8+^-wDX%xXB4Fo<$T6OrgQjteYb6b1Qie2`miNQ}gFX{JL|W=Pi}Y@AxE8{AbC-r4^+!CGXzu+}>*y>* z0y1XmfS*!bm+%~>BLHX$oZ*f;h&~9dx~R99qpYYZBfqH#?>dVEBUiowZ}PuH^SC4` zwJ8xwqdbbrVK|usyQLIAn1J<-k_EDL+%$?R@CHYs$`gq%Hi*-X3aXD6uGKmwig)=F z?-st&ncM%=x{pq~R^$Ta@V z7yXwU8g51Zgjs6-&9XrAu1#{DMU-djkEtTvD>3r#dR2>3BmbZ~{MFSOG)Ddv^hm|W zwM|72pfD+C4pAgNJGC$z|BfXJUqkX4^Uvg|3rWA`7C6*GoH;De`^&ZW%5$BlNyqD# zqNp{;Ix%j$R_Klv+K{7_G#tR-{<7&xuQP)UU=oQaQbmchD zG;_#*7Zi!|9OXG6lE1@fOWxa&5|FlJ!wu0Bjl|Hmj=RlYvJZnU<@>zYGC$SE|LfH( z0l9nFhEDqZdPF+ktLq)~cOy5f*SqV{mJK`ggizZl;0dXZMo2APBe_k}V|43iK3dZ)dJh@9&j%6L#{PJoFZQ?*s{5h@?;0*uJs!b- z3Bo|;tu*PM5y8)B`i=UYj>uuRU=kfKgREXWf@OjD^R^IcOEPcEQl)~U9r$ux?-6wx zsfm>bQt`tu--W&}FTpBT&8T$!%8IZoES3F{B9v*VyfH}csLxl|9o2!~9DvN>)zc{f z`pxp|lNe&mRpz>Ae`i;{){@bcfRRTf1wu zb!yVKNEKXlJH9Ruu@7PH9;o-z|0PrDlXIQeLunaR)ZjNL2yypQseE^bEHZ05SqJGV zqZiElo*=!4o`Iqa(&P2-WRliWtiH6o%*|M~ma5wiliDVmrM6Wnqy5diOE9#oY0p!F z_5LF}4X28M*#WP6qC3Wv0wacfCQ?0u-ur24cc0|?RHr0gC(3#ge>Yf*!n4q_4q#ohZFpL}3^A2j<@=uWiir()c5s^6YG$=|N42EWKVLH6o5b3K9wNC35z9 zSGlo81iB%=dB9gN7P~j%+KZnS&d;{eqxIFvqDHI(gpTHIeO;4|ovIah!({XPM8?uT z2oufIdJAeDqf9KE&|>c*oIBsR;&!%pt}L0)>@tm_Fe;4ZX>6bA%V)4k}7phf(C zCjNYDy+`uD8)|(q9|M`?S%MqMQRsznD>&=>0j@gff^HJ-7AX zkG0Wzw;zDuQgrlIYDUc{uC*KaZ13ogbvV)~V1V^0p55|#4GJ(LlAk>a?)e&qtgnx2H$^W=vdEKhQWIU3@~xIqF_@W&0AFU)f6{`%R<2eEeBZ#gNetJH*H<;#v5oH1o0WrI zO#zMGgRb`-L)KHI1t0lm5Pb~Uxs$S!;Wn(J^Z0OmL_qRC8J{1n-!{125$rO=2gshI 
zYBv?7qjjXn&(%3x;&XMGu0i-za35lOXbV*#Qu9W>*PEXX*KctQMu4wD+y)x=+;z5J z-GZe(ic2-(=}Jx9T_ike`_;<>_%xF~E8RMgG{Cf)HUI@KK5$1~Z)88Z4F46{d;1G~ zI_OeZic!UtP)}|Brf?o<)_d{N4tnnq_LqrChyL2rUBA%EU&#bCy(<~6x9cFEI`F5= zlAAJ1`r(@eSQk8$k~GVlNqbeq@9mjjxF^1W3Uy_50lxKSl6gu_fw`!c3Qs+Z^}3RNg-$a=Rg*;v`{8MiftQoT z)3raNq%R8APHpK-8`O2e&bStY4$1CVWLJ#CA;RQt;|y5*?+1y)v}Rh}L9>akT~>b25aFggR;T{zsYqo>bTm_WEXA}Y$<*%ld2!b=k< z@G&%v%&9?yfu}P|dZZ*x!!@FcMunuCQZC@?y-$(_Iiuvpv$2Jbm{~IQ=qNUy2tEv0}WvLj%}Z$<8`KaQTi&q-HrVBC_Pr+AH`pb!6^Cd z9y~c(Uln+B4?F-Tb=6mE@%-Da7zy5wNhS$QiYI|gUgp#Sd7@P>($~iEx2$;P4(cQ& zY;hzphEI;wV|Y}IK1a{#&R4}iqD@CB5!jt1MsRBvJ&;e2)gRS|cjMpUY$W}c2q|$k zR;2P|R&+PhtvI2aY{dyU)HS)P-IL3rBYN>})X?9g)irr;n(e+R^4K)r;bu71Y{lLX z&sD~)@%rl>{$`ao%Ax~!zXGNb&x?QZ@8b0yk;x(eOIE1-rWZ%KEm413|N9O6NTS|F z-}ztuW1_w>Qa*AG%kM+NW(PTE zJRVM$ya=Q3u)gG9Cj9ve5w`%R@$}rxQ^sR@bL3h6$ap=v%e&7q^tfgmrDkdu^p^0n z3)=@g6twnP{`z=5zQf91SG|jtIHm0OwhaPlQ#3S6ZBj4 z^|p+IF%OW>V=*G=yFgL4*)y^u+ZUYjU`dUJO&1eX{_gMD&R)^p+m%$;IMRP@qAC> zXK_P#zNf#N!9$$(l-jcpEegdk7@j9csB^NH$7SoWmWRQ|R!gc_Ec_;TMp49X>|Q=O zTTdRh?rFRca78l}bw~BWL_V@6=wYy^u2G`3w7lmIdlBGC5E;vFQ{_e2na`xaYAKLS z0vZHrf(Ad$f63M@+84ad-FlDUKnmH-2i>jTu2=oS7vHTvtaS?nxF)Cp6kT0S(2t6$LV+;HoNBt#Z@_cZ1maKy#{B%r zuCQk|AfO~8(CXu&XAgVS$3)D-QOF|0{fxZRnT2RhchO3_9WBWlIV}>=U2j~5+JWMQ zbb8MWBS3HDosqoHB((blk$n0jtde9#@sde;*B+Chk6zMmIx*3);(jx-7&Kl4-~~B% z(BLTk{v^z~ezNd06es?#;>7sl;5-Qt$8F*1_v$^Z_gg3(+yi?<&a}m5N4dd8zH#mj z8gAiF-m8z$7p~x6+^ctu`eFs7FD+(9I}Vo3<)=|m^qezKkre(RHrQPB-VLHB%; zJ0|P1^}|o_os;!j2VQ*|{r8GmEu|#rH;`vrrMQBA1hMj`MN!LsV5UYBPNiy>!xprK zR3N0@f11Zm(fe68L&)^j_Yn){*91j8Arq(x3V(vzu`}M)jGwUb@4%Bj7szWpXki58SChxCecE->E(IzKs1k)S; z33}S2M=Q1c;04_Yp6m!}`l8od5fbS6UbNZ}9CMC7;^@HY!9nHFW_qd`=d}dACgj~g zTLi+25hB_~jKkf;-6;r-uRi~KSBIUMZE>RQT*G*e>d?TikkMuFBWu;_#Z3&r92g`H@;f@ ze<=8C1wCI2+ijnwPt&4ljldiKk@){u{C_I`9}4GhPuF`nciv7~> zXT!!zc4p^oO;;$CJVjUMUQuh%cx<{uS#GqFU{4XoF-|$#HCUzI&;A9w^1eTgdbrA6t-Q3sWKDg z_>^DXPBKM4ze|)U8c{v5CxdODAbU5Q?2?&>05XYYINf2x5SCnJg_zih?0N4e@w7yr zg{Dju16kS}aV83aeim(6IJ4^;WqqaFB7ss0^ku-krk4B{P+}^>;v!D_{W%D?o;l%X zBvFI|@IzS{(N*DIo;F(_tmpm2m(NCbKk5qKKU?pq2mHj(&xX~Z)fFB#N007wY8BmK z;saQ&uqyI+_c>g9`D=4@YuApr(4(HDGxF0v_MwbErV>;PsY5{zt>R1P=$%cc+=zqi z6nA3G`?Z_VdD>mKV(53&bI^myz{F}tP0)JC(2ruTck}aebc<<%A>Q38-arv=LQ}j> zhIrn!yN}P+`{UcB0cL0AeB3hlJ(er+8HI_8?@*ZWpbQ_NFh*gB!V5~SUf}_Sxk}#? z%D-6QREcu?5oI8FmQ3KZim*oc_bLBarI8|NIf0f`||Q89qqKWf9W#7b^xDfzTDF z1bvFbTlh2PN;zNna^){qn4@r&%D`sjAF8lHVNaka|E`J=q3{!hwF-k3zNK)T!Y5RQ zUr~5W;g1S^8SJV2-4u3IIAV@2>wl_ZTvRzesjybzE`^DzByyDh6_pXD;`*41#>pLj7M(x-R-9d1ZT(W7+p;(7O{%vhMe z;K3!s7R{VFV*%e%qDS+#>-1-LSFY0w19#_b&?^FV=RKo0=({tY)o;;u$2_lRYW(d@ zderXM%WyQOHjwu!*Q5Bk7xX`Pc)9*7cU0&xyCW*}d$rvS+w{aBaf6l(kQY+eLtdEe zMu~QXbv>oO@sey+=3dgzE=zx9Z|QHaDY=0Xt+z_FrARcVN^G!5tQ?^^MdPzMBqK** zl1TnfKKNcrWkq{2=N^XqF(Ph&F#X^=`kpF+ZO^tm|o zBPs_GX{<`W|4GjF|3Z#R=5n&mvgOnGuN9JD6$w(R`2AJFBv$~r|1Ex>R|aUmw+ZJ^U8M^2rgN8*26oV*Ji759MRu(0e$uQTJB(=|>8wgfO3Ab@a}d z`jKL@LC&anvq6qxY=_^b;u+eA zI1F+WFB^UhBhEvT6QLf}5lu>1$F*2_=H>`%&p-!QV22q_^eO zU(PmW5b67Ru$T#cg_OgpW`ch$SMV2dSc~`NPxOdM@aHH_(*@J@@0sWqr24Ld-`Y|T z{w#wWWiSW+|LuyY>KgDROP`#to%u?}*ENW4bh|;&mqSV@g+7I;epK|i*OBunG|JVX zMC%p9r;juh_)FR;il!KhmYoU-wyY8xK7Ev-on!qOB)NL{MU_zb`vhAiOi?!7 zd2PbilVfB4cR7leo7r5B7wpt6&YV%rxqsDH=S}jrq`%5F6;BA{s)6+TD>*6#J31ea zoGt&)f((MiLfv1>QNooiGDuM#iGOtxAr&Wf{;-Thyy+Mx>qCYOVhroE4BJZCb_8y3 z7RVU2oCe76LD(Kc7(e=!J})3o7c;7e(7NB`$f+JkbF4_-@NYeo=e?x|VoByc@`bg@ z!*$ru57_j_W21-_IH(MM->=d@6R!aBz8{&6iMGQP+xW;mdS_C{`r{@usI$QD`z07? 
z;x+z-I@X`mMYr9A@B+laWDeQe__iK+d#ORicF-+Vl3I#J)l5HQE(?^}YJ^`#T+IFG zstcjFlX-wYn>Nr&_v5jVok`pmXjBIk2&P;GgbdWV^Buio2ZNRbR8R3h9p8_8OOFXH zct~e?2*n&na)xdP#|#;a>19?8|2c?9aa;cC6<_FVYk>v_I2eSh;u&3RP)12!uNWvB z#vf(G_cLCfIZ@O?ngik4h=bXXPpk0dIPbg@Q_)uChPJ8Qc0{`~vnZYCb?D@vF3wb`c4 zd;qs1{2}BYJ{Di18Jsy8*!E_;dqf(QM&9`3WHB(PYQ~g; z)!^CCF!~nA%rxGI%pf7@k1L?eoXpYLN#e~SnYaPOtGXYo@#?@+^-Xe##G|#f z3p8g^k}`b$y;bJP@8{3e=slv#dNXzm&Y{Bne*R^R-f@7nFJnF7idA@X zKk_mi1K-48=I8SswfX=jm0cz7l2~&f?)w53a$*T9L~B@r+CL3lnkkr>281*Q*9GPU z*z~N}Ae{x_nsV&ezzb)^6e~6?kcFKNV?hT(1@AHNZNz6(mSHV;XlMSoP8QWYn6-Vq zRYOo-U`{}mZlgv~c9YKN9;C`IQ;ng&0}TB30Eyo&I+#T#hOyAGA$hHGg0sxHkOIV` zuu2uDS~Vyfu0UB~4M1EgYgB&r9Xd4{C3W~tV>E2;4&0M26F3Q^))q8Uj+EkoC`UFL zNO9%^QKl>^B9KKShOpqVdLCsW3z?uF&y;$i`UJBWZBk&Xx}fTS96d{vcsG>zEg|d{ z=Y{OR_|ta>cFeygFeGtmVC85$ZpNU9FvF!=sJWk-RdyhQJcB%IA>Xb?DC>bY`ePGu z3p^3p*rp+*F1Q-ihD0f23958Df~Mb{(=gDa;(7rD{q_RMzjeCwzXK%y>ocVPeIWTY zQy?=<4`ZeUZSzEXp!PizR|IFnoL}(9w-x>#;0+}nR8Y9lk2ubzNKwPWSpWRhtk3jS z?54z(ELwYvHC+SlZfX|{SMUugkTPLMRnad4NK`hE3Vu2eA`Ay+u!LDs);wEQ;lVj_ zf6S-zWL26ULWz>_%M183NA&2?R7~{=vi@F`{U*!Kmr9~~g|L|E?U-e}8TD;qA+HDK zQKl*(KNdPDQ#k6Hg+B}re*2d2XO3b~gJgQib6oExbW|gp ze#zUU%3_ozJ%YuI z@4zhRc|(5@_xrfdcAP!})4jG}}Nrj)jE%~>bq zuPl*KK>F35^2MZx=8KctrUJ?Z<`(_(uWhw+l!Q%_GV`=m4f5@Q*sc zx>Go7sFhb|J}kT}DokU6Q_+yoh_?6O8&Bxnoaa(>)*8Vyc~1c5;7XE!b~xW}gW{3@ z)O(Vf51s}xs*TRt43I;H?I=+(#u*>NOo^>9)=|&2>A|cK@#wqBNwAT`BA)UGfWc>f zj5l5C5V+tdV{?^&(XY|K7=tb(q#lyN%=$j|{lJwe`DT7GunJ~*$1n&y?oYItpJXtI zt3#3^Mi) zqWug}gt%H0az{nTcYcD6skV<8TMAdHl5XzT(KQ4!KaW56iQb7veyaE5p*6a7P~jJh z9fq@~QO41VF0{cj3>A;TR`W1s@bkZYs&^e=J%v67Zj*@1SaUy~g{RR9QN?~fe-H6S zRDQ|WRXC4|XY^}mEdBy9D9CAG>;*WhLl&&x&->Qv9s9Dcv4a#YLxnf@LlM&V z^Z0^#Oan?G|0P_hisSe5SL^ZBt*`6#PEOkojE#XiO+un}!iE1?@EVC1g6Hc2Gb6tT zE+k#aooFrmK^`n56Ax{g!j1lBaS%@FH0gyOX}3~|^T$5ZI|kE@S=fU=^O-*2_7hNQ zhmh}At!~0w&dFXR4@h0t&)^3@$ao_{2kEGy-Y>B*n_ zvy?3ZlK#F3YpFlmpx<^K{go~B+YS0RHA?+;K+-?|I{Ixr_|KnXxYr1ouD_rnppOdJ z=ofcMegOIfx{uKrwGU$Lr-tW+<%DLnwV^x=ogM3oiSZ?wUKDJ>Z7|nqvmjUu4IwHk zK0lO|UdFHlE*C4-NN;#EM$3rQONthQh_)-}^Wn;5M1!9XI;oG0F8mK;|AJ!~@(`|I@OcGPS~oAqNj(m=<#8gorbdL zCptE$4mnFKm7ox)2QQ4nEEmo`8ai7Cv)1`|3L=cA=*_~AUNb)xB$iuB<1vhaE2bq5 z@SH*DOo)N+(X+Lr6cL;IaZxQJLdK}5_3{y4W3-K} zIBaklYDdNQ`}t#EqkqEA9QF}hy$HuOqDeO6wGCu#sU}G$(LzSX;$sBiG}mYFa6LF0e_4Bx_h9$})`qa87SGlnQ85Q|4r%zT)^-p7iep z((lQ|vNB!;Qs#KV4|>c{nmFY_!wZlgZk?)6(lX&;DY*?uzoAQ{hS5Nh-T0#()0GrB zpg@OA_}^qe+9NW;-H-5Ff5aH85dMvD9@x%fp~&y&i+{u&NS9$%m(ieV^*11CiSx)t zcngqzzdt7NkwQ7*n7W+5hcv{T{Bkd9u8jS6xy;A(A}Q-E;%(0AU2mmy>L4?0r3_yW zq~FKN-*=Vt-vy)#+@}2XtN7Hj`XDi1-;Vec=DV4wpgOIU@-y5r{wW|)mw_a!#e`xZ zt376{?J&VKvzRB)fj30dwXZhkwTo?o^+Pi=k8M8rro0xaw6bvRP~#L0n=d@*n)W^sfO@2HJIN%esw^W?d64ti9HrwQX$O(5jjifaN?dTP2hu6KYE5 z-ZB~gt}Ub;x5Y&eM7DcD+#TI0mo`NH@C7M&_g0yqg+QX7-|CkOf!rPCQf?-Yej9*9 zy;jb9{-jo38hYxie}$BD0qM7`f=~TP@0LdCRUw=%D$kHv@(V%y&D$p9z6+$^w?Lx8 z{~`TD{(%*eJN0*wB4);7#KKAd&N*fMNv>&8|MSfb9;b4a;eU00z;CQB`= z^kqp$ydo3p3k1cTNN5~IvZzR&G3i+Q%1G}>9=Hu%;{4=lBs@8-CA!*{@6EA zFhA?D{pg;~!f3dg880v*kc2mg18K3u10;W^ccjug-qCpH^LkfjpHS8ZlDG z+t|c#7WqWGMpHwWSfo#YKKf0qmhoQ)((hg6Z+BxTyAkob;~u#I_d|y#OpRd;ZL3@7 zg^1FrRq>zPE8~9)q@QJ<^tYW1f_?@2G`{e>9(w~_MHynf4rZJEQflM@DKz7N#s@a) zvAHBv0HLwqw>l_=2GmHQOdwrqRD5gJNsD1^8(P=NyN2vCa+$3f%Ej#OLTA=}!Gc$lkRfu;QG8h)&KS8l4>ND0j3I96`x<*4&KS2j4+GvN8SHd2 z=DPBj#y*5IMsCi-LWV)+dV{fAl!~WLmPp74vfgY!sy8tW7^~;R#=wqnx-IL#5C5jO z?QBQzBXCyiL8NLj`1$$Y^vE;=0|K;k(N-*j82OY^>p(0|bi=MZ+=t|U@Px+xfZL9H z56I8{uE%!p1$O=fa~im6kH&Afs7H131r&h!Jlr|N0`f5z^~f>4z%vMl_*7$k&d5t^ zO~yftM@?9fF^z4TM{?9GZm!qZt)FS^ZpabNhb#2^`5PDYC2h?or4n2_l1m1e&5fxm&Yhqe;qkr!ke0>#UFWFZ{4;EOqCIA7je?g 
z8~Pb_78rC^VJ7eU-II&Thlz!3_`csx@MsT5_EbM_(=2SQ0dKp3H+nYqEP-b;=*yUg zw}x_{u*|@mI}d$6c%{?)3fF;G{R?_u1FsT12Y5*a-iP4jf>&tZ{R&Q6n6hhEjY@}XDt z-hAm*-NGxc>QC{*#>#YEE z-aP^W*0r6%@PbzUgNx?QWxL-92$-q!A;AGDJSsS#8+Qf=jN3h^RlrdVbJc=?>LQGZsa*1D{uP8So5TBgp{YkHe%8w11D0F7_k+8IK7j_74$u=m8TD zXu@bX2WUd8;#+$-u|xvTy$G-a{|+}Dw0~ogE;cOv0t;Co_=K0>HiPzWKGMaerTAXh zR|7s_Z@8172?xOa7qovf5^S2_mG*`K!HL~LL~Mjh0`1?Cq>G(O5q+^K1AM}0xB}4r z-AB6Et+WOFQt%01fZGn5@I|;P(3QY_aQ7k;b--^F-3W9p>L-i35cn}%9U?RUAM1}R z0BzjHMBACB*irY`mXrfr3`bj+2z}d?2xp{XBLMh>fg_MGXu`5I%$z_I9(ABZhv3;< z2VzvZ%tRJ2Via->Av^FA9Mw0=z-MCNh^_`coGIx-;7g+!vty%=agz^i{>dDN(gL4w zHryuAgbz+d34taon~M6UfHHV~pMm9Y(9JgC%=B-zp-ndrKL9f?HsTQW%SBrOP5262 zKIlr|2Mb_=2HgN`w+N+$ttD3AsU@gl&<(&&g(wN)J3*|0qdh5vhu~;03gPx;xFbLl zR>EBXP52&MAa-^TUV@7NO&Gfz4G**p_~%NL3TU>gGRfA9XGn}&;9^(!V z<5m&cLh?5^#>$W(Tn={`G~s5r5W6n$FlY;C!r$O*pxHYAh7j5q^8N<21MmsYz%2#c zxB>V7?oF@{!{gu7fhC2faRtx9t_D6~)N@EI8N~(cwgriSwgD%XihRz50Yb9fL(EiO6P7LN=V{9v2F#-sGcpX;0h|ougW&2Q<0N7X z+-Z2SfZO4y>opp~?RAXZ4|f4Fgp1%pMxbv1`i$a)@55QaC;SR75wy_=PR8**a4Fyu z-thtIKN}vSRh+EjZn#|V37>~s3c3u~>q8VJXd59MWwaF75Ibq2O#twi_ z=z*&TP53kUK{uYj{r}C!=%2|0yzx^s7Fexqz(Jp3A2w(^@b7Tqxd1HvTxNohEa;RW zqa~fJ?dDVHClHsg6WmR;v3EB zWMSq+)NZe+?^Npxm&_=s6*`uHRmEmcn3)~9Y3fgu7SNS_K1lmjpM~_-U>qVIu;TSmL zX8{)~y3h&YARINWI$-Zh=%Gj{FcXeicn)wKTo(9vW1JmPe0G_!NH|hzG+>h?#_TUNUuo7j+ouGSUCD04(vfhBX`aaTc(mEhhXB zsswHc#m+*y<$yE8C5=-A*af&O$Y5U?I|P>p8m9@c!R=&RoD{%H;6w)nT+#uyPAWNI zb|=_vA&*l7Shy1=@lEjf&EdK*lOF>A0QiKb;Oar+Gy!%x8YUN_fsW2ts~U|=1Gg*M zjPLN4#lqYLKH&r1G%<|J11^q(@fUofDcni+^RM6>2q0V;k3FlP30-hYK^x86Msqls z$t^vQAY=$r;C6y0Tm*LjG~pv~^`LPe1nYhyE&wzRgJ3ua!pY1qVf)S72p0luupBsG-Qlsa59hY8Gtn&@Ck1jh*APgxC*Wc^s0f_x_(*( zGy-q96}dtHj+$UNW`cP@6Pi*r+<(X@&}Tn4+Q5zWak7&KjnLS=kSDwWE)O)}X1GGo zg#Uyqb;9E}j_bl`-rj*?1fQ@sTrFs$ahwe2H>D%v;1iPZ`!Z<15ggSA-uG~Euvrs+ z4TE?RXrm#V4CKz=GSDIsKsX~4IR%{uj2w-eg2qV@EMbgn$v6#y_0B>uf{zm)*jI4W z`QXq8SlTtw=i}f9HgPQWd5{b+dz@;3K%Wtv@Z||;6L+HjC!*tBC|(%Ft-xY9dU%uq z_f5nt2fp7ht~WE9lkwbiHxh>o;Z1Nkpb4|!@<0;~y$2Tq+7A2-jykah;Hb%{e{tVW z*4W2zA^=zmGx<}9PzRg`i+3aFJYd%xnK>(P?)_*Z;n0Sg|1BG7&_E2hxk znIDBb_=HI?qnhy!wH>%(xrWk2DFXjdgsTT#3GB5R%hmLrHE?|i>R%K!h(#{k%M+0R zFmfHP473$^pcIio*8%zC^6JZg-)%-~2EP$_`8ip~WzTEup%+k<+0X+#xE1vex(?V_ zj*bpA;ob_=J@G4W|Cjy)xxO1Y1uoc*D+OH$ocbc}QP4(H8=2tVe+j+bJs4yH!(K+F zL7RaEub`oVE(Feg19cBN4>;=`4Rs9~xbt0H*d#pSf!|i6-AqFN-w0ykJ~TW8lmVYS zfZP!c{HO*A+>6En^qGMO-#v^3!6!Tgx0YmpK0^@U3ApXx6MEpPK@(nrI{}&c&_?488G7!AL1rlUgp1(DgC;D3%LVN>*uZ!O zp8FUs0DLDATi{CJAtd8WIcTGih73B}j-&X%CwvR;C}_gNa1EgAfW7K8F_5tV-EiXR z2P7j)BV>%m7P8j7`2p%bVTw)!`w-3nny~waXgHt=SHl&6E(7j`D+PT3$Uc$@gaDJ_ zs3S@Nx)r|^m^1FALEt=9SBT-qs%3I?8NP-JmY~|6oVmc9lh-cs^ zvCY6bIEqZT<#RlOAyW>F`~vj}n#?v6;YfzeD7zI+W|}bAgNTn~u-HsEy1jFW2S;NC zLY%oJ=mS9KqhF#(p>#9w`>#;?kzgk+N=K4G6T$gmvaQbTTPwF#ZLQu~yOpxg(3FXgaD=TuN+3)sDIy^*fNYJo%1pLD{CV>axZ%%NEC$f-T#(py(R@A9_HKO2N#!D+;DwQBZN#_<|W%UNYtQyuAGiovKHE z<7(}@@QOQ9AEW*IJND=Igw7eayVdpb+Xt%awA+tX*DG)D!}ZO}?!QB>x3*q+dk)vW z3$MImz@1mzae3!u>iOlJx8Bi@^2Uer zb4ujaHMGO+`nfbvo$YFuYe}}tJXD@I|7x>cPg36g7q_c6Rkn|7wJw7s}K0T)ah#Hn$I=kpAWJ*UW2(7 z9>4<+6%FLty%${b@wFOIjghgsh60z}yJ4yC!e3YTC-`Yj5M9Z08B4+5aL3P_HS=N~ zDcpfWnIl)xzTl=zzY;*fr3;)Yxa0PM8_CoEU;H!UdHp?Z*Y@*2@8ep_KYmY@-=Uu0 ztL68Ip5Mm~=J(Hge#hkZP=537hjQ<6p6U8>UWE)+$i4hCW1+%aGd7{n6N*(7=7;!M zF#ep0X7rO1)B47YM+zrBD^rv7o>}O2nbv0exnwdKC|zyFB83GZD`8r%nO1JnJ3t;s zSC(9O!FlP*|Eu~xK2q2Jn|;@Rn>^mTe&2YyT3}jROl!FrAD_#ZA3O%g*Z}F`erD`U zicI?Vw*+J7f8-Sd*<(y*`bn9Ys3 zIhjGzYV#*4i>@roXe5QdEHi%R5`l_a)l$>NmDx1CPN8z%A(g7~sJif^B0g7cD`N8w z330Vg$x|=ZaG&(fJzS8qMj@L1-o;O+uX0JX$9(mduAXXI<2`2atLB_tB7bM*hb(`$ 
z8C`x%u(-{P%We0tYYq)sZT5*D_i>drn=!W;8wJIV^605ddb5~=bPK8a&(0T`i0<;t z{JFH0T>hgC93c(7x}BLzjMDRx7?p2;j#kE^C?@IsvqF4@Fo|Z)1@mmG+u?fHYPAIboL?^us>Wz3#!%;#g?$%K^xLQYDC6kc!PT-lDh&D)H_-9%< zZwy3}#YROKYK<=11f>0?%wc159eC4CyVubvf4s6!(wiKry3{IkZJL%0QaDk_D1$$Q z2Vb@y27i#6X>}*P6M1i5bL4=Yri3pPQwW8`uCLLYA|;T@V!)mGi$2?+`?e**{AG#E zNhT}vqut5KP}8#HVbZ%~nA_FZeC>dx@=#9`XjIdcrU&U?{yAElZ3#=iV)8PiXUuz! zo6?=IeO>=>V{=7(baupDVYMc`{Zz$9QhpaLAW_(PoX^o@Kvm(LauDU`%7YL~J zrzS4JST4+KjbR|UTqg06BPtPAWdw;)%G@zyV|jFbVVTg6FGU30GK7R%&0rc5 z-sVLGtcWE&o0{I}%k)CfhyqfB3|VKFAuDSA)^^4!1GT;h4)0Uv@khVOs@uLNmg!0AGG1c+s(!|Z#=tk^^HYl`P)HjwP}Pp zLe?rfKPf6}%%qU}YqR+6V~ZG+&%Ah*nUy6kpyU+>q`Dy?YlTuKL6KD5!U4sbO-tG> z{?h3hQbKhh-Nhc_bmRq7_uq`qk6FtwpshWg{aWULRf1H@o6J~GRU zWf@QR*=}N6rv_2CpOS{#PogZvzQVQz4!=6w3)?zOtGHL#HhL3W0t(b*C%wgt7jbJm zz1e8|5KZ;Gu;}(Txnm5c2)?+Up>$;680N=$?JJ0}!fKW`=i$EcAbg5xRh?m$zdQ>Q z%1@sIra#=E7&X-ZnCGD5{0LRR3EaQX7EF>-_SDfK*jOt^S?h(QFC&1c4k47S_973y+^ z|24Gg48Ji8gek14@iSawu4)Kd#G*c{#W6{YxecoQ<&*`x&Egg_+N|uJtEVNsg;dy- zY8$LslR;}JjN_tYhS{zQ2g#fCF8C0@`Kj(1wrfm$-S5v`cBzf zLbd3BMYvEky6j?)>3&tFy}$8vme$Iq#3A2a0pvH@qoAZh4qLuUA z=Z>_fD6E&EZ(*=?{Ysm+R|#iYXZuX6kC^?#xSxdZ2)Ubs<;}A^N$-9{5_}8Fti<-? z1TDMfIbZ7EOR)GT8X+GJsXzBjA@1FC@gXI+rsE~7cQ0o%~{#N zp^Il9;O_CQg>3hDK4s76ihoKnJh&~`{LhF+#~pO$C~^XVfq5BKYYxw<`&r0}>Up_M z(Ohh>BJP-{q8Pz4sZ?X|?)WUt43m_h1V0$_8mXe;+N)fVoA&S==sFb3 zY@;h6Kv5Awdy|CX%97qAh^xRkgQ7P6eAMm}Ipf6Uj?=U!C89{6%;N4|^i)O!#G$8; zAyc!wyYA(N>#uSdw>Nk6S%O1P^+G$|H#PR^nTYC49#{AiqfHG8b1!A0Z>j5BnzXt) zjEKhOP|nRnYK&L7dDI(f?lWz4_f!fXcY1A+)<#mB0iG%LL2x}4Rs@JjCtF6Ag)o` z?|f^JTYM{Of{iiM<*P`r&6iTm6U0+SUR(@Cin{<Yk84!K4|z(z9A_x!KpC9) zu;LQovtY8~>(JPx+vI1+-(3_Bnwapw zCy7j{X7+p+uE4a15i(19SPE=nfaI-0xZp?8cF!Yq=h*kt)2<5<8?pMbg4TApoKT2u z68V6c%3n2ZK0ugHNh4_BCbN8lF+ZUA@m0ZRiTUcsCQM)6c6AO^(J{b`pR}J@4tb3H zGQ{&TbIxuRR5{*^k2}UpG*d~^EXOV$-DW?q8H$vt6LA)b*B%p$Zfg(5Mt663@rfdN z#3Jk{Fd?h6!yPEyi7_0E4?I3pysC7gz33w$=Y#M9urYH(#mntCzK{|_kogPj*8dpN z`ISUmE~NfoJp1ra>=ZLLCYRfjL)v+e!NtbF^~&3sp7N8D-XRd(THKebkHP*a-6*}X zR)>pQ>^$jG>_V{F`<2M^;-8qN8&NG!Xh)2V3s=}@L-5!yD%hzwz=@%KUb<0KMK$q@ zfr_AV9{*Ozt)j37zAB|G>8%?eR11ZT&Na*H^Jf{RWzyTB-(Y#w)!=Tu*-&5TiVPtx z3cLaVqdozFVcx7mMV(J|+?C^PN^@AL%~C10M&va86j4_TVcU=VTW~9;Q?gQxuf~iW zcBu48cXO%gjD$9Q5`C4@BKx)uvGly(t}n#DN4pVN;lWA5P)n^2M&vaTXFd_!SnC_WofU2jHL8)o^MYla2sJshG4F;w1p z%^8w?T%RQCe91qW4czlz1G$SSWG1rbIwSzic1&I>SToUTendF-$*dXO6l% zhDZ@OYqP&s!|w*E$x!S@4<4%-yC5Io5sdEk%skVy4kvVGkM9&(Iwz}QLE#Cl6(?k& zqMe8$QXg{~>l&LmK1x~E*HAIm>KglP>h8!-dS7Ig(ABTo1F5E`irDE&gwlVZORxY2 zWNy$lIN#@AFnNW2J?xXg<0*TX+Eel9gGIva6^}kZCM}*560M;FB~;1e^^WJN7%3)l zmR2%*g>NPPEAnOrZ6v+(`zv{4UhMx4A+o)o8xr_|(Ect7?N@^Z`{R%H8P7jb=oYJC zk9fXguUK1J@wCuy3^K0_#fe5$ZdgI@krEk8dW-f`Lp?Tgs3lGiejoVSE_!7ze7(=u zz}E)~nec)W$!{3$ihv)aX!5AG+&zph<|Xs_=6yU@_#;~GQIWJFk?pap7(sa_6fe)C z*XFt+{m?Ir1B9D(s_P4d)m$r#W9JoC>YLvc`t;51g(cjG{qrF> zkdbL$_td`Y9lQc@W{;sN;dG?W~wgKL3V%q?5xt& zN$;cCjEnwF;&B5G@diK_aBb&Gld1X^rs`Wn%ZuelAplF{25Tg+RBj5mX`agUSM#12 zkTdP#gHNuQYRkQU&MNhbuvLM-<>*#!mKARoD0keJOKe=3 zX*K3UnUI^wtw-PblvSMc9!%YW2b!!2S++>n>TvOA!Itl{!;POt>VxqcbDN@Pxatmd zRv>L&klzEG-<$cJ^nSrq4(~@{4&6z5S5Rnp#&jFU1`0%xhau-3fS^qVJS*gt^ezHK z_Iq7wyfZS?7&Z@Bm&+f$G1sLdM8RmQhi7%4IQ<_^HNh7j zen|a0E`(ZOyoCKj1CW{T#tmNzlMew=_x?4lc@+p{8*B_utwVk&}B(@2ZZNMP!# z`qy$zF%t?4g2k@|(R5uOQ%kU=yASq~*|PnNkh>+=@^L@E)eZ)k$Qy;|Su_ zIY^;!G3L;!V3%#S?CQgVV9RG`1e5DR?k|$wfA$fcW>sc2{T7~1hNgD!!PCi*yL)Ur z`|42qq*XF{8BbZzjf$tXkwjdZBY2w|nDj@!VA9VjMSJ3B{yNwG>f}CPc3i5BDXBL0 zmK#ZvArvRftkHy_M;FY_R>Elb=n{1Y^@gO$b!Pl*)&lX&#k#z~hTsIZ=y8OAAlX{3 zFb=YpNl%T&KOk52x&ysrl|oE=YqUp3%5n!cPDxR!q$uhAYnFqPr=;V8lYREL%`9fM 
zY7jGAB(+qFm3yl6r&d-jqwjV01oLJNRi?%sfjpGQdC=fmd&Vn$M5H+at{=@qAxOV4 zeV!n7o2+}PdEYKmDEhUwOfKfDR17BmC`5M23h@byL z{{{ZZ{_~y4pRWpoIa9^SqA%g8h0r_7txYL6E2nBJ_*9yyfyoJ5?@D4pwOYq`IfQkI z6)F-zGGnwDwjLz{?2;A7sRIjz7v?C&nEJW8Zxy!i$4)A=T3hUXr7b~gQ0d05Uk0sl zxt716e8bEGQ~X-I!Jlj%X;sXjbk-Nbwvbv&L33X0 z(iDZ}?A(LPz7@|Q9)OTVlc7RkQZ1zd9$s?d4V$wc-Rq!$Sgc5-=ao53UdD6I}ITo0Z2-N0M5;^q7 z>CvS7#zE0!cHM6ifk`42JI>{uNgd;-whF?P8k^eIGY|gQz`~a8{Yu-GioEN3_tlT% z*}pDsiwyM_ua6A!m#?oon2I1PSBHn2A#&4?8_5!);>tvzggXaEMQS|;9GxM^Ql`Xz zgQKz(jtcDKOECJaarv>Vq<7=@A|t4EyHu}h-P>kyx0uMD;?`hRMWH8%qRX9~69oIY zL4tqz`kr9-==^9IGDYytkAlvS(SmTt)PO}N$-)@SUp=+YK5-S>Ph)CWW5?ND@3wrL zGi~`L!qlU4%gxZND}vSqm@kU{i9=-5S$IEJ0xB*>fLUFMgN0cpi7CU+_CyXbJQz^M z;67Lfggw#pXR=u1&&}@`Ac!cjuRS9GJS7Y82`1xadb`9-jE6Va+rCuJX7VZL_jQ|e z_LBBDF+f<47AvG6fXcEy55@NPqt#+Gvn6PChY;ww6;lrvY=*3tu{P$c&{}bY6#3^g zYpvMVkMOhlBLV!SE4s*tAP>YcOROvez#+!42B9$F|K(I}wzTJ1*|`XTPXpE)!It-P zLT=h(Wf^S$I%wgoV2z&a*F;{r(GfQ-=^2gnMe_DDMb?b<1dGS!nq8j<-Mogl4@%5c z*4Z&xa6}o`3codKsK0zvew{ZqCOg(|&&TRF1Do2MZlFRp?bj_=#0L4Tv9QzFd_Uyl zFKy}i-0vQrA3LaYOtrhxTHD0WGqFJ-zSw)2qViN#WRRk z%~yj=ewR#64PPd+=g^lj^X4ck4=h{DA+Zx%3bP_ptZ-Befk}Sj@ix{A=bP@A#Rw`7 z|H+8XCSCIf@SreIBYu9mr^NIcFXIh8AUdl?wS6Q-Qyw7sWwTXk6w zdn(;w@#VX=|0groMB(t!W@#*N3*_|7nbw4SkG=g+teWz=^~T&!gpRFiFzi-!Im+NI z>EVwkgIM4m8Gxh{xT|j2+s-*L3Y`}kb@_Tv=Zi2#E4`1t&= zwGv!Q2(Oy;rC{`vd|_HM>0R@+G>94Sp0xZUGcPv4n4)0$7+jEGOh`Z?=4rGChmR?l z?cqU5CdSQVt0*%<8FddBbCcgn=}S&?(1yRz!0QH!r)YX2W$qQPP87^2@@ zIL<%rAeuOchlGpQD`>~9_`<|9l#D3%7a_$kx6@IDwuc3Og0|4q_C4*^kh>igygC>k zO4Gfj>n|TLUB08z)Ab=7gMFteW%AE)D&6Pgi#xb72=xb*JIESDK~T{=cO`nRS2=SX z!Qu@@SJ@hW;qo0l^W}em3BY7~7$p`aE$=)r)hdGwJCn43$f-)~;Z)s(Q$=9F$-}^@ zx*KZ3sakJ8ybS!zSt%;BmpSBUS+VSLmA#`<(?sE6IDT%gB(Yu8G0D0sB>0`yZ^;!6 zBZj%^xA@-0@!3hlIRj+BehS7ydX+-0_W6V+ z{5=W(WcS0A?w4z4AIaFG9a(kngn&?plD>*xe6C7AB&@-@SeMW*Eo8XGe-O7gHGQZk zQw94&#Vx_OPY3&tRMtY9-T9))|qg7_B!H=4V86 zrx&iqRO)1E!qzuot6gQ}eI87s1e1mMT*x|5+;<$)pk8ka7QaldOIPoWoKP^aq7zaG z$IUI_;x~fvk)BJN8tOf+I#OKmOPaM+ih5qs8Rzl45c$hwpQ$4fS{zhP@mjjY+#{tpC61FOR^;GNYG4#g1GUl zG9nZhWf_48dhrYC1@X)>?5{-N7NEsFUxq5$#PtCzuzR>Zj?u~`VN7H%K!ENAh`KsH zcZsqkRqPoPwQhcE;7?hJz&+yV^YfWRU>OFJi|2u@i2$qj>OQ<95!i$g=(1N1m4Szc znX&LKSfAvPu9g4}%o~hl7cvghItTyl98aVu<-Zx8@O*LJ@Z$o1!i^hQAUG-tWnG=W zy)QGM+%Q>o%_nZFN;^RuZdNlE7jD)j`=)K`_rk@>&Dvxi@N&w{O56C7Z`c!_QF({W z^cN%<%DPv1hqX9k^#!Jt^VloYBw?Ds39KGy6F=}k>6>P3@-uvv`A)=0|F(z5Nx3xU z-GTx$<5%S>dN-q|lRH!ienQsO;B@kOJ_$@=J^^C?tZjxLu>X!BJg>R#GT%I`=qUi( zPySZn?{Tv19>@%NW$G)DDPz71Y(ORf!WT*EqV1|3P~4ig zCD^=*tuz?e3zkbOvB?MV83wr=18H$j5XY}#(@KU_p>gwWSFat2Y*CTs*7|VycgEaS z=7P2(W9&yRO5q_49#*GAf0=kV3W1x_7vN)n;Nv`n50W7tsQW~mm+%8n?Mke z(+rOI9cN`57|#Y)SKfPXp}HFnqZ^1{RpHyBXvS0DGd=a~Q|boOIgC2&87Ax*%*bCv zZ(g!P>CM<fgw8CrC)4ANRuLLYbLy$lhEY_nsg&qY(bh?<>N;=YEIq4@pofoUvP! 
zo&ZeI05_%pzG>9}FQx!0G{EE(z-9;FhF>^U9;pFFqyU}~0Q5m`eriJnsEnvYpnV_w zbivCheu6rxmP%MS7GB`+RZ~C~uxBsgv-rd}16G$3J~e&|mcdk!uX6uS-O$n|Vj3kF3oj83S*z^F=H>HkA@@0yIX)0RH~J~&56JUht-?mi1sbQ>Ti6iS-pvunKGLXu?`hJ zPDgLk><)^Fv~A$!zp0mxB8@;9pl11c^e%eAG>HmD6WAThT*uKeD}+)usnid9+b$7W zl4qzWK_*ud0WVgYU=`6)jp18wmIsy^bNfMhg4&mvMf>u}GJESYW^Y<>T5CO7%*^0& zW^jYhNlJc%t&z@b?qftO4|JLUJ(sa@bGdiO>Rkp zeEp6B8I%TD;DCIR2HB`VIu@mBnc{%lng)4XgS4eVMmivU(;&aoAor(1hB+WBWXquH z-Bb-SHx2S#i|%f9x|Y!zXrx~yG6r0!q(@kPfW6JcMzMr^;xl^Vv|3@ z7TOw^hrXTsr*M26`sj6FMth7vi!z6JoP_pESj8{~k1K#*@1a(M>jMIZ!GV$I31Ef= zo)HKiH_L!HfolS#e2+jOS~2>>>^ehWswJbT;0t+DYk2y~wZQO<^T@Lj!!x=-N{F+2 z_%*Rv;Ko(L*w6jjE#@E=o}JiVi-!MY3`uVcDeaFDHD7~xScbPn+n4!`rxV5p+a6%WQjqe*Hlnutb^fX9Z(1@>M)o^Fi#GZ7p4yp>&UoGND5zC0T6ya9=HAOw^ z@n=PqcBDZL)*vUQK^}2HTxw6TFvQL`RV_vuB<6semIirEgM9LrR6~~v$X@hh#-|rP z2rX@3?AVf=X%Zj>w;$wj*K6XDoW&Lc zj7ev?&``Exj09b6Qeq+ohOvkN6&%BDKM0i3h(m%>D8NK#3SIV8kFZW)2jfc`gU3Ld z0mB%4RtbX(52HYqfX=F9AlI2z-BuD)o{>##9`Z4%_ZjE)WQ2KlCFU zzs%mgMG-@vyJe>NJsUS$C~EsB4nUg*c=s*`;Cu(bhi$0dct8U*rvSt(uVX)s| z5eEg5ueK|PexWOUYDpPWweCO(M)A1X{(Y_*`&4<383nIep&o5>Ui^o8@ppv{a>apYeOS|`;HQT+-)B2U z+&hzf;)3i-yKohsj*jTkhd4=zSP(g1w{E56IHgLv%@FhTew@y{MaN! zU=5T|3*&trmQ-V`Bvh=q)SkqdA5EoiT5H4%t~AM&A}W~B@u#50PMz#^5Oz53!(4o2 z=BK9hZ|OssUHP_b20{51iFW`q5y%cn3im2JW&4YMYLXVL6&zFNFy-GzNBq?#yDZn+ zzhx0k6i_rlwQtYdhMe25QVCK{!Up4q8ocIxwuV{6-tW9yfa zsp(tHm$?vzJ?91LSLrW4L^>(5RrJ7adVvjrcTp8QUPzp`Qq+X#HgVASs+O->`N9vt z(Ey}I4v*nlE1H2k&t`pT(-+}A3ODOZJC{V`O1^2r<$nn>;)n)j+q6#meyv39ZbA%v z8X_N&knA=2Y=tTtgE3XHN=4EnR9z`6N`La~=MoKaiFB=T0d;W^PUEtWODL`jnJ|nD z;9bB=6>RNL9klKcFrk^xzRGe{lwIiw8WmsDoXoL)C7)L+pueJu*rU>8?IJhIfI_iH z+vW8{ql{7xn5l?8)y$1Z4b}thkb5M=aSX|R`|B)`W()QMmAt6HVvov;{8H#iDD(*W zDEUfXzSEa%N@;(fOi`lIrch9pP|mU3$(m~kgKp=^58*SUwOW1d;Sa5z}!7>joX?a0?ml`*z zX>%|JMmF;}6puG^sW6^KsH)96V)%VZqDQ!VTjUC(NWFt`+}9{lS)pPldnj(_EwP{r zDDTww7}7;5*GtNMl#4GW@RgJo==;&t*f;SKekhB(mTXI+A!Zr?}M zA?tn_eQMOv9a%=B>_21Sps*r-B@v2sruCwLo3i@VDys)Lh&0Hgspy}SY>JQAFE~l_S0)!=Pb0p$rA^>HH3h&UY8}J zby&J|pLbfF5d@Q~OvWP}lAg$V_g+1r8#8Z}uAAxU3$X@ewX(z)jGyE&TXxfLB(3BF zb}~JS=&Mruq-U}}?;E!GzPN-kmvQr>xeO!vqCoKL9)3k%lzo_N>4@|PW7BrmhunC` zT|p9s$z@q*-@X%imoAof)$IrscbQ95NC zCJC!wy+ESf67+OX^!qkY^h*pW$qqfoW4}HS6g4L6*tYay6a9F}!xyB5A4X8D7Y)9+ zK_10k%%DLW%+?GVoXXzBQ29rZOC1_C2KRGF5W6@64%=X+;m6X@A0@OOe$` zdMI<9eQ;L>Wo{%Ru!k~N@{725kW9pxtHRShs~;C1alC2mh6*KQUjZzn-7VQ=m>m#n z-3~DphMx(TWACtr9lr_sn+W}-S#(e4GY;6E*nM~?{n_%k8DZIN;0=r<|x7ha@A zUtozAd*Nn&Lvn!)THJ-(wWtep@>?h^?81c`2bnQBj)*$~->pfl`Dz+l*^xnGTBj!Fj?wQM6;%dW^0AY(`zB=t*g^| z*_`%GebBvok9oRuCG*rD9Vv2xd3p^0jZ+)yu@%&c|9X2aW-@qUf_F<#HMW8Z6&E?E zU>?~6C|y~W>bnT4eTmrkkUs=l-s_WzjrT&u8-uTY9CB|YufDG&7D(V-Y=U6%ZdDKD zJ`{5Qmcglo?T~wO*jnf08|p+SC%Y>HIcoT&Eq-f_zqpND<atf-`GHsRnC}!THAEYL79vrob3nTVf2Zmn|f|O1@0s z23OW{s)a=AIrzeuTi_rV#D`^~6Tza~B38ZA{2VOanMuB%h4$7prv|A8;!*1zqHp%%_rsS% zM=gEQbaW}Kuk?+he+j4;B@;M3MbTs*WA2+MyS;clHX|2HDBywd3(}^yNC4%=rY)e3 zkLkR!%(Gafjfb^ooqoJl9$QrlL#%M5v|&RFy*twBUDYDHO(Ga+{ml_u9KIILwLeF% zX$_7pnGXT>$jNKvm%x0A@S%DS)pO{DwnvVT>Q`pz!B^$mQ>k8T%@S;t9C?=i2?@d? 
zH><+M+j@{4?D{0={#S2v$egwjE2X#pwh!;?_pdqL*I(|Iw&J2L#4kUdpW=*(zl1ZE zUcTSg27^5p`!;SB-d9Ga{l^5ujC&uCiJPX2j=|+`qU`y1dcdyKU<)$9T!5_wtWbk3 zLZsQI2D?ausiG}-jRF?P06Rj1otgpmPr#(nR@FE0p}%WYJ@YCcDY(U{>3}rI77g;n z0_|g#+0z{mU&aWZ)IhH&AU(o>0_(NjxOlVnKgFYC3BPDh=Mz`6+09rn_b;av z`>4loWN;gMJWTCU%I>z*Hikq1^?`R8Xkzwo?!=Slt5Uvt{Lvb!3DLYu`IFk6JnXTT z#GRZ4?SCL^Zv|%jdfdrG*(h-*SBgJG5!+)hc$2p|?&L+vg<7JTYFw%>&0MhURUs6M z^A3E5J4u%;$KP9H-^ylCt;_*e?Bc#V-IILhg=|#t(m<{mxYZBckv2S@_j0V|lEnUO}M39`PjMB?6 zN>#Y11U>l!u8^D>R{ow5ol7&p_)B7rUW!RF5eB$sL56Yv4}u%^gzq`9Nt8DJ57bFk zTBw*QA;WB)z3!i|XmkZS`%Bf!V3PGZqDIylQPOE!MQnwADe3a}X2v@9>w!~5Hf0GI zUg}`GnfncVAigPNr%(9`%zu{%v@=v>bAT9YG6@wZx(dbaZ)POyD6HF`{Yi?{)ts~R z+wt3!4WoLpFV%+l5$d$(wTnc<=F*jx?{wm0a&iOv7(`{pee5Y%SNEv{6Px`cTXL2P ze-yDPeg?I;g6>u1x~e27GyY;5C8m3~4xK>I@v0?I;jf~!5*~<7kLt(9PdYkiy%S;x2gv-c zu@~B)O-QQzojT(G$;j}r@u5d+`_B}8dmd9sq}U=iK&za`o}(*gf2&D=%-@|XCi5cp zwJ;@pjSIk{57ovLg+R%VnrK=oXdp*iwRF2v>0lX~=MLV-<~d`2A12K%{UfU)in-Mc zD56j^E2mR@(8JuK=f;L{Sj2JO5p-|*Q3aclW0ORJnYLj*@J?Hxmx3x_O8Eaki*|l%2HAA zNDXwJ0@6hNngcc_1ME-@c2Wk|BMw+?2H022RFiobV2uvgof%-SXs~T_lp0hmVlw=Q; z?~a6xBA#ilu)*;Pn;frjk>eFEalFE2$1B{B@(SBCyh2*-7%IxCHkAl$|AX+oFOW+3O?q#5nJHUHIzZQ#=Cn7k+TY$o zbj*4h$#5W2Otc50@bYG)G+ayq5^sGbJBjv)EUv=eVLir-Wv@@K$GklAQ@^zp-CZCF zK9fSl9|c>!bAM@gPW!_VZuD)?`WJ?Nw|(CVS`xjU_YiDuSKY@%dv~k^GqSg#Z!0%H zw6}p{8A8^t2Cx^e0$LVAUBIV_&wYFz;qxSf`$7eZi35~wAnDAS-A!pjBCv>7M1wAr ztLUW?>R)%#d+93OzaQRCZA{$B zIYq~P{-tql7tTzPRmGY_-XIz4)W6ZT6)3T)Mo=_Liq4_P_|3XYP2ntISbI=MBtJzT3*PNbAF`c* z^{GH^5ECb3p-Z9z&LJ~iot)Hb+rTx25)8in7B*os-N_X)>CtbqW(`r)gS6SMQgXe1 zCKI}p6hM@uUzXIWe^^ES(EjpoLHSxckJf^aWN#zuGdTXh-j+bbXHMIy*mIu>{*`~i zrL|wCxzr28Dh1zDvz*aDl4z)~NU`HSZ8_~uu1aCa*|9#OJ6KFSyk~R88Pa#zfI9B? zLf~Pa_>%9%wNeymo~aaje92zw5sk9sU%VX;d+dqNGJ$b#?e(H{_}zh9>MZj0&gWdb zlr#=BMghh?OF-=ix;>Q!%3^`!M5)z0j~lpnp7C$!Z#m4;nsXD+9J?g2ncb}cYgZ^% zg>hPCzjVm$ZoI34ln(|Y#W2=mfigZmKDb+fO7T|U&#Hxqz&u6@&sH@AIJx14SJFM6 z-QJV`J9V83!0FBLJ-byatTysl$Z+UqwS-EWl}KtQd9Ow zFph0(kVl?yiE(VaN!_A!k8NDUt;9kr(R#`pk!`@pnn1NUpanQmH3**utl|O;$`MN- z$g@6G(x&lp&gktGc*{5?z&Rm6&O%{rV=`V+z`@bLR!*VaIM=CmG7osOSZ=m~aqPWm&?KR7c!@C>bZ_YrNCKs={{tY*Wq zIu)cx`Fg}?SGgEg-e-?Vl1Gh5XUXO0dGK%b;6nA_04dM0-`ko}%n22XU1RUOkN2Vp z(n@8u^m zl!OcwA_<@<=vQ5*6m}Ea)MxD>nOVf>>bjdG=m8PS=|Y*Ymsnu2_+nxA zheG2)Ye=E}vv)y6boOFUE6cs+aPlQ1`valM9{f7}Of5%gmW%`LUZXaAC zSIN`GgoH36F!v5?!p?XJM48hD?nZmQVu#+KanMoz?aU9NFG@o=;}0Mm3EoO8{JN80 z(HAA>fFB&rERQ<%FAfxgE_X&MmhYDy>M{Dgq+i;{6C2Ho15_cBIG{(EEO3aQR+pe{!QJhe`VfX~+Th=AX&VC+f+ zEKZ0BfZWAiPOco;Fycs8-A)Ozmk65R{wqj&&fxx%r1ue8P?>3JQzSg$Y>MO*jEiLX z6ZQw{Ms=H)d{Ef`&3&-{Yiai9MHjhvV(Xm~oW^2fd@@_wHmmh*9skG>mw%_iq_+T) zC1Wt7{P(InUzLyNphuCTe|{rNtwlW5n)7S2zc^OrQ`yoO-hkQE%Ou?^Bj6`mPg!(Z zSx=ru=}%l4DHu@q&eH~z7&n;))ShP3YT^kZQ)}#DOVlit%>-Sl;Gef?U;XeEDUk0T zS0LUr$Sn>?>8=#W77g;nRjFF010n$n*>j`oeOiO8PlE&*p z@k8o#J-r`O6oRDOCdA4T*%MB7&g7lf=|Pgw99YBSNCdth&Af5?bEo-T#0LGH{E?(IQM1 zPPhnMDS}g}jRK69gnj1+SuQfdkuQxeiTbi81Hmf@adLO<<2X~_9A5W~(MT#9#2&be zCw9eKvbv<8>|BA6^5FrXt`y;7Rb7L|LwFX_0Xu9PV@t3f`zA_a1x12QWO@}LG;o(37=fVk2iH*1i4 z(jW&rAa6^!MKyG>2Du>(vJ+QO`f_<1#HT^dO@qAVfSiHdqiZ=#gPfcOS>k{^xjF^1 z>tQwM1JWRI2jtu|$R-WaIU_ab%LPRJ6aJN)f7w6M%nU(Q7u_c*%Hb=8jkgLL!>0rv z0?%;$2;$qScsKrRwK!>H%?lIBjloA?mhr(J(VGGwV6yBXYQ-=@!m&DuXmmC3l066; zODJ(RRS;r;)2gnd2V4I}4>*ZaB!PV$>`K`h%F>ld_f&<6!1Ka3O3)}LCGZS}2^n%# z>=I`EkiVnwP`hs9RNAlt2kop#WcM-Vg?qO+gW7G6Zt0?ZwM4DYYaGywu6`NGG9tKE zkiq4$M>)-Ssn|q{iA7;eKFA7f&EDBUgNIYTvnAV@cXIERzx-cX{v+(G2Iq_?CBCk- z*2r*P+olF<<+TE=dZA`PAv0mG#xW2i?xw}zlMgZ)5A5B-tdR*f@VyVL!_qlgp?({$2>0EZP+!5k&*Pi%P+CdYRuwsHN*kGkT@XvN3N0{ zbwMY`m3#cwTNPzTn?_)ZSZ-d{sgmAJOGJ@G)M06_Ni7nqmlS6fQQx^VXxz9ZAj@dp 
zG25htak=*Hzw_4!MrUWq5y9hfu?HD0X&G-u4pjAR)%Eq4^U|W!N2X_EJN?Hkv~PP2 z0#4cRjmK%TU8vY)G@d5EfLXRdmd~5h0iHIb&;~y3d~80Qi9owg>SIl^LEhAh(~CHd z05uw3k_g-_XeZu^Xk+dgJ^XZ}SIUHNl8ZTdFkaP+Tq?Z>?i!7CAS_563hok6$6xWv z)1@$IG`x_klAluHdu-yezecx8h4=4S;YxXW85P=dNtzMgN|q$}VgWkPz(+ZCm`M@d zBzSfI1ctzxt4~IfXf(OV*b#>IOL6!0lZ1mvJTnw{d2noaqtlrOY^lh9Bgt+6^Uy^mMn z#^!y@&NTabECZ*-Zl^_)2_|7iwO+}6yR_a|&9~}+bNphwh5~!xf<8QpaLa%0>bd-! zJ4Sv3w+*UL!g~7gg7lr(NxG~leRq5MZejXv33tM*G8p!BU&5}Y)Uc{$7Symfc}xxK z4(X(Q2$SUuNhGu&aT3G$!nv>Jew*A|@n*m!(pq1y9R-m*I0^;q^kNIe^<+In+3eU&R$Al^n5_6|buuv_k>B6xIB1D>b|?+L45!FagLUZY_?n4;ujnSG%HFiitg z+7D@f6&j$@KHUL$=xJ3$jUCef|EmFN?86*@m;zAA6801gbEAUMMLY36r6*_V4vRTx zpRU0sYA{u_-T`|;xtMxg^dJpZqQREPn~MdEe=2^g<+ZKlwF|7l;FUijugCmPN?x~9 zml>a3y_Bo(Sr6Y|nh&!SK-7>qo}G_mAPFs8z`eL1l91*zt(?(!QGpP>l~b&5J!ohs zAW$Z2A{_J5OXOD~bp!FUvI6ljydv=}j}rOM%7diW^Qd693KD3uzr#@HPduc4=0Tz& z2Uv@8q!q#?Xkmh#U+7Ex2T|7`!x1N`))8kDs9SgFq%p-ix?a)5`t2! zIHf^EyE7t{udJ4j_aSxCW3gV>^@<7l)5bFVnZ3s{XyzaH(J490+)*{@e)Nb5CH>O< zaMO=y_ijT8DoGk5GbqU#QMSZMk7R3r*|IG==|F zo(E47c^W~Y>S(e{nQ88>Mu4odx6aSPv!3-%t`KRbm?HyKPPmu>8%XhyKO$&7gx-~? z%c(&khSB#s)vDUJE_g!@6Xr-#ls8jqwi6I?2L<)kXmRzhleV%3jRf?ebug(3@T%wIq3brv^jlx#Z<=G?sY0s?9mgv= z(=aYQ;gR&-x@vwW{7MoLQg2DRA*W_F=F`wp*OF`+k*rEJl7vd| zp$fr~3`Pw$q4I&`R;zF=XYNoK!2i7iLuAM@EfzlBAHnNU)4?|O&1C&~c#qxeHl+pX zHqb4}fHDs(M}@K0FX<(mrYvw)1Og38UX}Rufu_EIBr#5D(-QHy!P$O~{3kP#C6{#k zYbu1g&lN@y(q`rwBYUWVUG0*@$2p9z+NN8!RP{HfHg}>sWVtcgj5Wrd2W|ASypN>S zl3rFnmF*HHB9Fj8=Hk}mO=7*&p7yFn#BCfVO`w0VSnK`C9ye~@Nr_1s`Ti^`mh}?; zk_B0omi-StAu}jv+==WDvp(aV(Ww#wzP-dukK33#SgcXts)JTv<7xi-&}bi+n8%+* z4amV|vg>t?ebSW@!yb+8b$^*q5SFdJnEFzwFO#@H0@YkNH&-OxNKPc-;7g?_GV+Jy zuVGZ0)gW>)Lg~te|HeuMl_q!RZqGC=CA}B56`4lB!ckQa(kBF_$&b&5bx`VjBMM zUX1zhpdKVm+KVwKrIF-WvQH!pG3Fi!lGF;2SVTu))d`N^zk|ThNNN1T4d6(Y99d}N z%nOqHT+p_Vk(Ms>v`4xq{}j(i5(s*v%PxW@cAhhcr!g;j&vahKAZqp+L``}SV;|q= zATBZHUd13vH)@U0#owtR^uF|#_sf1S|IMy<^`29=>25|}co4hBJeDAsc^1Ax@mU`K zGKuT;u-Tc?-%reC1~^RY|>Xgcpdb3S>pq4F90joV_-uvdS?T}exS{B|w< zJ(->PjdW&)>P+29sg8Jhb>yG$oG0CpmPa!?(s75fW^c!cQBqKZ9i3(w{0CBIL^l5< zdDjxBW0xQzddoXI+9U7o*0f0^3Xy>3VZVU?MKKM+)>oVa^iIS3S&@EG%wAtdP84;$ zthbu2(5X27)Is-X+W#}A)*JMDQ0{EsS6Em33o{Y_?(Z1iSF1G@TrE^!1s2bHGcqVx zzRHMRK0pv^cMJ@cCycozJPujia*%QKdr&R&s0EzXQD$)&{Qvw_bdB7u?vnAOv z(cLi3JJ_p^bQCh(Lq3j{u5DkdG|`AGBKnX3)!giD-aUv94vpu zi0+S23|ZSdVEX_ikl;8~qDk;j?gh1nLV{9nCU!yK_T5kygeZ^FX#SrEGtsQ9lNHFn zc^d^*?5DJ?bOrSF!}|B9noD!pDrO(`@}A=3!JB%-hc=g#d`PLbl>Sde9WqUsy+WEP zJ$;ZgbzG~EUENqg7{>)R85~6~*vCf!Q~L;=+%2<#+;SN8^2M3>*?xVdYW`VMZ{7TN zwy)ViL`M((NvMbRT;@wDi>=prQ&wIdI+<*VfC zWWS>=gAOG)uQg9wCIz<7XB1zqRA@U%N79E+DD;V73$*cvSIF)4$RC|5+kQI8fM=;w zJbd2+F4uNZcM?7+BTD9&-|whXuwU(n3@15^O;Ek|o2BiV%ca#kWCZ(JJxaK}--9!= z<$Z!LY|{BMa#*PN+c3)-67E=M&z#-IH8!4ox(tL(Xf7-7pVTAxe26RfJb=spMSQAd z_6(G&%{VF6hH`qb_WYX};-6I@txo;DEY6h@#OA}U0^19>Co0@J^$U96y&v=rrPRt> zM@2HmR<4Zk1TK1vLT#9Gc-z_{)a;_S7xqZ$#%2L=GiU!jq9gX#9Ox@X&>0d) zQEMqF3Q%=rcC>@Tx10u~m*w=5WQN6%#X@?l-PUf?rDY{$W%LN$;Q z9>2IR{nF&cMs$#34ifwcFNTY|*p9)U_S+e`A{f6q%ZV=r6RXrA=*2sff5cy_gBLIi z1)Jw|+XI(6h^#x|ta$bbVXLAfmj|6V2FFTo*TS~S2PHnkS<~Y>?aJ%4V5OnDe<-B$TwH^aZz9Y4^X@Jy=TZaMf_wj;E9(BL!QQO$?})i{Q#CE9u%+woHJKEwW}llG@D zk8GcepU0mb{Aeal1YSVi3G!&;8cD4q) zT!X2i_onMcZ`5F$1?)HtHcEr3qURm3i5jd^z}y;as0LF-w>w}iJ%kz}a`syb6hgi} zQ;k#=&2Yf(CDP3h8U*Yq4Yp2$siM&i*c-ahg#z|_4fcQrQ$@oZuqQMaq{{lN25Zn@ zs;GOK9_d#a3^l{w=g?s1YA{vwssnb125T3vgEiQR8cY>El zG{^z_jRu<}VCQJCD>axZ`iR9Np||n6Z?yt;f(A1+m?~;^z<#d5h-ta(Tn%=F22(|M zI$(F`Ib9@R?-Ed_fnBB=sVbW3fSs(tB*D*qPJ_Ls!Bo*$2kaSAg`toQ0(P4Qdqjh& zqCy9(NrTl3Sx(nrjT%f9eLq!?bgBlkrM7?uyFi1fqSqa;ks8b;wH>a(PSIeh=y3;Z 
zfS#;;0qbg1BjryfNq1Gz0tf8w#cBv80=7YeZAZ=lriw0dz<#B{sK8}EsKJ(NFjaJ_ zfT@3btalV4t6A?55=V6JwdRU!e{(vmsoa=FqUqv!*U@p?%SK6Th5F#=98w9Eq*git zOU+oInGc2`LEl)Qomx?79)LE`hM3JN!GSB`i@+j2ixYw0fl_5pREkkCNxrJ(t46+R zX=egL57Bzs&9h|zfCVgZ9g!8mmRiX0RZ2ED5^y4aQCb7XjR5sY` zYQ3I{&qBbTRp_+gF}qsq4wdwR%MzYsFTG2A#5zzh8~u$(P24=kEMHMKn%vh^S4)0M zKVC=M+uN7weoUqx+iwtp!}=~}4kR!xA?%y~t~6x7VEinfX}#ksD@mm(VVbP5vnG;8 zZe0m(>T3Dab7)b6q8EK6m_EYDIk~UG+9vtn$CAX)5=@0C-Z4x}&+F3XM zahPX&dQITrb6~XSi{je$F&a;yMRrqV=zQg_h>ajzX zUk6U6JTHDqX1ISl;!=nEzmstP>}o{?LC)Y(7zxJ5_e7rr0#rJH6)|Qc&cl>2fb3+;S}g{p4vq`DY-RB!u{%Xm12Ny!J_ZkizDDQ_rhZ zEe4+uMNg7N@2>8fO-_*>p6T@PK!*5JdiZBZB-O*@is-jFGp!$W&ers z?-!nxealDu5Pc_GA^h^SGv7;d`-{I$V~f;wF}6bGJ0gegiJt>yfY0K31k1m#`+&VS z$kskm=KWvgEPNU{RW__gx8WzOVtb;_vS7;>Sr&Wk%6%|~5>`hFzG6Qm?OX9h^jutp z(fkxwLG6$5u0#X8ssnya&j7Ei374S z4YCWBz&i(|LE;X`xoMD18l>}d8Lc#QxdU=e8ste0@=6+Hj05t-EvfF_szDw~gA_U- z)#+NU&>-{EAXyH`3{?w=4{3-g3WDG57p5#LZD08^PS1Fx(1$C&w3&d49l3~Z6pK0J z{GA(8xhXd#OI}@HD0Insyp?nL#VLbpi7b}JtEwTocm$$z%@n1Pw`Gy+>9`mJEU-9j zcrC%YS<0u4k2by%fo56L!~U8pRJM<;(uu%H+9neV20LOe>&rULOS?K_M4fZ&>yJK(5y>u0+1h^2tRYA2H1WzW(jV$5%MBvadY&>vx_AGFh{TD5z;=F|swpbFYI0i7h z6dSBT$0Y+zVwG(ct89^42u_JYQeUSR)=1`o&q*1bkuR26fn$(y@K=HKCG$^^m!&19 zGR%~PSW9PCveC=l^tkTK5p+gbUYzd{59ixw{6R((SRnKElWMzKYKu9w?L-fk8%W#n>;2;F8CX9DA!$v{mzBAnY5Tpmy>c_P*~g+KC2awH{4ZLVDP@g+ zmZP!oQ%*IDzckUua)wSy9}_A(#C%XmLI3f`lAQu}~8Mbv_8&a29js zKN0Gj>F1}kp9f9|Sl?8{I5T(wL|9?H7puD6jD?q0SdWUKBuBK%X^ImrJVU9uv@X>8 z&)G79{$V>#pk3=i+tGzeE%B&?5289iE_R`44{lL~0W=~0sS__C?l6d*cNuE1dT%vI zQB_+nHCW9OoKl0iROI24j~c{_kw44OlPnn~h!H5Ts?d+?ZR3u8Xh98~(t`U@E5oUD z`R;aA^q&&q-qC*nNBzxMM2(B+j_AHBqBnN#AGK`Rkt^zP#zHcA7OFE9tJ(lL4v?hx z1(-o(YYZ4NGZ(7er#J9J4WX9q*OMG4@|xO3k$pK$UYpEoYEvk0u8o~&R1fL{~o1!{%IbxskW+t~(tB5i26yPL|8D4uY}1 z&fyIn?ZVVbvbCHVJO6irq}lm+P0i9x+2mYpx+m9~bH1(ep5*EokukTNfQOuEVa?9f z3E^&z1&h>4#(vj3dyX&T2pR0L3FG3~ClgO_XQmGZ<3sU64zO>#ON_zsxkRU&guw@} zRY`gyfmQz89O+DdQ~i>&IE4O)sih2>deFW=S_v zEJ;{|AT9y7XszBD)ZhZah55dpbMBobA%Ok-et&#;k-6J*w&y(OoM$`F*}pluPd5?_ zG_@NGm+5XCkK!$a;>PL5L4j}!SgEejF*xW(Wtv}LJ%1OJK?9}5I=)O6{y09_awqX+ z=sC_mU%*lz2d=*(IQV6t?}6*`Dx*mjmyB-Nh9`{Tf^cWLtG0qUF1!~3GXP1il`?S4vB1`@U`hM@Gj_Gmx@;kDTk9=brzxg}`6Dn`O9wLUBwiUqd49m-%|jOfRFvs)9x zD~bH@OfC}=WQA_}fFMXlQCpy7+x)iR;0*zl#XF&l7ej-$g*aCEuSCd^!G~RvW9x{{ z%{it#86{0~@HBqKRX3l;I&V3LSqy&*I7ZnVLO9yYS2uG&-+&-Kw zmb>c2AAqs8LSeOxkbINbEF8-jHQ%Pl@$}DuC~~|x{4?uXC?nCqbGl+VpUtzob+Af* z;BM_&V1TI_CBjfYa2N<7`X}Isc zS=Ggi+Q9tSaxJs*j{DM?9@k5>w#`Iq8~VYEGF`qX&#&^Fvx8?NdfdVqxn#YhF6XV{ zVpi4DjX}7YwDh#f61fgYt2{kuZj{^1G<4OiU87WhkWE&$Z$Mh7cEN!f72Z1Klw9bc z%N>jkol1sXgSZ*53dkzvQf5})?}zgn;p6-OVzQXqxXCtLW$bri&j1D zSMHU3X9cc85X?DJ{BNQ||MpYioU$I_QbSBXD&=oNqQVV`Uv`(mo32)(5wHyaSp^fp zV7PiEPS@p-{t;6u>l$%y5Q2Sx0Etc7Z)^FM;|5_P9*msvQ*J>SC-0r6G^&32XwY>4s{$mll77x51ffrKWPAh9ol8RqcPt=A=gbgemAf7QuC=fY~Ob1al z%&Y|$J3MbIg1>LoLcri_7sMIb8fN^Z{4L{8*r?@jebBd9)t0RWSeC(Ac9iS|%rldI zB)nNCGn=q-7EXUUR(<*-pA_dei{g^GKbXL! 
zQawLrb6xOc*r-{=UNMJS?B=q@-$ihz-I1y?Of@rEl_pDUz81eh_|0@-Tp8oSz{&;> zY9)rg=tM7x|0;>6n|cM$5f@K$=mWf4DG8P=5{EXMseGL1Rb<}gY|ClI0GSJr*2XAn zS!JFVV+G3$gG>(WvNbadvQP)rT_GZ>AnGhBH1Hc;=q2n!vK83=j<|Js=S!GicS%8Wcb3R?b6A zR2!|MltqmVEW(RwmrOR{uxrXUd!6tcS=V26^jXR5N155#)H&$P)|$7|D>;i3Bca}S z6Yja%72YbS)O)oN=$JpHGrN;JbGB6DHWZ0M^fy5-(wQ9XA;-k1ovochxuochcjf8T z(gX3XlwRn%Ueqtw9snuYJ*r={l-gL?F`epE|F$<)(v@B9V;FMrag51XJ-BUmicp-= z;<*d8z1D6r^M!7*G$2mD^Q4Rjl7KY%_u0D1nArTOZe^f~UBQk|*=Sh)UX4yIdd^+q zokhrRd-rM)3KXcZ2JBIKUA>~mZ(B_;GQrV*T*f2uSSUQnE32KswS8I!lbK8tK69NO z!JI8F+){DEzU6djuG2o4lSKP|K^E3u(CtY3Ofk6=#m}pDthovO53dCt$*)Te0(Qf& zX!F1%W>8g*ib@>2dd@Fl-ej~E;xPbcy*sJ=XkDY-o?Z2-J+0E7I+<@IMn67tH021X z$3GaiYK%2Q(qxJk*#lmwQ^^;^zbM$DVjpd>JYtB;P&>TwVP&V|m%)Mk!ZQh`;n;LQ5T)5pOlYUBwE1}TksZeDdSX3W}Xv!8ytTw z>627`o94U`SNYAS9D}pXa6>dk|4XuV)m<+bAv^^SX=z^KC9aaPkbxTH!=hDEq%2*o zx+nAtwGyZoo8$$oTE1ACao7cmtj|*f5B8Y4a|(Dkg&}VH_sSSzP9e5Q+~3b0FZm8V z3GK(n%UDT&g)JuDC@5FHm@;BKDKoy%lP5l62HgUrT21W9{+H4Pg@NdNx9_<#X-*P7 znPcfQXE@7LVDA=}QK|yjTTQc7aBOxxKGZ`Q z;@OxNh)!Z8dSR2}@xuogk<5+6!#1Lq;1Ib*&L{oBbc!Z+i15I4?{a-vp)YIoWs|~obyX6@$_UW0iR=6 zLa_jP*{6Hs{R^Hwf@fL(334KitAX%`gbP^tX5%AoN+rvMiz=51ojKtZ%dC^uB6@*r znQ7El`7nnM!O=H}hAqZ>SU!0>RlpT#C+2&`pF*gS@n$c44y4Z3Ul>0cpL#Zm{Lu}M zrkOLfj~&`6Ffct(ruImM&JxfBVo|uc71<7Wq`q-TlP-JW4qLLaN^_f|Sgt zu-_t|Jb9)Fa}VYT+kPPR|D>BqX8RWwO;_?s&?EARb~#*&3@$43LJd|(t*s#%>?94Q zUOfaDc+NRYgITPY)_{Sh>UQMub3yYX)I}<*j z!B%LnTuGU4=Qfo9hkz+B!3qr)(O~LTx()WVY|e{Di|IQnpuv8m!PKiA2#C_svAS;M z0(OE1E74%;)jw^pks3^S^L{y1wRE@!Q?Kr}!ItVmYb3XqG}wp7D}<<5*Vtg|HP{LP zdqjgZYcTcdd>iZ>4F>V1S$Ar%-)b=R>L?rRpBhYg^Uly<3pAK|)s5&XEqzvlbx3Z- z8th^Xre3YL!Cuf{JpwjNgPp3u)T{piOuGIEcEy8Uw{-mnQxrlnG?;p|(9Z2qom;+u ztOLFnn%h)vD+O%( zl?oxBj8H9AuYN3G>YuQG@Wz#SilNT6jZ8P>wP9i^1nY*Gb@Uh#iU+IgG#pw&bF9n9 zkfnLv*;ta5+nR(*Tqfc+Kr0bQF^?dm`y2Q}Q2U^2Bgw;R++Z=nqF%=R0Aq@dsrC}`^8n{Fd zIFWsUOwHAv7Jnw#6(%a|5!;V78E z*cvu=yo@hHns24xS~ihv3+wMP$%&0vFyUo|wa$?hY>@=z0@78N&UZ=W25Ou2kezCX zG{Q-dok$^k=rB7)wn}lmodRl0qdEia@UuUNk)#d!JR?y_LB}b4OAx~SIz?nua*7zuO(LDqb_zk@HtR=rin8PsFQlem2ZMqg z2=N+bC|{U|TTj)@gUmiK4<0A_TXN91-M7p17HYG;7VUR5=i98w>iNFWdN!SShXSyy z*EPWBM=1cys;~h*a{!*v053TJ6KsHj8aszyX@Gw^0H@mkV;q26G{8?CfT1?PEe^nB z4REsqu(McKvfcqWTLY9k02^(976;(l8sPg5z{57cn-0L|mnl3P;Q&NzfSg5kD_+t7 zAAZ}$!z>%%HU|&?)Bu|tfN?g!Hm8W6YJk5w0N=F%9@c$fSvPBdyBvTl8=%R_pm*9GsDjiPpny)#pG@CR$uv zYu_PDB7Z&nsTGr@wL6Vjfpu{;^`JI95GEz9h#;8Wau~p+ys&mKX4}xEp^M|cYllDE#vMaz9QH^&TUIw8^5gb+zHdd3d`LJ?)a~!Vw5P2ti*`z zLyu+uSuE#~nc8-63#kx6I{*|mLxkrqnDk;fR|8OH{;Qi0w(&r|y{W#<;+stH`6}0+ z*QmrpSF3NM?2O0hjC<^iLFmxmDS+3eW0Y8xO%K@l%BMTkr!>;y^F{PL-i<8`6mT|n zPnLlv*)f5r;nw=n(chVWtys#!P4r6JoR7sx42TgL0txs#t|hP0>?D{j4dbdkPy9ck z#)vWj5-$J;D5Qo?cUgidj=QkUD>EH$MwI z{I2}cwxkmXBp5WY`mt0?7)I%B;pl`&i8E1_H&qtnUkm4!^J`Ue&MbRO8jGvQ ze$On6o=X+bVq(W}%RVM4I#+IHapS>W)6NxkT@4X#3+3BXZi;8G*{|&Y!$DCiBhX`B zp2(RW&=b0GM&LlH|2X@B%s0g6rKqv!#iI4GoIj%7$DiS?Fq2dBh)BUO9iKd*ELA?!&ZrI&Nb{`ja;y>wQ<2v4u7qp`oY9|d>QK^bsg%nvMPm2 z-m5$Y?OTHa-E3X9fFx)X)p-kYI8B}Yg2NI~fvrEzpiA*fQFOxtx zZ>B;Un?`B~(CVsfr0q86I2o<}x!fVxtK2nZs=1bX|+l0w8+|?UTqGC~2-dGQ2>f3YouIOwj)~ zSA7`ExkTlJ&(&<9pZFq$LXsnY<^UtTl!0;~3hD740rW^o0D1sqcVCDfisVHukom!kAMtmg zJR3Y4T+6wZb1mk|$}+~w)f-@5k6toU2##rZ@f;c^HVZseC=J)FugKIb2iVA)e0j5% zH~As@kduk!gcw4I{q?^HQ(v+0XW9Y*W|Bcl!1h)ay7cd{fDimih_Q6EF-?UA)6 z+^AiLkBH^GNtbYP?4xqw{7mb;+k5lGRaF03A20S zuvr2&QiBz1F!gE$V8S!sc8db*hN!F_L*+J1gQ-^$8|?Eb3M^Z4+p57nI9wH~Uj4`h z+pNJn0`{;5Tcg3$s}dXR&l;>)z``2rK@Fx}4HvLp|AMlnNU$2W%sttuzRWk|b;4R& zw1KrXCVZve>>;{RK>7N~unR^8r~|zG*99XL*@p5g1iXgMz0Qajj1l?bWymN9hKPC( zf1{om5$ehaP**5RT^S+j$_P={$YW3}qL+xaW+39KK~5`6a{VoM@)2s_3r28vK6jy#)~dts=3UbHJm^fRTZ$87 
zkL@}eDiF8CJ-Y%7^YA;}Mr7iW&N@<-ndP~DIcTSVOv_ATgkKhx<9YCxY^XXM=kuTO zcW>9!lp<2yko^T4XoA83sNJ*6m+@j`;=R7vjr(O(%FMmOt8l8I1NyUI$(xmVuog?? zoMU(B0XgTGtw{bq<2N?egPoB1nLg_Bz>qDGh$SrP9_x}TWl2XZ5)=!#>P0cYm%Qa# zd^V&gJsmVd^8+f(ZYY;dfrLpMZzDY_X)`% z2vN>PevdNL!OM1eeoc1q^BWUqY?hgAoPlB2K$RQB36(wNOru6q=LNxxrU0(1K73J& zR!ir18=T#V`}Epi$w!qZy4)EG-Q#g2Q0Ol8^g{PB%Ajv!`(4zFQZGiNawxhqKjC}g2I`7tx`lS;WUAJ06#TDB;C~(XkK=)HWr2rAtg%MHMp-?2bq+FFW3Z&h z6<$v_hveoH@E;tf0)AS^0vi5E&mjGyL`b*f3q8=6+Ny3he1h<(pi!PqT99 zyv30S8Z$R2ZXLIA2+)8>jf9>LM8(V1yk>?tjn%za<)@+@h-yo$cBG8}EK`1j^G2DO zV%|d8c9tJ5#JH5aGeUBsyzr;jnWZ~vnAOxV zz{n`=Fw%WS97m5U$-gg-%0?Gg9-lx=JUzMDc}ymfEJ8>=(PW=@Exv=LLCx>UeHywW zmh;+J^e&;Bi0T!(N#-e8#7hh}xYR>L))Lp^ui*tl1)nK0_$X+8A%0E5P^x*R$5p#d zrjO|S^a;^XPxAL!J?A}!qVw7jn(2}|#tG&SX?`In6p29;+S8RH8dhbu#rinHsZOduIxBA-pubVW{3P=nX^l26qSu zG5h@8_(G_Lg6t&}f~KXPNUSvp2Buot?W%2~r4n{65FPb|5gkQP!W!P-Ahwwk9_;qs z3rbR=Q0tCidl#bl(pKo zd0+>iA4<;R08~NFxQz?{-P>*aAeAHyAhg&N>~2f$?m zthwG!ais>B>i~Q-Qs;1jlf!5YaESx(f&e)G&_`=;?^$qP6^O?RJh+F;fj8luqHfWQ zyZ2en0oxQb_j3Ffw6jAZ*_(%BEGx{VaY2(AQ1<8xZ>s1{f?h@pcsj8@b1v(5%c!hu zQaaBswHBfNYE%!u6q&`tgmw{TuV8e%+t{;DRCq8k%=HYjN!iwTG~AWSJQ$pN zB{M~=W-Du$F}p=bHdye2yxfO+GBCR_K3X};HZXg=dSr;DkIfF046M$Ot#hNtx@E2# z>kcyU&qPMh#<1)2F^Dn0FD!PnmQ&Z57iK&9oU+kB8se%|x(0JxZp0X2ju{d#C*($a zo_K4ERZXo}&IGAx@7C`5nwI9$Le)~*$bGkUS(V+)FUQ!;{8plwjrRN#ENE5@r;T>2 z9K7}H^al1A49jceu&9v{s_Rn#O8E7I5W@^4UnQgJ7T2BgoKEO>^Q zH42-wGG3s?jSd|M@me>32YIyU-RGzk>-bl&bY-%XUd^%EMA4RWSIfftV7YG)(+K-O z&lDO#6{}1Es3AGar%Eo6ni3r#UcbkBW{Nhw9!V-;IshlcgzYPwXdRJp>f>Ck$G$2_y^*qrliDn7tENhev`mWRn&{;~{eMLQmI%K9a0_Ilh4XjtSjjlSP=cN(& zJk7VX#wpVJ4hj`1+sd_X;Dwf11y_rkpEQwCBHz7M4`s=X`aU@RJxzTddL~Y22!zWB z%21Js+TReA;is$+h+QC##P7gmiJt$DisGiNWKMV0E>jN^%LTq%npSRIajk~CQhf@d z*e%(p532=lag>!?Kh)JCDb=pLO6n+D;L(2S{v{OjJXoV<(S$@JaXP{nl zmKwRY_c$+lj@GmO)xz_s!97NUOo@ZMY=ijXAR{$MaUA3^8|0qroLs*8fhs5`4sx#z zQWBp+1^Ch|pQqZ{USc7@eBu<| zp`TLwph@YtB>Tc&rB_`F^%sVFn2`TX?UZq@&+IE8;=aXTD`sFaVusE=jp6X!%k_|J zBc-^e^Ks};DWJt)6E{aMWfrdIuYtei{H*{1%Vb7w@i#Co(##fo-!wboZ$vU~Z}C6N z48e*~-9`lmR1K7=S%M6~Qv{ML>=jZUFPbO=o91Vj#ilLb=jThwf~$&f|5SzKKV}f* zKny~+-|HGC-)6b_sqpZk0)e@i(25nVVP1@Nbv3|q>*Z*tdUb1*^R|K8YkjWajd9bP zcc#pDvwYzfI6R11Hk=L|?|_IJaL$!yLtCZevI0Lrlkv7DgpSAh2` zzzzUq!i$8gW>!*cMwLl06&gk|B#W&3gaM&!uh76OS?Fg<$9iEQo*;O))TSbqGldM4 z^(x$x=^~>4`pSXud*sCS+R15EXKM9oTwfz5pmMc*VCPCI`?Jw@ipn?S>r8mb%E3=7 zq@FTR7<8xxtug@^;H+agIY~*MBB^?5Q7XQN*gd$Z58p0Et3vQ0;Uq!(K=GNpEw6&V z1)RPl`x|`WeQr)%V3--vhlEdmUI?aWx0yji{oJUL9?Q9k+|}b@>hUrjV>vfcK$3;- z0$yqhoxf0pN`mFR9*`v4+z6>;WNuG{%}Q%n&t=P*j@Nd{DGZQnF2h9}qOCroFVRlR z5=-P-Xq;u^M_i>^0L8BF{tL_&9(Fz3SbIFO#SC%VBNaylc2qxzSPi>Z-%hy1(8kGuQaASCkbsDr=~3Psi2|qM4VJ z6>y-WFWi$}mE(&Vhjk4QkA{t-AhdY9D-Vm;*&gD=s>o!RdtHm)rs~go1r0(pf#`3t z(d!8T6=GO$_<19?MYiaena_o-0Tea=B-z?>IoP79f_izp?h4`VR>_1xLpXnfEHjj+ zsW~)!lB{>In3xNQN6&UKwztcyiR{+YGXNv5*mcO}pu0bgrc2QyY_{^#8I2MmLq&d{ zeCUziUp<=~P<&eNF}~zpFB>ehn~P19V&4ZGiK14%t?o1~^Xx zWLqn3fSnhsN_wme4KP9jc&rBmK>6=YV!l%IZ%a)B^Oen`xly0UdF>Yb8QjAuUbu%- zjFtS93#T@D89y_Z8fMn1-?BNe+Ps-iPJnaL&0W$t;!NdtO?vm!pzG8VKXtjYD()rD zM49V~4&J^dZ>MC-V>^#8%HySOd2Hixy*!T3mB&^d8|AUoBae-)NihlCu@I^BiXKj1 zmxsAnUa!FC{iZC58;Q7Do7VtMHrM6}D*_$domay(mUHJ>f{pvtle}G_-p=A}(0(E; z#2fG>jDj72U2YcdOc7 z`LSw(&vj*oY8p4&<))3B7v-j%n~ieQ!OdEnD&T=cE;LJFTHuRsK zq;3lg5Y4SA?vjBM#dzkaP1dxpnHWeUJumC`hhNFW@?f6O6YF=bIAb5e+Rbu3V~tu- zMSi$kDAFTCyQQW+i6WuKcBoMZ(JO>#2qBgWAx>V(PlJdI5acgiae{16)z};0fM^pd z$%7JsG?gY_zx)4pn%pi$2u*fCAkbtR7ez&lTw^((6lt22C!wuw_4Xy+3USF3y%&J{ ziqoWrR$&je)5(*hlWeW8VJ3vY zcbZCT^N@DZAi~Gh2;t^$c}w-1KjhWu8{DXPe;FK#Q0y#zp>1=V2NIubjvK+HT(pwN 
z0_M2kT)Ze?xxk@_LKZp39GA%j{^c@=+}sSNo@AAQ&MQT=fLqS9C)so^m`I^>g`$Jg z`Bi5KomYLK>0Ag(=sb&Op>v^9==@M=9A{CRZ*(r+OHoXF40F3-1`35qxqx+-#dRxg z6CKVN%q4;&zEG=A79Xdm-1dx|KSJD>-q=g@k4EL&6_Kfdc_O^lGX<9SuNWdJBsqTq z%j^6x3LIko)lUAPSox3<8%O6qUgzIS-nY+`?-n$nPB)SklU&ioGd#Z#w$;im zFFD`;{k{12u8aAw&H1o_4^kwQmDY7WU$#15R_G-1!O8M?zOQ$_*Qb2H8ZZY7=9ePw z7Rvsi$TTm1h@Puz1RzR`|L`ioicAyo5OdER=5h~2SSX|#h9Il!$Ff5?ywMdXd7Xf0 zuN%?QbmP1<+BWA@BWj#%l(ZVIu^X%-_sA$|Rern^hmw1^@nj?1(Shxxr)wy$^iD5fQIviCw9q zK;^v04gFMT{rof`rB9`moGO;QUq3RFGL`&VYG+(v!=6+jx zhSSoPMxiVk+-jX5EwLMXJ({0>8vNzws=@B0244+#B=u4a_F(7~e%PlPe5^E>ljfsi z3Dnh-KJVDB(X_e9Y4ZuX&2H;=1r(?x6Bz^`bZJ*EfL~mpaN@QW$#-PXv@|cc`M0)5 zQ07ej;3NoI9F@R@)%gk^+$Hc)PP$nN9}8`K7%QdG{JR4WW5YGBJ&6YPhnZ|?vW=O^ zgZstI+n*`Spp%Z1!i6x$QbUEAnV3GsN~($AXqq>k=5-MPEayz!&T{LQr;@!nu>tR@ z<$*mdBG9?&)-g%~dz$sbL^U1x+hs5+MW02URYN!<1&JxG^9z2sS6IYqB2dQYi7P6P zL(SF3(P`+@I$XnWt~CjW7GCX$Nf05x!WqpVsEv+FbBVE^qLq>7f&XCBAY8r4YdAbitN2awi zQaZ2z(syzs!+vSH#lKTNe-nHU?A4k?LD}R)hI08P8UCv@6+MXz^#Zx`n-03G`_0f} zSB+ZA?V(b{k#>d&lD<&JT%w{f^bUYx`TR{V^7}FUVI+-yrt)(6CK+DRe};6wWXR>4 zWSHK6hIGH=B$;ws`R(2lW!S&m(={|cLq6$~)%iXvsiy-vi_8_{>)PA4|j?6 zr;X8K{jb)ar5`2MpSPsp`)5UdC}3F5lh&UN*i@v3YW?ZZtca_&R;WtY5xqup#(NJj zmhO_b(Hz&MYRPT0j>zNr`q0uDUEk+*ht}S-S$j_8wFy@n9a7Vk!)5yT9uXCW-e->a^lJ4soLE94J*?iCW3bP{>qZgosF|D-fww^mdD0i&{uIVb|!z<2*KRO>^ z29DyxBhH7roexsIknjaKcWjt@9AZ;C6`{9qk087!ETF&p+Lp}h)6%+z`2KMkD|xr| z+Z+ySo!E%2=m#GoG?@3d)trN{BA-T)FOCtUtfRdQX#e)|XU-=!X@~i(lu)Vxm<4bbw4JA68O(wGB$d9}khMWG~5eJK--?=&STO zzfnz)fTU+A1x*N5MqD{_v*ay*AjFx#4G92A$Y_4(w~7!iLWgBVluOo6l`bK zcF@c?j3>>Sr!te+c?0asMk%`u$G0Wbto)#v;rm29F_qcc05(q1TomUvOj%54d2Z-d7AAyI}mJAMPrvQw?D({47=vw^#r>Ybqxs<|#ma-ZwS~sXt z$dt%*DxN|dC%BHrk_-9iCsK$qs;Y9nr@OBrsT_hA_vya**I3z

^!BMj;C?17pD4 z%Zgs>9I*@LHm41(Lwzh~y}qp0muK|laW0$KPpqH*q%XhKmu32LpT68B7sg0~Xa&%y zOkCk;1rq4)FW~J(q}^oA?38*cOSI@jc3D~{BEmm95$twzz1Z&XtNFL1oL8#(`%aeG z60@y!({NA}%~|@2^I||xdfKM?HJRyY9sGUG-*H*#X=m|Q&R+$8z4Q&m-~(l014S~W z)$B*zV17yFO5IQ|leJ8y!Y(QsxM-3Te)%!vu3YSJ*urFg4|)fbQQ{ML-3up5d{}QQ z8E`FU3#8k*Zv(;*r_4nNTkx_0oZ?93C~lT}p$;g`~luZ0pVA zRF0@pncL6g%W=+^we}a`i^-=5we#1(`id}EmCbzR zQS(1{iS<>Y>U602|0i*I_bnv*x00l~6v=+{`+|Bc*)QlV*?&vgc({o1-vX=)cU2X+ zCuvQchsB5azC+v`Y>BS7a6ctz3-=ewjR<&Wa08ZFt(7B~D$S22l!^ydiO)1|7V^L5`Yw>BS3nIh8HsNRLZin~PUy~qK0L;#?{;VQR(d0?6FN@Aa8a>FBVR^><-)TMZ^AjvOt4Kr9#$d=CasQejU=4FIY;y_ZF zmnK_-&ep|a4G+;_oG7qFmO&{|$+s$JRW7=UlyVXIREeBLK!O97+KF{t(yE@O&e9eo z$TsMf_fv6>GtoK-Zn}udM4hGby5bj|B^BpOBHwzijT5=r`ExdW94DgP04Ji|NXh!h z1Uh;!boI_vi93R6s{h~R`ak>2m41u&AH&E`|4~<0?M!BNjm@O`Cih|I#XxYrQKHsk zRQ^If^r_v85?jkAAXnYPRGpzCLugrxzd>k6_35X&U*x&P{|65jyGwDIJV=*L(nX<75nu|kCHyJiI8w z@%5tp)wRLSwIMlISM7|0_M)7KoYIT8sr*$hIz#a+zZcC~obrD*Uiz4W6&}|F*&sAm&a+S+B&Gn@IbM@$4 zMGm=aBhI2-BR|uI9C1L3^O-%dC(%Gyfj68-zGW_E%wV z0t~6ER=T9j9MdDg4cObY)e5*CZS2GuKB=SBBy=eAWa{|JSCvc5l1UxK0xl?(%Qr>o zi-pqU{pSirNv>SJX|A^&AXmsqa#e(`X35(Q+EG6N!n|;J2Gjl4PO!_dGwc&?Qh#iN z3ud(WGTH+KH%84J!i{|K4U%A^9452c*t3hBPp3(r!9^EqdxBbJm#LN3tnPsWvEV?` z6bHgxf&*E~wBDdNvj3I?dE)g12SN~m{&=%{0qeqfN%k2oP2xa4RYs?24e(~mY_-F! ziZ`34e!c3K_!HQgM4>QMxd0LsujE^4X-|DZqq}CB!MtGCbUvQ!d^}!0Ud2f1D&=*L zPcf`ZtwHiy5Y>fVA!ohwx_cO}@0Hi01NxQodb_-CCKIq4jAlk)dCH3I>>_;-hE@d~X5=t$1D^`QAaEcsre@>MzvTLvqO&Ph)QgRaGMmH0o=qcXQ;|Lmm9 z_bdBPwuX?g%##&%Jevw7`;S8|?YvJ3|JyJeBK&XCmv#E`92cbiC-u_`eff*N{Exo; zS}xMflSSSiEi=`uLl^j`k$RJL({`zx^6wUbKaEFmf@q-M`W5&cM+f`bOMX}0{EK#q z8J`Kl&193ZKfo}vHg++nl>ELF#6#~p@T#-2cJ7P$o{gP~H$Av=eC`tt1j>0p@sDBv zRnu?%Vo9HL1R?hb{r>VpIF|SJukgk(@5r{*1KyPL2%BXP@d_m$FF^Yn)3d z9{$+m<5!{vY7yYl4u_8+sljXKT;A$DeIns}cfeJKOK`LFlLmLf%zh%Ph^QfA85~Gc zmKPMRLX;L>+nZgc3Ula2^d8pjCuwI|(SX}tOmVRKUpD;D0U#qX7= z4(t|sY$9PP(5+RyR4g4Ky;Nvjm9HCj1?A!MZ=xpOQ&r3sX;nkfLyO#q{tN|`|05$<2-qOVmz)>2Y&;j_Xox@QM!0xYA5iSSdE<1+0FP^c7aRb?26)^7_?ZUyn*(sN4e&DuV2%bb9e{y0z?sT{ zQTR0t@M8zyKOHnP$)E2qS;)`FUC^!9@vjc(E_kfC3;unp_Rh$_n-2pVQQT~AzG{q~ zYK2z&pJlVW2d$1tTpX-sK0tl$r({O43dzBU3}S|ejLRn6v(Ude$jDMRk#T5|MoOrF z3#h9R4cl*_M7+-=3<1U2j=nZO(7Qd;RC^LZ<8V~m^FeC-6DMxm!JGShTR+Xc2OFfW zfV9^w+o*DVm+Mv&dqP@`N2c9ynFDJdWKuGo(xL1g9$=JgbuD>FUPaF(hTZ#F#-f-S zAES)ugp&>1L#$+rauWM`i;M@+ofGUN*-Sb-mXjqFkwaBL(H3i)Kt^=un1hWBwpp{A zgHV2;wMa&!j9;0YH(PJM324S1wqTJ`fMlJPU=Cnf`Dps4ud%+rdtc1PvvcVO-%day zp``ubTScKbIKC%Pe^CbZA+O`v@}_IhyZj{j?LK$`A8qcZfK^#`0sp)F@98gp*|Vk^ zkTgEfct9hgjmRyGk6RcY@FJF*96&&Hf>#YOGlQWaJc^7pGviwLQh1~V+0X%FuQR>s zV-2SIt+4O#IM8_SIXJ$ad!ZkEMcHR(w}+z~_B-Ac*`sev1hu?Ck^RpU*^l};7xv!h zWdX9kC_(m{71u zp%KkIUe6@}3f&jSClzbVr$X%NXo^E@&&cHVynL|soF%pFXBeNKk=&jkht!@C2Wiio zPozD)>3J#(J3&PwOZC4X;Xw9S5@oOH3l2|Pt>Ztn8P~tK;NaBJu(2QNxM_e*9f|VQ zgvbmw)i5sx^Q{t$`&;-EODNTEtm@tXDzw(`qvvD z{jC=Rl8~X}jAk4hA@^?R2O-nrHDQe?7U|0)qG}D{x^dtz4~ykohJmXd{2V5`nD^j; zQO{s5UIRU{zR1eL486x&Zg>_d6()fr%#eNZ~5s zAFaRrTgJj*VL;AEboqfNq|>F#zbAP|Gg-Gd+E#|I@FCG9`BJu;B4x}gCV}%*LFpzi zmmer;!^Lc5jf~LntS+=?5hElchU;BY8&7maZ^lCMDpaSLmf&`aqmEW`Nuu?gP2jrD{6T{n6Bs z$?ys+>QOuo6}Ye`AdC$(Y|8}mzlEwRfI*9yf-dU(0R_c!j*^^G|Dg|JcR;qx*cC>L zce$+rf+efSReko!Q{~{qTFI_dqRXi$lBhByiX;)}-KA`B_Ih|ORJfJ!0bYn7~cFgpH~|Km+Y_JqTx-PxoG$w7)&P)2o~hq?1CXMc}NU zZ1o(b&fFxz6@o$mOAsMJo!tWL7$K_c8R(&d(V5^nfUJU7Bay|;t&9>i@vUZX%p(f) z+Aayqbaokpv-RGXBNc>q!D>sZCeY3-|r4ap3SSF?}8co zaKeTnMe7AciC`Nd`nmb4bv}A-0h3pR%42W*5k|=lS6w5|)T0vLykUdK<1)4+*ryRow1 zQ0l+2ihiWNpib~02&iSVv5+{y=i*Jv*dqRtY5W zsIC4l(Dy5i|2OFSBaD#Hw`-5k_kHg{-`O zbeNSlSl!Pls&&<}+Y(M^GMvu8;dDxqIh{=cW46QTyhh-X%)If@%qLi|g3(c`C}Q+x z9?yqho!BkLsnnay|Mty 
zdae7bS+BlENi+T193+YZoU}L4^nPZbrAa1d);n2$S*W*m9p^sQQP;bPZY!&ljT z+b(l&ufBazxLErC+0?!bpkJnMRiF0PbLYPr?>Tp(=kj?gngT6@OzOQL?sw1h80ntB zs45URLHb0x0zPr1(exhYUlV?oMjH_hm{c9tyNjj@KYTF`VwziYVg_89ip9x8FmLv5E!$E!53p#4buIyMfwL- zop2=sWx+mP$#2Z|0M*8MBi1Iq5xdq5soDd3d>nJcA7 zGrMFM(kBc@nWdR8WiZMBMLtt=B|uCTGNbAlQw_(}A4^}Ay`}r=p#3vI_s@WM|6GvN zKbP5KGO2%vaAc3kL++nCSr8Aof8Ko3?w_{)`{!wau~$UhKatFT_SZcwF}TSh81Eia zx~E4hH+|;I0~?K0_uL|dM>GF=DBZK7PxqYok#x@=->`?0&@jw@d{Gm&$1o9vM37|e zV|dDxXt7!uOjta0YcHO!Zsf~%Pa zXUt%#Jn*m?_k`UokE(9j?_7DJ!05SBR5H;aAG18+E7#PKVV#E*PE;}EYqWeyX*M5g zRNvX`z~kcwGCq3#MZ%~YPj)^!AsHIyi%}e5ROT+Fmr!~eQQ|N#WQ}@cCs;>AHSYwY z;0@8J69sI<8$y><)cTzWz0u4P$*pTu zDo6P&E15*KW-$`cK08F*X5mmC$TH`9c3l_kyhbWxGe62i}^W4t!Or9w)g`ArqgX`yE|B5u*Q| z&A&zi2ZUc!1w#+#3&(m^f{sVlQl8MOqUU@~&%wdPl%`HLDjUpXye40$@im(o5ip&( z?Y|Q+OKoy||4~JbA{T3Nj1FlLPS=QB#6o&7dc2R_Yn!YiU(xi~kN&|qweJA>2iqCq zaP$uY#6?J-)Dfr2BnzCP$=|58`at@J$V$*EuBN!1i-V*e&-rLpeH! zmmU$45S;^6{pLD{>UV`EX5i?TD()P*{vj*Qnhi?QKm1U2dT;#$>u;kr033q;AuGlhutUi=v`C&rk3OiZQ@ur>1IKSu zx`^Xcb^7WeIJEYVbP?;Y`)#tuy%=ZO&}YN0iJl03_|0zP8Lf4|TxqTeaZ)1U8k{y9 z#p1EjH8A4khB+JMI#D8F+rBLmoHAx^oR9qVvUn8PS|W>6m!p}dinUl7Guzc(VbZ~) zUColHV?~^<%$iymsNm4~eTC{#@l~)}BU>sKZ>bwdOH6ypXFN&Ufwh@_* ziIHl3?ViJ#HaLK6vrdA$4w-9hHT6-#Yey%hmo{gfj8BR_&W1GOm5RC9lcsR_&bOqp0QrhzaKNm)?V=8Mt}LxOcS zCJ1}nh+it|io;c?@6PinCN<)pbPPV9*Dwdiavpe--b?*wm}~cI3sr~I?9#T-zp(=j zT&bl;7$&&Dge{a-BN!NkT#LCDo2A~!IIm%ridfB`HZ+Nm6N}RS} zLmPBis`j-GSYLbSQ_K6>L;skRG(b{lB9^m&mk?Sk=K^l(rQiFSM7`Sn4)H!#7m0z(oZ?n_kCH4M#Ju{N%AX>$bV-!CQ6WpIAH_c9P))z(-QqD?d>#fA zRSvjgISWbebme^VZ!XP6C>xLUYY|V*mucMd7Wm&M!H-jKw^-l*J+np5Xu$_AiAN|J z1|in`KdbtRYBafSLb-8z7t?d+MF2WA*L2-){T+NMll8ZC0ZI7E6m&TF`tuG4UoTTr zdIo&`THp1qlPXVHsSC9$!Bm;x81udHHJqNk0$&&NR3_L#oHq}pxT z6!Yb!JU9FDd#6uHVEUw26aQ@eV5JznSY>^aie3w7uep$zK}C}u9DQ`{2&A6i z37|!iG#FYZ;Sw>)L)vs$r1{5)XkO432!9!?%rZ*0%{$EZ3``Rk+Ce6T$(b-J5}A0( zr1(IY(BXC_My%2suSt1wCX>me+{t7_@0twuJp->UnRE?Q^{WDO3OSjVwPXho2Msf` z1qE;{kg*{Ufg_jNiO)x&_}@fuy!g^R=eyW}1DvpS3b+%;8~oi@ux8*uCnAJL7@mCJ zj5UFlbYUX*_elYt*_Z;J=u5ei{`3et>NU))*gv7fBIVjs2pdXp-d*^=mzf)c9q~L( zSdlc#{8Bc>Fs~*%OXlJ%DcgEZ7owOcjwR;pZsenp*3QF)Vi9}Ue&<2IBeuzfH>zK7KlPjSbW;6_ zrG8KR_CWQ^7Ndq+nyxn~6uO@H1jj3jgTyd1zz=i6<3i9eDcySGFv+M?#FK3AQk-I% zGY2g!Y7CgL(6U&u2PoZI;Kp0R97~EJQ&3mPYsGia>8xN%*)x2p_y?gd84wu*v8*j* zJAvw0!B>CvX@IVK)hCoxA2rR(_;D&WMO8~`^q%X#t&v@y8w5X08nQys8tmV@EH?Xq zixl)cm#xtQR!(Rhi}N3Q%!bue6T)z?5_UFQ=c-Yak|<&)5+p&kH{=!D$;q9qX|h-( zmULMP6DLl>9K6G^FGkBc89|DFjjgLW_f*JPZB+YE$2pKjc%}aE=b*pT^~9*}dTaJg zt-_xaDa`5K0oJ}j>1nPfv$o)3sG`;abV47X?HcIs0wgZ00C{fnq~w%tJr0l%ig)R# zL7h(ulSWXS8Jc&*5oT9?}luQO#H|dftqol4Idwsi9e=C%4 zm-PB}w*HnY-_Gjw?O6RSQ@$P3>sxG~YJNN3_QKj;8_66k!QM(fcP(y(tK zMvqSJj+>~DRD>?cqfSzvt~0~8_oU4`sTZU_0eQL&iI%nKMT^i@X|b_ALP<&w=s$1}gQN@gLy z7>`~i?<1Iwr5Z29a-P6Mhr5vSo`+%RwpJ-qkCYLa@0PBilj37ue8l^{W4+UyGCh+u z^pW_AM@o*MfM?!d%~#!!d z3iNJiq|!MmFqpg%c*6kAQJE8uGtA*_*1u()Lenw0Z&@py)X|*J(^AViuWwm*InePc zQFL{-^_YqPNgusxJqeRaWD0kh?bb!H_p7fdmG(IqaSm6nVw+WEQ=pG@pcBRJ_K!@L z^TH>(Ie#G*>l7Q2@PQT9cl(yMZGc7*OvW;H2Rto{w_3&puR*g&P|F$HZtEC{0-+g> zW|^gNm8vloYNmORM~`y#G_*{<}0$-b?#ZTh=b*l8UpF9Oy)Eo7H93 zMe*_;S1~{cJ0tAUl3z%M93CR<9HL|rLnrf1Z~lZ;7`b5=v+ds%el}aXRf|(r`QNiw zRTe~g;xtv?%?^`b<*WLpKvy}?rUOPq$m6ZwsC#a(fkJDXgco>IV*Vq9~X_w>3={B`V$@*=1eWx2!$;WWH9- zdiJYS4Bpnato06bN?EzqE`+<^W&#_RMtGo#+~*ORQ~u@l)eu6wR5~tJti* z#a?1VKZZ!<^l+MWn%%0(zS(~#k^R74+5bsVQwjz@{o2MsB6Be4v5rl&%A?}jY__ud zmh~?ubu{NUU#4R4oW5l(aiCMm%C{a>tyUO(T!p*YY(3~80)Yg^Rb-NYlRBECq9MmK zNw+pDY^1d7SO+=|#+*C7&?-%oWl)yFs`aK*T7uJg?JI{iF3M&ESF>Lc@7E1hnDvA% z&Q9v$?!GO3D4sg!r(dMv<%GU?)G(Z$y~ 
zsiQfU_A0AOVI!qo#SU~zS%x(;QPwq-rMvh`r?f;D_k7{BEVYXpSVVN9=Dvy6>_3ra zw-emA;$S;3CnwFq+UrSNzH~f&@x@?C!CF@IVo13`S(i{{o~>Vj+!VPb-5M7!J15v9 zJj51RTYSyq;O;ikgT26WZSW*GdUATUb%Dy-B{Npp$GWuB`j)nRuY;bP-|XpC+O)o< zJ#2#~mu9_8uW`^P46?Q3BVE#J^=hPyYDe2}Q=+8I-X(pi(2)Y}wZWan3nHxRbxGo( zSYekmxo=5XiIN^>4@FX4)2&F~l3wZ2C`f`!v(m{{1?Zz;R;FFn&b|owrJWc~;P1W5 zI<9Y7KeoXoMqa#M>946;0!5=zi3vjJO7>HBK10EA=}7Cx;8z^_62Z-*WmK4!fY}M} z`4|#YxAp0V(hZxf5>?@p@nr3Zclc0MVYS@&M8RR*py(t8{3#op+;mTj%}=x5RSjdD zzhZR~eO~QjeZe{%>XPC&Ta|r_z1hhJT~pFbYmX?@BwAELsI>*d&KPrhQrqRW*Z4X> z`&F%Izp7|TgRI?&_J1zK5pNgmSN)s<{*(=#1V{T7o^<=WG^V%qMcwU*lK#mqfTSGK ztpR;Ynvf{TFmrBMkBFwmy&$~Q?$7J{mXez&Wn%AA9#qLv+SB>DL+tT(r&+_D_PlHz zO?z}FKH9gezd4Byxf9P}RZ4W?{_IyWHDYqlsu4ag0{e4cjS{c}3q;NzhA6J&{A@(= zSX(`=X6A-iPHB@Ed7|SBty`FUxy#zFl4p2#w;=UBy-a1X(dxQOzG9Qqz-Y z0ljiMwuCC!-r~zu0)FIT4;wC#w36GK4uWsSW?p({2MdzQb;ur_8K4Yo$W^WmI`W&4kcePtQXJ39CT)*&usUb z-F~yjhx?$!t@$4cQGTPgzgp0|(j6>$%~iLH4`t?s`Tn}~Uf2E2wb<;rb=7X?ebBtb z<1<@BbuW5dPc*ykZz^fhds>00I+pF;=vDmb!Hm7?Oya!+q#T3J#OmVk=_fAaT*5|) zY?2wkE)^(Qzdo1eXZfTy(xT4ep=NzXpD04Jd^`5u&=Gg`>F}A-Z?m{Y_XmtI6ZCo zwox)8yNZ_H;i)>jtYm&}RpyHHt^s1I*#Erjh>#Gg>NK{)aKiH8y!%$B3!DV@Fr6*8wu_xdvC9mkisvdP3ZVzcEz6RPdb4Qt2KGShY^LdQ) zG1+A_z=!8n`WUwi;dH4zmd<=spdRfL!~o)!wa&Jl#^?7``vs|4-GXMILK)@21m-yz zKUP#R^iyap#vY;D@WOniSqW$6t{NOP$LCrb5Occ@wU( zQ|8C@-9moaCc^wXjm^rrA`!^?r4v2SH_`p6iK^{HHu=O2CCW0=c``ZK_*3ImZB;I# zLAkPfIpu`-8}jzbam&y|&8j8*qK0e>n%7)cruNcT9UdJ0Ln6c7sZ1CDP?Tss^Z;OH z-M##lnLoJ$Ev>5-vr5`|5{uqd7X>zGK2^i#A_fwJYIo&%iFkR@;l<*?MQE6eMjYHS zTjHasF*tfA19?ok;VNwjx=L5$Q^cl-IqVzQ=L-Ls{DLN$u}j^u36KMPvZX%)<~>r9 z%$hTO<~DH;nCv%KUudEij`$zA&@3B`>oPvW{@<#pG2j1y^lUj3#-9Z@29XoIB$rr` zrvIigrN8AVU!2H1a@Oact;qz&tR+(9Lo^Z<-3+NinK^Ni)S=9rHo=Gy9=3&zns&LO4}3!gu7SRYL2NJT)7I z2|>%r;mELc+qSO2So@q7zw_naY%RtcF%T+XWR0NBEo0zPJN z`@U<~csJI*&>q(?<)A#i(7~BP5YbJYhSv=SQ$~29J6835nK2*C^9Uvut?eLZ;bPTR zRn%?)^UfMxsYNWahkSFHWVb+M1Y9La#&%}gVDbG{L&N}x4_sYX!BVWZJYhb_RR_whx=%ac!&S^;>ZLskq-7PXf#u35+=pM#?Q{HFCbFp;cU|^ZnLD+U z#FZ3+&G;OgbzN>&*yzL#pV<>E5M|4*pgA`;RItZ&ioYXNu#0%9h1NnC9@(?WCNC90 z$v>GZIJ!PJvM`^Rfv(zLfgNUkR-=G9B+rOW&ktn0z!|39iIsq-9wQR!AV;; zyG^R%z(cR%GoO8fDjq=!BcL8%c(9*n>R4(OXrn1ih2(z z1U8*)!22nBwyBxLPpqAsTg-mx!02*M<&hyqo=hw9f&J3qwZ@t_g?iNRQWWa3Rx!(| z&yGZP2>H9eLH^VFhi+&l_SFl@ecE0NrtfUgCBJz_qq!j3(U0Ax|^~UH+ zJ;K|=wCQ(MAD-Fh$_H9-$Q<8KfLKU}jOM~OFU%DZYGd7K50?BW+Z8^S#VJ(qmEwzj z4uyno7a_rw&*6vBiERK^wOJD-*7hmlGc>U+n0`Bp`4;`*G*;JOdhON&NT_N=RL}}Gn(eL}C?ldPGZ67}{2Yg7 z%fZ3w)L@7nTJO4n)w@@wL<%jG+$3YmPmai+!AM1RaP)lSPnG>RKj|dx0EL-ig8>VSHfk{b zi4-;2sW8*bH21bMZLkyq2t+LyEMQ17Z3?u6P6D01jCK`Qb{BWMU;Xal%C7aRVnNiT zrA^ujEk&SE{zOE*A^a)+=^v2J_x(BdPLig8yRYB(_3NvXx%bYw=RD^*&w0-C?>xsn zkI^hPsju|d^mi=_MaKCPJjI~zdM(YMkDSk3u@azXZ40lxgqx%Ax#A;kP>3sA;bHWw z72_CvilxiP=vgbHBTAI#J}xpU4TerQOpOA`;u0h(cZjtNB2GK`b&Mg(Y{J<+OcbrPG;7_x0lazFY)4C z)f;F~5Wu+Zs=k|>q?X7BsyA|(c7C09yev>j=Z@+g9@%*^o^tG&$YEN2Z}obfg?s;q zVBRplCd(#20xt4F&IENX^hL<-@<5<|UVXUt%UrND1-B+*gW=rgxU-Awc-9LT@S6S%^|gk3xASA#nOkiax7!CFk-2g9%i={987DAWo*}B)cADirv4hQ3 z_UuQj*kj?oTU4VHX9?cp3?*Wl*Mywi31>&b*_=CEvGP>w&V+OOt-Ei-JQjSo`0K*F zH^0l!7PIrTc(%>i!}8dgWX&fxgH3qiLuuz81GEyx(FH;h^O?ZtCwPxj4_E9MJwEq( z1%b54azFQ=?z@EQ#%d|OJ{nAGxCr?GYZ)g6y_FxICh09%!t=CpetX7Enlr`oDhEvLKaC8oGNi@;3N-DwK z{K}ATCv)>-AFpU_*c%!wB(k8>EY&SQWS2Z%@ z@~4UE1XICGz?wZTV$a@=<{j>9rYNW7FeaN*vE6H?qNXE`<_=Y?{D?OmZfQIcg}65$ zUGEzGv~Gmu(CFC-3THn|EHPc`R+k=wt#YX@?a(E&%|@4J5@Man5~LG9(+z zM!wB2hIow(#H?tzt}49NF;6|JeR>sC%S_UWkH}J3RkVVzWoJ<2Tl>15KhHzNE}v{O z+IlmYb{`D)NcwO*iF)nWquFc!D&+>&O{UyBpK?X8O3FM= zsNvD~`E>g91UlWFi9HzZ(^6tyEO86tPZ_Xk3A_w$wPN$DR%X4vDxH+x0GY1V)zV|&(AIGdS{8t?ad<6Skm7bQqn 
z&hm_4a!EucDsxP;t`}?x9+-F#JHCM~_xSqDddmXu;WLAe&1VT8Jsa&S3;cr5zw-Gn zKKJp_vkK_!v0KXmJNdlE=M6r3_6t7W=JR)a{)x~1eDqBGnB8Z~2$~ufgV`D%f&b!T z^W}*BupwyeNeRAQ^p?SuOHsvCiRQ6-FE75PWw3X9 z*JToAC7UDlYHb~MG1O}i*&~P8dsYU~cVK)xxq8xAFt>X;6^C1wL}9afl>vESVNDV9 zR_zPKV)h>J>ZW@%(6v~y%CNiM%p=8!EtMFJY{EMduMIF-1dHFnfO^l-l>me^j)4*y(ZqHtd z!Cd5N=WY-_9s&=(93xvU?gsdi<(cRt(k!7@LQ&lqfeet%tIEj#nL>Hc=CCt-5y9^gd0H{X78OH4+)PL+6Ow9$b+GDnS{YNoF5`HvFzgmWXc`r zctwEM3(1#~i)vdJ7=;ES%vd4@eP;TIXB|&fmxdatrvE?J))rT*S!9|#s0UF zzvs~Nn$HaN`4-D&{2iP<7yi3(;xyJ6(=jNiL?<#apVPIX5TfZjhS){;Ayc+cvEw(Y zz2pw+5v@VgPa__$e|vWj{vdon`SZv2=uEaRl=SbUb9F0`+DXS=mg;-?zB;N;Y-;AO zGr+X(pR-5Kkvp(+o>R}5qE_G0PXdkEluZ^cu1bcJ1B)FDd4Qw{B>3N82+4u$)E%qy}M9OydxXtasKbE`t=EMHTtxdyJ*>B{HPULQ0HYMB#7Yg{em!tzwrnmQ5+~9+UoJyb}k?pq%pBYX% zmsHK!j=7Pw%pZq+B0|RDAE`I(9G{8p2(SGL*C{w>8{=S^aX6Q8I9Cw%Ud|Z3&G1~r zYvaRX>n`YyoWl!91Y$1XzH`Qk#2LX4ctuJhsNrKb1%}DA-ick&9qtQKgn}u}oPi!)?&Uu6hw>u>iIBZG^mZSe9y7{mPFVQy1 zU1j{e-uVS2-Gc=ymsIgHb+`cWV-~7H8qIq|?}H_^#x6eCz?pG~6q%0W+fO zWWz_9+~eDo#&9F{1PlHORQAk5=@ijslw;w|1qA_ zlrw)BnhyN$UulS7Zt(pwNJV%CkMLW=QEBJHW^d39{F-u}-lS;>`uAS7-ikfZS-seq zKF6FdFkFF7fH*%Swv)ZtXE~JVcqJWstn))f{0C&xt=(glEveEQu_g6jtFfL7OJ-AU zbg=}R;6f}9j~V!W>Tv=3jMuRZaA6qVG31sJr(j)}!2N-hVHpt>3$}G^0A(?+$OH#6 zPAC(5r1OISK^B*E*+T5U1L@d84+1U>1UUW{TIu{kOQOG3))=uJtV^+lQ9%TF_;c~; z`VZ^$pibS)FRaTrkxa0e>HS!E?QdazBj*`*QA`6yH*jXc$)~~BP2jvhik z-?~{p#GZMV6I!Awo*hvw9{`cc>c?nm7#YpC9Z4^N|6C8s%I)dr2aQ&fVVb$IR&aAq{ z&IO+cSY;ndKp$q_=NVc}E9#QNMz4{Yt~BlEmKEmTCoS+weor_D4b`wU%8u>n{JZfL6TRN-MQl@rq zrtIQSrs3i!iUEJ}7e{%H;chc>btoMy1E}!YPchD;xwP|)Ui<>B;o^jo6xrI``H@A= z%ukJVZh<;y^bv1<{>;n=*;Nt79_@Hh6_`HS&d#^$V@4cx{xT^K@FxaKoorj^BpHk1 zP&&4?^HQ2;HXgxnTkL{CsBtMTFx1b`_#-9x)5`q~@|;N=Q-sUD$b7`*IHD>{*X>%@ zF1s2GUo8ytCx3NRvyb_SPoXs1NM`?NI5(gEk0Ede$+k1G%_Rix&Xk=G0?!u$`9ny} z(Fml+;kA#7TxY8mfC%@lH1%=x&hr&^kgeAz|2OIzLY`wlrqF@#+M=f5vA~W^1}@|R z50Qh>&k~NK&^VAD@q{K>C)3W0l0ovfS8rgH%$lBZ=Cw-Pa5|PDu_QUOz;m!AADM&2 z6-VI%#;7d^cD-|OkyA~0MX}IiInPgy?)xUWX#9j{-u^O?4^^IX!ma0B3!?3Y;2fWVWH*uMuYBa4}t z6G6m6SdkVqdcFX<(<=6Qo%Wyt1az(d@hSm^<4`*G2njN=tjw2sEiiy9y!NAv@W^sA z%wU+e^9lf>m{CP>KlkIbNCYLTfQChd~@o8591ErU!nSYzYjYcP_l@ShCaFAPD0A}vM+ zi4{Y88A;VH2(qkMU}3pzoIR9;>+XsP@fB zK{basScLV1__&EQQKFiiuAmNxH$s=#T{|qQr?W+O>!Uq9A+~Y<0KICrS zB2-*P)8mo)Tk-TDx|0_mqgZWX;Fg*gxNCQNF>nvLUw@0YzIyaq1i_iNkYmP;7$kBT z`5?7G_2Jwcqa$v%L7E2fugx(l2CcJq8$EC&qF0SL@?~93o~=9n+H8F`ytaZDN6ypR z9L<#5s|&KQ5B!c~Y6KfjwPS~5@4~L~SO!Ae?*SuUGluqpLjqs45w?@Y=ssl{-`uBN^_MQQ@zExtB^NfVq8F z2}}~Io@?s7S9KoY4=!smX(Zu$CiL*FR*#yV2MlGx+MguAvlaKe{Frw`tZ8Jdq;v7I zWWKx_*Cgbh$h{#I8S;}`R5LxJW3N{5!1-`>;(^>@`e1JYcEO5zrrpg!$f6B_32bPOpceA|1e)Rb33lu!KdiX%|*R@?crbVXpA0s_Q6M)C^~s6_^ds9A8i`>?s;fHMlB`*8aL(? zd-fI^fdV(mmDsjW96eX&aY??%e3_?tzLdaseUDJ0kno+EwQ6r2&7*+AB@dG9{|}?H z^}F0@&ySvfk_~$T-s*5=0yk=&`-)lfn?>xg8OZrPHuW zo9sOzmYSc9s~GVA$*HU5_!DH`Jo zSSl<+o?kSaEUej^IJgBeFNXVaP$x9!Y)d(R5y_>opbClaH!^{d9e65a`iLZJU&u{^ z$YH9J>oJQgbkL06pT|h+`y~|q8yDiZ&VVO|KvvEh*6brRDbu+m4Ix;lOk<)N(`Rv1 zO~R)od_Iv~EC=d4y3Fe)BQ@KFxerK#{=rHjH2;#RfNb5Kbjqs}p0T=ceif5-N_HU- z8YPL<*FZVF-iRqe13#Iy7?ZJcQ`vCICAi|w3C_#gML~U^&?w$zYiaYs8IX?1r}P{? 
zb0G$yOvG==l6>ngxKh*DXt|;dV>g#rgb=1oj?-E1p-P1@rwbRu+jl~f2Cd9c%7e`v zoGKzYnps*VvK+(bQS%jF_ulLh8KaN!bBAwxUKYkX6?reQ;Llq zkC^}h{XW+s(0?ous9npZ5!_eZ6A9B%#lp5P+jtYdaD50JbJ#G*+peMr9g%a^`4!zk zZX)ozM3U9!Cn5;8y%<13-5>qKGdSspQ`N^MuW(mIxGXhx1o8I5%i9w_S`pnFR zSsZ-So7pts(dAa3Bn*Tx# zAF$BWmaPAdiJkG55{TB&EE3``v|^yOA5I71_FHs#LBJ2p7|WZZ8rvZVnj{iLZ)nTcWL_D!6!te?dXRK3Nyja)@;a*J=RetJvR46KCsRePi?R=}o4^RmOWIBnsEqIK|5Q;9YE4J5R_?PHa}yA}nf zYyGy-D`%t5AIe^sAUEIS1uA^oKJ5uHY@mBG{f;kE%3t6wQr}bgBIm3Qpj!O7<6k_U zm7krC1v*d9&8-fY(Y#;N5qxNJ(I=vFKOD%^E{_mOSIgebOn!N2ZZmIR-X6tUi%+4zX!JB}j~ z8&QHhjKnduiOq33jd(p4;UX+z_=z|GJgZHA2&p7wi4OJdF^In zhsI%bO~=34&JOoTMIZo*G37Li16#(ZGnRf=DI_^RjsB5<({7#fa@si`y_2JLh$JW2I%7{Rf^>m5@=BbY9Ci zJ5U(^q%PaFf0(czAmfyui>;m55*W|2wb` z546$IlM*_EHQ$aIv8mew#m>E0mc0kLy&5&1z$}di_yyf0QVVJSUAi1>ZiTSwIm-m9#1>}>z1u3Kc7cJK)9@7X= ze+EyExtm~c)ZB?`N=1!o_#|uePoun$cJ|yK=i3;}(K~k<4=21N5lIL4NyDo*u0g5& z`M;v@vr~gUHaYbr3w-oJ4M8@sL#My-xTo zuVb-R4f+|K$Gz*VYao)%w69IXME($Y-X_-X858!4E5tK}a}ZOAl6p8@`>;K5xPsNS z>jMkr2lw{dQ9j);vrZ97n5O%2v|)25D0!gcDNjb*mWgfaIx*7_>6o&DW5aZ-=^gJn zH38OaVU1nJzF)3tWGgrUH@*7K$T_FRF9k)f2`oGK$K0D`*#&~4ZakhLLoQQ4o=A&m z!^~4Bgb)px5TflJPZ>LHkY#mS*OwT&j(IB@N6$6=dAs-L6M{7Vt=|6-@2{NFNeuHg zRrj9K_!k?y*@TLRaUr9()2%iGY*2gD*u9m1zsk#}Hb#;6#d+B|{PQSZav=oYkn9!2 zkSJOR{;}9B|Nq$%rFXeM{i&yvKhfR1m?>~9JdB2y<`%dF3T@~RneOiRlc5%SYZH{JQ4|jaqWSQ9IR?z`v-SbCAtC?Bn z;)D-JvOz{F-Ov=te%2e%7w-dh73Ie+5Vou=I}g^=jXT{8Dvwv3g*O1lyRr8W$k%Yq zlt{MHpuNcPzne8k*n}5FI)-zd97{c!4kqx!x5{rImS9p5E6G9d=YG)Js+!!*p(!GX zi=yz>D);(OK%r^6+vzUzZonX9!On65KH-@sm=lUiSrEu-{^=es_rWs{_`z=5l>4AY zT#`y&wI>F#&g9B>{2`;9{Te`OnhyyK4J4& zT}92+Zn&8}hrJ{u!p{F)6i7_=#4-Cc?vzJe{9)0K91sbpcnBgw+Ny(aG>I0+OM)D$bvy}3-5v^+t+`DR9^xyH&z_|k`FO`ITVIX|JT zpX~ik@I=0`PE+^NE$oQljRbF&o|J~r;G@~#8#5bHi(Qi<@CpMd-~CCgc0v)~b^24U zKXLa1)65R!-v6U{f^br2HRP1GJKVRye*$ghgp6?CS9szqi6VVn0KFyPK!rq*&DK~r zw`hz%NtA3fko}UbKSWSxx=_9|*BuIETi}N^>d;iXAznXPZZ#yL*+$tidG<`AxFs2T zs^j~)*U#wufLv(>0yl`#G{GjT4h6EO8rSo?Ow>e#QXQ@l=$kJk2LV{&TR0#$!kq1x z@&w;ZPO1fA^?~e(ng;!m*p80a8!X>_Diby9AEC4YpB{9#B(p{08*ATenOyj1C|Q)NKoB~n*10xqjGLnb67Sd0=e zL|Td=_DDfA`?|^zVp(|PTa(~1w#=x#D!i)~vMb+M6r75SFP`FMsqU@rq34KA${#4| zfl`Xi_0R*cnvW;Z{Rtt#5uaopLQqk6*N>%DE6zp<3u-iFZwCilgOG)xOzlt?czE2M zc3&wY%jy3iU(HeJ3QpUoHjhnLu=(Cnx&rsR$Gr4g43`sL!c*hrI4JOvtaYX%WKPVa z;48j?h7H~7F-eIgWW+Z3=ZuXweJ#u@eiyC1m_0N1`n0ao$Sp1u$XFeFBom(Z2>Yuz zAMBCQ5tl!lEd5F%H$a}@5xpS*sz~b)xiRc0$&0Lq2G> z@#MBk>_i-HOV#3UCJa7NxSGv+%fy$Yb4eRDs%yHi25#`{@}%=;(y-XMZNZc?Y;%S& zF3nVMD2W%S)#!h4;%LTs1uXciM#ry}!z-aq4 zh5c%r+iFf~%`?^VO>pBTT0nmJAJlh_sc$ztdQLxUIOFGW9&sIUA1Y5X3zlaf@FBWD6`C&>je+fTa!Vfo8kplwO08(#^5;6!6CdEenk{UaIVTes0s*6=6 zoxdV#GD_qxswpCe+&~z~xgZts>{f9Wc~kA&OZd9~)7Z@}sP5M6Z#8&&cVm<;hyw$Q zmLRM66@ipK>+iMvz|1U8AS$hPe!dmMKXW&PAbvJM9A>E?p^uSCo-t0ACEVLq zA)Yck%8zU$67n3e!3}3eiBnp|Vffg@`}g0uvXmWfEWb-Be_1I%Q60`j%XwW`YA1Lr z8MC9|9CB_T-OxyGh##mw&Jq*9*|!HH9y1OdGC_nr8w$gHf2ZG>-p5-Fmk=+!mRtCz zoCyEPr3u?r9GBwjua*s>J5^YPLc2U_J@Sfjx}$Bg_be62E|jdsiiFR8OHwv11TF$f z-4uOCNBA)1ND#rUk?AbL$7N^v#bF>%nBppXswtao^q5Pt#hlK9Zg#vEv=3Fb5U)A6y=ReZ@sCZ>mXFGeC^K-eseV5fjK zXBQD@xf?Z%O;y<%e?DJICta-F1YU0LD2IPkDFwQM5qGF8=N}bsGs|J|tD+Yye}HqF zh$Wf7F{Eh!K~N`F(wr`=tVc~lsz?X-i|@qO(~YWRLpKrLvU@1=gD0izK}yLlGOS|( zL@VO8U>$q1u#S^NvkNFFRcARLVqKvrE~=Hh(By9*W9Grx|M8q~j; z`>5{WXnbm4?^;-#A5s{#wx>6&vGkk8Zx%NS+2v1uP5tQmRm$vO=a(2xrFkSu;L@2- zP;u!NE@V1Uih~=MdCdBLY^O^(*+&pOkW&|{tF!59b_oP_OgDdG#^BPb(MJ$#^lRD) zx@Ui11EWa*{gWf)R32KjICciKHJATj=y7-YMwNOS;Fu3Se5L#XUMfVU5Hs%ed}2Dh zgqzl69urgEj%Z{S!5npDSnpjLqC-%w5&~F-4v6qT*O^9nrb|c)Cnby+V$`WrAu$bd zi`$M^aKR^gaee6+j>*G-3^{SpA=+-%9`zY3u(Efi0}}9z#zHn?2Y&<6O7!Y30+# zr=3stNI&4b6Ydm9cr@Swf 
zJ%A?xYWm`sQr<9lD`jUlBjU;MWD8=^vhxVm;LK2)F*Rvv*37@$Dz=6UBYvy-V2-m^ zdNOA2lg{}}%7>MZ!(HDljuG%@;-=g%aeJ&YYWJB_oTz9Y01 z5><0vcAlv}%?gwJ0ajKo?FZtAtF~Nc4hlO?XY*ZfEv5uKvm(Ppa9h5qO|yj%&?Xv~ zI(lxp@Wplso;9=^^-tiG@QcmLb{2Cc!#8&y$D%w$+o0k=*EHpz;mu@zWgF)ljI}v; zLYu?asLieFeXB1anGLS=hf(o@b^T*H%-HCUEdRX<#t=@R4%93}Fs!nu>B0#F!a`;; zHNdeCoBr<`?|sWSB$v=*dTV-rp@F2SEw;7ejM}YqKKpKE+MpT62&){sP>X-RmkzEU zf4^hG`={uA^&_r+zhQehM)+5?M-GQ>(ZLEz`OmKVIHZO)ufEMeuPYXMp(XDW?6qi{AeKqR`e7+KGu zK!!%)J7eZ2B)yXP2@3hSs3M&|*QBnUI3dEJXW?EBL#C|^syw`HXMbhiKR;>L`D~`sosPdZ;96qrfNhdX$ipi znZB_}m~_|%@YukMQ+u8-lQTQvJTov_-t(KX{I|q563*5EcS>~`mA9Ge3Uhr;;=tA^ z*)TY3v0{m6XGP-Pve97b>;s8gej5l(3pjQ?AxgQ!<=K3<3xP*5+s&7WOxBv)9b{Iuu1N6=YB#bm=x}&KS`%=17CjU)6TcV zseAX4=d%|fP}xaR&fg?*zkfBiroKG89CI>6N7{+ogg<9KgK>e9UiJ;~n(Q0RNzFH> zub+EvfkN7tNndx9ENkD`$PZ}{i;7ARVu}Q!+O77mYcY{DjgO-fBWF>&>9TScD)WKu zJe_X1swI3YDOfeB)bPx`6We9dTSCa&HBDJHEm$Tu{ZPnj`YTiUov_Z>?Z+zHYevd8<8gT;<@_mGf7K_5 z3S*>f=a*8DK9O-A6v{aAZMUZ%u;ZytFQ=zq+f|6PO?DJkxrFnD*y7-c2g(uSu^C93WYBYo4RPU8R zB(;f;zK4`VitoLAhWWTk#K!&;zgtdVz@1uMmM)|=Dd|^piQd;?hI78rPZLb4`gJlx zPoVUPT_%E=D24fT&NnttI#K*gGs-QCIjwmaB8Rkby255_(#J{nL|TJQq>St#Mb%Jv zMrJ+td%54B^pGxxzGXJ1Nu8i6`##a0H*avel+YJmTPdbg;Sr`cRSxm1{4I)^gdR&M z*iUz1Qz?cf6XH9rT-(QkQ9{d7}CVQqNkj-Z1<>w=il?oo5Q1G8raP&edZZ`}s zsoW{!0vd+ji2_>EImA2*B?jlWF<4?ae%s%fP^G0RQ+>rO0U{^K++E1@7;5^~1`YY6 z#pV{WFV$$C7W*O4R=hpo+()R8`D5nO$W{eFI=>(J01v%NJSS}Vw)u0J6DP5AqiTXT z$b?g&_1uTn)9+aaPBNNUVL`lDS$CyG<=xhxQJ@G94k=7H-%@qeV0>1fY+L+{0I*!M zZr+*sOXA7==aXsYKZk(S&M);Fmuygj4Y(Z=idxRM#2eDi&ZP6Rey&m|*@dM&#pZU) zDTuEmNjNbu5=`{`8m^h@VVgr>+q_wr{rx2kkJBXt#!D{fAD)(6H}8z(x*JaS84OvR zqv?D{4Q9K#*wENCk*H&!u_^F7Y&gR*gF5 zm7VUHG_~G#vNb;wF(i6cpAlGnQc^+o5ah$|dUI-qD1hA%#pZ5}n-VI}UhYPy4n97p`QLEP=B^WpgP@o|hu20|YlF%FIbhk8v8_{v zw$16<{zZlItwhU_UHJTvdqtCm`*EaG%l*%PV5_5@Sa;pr(MGSuxu!)U72Gt0IKt}W zgY-TQvICa+RV(nW6!m&3EaZ#S!tn=l2}?$_>7vkeEuEh+>m%-pTtX9J(R`E2B~iBG@f;N_WN7aG@#>ojAYuAxpYy@bweyNc=_ zEjzhxnHH+v>JSeMi_(pzvRR!oELCjQu5%vsmZ@;>PJZGP!mcmc&L^!xQ`MY(^=Ejr zdsp=^y&7j)-vpB-hWYi&p=D2jy$Y@*{k6o~h9(rd#uW3QbMC6vDm9UYDh!#3V~Ihn zM#vAb?sav&1%tW{*gTdiC6szV_W;N)`f9PKG#agVKRkEZVACX)`|$~2J$4z;Yt{H1 z6*${Qeo1xX4De&E6B(dH3FCc@O$<$F;(w!_UEK5@F1an(^vGRT>#VS|`Csp^elm`am zCVP0=*`IHjZsl7d#mbvM3#-mw-e%`7Zzr5D?K@UkD*^D7f>Ob@VTT1sCIGVp(Xs}m z+|a%IK91pFcUcPxv}KNg7`Q_qBP-?4Ii%g45=@n3Qt*qJU=*L!3s%p-GME&&UP^`ts!f2eqw)u=*OPFEA5a}P3aNvPBL0%W@<&#veun(w#v=RNqt72 zZH%OWPz0q&Lu?w46CZB)IbKX~Up_J^d{V^sLvEe zy9YdOC$~c!QM#A6L=Z&y1<}Dy&=d$BApdPB6D;P6Q?g$!^t{Y{046U(f}Y0s26Vm(y&0vZLP6#_n1Lo=)F-0_YFez(lHrcL+mAj#iI7_ z;4~9_Wh{P;{TZ3UwfhqXQh_Pq-X4CnjWN$T=aq~zmsHgAS&tI+XO?rI!)wwJ#eV`X$W~c{^V_MDVH^|7Cz%!BGogxKTtpSda;AKmvgyFln-CoO zNx3aA7ZYogtA~q~GcQrmTOQP0pTJ!wx`GXVh)*<--SSeufu&-_C#RG`TsD{|^9c55 zI^I*giCgkE8&1Ji0Y(c~B=PeMfrhS5n^tc935CqGwhD9OIKZsf>-eEww_>m2r+QQA zx_aSlEZl^?Gr~;JT?$rEKh8+w6jP{rWs+2-Fpbg~zfj`-(UXdmbHIl#gEssjwj3JU z3K2Ateym>}MWS}ue~!n}ecz{5P_rL$#<<9_!CneEW(v~oZXFxUH@0qAE=bSNB z@}ee+>bc|N$ZpUHECzR322RKBj%^v|A;VuF`sxrHBx}?RvENhJfV4d~eW*;=pozA< zd&6fQF>6>eN$+Wpy>H|l(|3#hwCWE+yWGJrvmVMgLD)lkkSlQxp)8@z^#?M}t4vFF zn`hPG1LbFzbzN>dDXcuzXJ-oOYK(w*IMU8s>^##nLHUse)75OWYLBD~gmY=evk$Gf zK7!39AB25zE>8+Rvz@HIXo&R6o7wxBM){y}u;sQzWY6#FRWL)nVjJodrv&ytCgqOMM z+#la^Oz|m`ibE?aHM88asM;_G_p576yY9P>ZS2APv5g(V(yI!rV;lR%q~Z(<2USz{ zfwgKvtjHbkTl=ngbX3vWqq zww+g?sL3eYLB?4X?>M%dUrZ{lUNKd6xxX_QFufXGZ5oXhJ58g%JQg>rCe@iQ1YM`9Td3M2v7g_F@Rhq+IjG;o)Ygx&W7~CaJ-W|oH|@83&9Uv?H>o)7Hr2!T z>WL!gdLQq$v$k4=6TiV1@dQ>lNwnWC_iJEnq#(XhsO(S)o~_~8=uJGx`VX$vgXQku zA3HFOlUfy!Kk4je{LQQFCl-kM$jklBGGXwTq8!?cXL9 zH)#!axi`}Q6R;()qFe3sAB)RtC%uTw+|+D2(_QC$_8W!WH&-3gv%i`23O(~)G0Qd+ 
z|A}I=WlS$$Zs}O((dmI>TmB>RacSy9r(cFnml7+qLt!}IbQCHpkbh=gJkJvMGHiF3 zd%tP6HgPa%zi4Z1BireC7aB6?63qZ%Xu)Jk5`RbVsbAv zTZ8etj}+;6zJ+D4jrFTN%5g3%B*^fC(k-d`I5@v^hta>B&5yX4|IyVq@RLnOu@&Kj zww|sCVR;)nxb7aag<6{8-KN;NffdK+RtEwr%73V%?e<}#J-!#G)d!g=qg8*iH#_sd z=CbUGx%=vQKt9&9_l-{Zp-hBtBzg{(^)v*!2uWNNcaJ-jb19q3@mk)CNotqd@E)zg z+j{reWK>H#z`B*I2a~dvO_he+(;W#&AP6uuhO{`>NUEvRpL+d?8-Fi$2&9x6vNRGI zO70!0w*kq6_=1hSO+E&5Go$#K8ZFu?kuGZ}OGNNehB9ZJdsZqi%J=F(;;i!O+H+Iw31HCPjzh$xH1lWvwwiDndH4sN+7U2wb;?tRijn7oHzl>Lij{9JV7_4OJle)-X7uAA9jg>NOKXN4@#XXf6`SaQ1XILB&|-)#M|*FV z%}to*S)4?!g@p>+sk?OKx>IOIS6*-h^Dln16?CJJhEQGa&A+=myQ5Rlch*q~4@WGS zKuZY4xjqEelFpCzx0T}w@D1pS@geZVd1itg9V$@32P^`$VyF95r8F>c$MCJ2m|^L{ zhwqhrBollI=L~Y~+#a1BW(WTwMnd#v53n(!j2HaTXLgd~*;1hMFq%_k4^#)jXa(0) zUyvxfr@EZ$q@H5Lpam!w9Nu0X;YK9gW|!SjUB!*fJcybS1k^Bh)3Mjk#0G3+ zYCYD^b>^KPRSOEUH}Bz63H_oH^`?S5j5KaLo0SsHuS376gk?&UVt-?j;T!5abHI}nb=aDQ^~`IASCYCvM(hM`$WDuRpZ`Gr4u zJ`GJ4H~h&rK}Xm!h{=aHMIAOJEN=;~?Sw)!Fm+=*|J0e{`Qy67d(FL1Lbp69(i+zx zN_I@KztBV)Jlaijmyd5a$N6}%5|L!y#tBk!rR4m48L>Ud{Hkbj-Ky$zepLnCMxrOio8RbyZ$rw#0gEc55^CRR2=&$PCE+{zKsEWt<9M+J~ zV@+Kq&weDdGRGn(Z~3ov@=@AZZrO!vS#qInSqK!kR5A|aipWs&j!d);Y|zhe%LL%o zrwcRR!pN9laemR8G7+;h>$F++O zren*<8M5{+$OapnD&s@Lg_$_G@M#U$1BZgJ4{sTBUohf?;$M6cfNLE~J;eKIYsuTj zwP~qoAxWNzY^$h~*pGeEx!zL9-8E+aNbzAzM1V4jAw&p*J1wTOQQ$Mom>2`Evv4^& zTRLs|u_iIE0+H{soRrF7IeB!v81Z2LWs3D3$lmESve+Q-9g_90_Uf(pu_W94w8R0k zf3ZdAM6&qU47KVEYTg0zHKoWoxL=DAuP&byk6lNG0-G6YBqvf8gnPA;5t(U``I&8M zagQZCm((m~SDiL2&t?Pm3~gC-${)2FkiWd$*PG!5JJQZ>?Y@3yVWFHPjs0Cr2KI(t z9Lj#wB8R_l4cc_h-cB+98e^otX{|uVgykR9%#PMWMY#K<9x%F|T!=iF%v&%gY%uI! zB>ToKFI6!^3GauR6c~vrgFyh9&{eENzIzzw_TF^9Z8wL14W;wl{np?%R>op!J}-@1 zPWe-;%1>Fb_L`2fS*H+y-%6uv_0K+MoYsgO<{rvU})0urEV z)MC`QVsXMMZsom}<5jM6Iwbv0Ur|oR5c2mu9{!rnfg*_j-sLwq(g4I#R?e@Ht3a8P zsw?)#daIerf$(hv+=7=3ryX*l?d{cZ^YBvsO4Mi*rCGtqWA9RJxMLC#?RL4V~H8ZYZ(3MYm`Q3;P7N|PGPM>edE`% z;(F7f^QjC6ljy`6+m`#Fau?Tlnk&aGmX?R4WQYe`&oh1t%ZKv*ZNR3yd%O7OL@a(jcCa(-vl z&wT(9zSrZ@60gZJG$H>O;}u`z%pFeEXREuK&44@gmUDBfmaq6#>yr&DYq~yWJ72Dm zvmbOvHRM1nL6+PgZfuRA6Xx2>UT%>YEIB6%$%A~gl zei(425XiP`C7fqdFp~Eql}nYBvybU;zWO()cT3XQPg1vzVX?aA1Tv)wUe7(~BiA3H zx7r^sZmq5Hn{M{ktMjeDavse5gjtR%)~wMRr88by1ul~Iq2tZ6!CI4B-Eq>A5UD=M1^Y?Ey=QQNo4JbgHN&K@AtTN5-DIU1f{YU3vpP2=oFjbSc(-|- zNfNMr{=NS&|32i)`*MikX^^mFIpvsY!i|Ph9?c z%Y@<%fq`U0M@_iz9t0jCYq9W%{QfI5>EP$Y<=xK$OFEbC@ z($1yLWGc~aZ2Y*5R>p~tPrQSbzp4(=chwBsa`p`kyL;cTJ3qwgh~Qz$=0 zN$;VLgD+YGuLrexX}~>?b1o9>*!Q3!rG*cY1MrfmI1#DTkG^^cO39s+QuRfK$u1Z# zBU|zQ;@+Rt1Oc>S*U49{X9h86rw>`AiHPm$V0JII*g6ECY9{g6fO`#;t1_%PEHCta z&c4*y{a1x6`{x|sxK0MSj-Mma0pm2v2~Gr2^1~*LK=`b0Yygi0_L+d;@C*|cju88r z9tqi$*zK1X<~!{~DdciR$l~29bckzti&!YHWQD)1C%KN5Om=}*Pv4In4Cn5e>H%pv zGwA@(E&;q_3GliJ_V#2Jzy)7;`ZJ4l#sCK|r&V1+4XSkE5Fx?&T&69M1Jn9f`7r&kKnD_Xtoo<&Vs zPQq40XQZ=SzHe@`ufbYt+$pQUN`yL{Oh24;h`NW(p~5rTqtw3<#0>GRkpg%l<`Dm; zhVv=c?dsBKg5k6yT41;g(nloEo7GV4us$4-rEVG8b|fCTii0u_(2EvYYEjsd&5Hb~ z(uz#{fmx3W7ng-?PIX}w-r_EL$X}4jsqiaFydU}HSoK#TZ!cZ1uG=im26uLT1!sWy z_*=a`UG_x}L!?S#h%EQa75{DQyl|tzPRnHMK&Ec^oiOBH(m95iD-qF-!p<7Aq%ZA} zuzeYhD^m0Y-;e;Wgtu_{3y>Z2tD&ibX%2zP{mGCAVSw?#c>KbCJBX!VtDm;F_C-7Q zW~5^!3CjA@g#;&<+GG!Tpkf2B7P}B`@2HTXO>V)?Q*0#uuHzRp+;O5FtsqY`R4C3M z-ISCgtsPsnwQ#_|VG7Sev}d;8wRHn8r1N)dz1w`>vsD$OMGFvbJ?W|EAUExi$6LTB ztVPZ_kV+yVVxQB`de7O~tMAS@AM~cW4!D{FGZ6u`)6;Z*X-_(T8S^8%Nqm6Ak<1!{ zMP@;voII(UMN;+QZw{=8_H;)BU8h;GKXhV_+-%iqyDg$@zgOaG1HCLWUk5tNxvYnD z8|m6d?cD2;j?TY|pgsRH1d*HwY#xOm3q*Mcy2BvINTFk_Xw-(5Sp9Am``W7+M|XEy zm?+X^@w#HXUd7GCV?vH|Ff$GFtB2tou6v|i9fPM_2IgTFRuDymxhKXsaw=j5ClSgS z2C}$n@}?XdF~qd%W^lSS?a~dIySqN^&$}nya^5|@U4qaP$iiu7_1(-w3Bx}a^5$N> 
z)R@(GYv$35fJ9^NiF8~+PrB6;!vGc(W}WLzMnT8bYCtc|#BX%?ELkg8U096D{X65o z8d78riOL#~sI%{4!L^YoT5UP@p^1P(M-4B+Gh)7R))yswi(?6=OgO!vgPiHHSQy2^ zGZ0t6c6i41U{L=|Cf4>7oVWS6Q%@%qO_XfUc>4d7DvLj z;sMvu+#KN-N!PNqICD^``hCD{0tfQhqI;&`SK{n$;?$-|3q$UkcpTQ?D+a;caJ|rRTMy4N!2`VW`+1l*_H*>P&^FfdJ8k>sJB<&w>&6%_D)9i z6ry!(N?fwk-@$7popL&i$hg!lHMASRo;Q(3QW5Kt^{d*mMDSi;pKzW_JJ+`=;cCO> zEi07FlC%8 zNr3a5?HuGhO7Ea=BcEo7{)ZYU^&gaiKvnyG%fT7{^n+`?BX| z$i3H~6uq%Oh1bsIT~N48HVlMYS-J^F@NCA*K+^rKbc>xO|H{siyD{!w{zI%~enAWz zUP08J+rt;m@iEZ&3)}Zb>Zt8n(i0B%B zM{-)m}I6U8XPHc_bA#C-G}&tu!Ut%e7%%Pwpg{}R=8Vdj=``H~G8h0ic2 zhOCBFbzN}9{_KmRCt?sM``W&6-*vC_nQrIOg-N;g6}4VUk^>oF+PX z5j^hnw3gw>ejmi~F~ zKncrR)kofG=DKZLV>^fgSj@HJtcgt_{>puSC*SHp@97{;7X0Jb3}qLl-+}>KNgS!i zX>61HLW9U&=|Yy%%=U+G6?Uw`ImDm7V&#s6$V5gy@dJ1c2^-xX?lmcfSRosEy}4xl zhKSx<3P7zQ^3-!OVT?@_l{=HB4voGWS}0cD^HI6QyeAcNnEG2YvHc`H+a^W``7B{p z?K1JB|9G<40BJS!4EUp9pE2p@_+@!q#`>7=q!%}~jN;;15>#Lw~?fqwo2xe+XZ`pJ0KAzrBb2sQVP5ftRVD z-BaKzq*&btOf^kWh?7S(2L__q4~fARD)+E^K+O%)oLjM6gYRuxaKg-pLdE^)V6xI5 zQt$a5;ycZDkbd18!%44+6~V5-uN!#^>2swO94k3j8_ZQXS0YLdR9mkHsb(ICW}Vbr zu6oRs_IPwVsdM;h$LqjXo0Ix5U#(86hOZVURn1qklZx`i@-U09IQvogs&`T|`KrT1 z!WT~8E}Zl=&&u5Jq;_T(DyMx;xLg!+k3^3Y56mwwQ=(8(QdF+d20kI~6oa_uKo(d! zPKe8$WsIb^dl3%^fdp4bC6=u5| zJ)Gq|W6`XfQF<2QnfjY9G|uGtsXTZ8bf%ikrPY~2U#hm_M=6C*Rjw^H`(E=d5iW^_ zGgc&gc`xOppM}fLB|*7q_$E``7WYqQsQ-(F+tqc{9Vo#T(H*e|T=TE`!K#DA0- ze>9IrehZ%(f4a;(rP;Xq-tng^N>3y13FA-K^HeLL&)IS$?oqb25%=^XV+V%9&#b%^ zniziM(r#nHYLA*!IZuY~opNAzb=kTyXNp4Z)+G=Q$sfupZsl%>2JpA6 zlM#r#7KIbv_ior=pLWp>S%n*`!jZD@jA`MBjoydC7D-?kZlavCm(G|CW5voVK49D! zP8*9G94Tz6AQgq1Y6&ii4#_jRZDwSg~Qy4$<62qKd~qS4aI%USQ6>dhQ*BtyC1g`ex0 zQX&F*D*;dLH+aUi)_(njxEFA5(X+wE7D*d?xBB00{&&0o-R*tndcc5xDL(06ih25% zVyFJ)hVe@=UjM1Mv46Q~{IY-ia%lW=w|@!JYP3cWj0V3a5<~ZFdq5AjD-Bc(ylJ7#PoigqndIa`wr-&K50C`s=)JVp1s-lC8yf0IoU(cjo==6hWc+J1Fc< zZ>vyAr)euzKK*{pC8vksytiLc3*_!dX%wR>)YytkVUl;pi%Qo}?6h!qyN zf45ZiE)KbGz8e9E*klPni|OWSx;a4rde*DS2taG8X}z2ZxT~RvC=^i#Mbw9D@ca95 zy;p46B;&*AQ)4PE^g&30g0Lo%hgmi}(l~78?o-bp0j=WBcZChh`GJRx6Se6FPr2~X zUaZ`vTe&0J|Jx!kS^^^Q&jfr)l-s0Q6n;7`6|#4`^0fIOi4ox5OeBRvfaNe-3EE(BNl`!<+r9wD}H^V~wg=5hg14_5jQ>#k|~R zbzuJl@KvqqtmGe}!SR)R{fpW* zQ245(0Ld!d+AY_^jUhX-jiZYel?cIIQyW8vC&G7Dqy}REq)|7O;yN`Ik?f34^ z^X?9LcZa=_0TGN-@l#k8?}fZOzdda#sH_zpydDml+DrEy7Mr?D_Z}W^6J?bi37HVGM6S{!p-!|t z;gLZggjokIlJ{Qphd_+VDed1>$0qiEUdV7Z*aJpu_Rh8lN04&k5pqIV(W?^SHG9G{ zi1i}|0#27CvZFaX19dlXU~_rbyAJdoRw98ZWV6dX|AzUM_&Q@yO7(8fvgZP^TIt~T zuXs0aOu_MzBMdE|@;l1B@_FCZPW+wZMT(VmU8}<3zW?A}NklZsIGt68O-l+SXvz4= z^^?R#-NbdX(Z8uTj=h1A3MKZD%Dpa)70KC|76)>7`?Crm9s-H zT0XGxl!K=B79avr2}&UI0Tq1sPu))fB6iIw3aj(blP-969&e@Vq*osLSm=)0pmfy! 
z2Bt>8s>WlH%NYUoE&Fyd};k|^=W29@w%UA zTIW|=@%q)RtWAZKWl)Q|B$_TP_HwHz0#*q*HD_6C$u~!C;Y>W^3@TjtVfPSq5ui4* zTy^>BDJMv27T~;XuBiv{onY&AzG6n+i;~p&pE|P&t3Bc`=~m+Fy3YvTn`%p}OPnb) zZz8{noKyc>GcS`@CO4pVmc}vS&YEK98#4J-q13vo%~3c9_nx-B=hr=nb@mwvQc&CZ z^NFEPVAoHzn^^Yg)$Ws)tayL43gyoj#W4thJQuG&7oiEnU$#k$D9)dqZtHJYYZf^_ zIv`QlW+yS;@sQl>m^o`R4reywA1##s4Kt1GYQ#@zd&X{yGk46@pb{LBGyG}ogi~&f zcr*Bl9qD`z*^S`8GY?s{+Pj2pX91S`ZSJ-=)yyJn!{yahp5QeAp}n2%@1HmHfvHi` z5aUu0U&gqEy-|^uW8&2><&4I(ynXQz#agHAVKC)d}Y$XCOBmoEr@HHloYuX8PRe;a(CN1n_Tn%{(vw!%#kFJGZ7P z5bo0nWAv{|1i9kC0FH+f^j^siw2j3dJbhwF)GEMC9Z)^&2p!a&J3}lZ_^}d`X zaC`e`F!2LS8$O5_sySaC=t?Y!OT#Dc7SN<@GOkE7?^I#~Pj$n2Cd3Lz37P_A;UE4P zc%8>eJrU%)KH@|$p-i?`>g;!#RI%|5u;$(wQT1t z;-QA@d>gt<>&@fTNphpTzkHi~0nOs^t)ka9Z?l5?u9LkZgyO>8y%){mT;T^%uoQtt9Z$n-RDMBW`VJc6*CAxEhK7#Xm1imTo1?foXJ%># z7O`*U5C}0z*>)Tj(Lb);w5O&e;+E$A=(Fn2V&jzhsGL$W4Ma_36M90-czGp3FN@O7 z;u;*trH2>lAui@79$JwJKYUt0u*B(*p6yDq+f*YH4(3&(QaDezZ;&wlK+~CJI;(*l zU?>N*O3I??OvaD6$qRyVNH7RaoZX{SihMN<;0|pDz{$cn!R!x$IA^(E;p=lHC{Ox8U1#ZCjY4V6m;YUymAU8^2@bN zE+XlnR zUheM6<@!~w-YYjeUhdH3auL=WH{zA6fjoCzo5Va=X298^_BHO)fXAa(8>>$l6_Mx5~d9YqyRE zZjV>4dAyvlJ_74lxgM2k@ycx&FSp6R94oh5lo;?*AzI zn_RA4<%V5@m-XZ2?w(w(U*$G=<%Y-09hzJ&avbH>d*$l5_Q%8WFUR0(Rk=2=T+ev9 zJ13XhsB&?yoCJ2iKIzX#^;ZhATjiCLvg?;K%0Jg*?KZ32{@1xUPj9#-f@`;M>+^6K@0OVVBY!SgI`hbY_DjoE7%gYP zD(M@U-#?FuI?U8vG;&l@o|yZ8XJRr&I}n2-wY6yo&i@6jR%H_W%y=8C{kg zm>pLV<7g^e$zt``#sXu|!#FTe&m*yg@rNu0=bEa3%S2;6Z!79C!~x}#i5Ba`>`y*WFgp2q5z4!P zc-qYe<_p5gaZkL^BD*Zw*3sn76Jm}4oc>#rjTZijW_~lCx-*Cf`oGLam`y$f&Od!_ zHbZ;B-+qj_)9HvKUy zBA8Q^_O(t_kI6vW^JE~s$%ei)NE&vxo6tPk9TH1DS;2db3S_ZaH|J>2X7VJN@MFpE z1nVMr*Ns!urOjLwq%U~{Hx-$8-=Lo*=g4P}C~B3Om$I>+wJ3tF=i~Qtm}cY1Z`TD9 zJD@gmPP6n&K~KS#SvC$fixfBvUYv6nWX0i!m7rrGS7#&y5`Qx3t>0kYm{jODh^oJ^ zV9Af=pBu|$Op@;)u7l*)m?X5rloELeM(U=Z@n5083YJb+_@0A2ThBql#d%(Xj`bS6 znErHe9p4=k`VklpcZTHeyA>ccSsOa$3XH@3{Zuq0FaB3q`%h8_2M+(TQ6aBh+n1LwjzP5;C0*jiq#wy&!7 zstI4>=AZ&amsCZ0#tWU@jR+nryv%>Ym~DiCgBCfyF+1`eb%2?dC$`MGl%Sfs;G7-J zjhOP-ap)Ut<=x8C8PjKMCviDH8Pfz?1F)|O%w*34`_&$$v;%1N$YwIgrt6mq{q$eP z-6zz@BXCj9HcxgxBD;$a5Nb>hHJJ#tI}A@zKV!1fPB@cYkG$LoruLPYqm{`S* zvGFywE>8cCw$J-DAFSx`3>d~7m3g(1>ZT)9T0;pl$EgKoFnL*-^`5x99-rmW!x{(+ z+B~EI6G&jL_DZp_ zmgkEzSjA}}3jFxWPA1-?s~eesgCh?yR1oYXbvqO{WOTI!V})G!NYar5Z7Jb9Y&&{S zITuzkLd|MavqrCKf1%ZiysE8X3vNR}-Ilk!kjsfft5hf=O^QmL|9=%fpC4@pnT|tx zP`}>PtQ&v0VoZs+&NQdEFA(1%N~{n~OwfF0qni{5G`LPh z+o##)8pfmgHO-g19bJ*YADxPpfyQ^Ev&1EGug=Zv;N&!SH7?XGn@(VJx!>`BD*@87ZoSMwk>Vu8l3v5{9+)p4MaR^Yc~Xy`^vF4d#m1>3B*%Nq8v zueIJTS-8M>yKFb!E>BE_D6!PcipRws3S@PNFv-<+uj_^UEprQVT@=@UW@f#VL-6#;#kC}^ldbl zYpZ$Yv3Av%Q~J%HaBA}WIz{Hvvc@gpaNVszs>nu|8LVF@h|DdM_?wcvWoq)4X^dd* zc4y=*6*H@F`fA5f801f9`uzu68MK@cFuRIT6BX~4^LDJ-$s&NScHUbxpL*vs&q+_C zdAfHlhA4_TK2JAJ`9+0LoGtfdw#6g=^i77Bcq4BRpLaMF=&8oS~0u!i@u zSjXvH=XVy_-+b?#T5ajZF%*Rn>pIl+?X89z-yuQ{Vzbd*&>?0zJ9Ww$wlTHcI*FCz z?~8V++hqxJU#mOVF007mhaz(z4T1j={01)#k{@k7o>mKS*bPQG(`Wkd7zYxx-WDvP zb%_bQP9&8h%1_#%h1yOwOF~^8fA*x#(kD#ma#$M*u)e($teuClOKZvseqn-md@dp~HcYE``#7|d5D3|DL zsv^ec%T+33EHH6V1y@r+`sOEgCUVL;4ltXPZ652SzAr-7*?`#8)w*!+q<)|^F5I7f z0loWOZ*16>zNbM`pT1l83Vm_g>N@~XLEG!r=_!v2eLLx;)`!b*06}^`Fp~ANel5XP z`RwhpRBtGq^$`&+8%hUYhYXVZ8NY9k3*;z?HYGDyAoGz z!yCQm2_@wouNa)(W>-~@*nw@ICCRhxlH^5*`X~lxvnB}_?5n*O%#>D*JG`IUwGFcd zUE&5(H4yjgk1TLm$E!Yun-y*e_L=i@ohA~KagFw^p&9~FT9M4*lCj?PK3 z)OS$<5u++y;dvAjnSh<`7tlxTo1my>d0}+5IWL9oa(5Su+4(&J6IBytkN)v`zDpOd+;vpNr|i_vsFt^p zMm4&X<=1s_cPu1gEg-FXG6zoyhKi>F_{NKnAD|M&In-mCh~rj&ktXR&s( z=lTj~VSRsGkot6`-s}2mPfoA+cT*&(y^~AtHQOu9lGrwmOl+KQmvObV&;5scyEJ%B zZv`0aL>XcSKzhB?gl=m7dR_Eg1oRw 
zkn1u(-#vo~y;3Od?@3+;>yD9s<@5N%W}ZK4x4ee-N@>b;#_l<6J0V|%OHJSt^>Bht zIp|C`uzKGTv@8D4W^ToomvApL+4rf*@O+du=38iO{f%-n8i!jG1kIZz8B)$$ZiE>w z1+Sa^*XuG%cc;r$1!cHJNSerh5?%)Ry!-c+xBQ8e_arlupit_OgEwa$srExuf4+NW z-NW-0p2Ku3NZ4QPAzn0s_u3mfIT*O(d}#{wSp@ZiRg0*C4G8?;?&yA9fNpXDx^qE? zM6w(!rlWLEXFlpcKhLgk6XM;*`mKRGqA0YmT64Y4VZ6x!3&{*acBL6-au{Pn4i|*f z-hMLO&>)JO{9eTUL|x%78qY}8s+ANPy>9MsvlQAki$J5s;=v++AR))ghDQun9}gEf zZjTIKtPzDwhDSBdzk!)_Xt~*~x-N6+(Yrw}E`TO%x8bwr4M+DtdV3x$-^2i4Ug4z9 zhplnfJ)8=`UzakXQhATRdbaVp%uTzaJ~ofq4;;R)SKi;!F;G*~N0uD1ThZV9TE3#T z=)@FSU?`S3auF%Bs~YPcQBwu(_)pmrIcMrr+fKjQy%@j9W9t$6eB9Snt%NlO7f7Ug z4P)ix(ds9WNB9Lv1vk5{ucR#{7QSz2-Xr+=DJH9)a;KI+y8>T&r0UrNytEg%fvepe z?W@NtM~k+gk$TI4nYF4=FFDY=oi^+)l5Q6MBS zrp#Qz-89}E{Ap&^&oNVF)A9N*rhnhI%>YxgvkJp@7aJrtYaxli3Q0HwRIq}AsT@`MeuN|G3#ws&W!wyC+Urw*0tHC4xJWhoXIf`vND*-bE zjgO``#;U3_Khy|fx}&DLefCFu@gzD!uhVKKw=I=wzh1e`%jb5K1{MusKVS!|O;Tq1 zWv?XX7bNj2b9eg?l}B}&h>%1Bh>Uz(#_PbmDP>v$Y4kr|*jFL?6=3uWQJyPA8+>QR zQlX!BXdjj6I5tNzQwgsq@AOsryYsNOXh?06EY^?)(R+}1xCXwjAhAj`x364I%IA_J ztMP|6H2ULVeN^I$V|rBrg-NGJ5@X4|ccrnOn8XK|{BVy{<_C!!=Jig~J9{eR)|&L+8VSRbVQWv?XC7z_nw^_9VlJoIFl-s^Ps z0N~Y`gH1I`9@V0olX^s!0;jmEjd1u@F9Iv!tIsBgMGNK2Dv;B3#Ug~4Wmcq_vZ8N>fNglX*D&7%%=il+Rt(y$91_peq&gxSFt{>>{)g4DD(3; zxUW1`^h%&SDsp*@?JJLe?UjT)f_@&4n2Pt3qf<-*GbZQSV+Bz`Ev?=>s1NQsp;wZe z%qi)M7qds?*teI=d4Q2*LaO9RD|1@fdkUO^eU(W z0ukwp%V=M@HRhAf=_xMXVR#^%YIaq`SJWqco=5gYUxMY6!*g`#h3r0%F*}RYKVbBV zJATIwMk!^4SzBTvvPju+w&(HR+ZX;K$^EnAh?+womI-%Hq!jT2PcHV63qd z?;#3N<{N7nF-!2?NS=wo3VpZd7C8YaYy4p2_hduznxl{@<0C;Nh)GNqwO>Svt~J!n z2uIyp9QKO%tCcyIeY0LE<9ble%q;i7;r6I?uY!k zzn>)?!hKZlVs3pl^y9a*dbZU`*=6W#DE_tp?2}V-cJIBody2j?`tQbaXiWEKf=1I1 zLWhl2;F2b@bAlUqWh@+lvB)gEaYMkeu_at<8{7e0s$rRe{i-bFhpxb{$?R9Hdu3;j zC?@hsG;=m@X=%wE9;NF7(V2GJ{XHc2a*XeDWVu?z=V7)=Bk-gUp_R(+lGlL{lYox%~rcbNdI@ z;P-2BpkO;>JTM zk6*Y4ey_r<&Ad00LD4&gqR>YldynU%&$&U>;!0K-;6}U3VD{Gb$qrgC!rQDzJP@s6 z`@8@T=l1=t_ScP0{l!M&V(f^HG#xB37b60C@_&jgqVDh2>9V1!H^deh^|)3(XG2?f zA-2$yQxosq<04F36z?b=Wa-DWOaWW0do#}#PYVwm0b5*I$QIfLpN1{8K#|$8nI06e z+u&KR2+f2d1zfSo;xk-v3-}gB+()g*ATZK7VTU3R!OL#JH%80@e}`0g-x+zsq?xDE z3_arAjBCOIOJBjn&ap`2=-)49o2C$gskkehKhcNSV4JJGwjz(?N&E?I%zg^((C5?T)wN}tvq;>m%cpMW5e&(?3=gdoBer%Wo#u!mLnhv<6OO? 
zcw*$Xh+H#=RVr({C(9jT;#_Uq#w&lZgBs7$o~?xdOFU0Q$6(NAa*e#^GQ}M`n9C$I z4=znsWNXbP++f2`(N0wVX*SHjlizx=Cr`c!6Bh8~ zn60L=hA02=8lS{6`*Y|)fPI@fSzFEW+RHQxa%MqQJ2~pW)(3yX6$_r+w-k$Zz24ux z0%Uz}Ff?KEi3va=ujA((SNvcfg`Ix083acCZY@M^Ga)Tt2RW@Z+Bg@E`v=?MN4%k1ftBdhf3O7FG90fUQDeM*yZX`4J#ZSAT``ElpS7_l2Hhb>HYg z)^{yFL)L}hL(fB2uFG5rgZnbF8b1q~vv=qVqnGp|6Ki$3m?%BSWDeFKIKc^R&x%ry zizrZPxuMh}R=E7mQfkmE9;JNI*v<_96gJ(_Jl;LWI9ZK$>TfJ-{<^=X zZ69f?C#D)c={ynpYBk%+kb~xIj7`~vk^p!Q&|z*YL)9C+bXN71D>_TfHdax83L{hx z>N8{+9Xq!t+oS)Ns4Aujq^jsNK^Lc%Q%jx^`F(Bhaq@3;pRnXNFSDT^ysURd7i{Ho zHq@|1u8Wn9_X6!4-^{%{t{R^0L0@e#8v~g38<_OpY zcolg9_J72&bE%k5^H$;b5~2+=M1y00yRIk4)>wJqbL;|(&k(H^d<(6E3;nof$M)GZ z*Rj1>-`>*;;d<(^tD#qpGwm?sX>Ok0`QVF%RJt0P{jY|h$yCKB)5St!HO}35D>Ot6 zh(w*cwx>ibK^hcv?5iw3L#9FC!`;Mg2_7!-IaXd)i*IW_^J^XR#~t2s8XIA2;Q%np zZ%UyYHlulU9^rwN?ph64!%{|ZKGHaLMOFB zm|gokpO}c)P_x<4(d--&q3%%)_B3zt2j_x$%Rw-s&qe!#$DjHa%x4?%RZNA>lz;OU^O@LwS)X|!Trmql^FVSNc?Ca5J%u_vSv>e_kpiz~@ zXK1vVA~HA`J$AwVg&xC|d(dR9zzt74-{?}-VLf$S)jfo`nFXh@{<2jaD{wq3_YVrCg`0-;95B8v$IN_X4GsCC!pw@bXA7Bt!jDCJVcj9VF~aKn?QpTrTYm$MPD-I;53bVG_^2I(fF{IFI~{Oc zF%8I>R+#}q$t)GA7LYWj(#NH;C5bKMP3LjoUZncX5-u|#p4Pyfro>Zqqe<};-IQ^I zwX?BGw#+6b#tswXD(=HvWdhX%*&x6WVgok2kyhU4=5LLzNDwMU^5C%xb#RExMAEY3 zfwf=rVDly$S|&-57ZOL?N}CRl#`$ESJ9$1^m+<->5Eb97%XDzMqq{lLKI_=HJ4B^v zNH#Q5vBe_G5Gdlo{q$*LcRts9yyUOAR*ZOyQ+gZgz7vcG8oz*JJyy*Aqyol>C6Dbq zQcmp+PrwsJfb7~G@hFt?rO-qzXsp4Gdp8qTaW<^onmG@R*akJec> z{_bX;9S7Zg#v3C24;))`Kf_(@=HmgY0Tj^}=C?l^ONZl}4c=I#x1H=Mgo+_859 z2PnEED^LAmsTu4f8^{!{PNzRlD2uV{Ec+vE-k1CnTe#y+fzv)=gyL)Resj=g-_zLzwPwyUmz%%ElvD0&zsVb zKc9<)Ohob4T|s1ihSy<)BUk1xAT6C4V;trf!pk@@ zPf;T`q)k7r(f!?j=03}Qm~fqTA{_{+KM_$Oe)dPz z97Y!yO>n4gE?rLj2Z6bm>LJ;6XadNe(;XX{^$6Co&FVB`&!B<@YBmTtZl*J27N{}Y zWJCW7vGl4xvYKFCVOe3l0yS}E zw^dxYI0RK|sq{Z~-m$WZh9ahCTM4yS%u4&Q=ewRD9(Cd8{~di-(&z?9uk~+V7q8@O zrl5&h$AZ86QZ#jaKXcA~I!ATyB$9d#yBPhUQ~y2-Xdjz{NbVQ4A@msP&QbRt^T_iW z9dm674#GjiC*(wD&I8`46GJ3k%|Rp_6mm2r^WSg~2?C(-ta}msY!=-NQgnuLv#ti2 zX$SHEwRKQnml|AUD4o2etn(mq2FZFlW$qRGVs0v97omvVL`!%LKyx_blO><1C5&w- zH#zetbj}@%gbHek>%@VLqAIq2Gf{V!>ckXQ>pa9(^(-X$tA{Z00?9 zUkv}1jQQ|tk2e30jQKz0CpDy{+L!XvuS6~QsR2s`>#?{QzENxxW&dECgFR;QgQBrR zrJD)|I}gZ)UVvf!N4P1<+|nvg1aln27M_24UxwMl=db00ZMA(m!NGqf>X*^AM>l)v7~QR*w@9epCtUv)B# zxND&rR@wIE!+olScBfjVvpgI6k5!^UU(${qVx%3^D$U&W8-eWwV_hnI|92A4@K=n( zmN^GC>h2R|$GA@0+7p$1x5t%ZXpYv1a|_HP1Oq{UfiVyB`mIE{Fa3 zNpkYF&+)5!@7q(>>?&#VVh`V64xGjlx9pp^AJgPvzV`|y3iY2WjG zVQczupsXsSk=PE-Z7XVX%MKhjQWcCbVIfQbvk;77h7Qy>#ZiIGuQN~m980JaI~yVA?soXfZ82I**5GxyOT z9y6e^!VEtHs&|9n12{=kYq*Rv2RAmOKKViZW#AC+J(Vdwk0N=;6yOPJ)iPKPtkA%Z z&+cTmGX293)XT+-+HQ3%{f4#DQ7PTOt)nb6B%s20M-V$B$2cv+oFu;C#IYcZO?r$2 z-RW%;j3Rw0iu4#d1>bd1nAHv^JBA+Oe;58f`%$x4?*qrZxKa%>&T&6?I{s0jcA-6- z)NNhH?S!Kx@n#hT09(^M)UPEOX-cCmg0^-ElTb~)T2|2iaGT5M9O#9Pdm+jy;fcOH z;gKexnHHdwIfhLM87+5AIy$;V!69a0;@uKe}NJDFVB17lNo?i%-$7c<3%w3>{#M19cggjssWr~RWftDFtk1GaWK#h+L`l zOb(erhl4SZz3q99S)`O7kU^9tx9Rn6_HYzEXdoQ%8Ug}(sBGN#J*!hv?&L zGu6M8Ioyz7=zz{R-qPYnl8qh~ZOa4|YKhF1RQSsH5G};lVDObwIMNI)=JbQkm2_v)5IFIDf1Ehr}O43sjC7}SHm~rKc^<`Y}f_=bvwl$i2qPgXt1c^KjOQ?e}>=I z8UBm9yA-s^@T2%IO7nvs94eAJLjnIe{b`;0oq@S$af9$hZG0_O|8ZdMNjx_^Sw<;i zHIQNiv_?`BO{^?d@011FUW7k6tgoAL$BSv7wHfypph})*!u%L+yf5ay8moCd+VMH0 zelEJ(i$qkt%t{IHbpx=E5;#l#2P0#AfCD;8{P1EJh6pX6nkHzX#LZ( z*Yl$qEweS+ zvip2o;3jm(>c^MOI8^Z?lbAs|9QRy&)VCqnnQn|6TQ)rtO)^w1FLUZ&4J2OH=u3xb zQinllk|LtfQLZJu%m*eu8dFA`$63bnqfUhjdyk|y#lrK`liu@0Scz?xS##ZnE>5(F z7c&cni0XR~+jJXGY}0K#v2DAJ*G+IdV5#4TZ8<7sE-}yVDFUPs+q-oeRuvK3bd$y0 z5R(MU_mx$%V(!*x{Yop#pHSD)_%w~>q2X0=_X}cGway_<#iJY}Zw^r+1YF_|Ky6zm zP|5FKMct3B{6=>8^80w!vvze$!cIe4%TPUy&ToXb%g*<;nb@I*IJa%G!a9`W&1h;) 
zcf1C{fxNB}Um6@10bOMZlA(Tx4LB|x+pl#`hIdFSgqB21MFmmawsNb6wBFu9~>F!jbE8KLxf=WmUSjuKO{ zZB<#cZ3X%Pq>tCX9Z1YUmYd_WWwK=!d?|m|I=<@}hVtF{(;VpTxuCVZ3p-<_%j*?z zCXPHFXD(3Ni8Dt)cGPVL8i&o&+KZVHjNjf_z0d|-UbYt_&pmxYj7b__op=^?!WZXe zyhFRCSAE=hpb_uSX1>L9+}))GntRAs!$To|HVfRS6~`gHjT&*dPg_#IR_Z zg*TO!F4)xYxRvqm>nR*C`>|!=bEr>DD|V)RYR|DOMfx_~{cSEDAXAk1fUtw#{R8@ zToBPa^m_KmrW<&ua(L3zBIb7nu~>4T!JN55IBjn@)Aj~DaHO^%+;rBm{N{{~9gMR! zM;#%KTY}bEYh0@hVW3jK-3YUC#$=83fPH~{h(L~kQEDy9x4j2Zo0`VEC8zHNp6PU% zzgf>74nAH;b+tW-*0B!aI!r$7BEjUN&hRX_3GSYhorHU!vcH3njGl z8}w`TFMWxAGXU}%RYjU17HsJ^`!f?YwKlz&5S_jC8}w_V-@rjqNNdy~tbSdf-@qNx zujwGq8vQzs&MRQ)H@oTATEd9d@8WE5Zk*H&LC1YHTK|4v?k{<6FbXy!co!phe*6ps z_m^fF}n@)Yr6??sPnMs0vr;=D^8zxDgtd=AYxoYso3z@%-kP)Ix5QV zHl6ulIsChZ!{2vm(8YJ6oqsHiF0td*vtsV9C=;t#an{#&WFGB`qi!1+pT%z$5*ihe zIw*QLJYtX>GVIo$;<|5wt4!Dpc^-0*RS>Qo#+M`afYE)+M)eCuy zfIJ}Tbi7&O6bIdX%;cIHuBg3ixiRNwLzgarwm7V5$~)pcjog(nw~O5HEW4;*=74u4 z-N#lqG(D3VN>5=hkRwKP#*@mDYG4Z2-aOoWp;9OV<;Kj<(4Ihjcf|-lhf#DOs%hz z6Aj*?TI%kxxeC*9S2Q(?PV=Et^I5cGS83eML_7YDJa{u!{Grp4g}+(F%vRreA+dCM zMxt}d)IUl%X{yvo4L>NBDq$plA@e5UmA-zqdQS}}7QL8RuQ#d5T@LexRJ;pA31bIO zYcr4X#Y{q;Wq#Mz-X-W?@lG+kkuUN3zt8wClNaf$?yX(&O?5XY1|Vl6F(Zt*7a@fX zXGVDbFc#fRR902Yh(58Hxdcgdr2YDyz|+@c_!OI zvc!|u^u=+R`#Enjj*i(jn;!4YpQ7%WI&H{(N=2r+TU6c4Ybjdb(J|C^eS^(*jdGYk zwlCdSXQ#^En}SlQs;h52D$aR9K|4F0L z0QIkb>ffF|pF*^pC34JlC4{{~Q`j6lh3DL23huuHG54xWdvM#IBxBlE zlGDnT5z3OG!DKlAf082HNMt7<>tI6{IJ$fsyd1Pm@`)E^SW^T=ChY{pjTdd3Yy=mc zW$Mq9)ElQrIh`6y66=S#tGB9}cL4Rq+h*CbX3MBcr^DF?n_tvz*BD!a`soe0pOOuh z(6vr#7&_N;nI+|FfiXO1<8509>h!9X z8sU%CKP_j4eWUKsXmXH~+&{{=5N*rkac$e*2+Cj-18vgOAetPyZI@|IjDb>C5XxbR zP>1-wYTnwo;?|oM&Ceo?>S*lq#!_$OXW<4&+px$?8dfGEX;P_E%z8dyU5sOs8c%wmcNX0RmIzQ>Q-9${XiR9OPK_tnXdiq(Ive!%e-Ou3on?5hH+=ctr-Org+- z*1$a3Bs5Q$8r8(6hdzhL8Dng|1cNC0plz)r*xNc>WJoZvs7EEv-}_2bM=@Vvrfi0g z5y~8xQnkHHRo;XC?*{IeXnHd>P&KAeDMWnztIf9<0^s#xlNqx^v1s5uV?Tf> zBFMHzx;_&pcErEha)Po%$&mLRrtQHThM)rxcp8Tk!Cqnt0k&;JjKDz7u;C#ovoi7c z2Ul(T#8}91L6+Q@Wd;L|94#CS{D5`6L`SbfJE{@QXkYYVW0XL6_+IT##d5L%yTGzo z@n<<9a)~EIP(P)pIi9#d7dw!ti_yg{+)vVD9lF?=2nb`9*pzADWjxi$%(iA%RN`VOFadtw`l?Xrb{qLev|pfn%%& zy_r0$nXpa6`jV0;>7LUJMpku`Ddfl*D?g*bl$RX~Z{Vwskg&EU#N%Cz38AxBov*D4y}$I zl~C1Lh^Dk09AJu0sb$G%rQ9tIs@U8+x%Db9IVCiB6Vvhm(?AxlWS$73i$%0;qUr4` z>Fr;%oM_7e1tlgWV~$A6odk5kO*GQIG*%zl`+(EfrB z+TY|{d$C?fJjkvq^k1z~FFFW-gPJb{~pPq3si1X0& z&I&u8p#?Lt`F0Xi$%^SG>fKi%m$r!>pPv4N(KerR$m3e(Z@rRscx6A=lzoje^4yB) z^%Mo&JC|!;FqKjkhk#64x-@i8>qk9=VCm`iu+lWMC7v1&rHX+Wv`1`b0MW0oMtx;j zg|uf5@e&8VSZG!mHCfPv1&tQN#)s4Yjci*^r2x&hnF0bSN?Ax|6LunDUVx1$1uVl< z<#ofI%eqL|f{}Z9SFHXy)lMbfhdel#V_$B=OjI~DCWmj^f054AUq^Rf>1o&OXP<4_ zPpw%AS}oQCv|4sLc?#`VJ1XVP;zpk}+p%R6h{K4Qp+r+tkskyxF)rdei4BF#)~vX1 zbm}+Fxi+uwPe={hkFg?NyyhIY@##wD#&(sLsj8V7hilJqV^34A-mA4lqKO~6(J9^- zZClnqR{UHPvko?mK(K_F=aCb#gnW)9UOPD?F>d%jVX(cY$%&? 
zNGv%lpieHR7kTdn#FNY}2bmMCpK@xRF8nqqmL!S3ft|Tg18Bg`yTSYGlgjJ88xTwG zV{&{-%e6J@z~s1Ru*7w+V>81mg>nJH4mw`N-9s1-EIwX=l2=1zSu zfcD^cvcKxUwqO4hh2RxKxDa5dt1w!Zzc^XxE$@90O*t_A1=1B{qu9KnUETOR@A-VefH(ep(wHU%L*qG==#T_Cm(O8NhtYySm0t_IH%tPw`3RN z;A@&o)PFJimm2=2X+!4}9?im5OK@?&7N*6GtGWe zsTl{u>-*BYy_ttf6U!Cj%V!=lPDcTF-)8V?tbXV0#dcsUc|Whkn&psa@_^TRzcNE8 zMa${C2GW`w^Yi6ivt<8lyO{gwo(Ruk+&d!tw+i*E)bQ`J^u?$zy|aXAvb3hg?FCw7 zpL)}bVki5o5>QVBIBIA)B?!zpQipTQcl`W2?SLF4LX^bV`4v}=uJhFo^Y z)60$lkF|tY&2k`LVr1{3eH5xVGc*h{xbDO7NI4-&e+s#OVRj(!vogy$GtTmW$ZUTG z%AVL+*sP!tlrPePX4oV;wr z=~kqxgt;}ip6%snT3!)r}D(z)us=jn%Q{)y=M%a zxnSAT^uLgK>FF|HTh9it*-Jeghtp6ggJv%bIu)i!zNV9x?*;UUxrg8rh2yYM9Em8p zA)-|oEmE}LxpzLxG03hCUrp3I>j)SyN`HWLRdn5$PQN(N^Mq_JrG{E`IIosu6NZr- zMNKKkXgOhK*evD_VF@8HlnL=cyC;je^dPI3wxIAiwbA+wGvTIUo10h>LE%?&&-KgO z?;W?l=53nYe@NuPRvlCG=(#faZA1+?iC>0;Fo^4N2rlHesd`#z1aIC_^Z(fAsd(Ws2K?E`qM|Y1GZI&cQB@CMbpmj!gs>_`2?7jPJt^ znx28agSI_V89jIPqlGc6{VZJw*cN3QKbN*s}F1?4J zZ_jTG9DiL^CBHB%zh_i|VS~V(VI&PYRl1*AqTeYM20M36W%?FAKyPPN>vz&{{Z2dM zqrgx`{puG!3S9gge=GQV)}#{rDLkzbejwKQU$_?(e?!kJ;b9{WB&8ZkQdf}VX`3W? zjVTA99X4I?iUX9GF|C@q+r-?HlX44wZf;6U{zQ3)!Fb^?7}&#(GP zs`6w6!cR+{lOXrd^Jte%KEx?XNDU^vq?>LJL#H07@TARNV$+~+@Phn?UUGn!dWXPN z<4Ipo>?>?b1Saj|;hJi{Ff@L?3@Y+2RdKiq0$GOMOl5;N5q*LpnOX#e^i`Y4XNCfD zD3jf*WYb&l|5cT8B=c$mOWDevSFNhS2)0^v@zkLz;0_3bCx#&a^vvY{uEJV8}o} zpX#{fC(LF?J@KUKMzpgzr|xT-tuXV{P91L#hZm`+DI4mCV;4aLN)atnmpiG@P0s?1 zPfum$h&HzRq(XHOA=8ULUF~ga_NN2SAbjL4NS2S?(vN*`!~fMFyO|fq&mlwv_WiIa zL%75%!&D8>DvN%a^fO66QyNm|_S4NQx!|=5I8}NzI4SZ}Oc$p71a=$qLz4lXqUEcp zBM-LEJKuIN2b{Y7-!S(Jxv>aZvU)^+@=q6+Lj&18J6A52{8m>$nHL3=D=*I z86VbXGV>;Cee#CV!~bq;+p1zkra_0hSNlj$Ge}Jx_X6)QgZD%a@2apu7iHlqiF**9 zDbHk%zrYf=<1JIw$dpx)CQ){=>Z<1QD687-1=3|d5Yp$*7lkI88ili=2fnYglTgPi zn(5mZJ=bL#Zr&+IG%>2C1dcbvC02^Uu?|fvQf8^UO`hVlZ0KJ=fmeXMx$TQ9gwqgg z;4Ju%aF#HT$Bc$vkOwQSCD;OhJ5j}0R33-+==1ECF{?qlq(z-SRy8Rb+Br{o#k;bh zLrqOm>s3A?n=Ckwt9x&i>88X5hFx&H&Bq`z`la!gX&v<+SMgOF@YuW4a$4J`{aa3k zmu@!E0@!0Wk~XClfy76q?xEZL50(B0?aLv>eE-8h|3ioU@TC9YeHBuuOdF;`@B1HK zupc-nA{FW#|gVCUe+29r%^>0@kZ zKyF@Qpe{iArpkALpSHusmUPMWWC5hg*O8Psa0VKi zG~9q0&eu-8^{0snF0=&c*aQI)hU2dEgkc1ww)}f78YeJ0GES5RZ|qmZ{xGC{WQhik zT5guYh>N+dC)`&uvKgK;<3!*h#*s)XfAjfU$lpEuEfVW6hA89?chhuNnT{-0-HGAW zUQ}$Zt0{>D1A`;d`V^V-c#4@3veph&+M&hlds-&*$81&+iS9C@oLo}nAAh@FQR@5w z>^DfYwvv{~yOTmUXPz}03(c|{#;idudk zctqMskT$Z_kRTb!_;l?8%T`EMW+)m78iE?)eHB%w8mdCh?lZ`lYti12lX3o=RZ}xR zU1G`Uk|q!={nodrB!S+jQfuYeLDIIg$P>Ikfk8zLO ziPpNMQErnBb2c?J%h>kTXU3 z+tZaa0xLx-Y@L&nxtUy<4V{;y=5C}rmu`)+an+&R^55BXVba0DYOv&FZYI}dLx+$q zvyx0>NHKIZo8USUWC9P_w+yT{`wR8<(?6+zHgwvnab%PJdm@}U^j`5u^?yWK$dzSf6IaNw;+e+ zK<+jmG+!D0{lThsEl%J5Zdm;d{bvg0G5XIE$~jc&?J0tKv^ zC;^85BK>^vZxc5Q#kR1J`rqA3%++7)W#~8S(+`BM$Wx|efd&OuLr+#9uqs%iOjv4SOAGj%fR$sUYm3VJX*w?!` zY@9A z+Xp(I_poAR*N5vdmV=N7f$!pN0Wzg$o_5>0TftC(*GDOA7Og3$hq6<~mD|qkYPkI~ zku`8fSd)U{iPN$jw|{Eiyvy3SxL-Zj+zqj%%Z$I&9lV! 
z@J*}tGI`%3?!-u32JkI;-xTh|oE)gO ztgV3IVs`b3U1D0d{D$vR9cHf@wxEMQd!5RIifAcf2*7;scANAqoAeZt8jWf+hyU=LD7Pg2vKvDy#X#{T29NJGH$8=<~I)6IyifF)q zw^f=J*DjNcflya(985>nE7Nr3c}a|HsH>GEZs|)D%^0W4e@N5hinDtu>`b_KNs^O^ z^7@L@vE=0S!cA?mp}W8xbIa!#iX|5D)yR~FrKX&2c@cTHBejNNkyC7^h&Xzr z(bfMbi#jgZ|FYF`V4FU=sU^JC%;My+dc3!)3Esvwp>fa*)t|NVoQluSbbQZ@)GxES z(>zaPGOL05rVbBakp=jR4{(YHu=%eZz-<=b9v@(T45?pK(e28cv zj-;31coI;)a^N&)c0-@@Iqy18aQ4QLN3{fvsb{!pc%()r4mG&6ZD>BYt$ujRfX0#E zE@9gNPP$J0OP%jH?xCG{+8;vKSy%%0ql&4n3{sV zCdSV^XlBl(hpGz;r(%2l7$xa?1~=Cdqh(TY8voBEz8Ck?F(&4?3M#$Q|PW z{YsrXaSxB30~?bM2qKoesuGo}QhnbWO40MLR^w-Hw%l?)a)KtWRF-Vi3kuXN?=Ud+ zv-NFarIQR5@!bS^^|+4}s`7gc(Al?GL|+FH3|*IbvCl1Y0cX#xI$fC+>1s*^08$30yb z5=i1n$R$?uz(x8Ym(BJfwUMWcy-1tnsoP$pWb#zT#Z+OxGcb*~uQZOlvZ^jH4@)5~ z$KfKGf=bgEMp)Zg?<7+~oK6TYq^5cAq9n0iJaOATfqS3{{XRQU{EW46p{^??Up#PT8W%ahR%0pt!u?EYQ@9$#V zDNJi|-Y%SA8JsTGt+4c!{NCM?$2fR_yrM39B|dLH!97iR@-z;-8vBAta!~QY@O^sK6St&zFFCq?(T31l?BiBJk}0`z~YtmQfn`D_7b+2 zNxHbDcY_qjYM#;w0otmM=XEl1ieSg% zW?5PB%YF4SG)6fgbPlpYM|O>D=+PTh*JF<38)T7WO(*W*H%xYx-INFAd{wE9x)*6b zoL7c=tDziyGqM%+$-p6j5c(1H$*uO;^bFo$8Y*8#j;5Fs+bQESSD5_a4h_&%wv1Uo z3d*?H!2AXn%J|=Wql}AeW43d7hoJ*G&--NJepB?t{3>HKPVbIcR_O=NGGV0KF5#V9 zx*eoQEd&2d0aVRe3$>ni3m%M5kikwC# z-aRniUbKG1(;_ai9YUi-tGV~+V@%vA&`rnkR-|QeqMs@HnFjq@`I`^Gm4**e2}1?G zY38D_Q3H|zp4FJVsuf~CLM;uku>(Nu%ch9hOIs|pUmau$9Hy`ie{{gms3(OZ9?r~t zJDX+pp)u67vF#nM465<4)?hH}ImtMo{Gf>0p>Nx?i(I-dS1$d%HvPHHrP>Nj>BgaR zY%bNA!^s63S1m}Zdmw#4QZ>d33YR?MUl_!{_KJTsh|`gU`}sw@hcYeVZ)rMmzo7-w zFtOoc?5P(k$7Fm7)2$r5&$b!ZR`qmTpG`LdiqSN(kX45%keUs(ivq`|rkUHw65h7j zx7bfo3+>zWyj^79ZsqL~ZegE=>WQ!imrAnG;6#lig}D5SGG%Z=$yIq7oVe3VEKJ-X zPxLv+Ey#FT!QFSv-7xMd%-vRg_vcTIl*n+yrORG~G@bY-4~806%JW&)FmmOkIMzue zOpzFg4SbN_19)!`R`5WIN_(j`09E`_cu?q>|ALsJdb*`ME*t(JBIU&9$7`mCH52q3< zd*%h^^J+eu*Cw7y`5^Q1Azm&ta*T+OVKn$Ur)I-mEQdbyT@<6t%5V5uWrTuVh5n9| z1rZY{)=2jX(Z3i(vq9vzhwZ!s%DAQP>ylUSk6}HjP`^Ki&)Lv5lpNS)g7x(KCrt9t zFHG{#LS2Xkm-*p|()C^ec$E24dZPKl{%!{ExgE4P6eztyFWtciQKJX*<~S%sJGF|> zY4KwK?A;@CqZ{Bw2HYFfXgRTy?bKK!-O_&rKYi0JU0`nJ(tBw_RUvz^(daju5yOsn z*YS$;hoO!1iyy0NWLYCFoy~ivru4hIXoGtFX3v`HhW@DX=QX6S84Pb-FUC9OZd13= zs-7SN4M>Lt`3>LD;V2U0lNFPaf&Xs?VL#JH*QZ}*1! zm4O;&pk^^*reqwEA?Wy1l;GYf8CHl8R>^@5>}$G(Z9m^;hm<+a(-?;S_!r<)@r9Yi zmm9-S$C1=i&u5IWhN0R&3LrHFB4y@UnEMQjeHHS+k_BLsE!fNgu=ht>Y#R%}zG1=6 zDF91*U^RcrXBx0z)dgUSJg~V1V42HI`TG@s-Qd>H@Hs1-q&M?9~QKkf9IfGd;$Foml|(s0Vg)0obm| zrltoKfc?+|TV4RR$%1tqk+11g4{R_KG{5#sEZCX?um->uqLEEBVDQ2M3-f@1;knWS zc-RNH)&k7+0kUCRyW&Tc5h=!7fN%Q%uK}PemiYk3TY%9%z~df5pA%#E!tDI+>{%-{bO|CYt`04b@&tk&>srfei<^{ebg9F7!dl zEyxZ7a#)(z$cCl5Jzni-iMA$7K>LaNZ61VhrB}0HBA48`KVX}0ld1Z_B z<^^7{He6$hwVy54&pZ&qc9s5t`^+Ifl~MH6HobR4g9YVPY>|)Q2#cZ1f^6_GsP1!o zkl7aG5ess50mvmjNQVVE*?=7O5U;7k{kr7qj=?ck-y{8mK_zrcZxX1fZ?d$7`|#Xs z^U)MJT9Da$=X;gD(q?vo$xI6%#^buJ(3K-^FTTKwteINubQ_!T!ZLES)85`TV+t~9 zGzBqhzpYsOM`YR9d4|j|v8pq&p%GK9=ntcV$@WWUSkfN>_t8lLbj=&e~`rkULn2h?h+=}d7N~$O!-q?Wp$u`EuQWLc+bCy%Q-nr&8 z={C_~A}6!eYywO?ea5;qT@>@WFJcE9h zId1gM?5fg4LqFBd&ZD$YM?JMTH&R^_^_FQEKepmiGsVrjSf?)?p8Lo~uUT>}BAUd1 zlWEdKcTjhl);Y|iG)AWw4iu|r$mHaMd-FC1=*L@&ii%oC?F@?waqRLxf$^1Jd|Ku% z(vaN?l1ip5UzN$Xdt{P)ydB(93KB-K#)8N*PZ?cOr7M4Bp4RhZmxPLn!g@c)(x~1Ku z<7um2x~0FdPZ^$21BUTrl?5T7hZplA8+!IiwZx3GXo*1LP8B^B`src=@aDM!Sa1~& z$J$*`rer4Z1WZq4-;D>`e9a+@<&wnS5(VWHU$dVxjcj?5JB$V)! 
zO3rDgRAWhGN;Pp2MVe+%+`+FpO_+HroauN4i+_jbB?Iou(S?v(HhM>(|Wq@@7BHX9s51ip$v6i z8BnLoXOJu#N`X$+W)gNm&&-^|j1+iW4+?xLmu7&+8sI4cejWdwGg%Pl|zu2-Ml+bcC^Aar~st{rJW%i!jd;(BRN_#m)x*R5yd-x);Y#R{al+l&89v@C->!2*XL4) zdZa$vOTDUe3{285o35wOfjhpVr3cqAsW)QfN2NH&N}O+YRGa8F-gv}zGNN#K^A`u5(KtJ)H%{(xv=z{9(2unNmp+#_M%~bM2ER zBs|sfgzWkE3BE+^QeoWkpYkFb`t}u;SFSG;@SH{bErtCs_(s|>J#mfWXLDO*D$&yRSl z36Aqlq)Rp6Q>@cbh$S$2{gq$EUFXWkqk5nH-^yd<`LwVj~m zl9+!*t)b?1ln{yLhHe`Pj9b1C&}`^Wv>G4VELq&pzu1p6%*VU=h@Bcj9p^XUJ+=uK zadAse1sJP2_vwV*(7A-{K#L5Ks-$FfjKOvz*i@bv<-tnw35&6$(qxvs=97k&UdV+~^GlPEc+~ZjXT%nD z)R?=@2=%*L1?rYBFeoqdP@Z1OOLfe)G=HB5r{l~gXEGqH=hv^yft$&)R4##5|J`KD|(Q}LWQv&?rrjUz)osEpbmW1^U!0 z(8~}v9SKt`E{!HX>#>rKsOzXLxg5!z$#XsrYD;^$hYM>;{DPXSafC@G7MYiK@ymbl z-3~JJnwK|_<0 z^?Y4WGJZN-W{M8ZevTUUeSP3Rd+f)8K$bW&fwdeNIR9~#AyyX8;jH_Dcy^N_& z5v&{~P)hFDG&|$zUogU?4mgic!bIi!%4Oh{lgtpaF7y16WWkC{1=z_%x}T?2=L`Im z`%`WdwoKBZLM=xOQ}slvs%usB{{gbE8DwV&na0#`yyoxGjxUN?<9=K8Q>>w{7(X)? zdE@6Nl?KUs&EKSI?KEJNsp3+99@)o?abuZ)+?Bm@_%0*#%EA_!XLEQqb2mAFTk~C+ z0FUKSCP2-TF#*t!pOV~62DPET`0p9vM;8XEMnFl5q!_pJw;(eY+o}{Kvg--ab z2Qb$H+~)&S03a*QD-Q@j%Gb9n#4QFwFFroef*kyxKFC=XWKs@fqX+W$9LO*Wa%v9b z4}hp7N8jsX`EwYtZ z0lsem7Wn{&dI0NufU7OQZ9c$f!>NU}-}JE0O+i4tgV`b*ntfqW(I5L|MwzhTp&?@! z&)Je?En};JK@@p z?t#d@b{hAY#}XP%NB!1*ZuUMCB&sxHUX?e1DD&3s25O>(QhAk+pgy!vhYIQm3$@)s z&DX^%%|F9looXhU0$-3n@lmL_!>IqD8Tq@9LTFH%nDnUv?7 zl>JP~lPtw1nJ=M(mCmci3A{2*Q)NM9`(b(Ma9!{i8y^UcsbIz(2u`Rr5&Ah8wW>06 z(MgKih9hu3E(j|`irgBLX=9XL^d*xeCi~iA+E%c7}S?16ez0aZ4`6i>a2BiE+1L9@$EBooU<`dR1Stb>u zXo}fn7B+iL!szYnHIXQ1)X$tiIgPzh`i5J2uac%he>`kwmVm~raA~h@w(Sj0pl7u! z>+SqWmNuI#<~}v$s1n+T*{b8teX8Q<65QVYKla`QEUN438{Y#AA{Pe~6uiWvf}$cB z0TU8683c4lK&Oa`h6E4>p>mnd42o@lRrfF;Q=1pmuCTiLS)I>Bk z!NjJfNs~!plQE_(wW%>z{=c>NJ~QVW1~qNp?|Z)I`5ztDzO21&d+qDl=N#NAOz6j+ z0wv>->02NZO|+L{)`6U#B8SsDAnL2SLSz^4>{1ZI^S5*W*);o@s!~=j|0ga`} zInKkD9TTGjxsL!XndTloe((nfP}g=o5u~XLP3pS&a{>T-8U!Jcj@V8CXc`3DMv)A1 z-sCf9Llhh$*u_!4cRv74wEgJm=xo6r#Jj)uApVLor1MEvU;F^bv~V2+Cmej><$L%X z3S!xUT)*eyg-H_8hFai|Xye~RaoUFf?r=&Hw1sM40#~X8m(Oe=>OJ2`L@`Td!hYWI z@`rupQ>vDAcw*n&k8~vTLMkjoaVBIBg)$Sm(Tmbah(VwE81`Anu3r3Zr8Gk80~*v! 
z_!vXNKP_gZBD#)8(bQZZHxJ}2bIWj6rFc)E?ANqYV7`Uu_6Rvi z2}~M^O@8ik_-iF6%Op32{O|}9^czlj{zek>sE;4rz?%owC$xh;%P-Low*seT|I@E)(V;F@#wkyfp-3c^xPgl1jai!6r|GPAE{PMkp~gvI>C61 zDio^OkT#qWP5q0{yb9*Pk=%WpE@yzPOxk`b?!_7oKWefmX&gCaBZMG(2^soCx-aD= zgKW?u6F-#OzlxY}SNUov*yZ4?>ro8tuMblEmk16}Q{X2KX@MEQFBO7N+6B2Cht((T zWg#UIs+2M`Eu5soUw|R1Gr0;a1ycvg!r>F9f;dhk`e{*sW$TgRR8hI!m7;zA4~p4+ z<^+G~+D32b)rJKT_dGm4Wj%KIgGSlJu0&oi3(L89|ii1zk>6-4=XmXPJ!L6j8&r6!2-M?w+vqpo+*)tY}i z2V&9?LPykm3A7JJd(ZHu2{&L!DNWDBerMKv45sso($n#Yyld|0yKT|pgl?2;k6COc z=*{R(*hX}G317~~Hjdeg$Z*Q|GH&kYUx&GGdxp$SgDw^cretW@b=a z#?FEF51caIU!!L-uTfVXt^<&xbbJVu)Abo^mG5wj@bs^Gn$F0aJd9M%8WDZ;I z_%bj8;}Oj5JJW=Wz?*n*JohNhRhQ5d<;)tS*}D_^;2wcExCM=wR+Qm3um)oA_H+4K zCc0Vnd-zl!eGWXpm+<4Op)V#LbMQW~R4=9;;=8nXFBW#zvCjtL7a%IX3GhR~EP!}P z*M%vT9W;I*RMCzP&?LOH4OClae>@yG^9}r|KZ!qI`!4<*djo%d9QDa?z&{p$KKucq z%MaquYy0r$oojG!tjGKdl!k{s#6f8~9=nZ1z0plGG@FEV^`t*&m)&vNYXROCB$W7$ z2VPl9Z*KTY0e$4F%Lj!Eo`|Hsz9)AZ!$Z&rZs;dx4P&v$(i>9OSw`<|jQu~a4&r)|!f`ByJ zX9YbPeQ@u1z&C(8I&}7x;it|%jt4%@K2$LLKz}G(!cKDN+0C_PBok|4#-gNtcOde1l7YoO1HZFJiXV^TC|`UFQGDRH_-Xu}#}Au?#s9$X z!|vipobQ2r2N)5c;;-U;4oE=g4_Fr5b_W;XZ&y(_Q1%#7pT+Q9^AT#LyZ8xot?vz| zyj@Il&7HG2%GKS)*He|6I}wMmaC_+4#O~rk?0R24obpqk+I{9dActz(PsE{dWx;;J zqCUq#?FC78k_ERI(EjqFL#J$q1T$q^Avl~;50bS13Z}?ZgiH28SbUXyIOR6vVs<{c z>x-80UVK`r3~y%HNJ8*XBZaP6`Wl`R^F9%H;9Y#Eb03BpU)c)kmOg8tdb>BBssUAp zx2k<175f(Q6&a!LN8?>;SQ}tRWp$!vF9k>A-osZ&J{Wn!L)}ZktAz`y>qWc~oY zGsSniwobtZ0Rj$irXerV-dzZvz+2e54l6eu8cvyxni1W#=6h27ya|c@w!IrOlo##Y zc=>I20lonJoMp#%VI}*1aZcbPJY{vl@)e%c^f9@uefu!&%Hq8?J*(!}kMzM^@wuyK z?Uv;Ofv=52K9&tI-#BdbpkwpN9}jE4&n@hG zv#{@fKftefbo7whH_Ue5r49VIgi#FqTQn{?9-=+MCF$jh^d&TfzyJ3fxn&$QgMpzP z8F&vFc%P8!fmpm3(ejlaQWAYhO#O0%wDQ6*R-Ow}@VZ=o)*UF}+k=-jcPH+V zhxO-Gg{rwfoj>2^A4C6j{!BZS7xU*Q&>4>A&*IFV>d)UG%**rVJ-E@UxAaJVKDUqz zP5nFki9W3^+{t|4O7xiYKCGqtcqm~%2k+VE582Tp;4votSyMFGFWaaK+DEMu*Yzdj zWM87@>pMZ;wX)hzyp8t~(?gngcRDKQ(AhZnADr=!-LmWuK7(`zJd%|h>8IlOIuTw0 zzlFctbF5l-bL>4g%&{*%R$^IxH1LgBkj5&XfB(zg(*eJH3x2r*O`o=367bg5z~>0Z zcj0UV-Hyd!gZRhrsZV_Kw%;ES@TJj=!~A+S>Gv#_PlE`R&OaFVZ8{Z7=%);HnTwJ2 zBuDC}=G^uAN2)ke`3tq^#ah@c*W3u0UI2dyEF|7NQ9r=e$`&f#ZQcriZ|h!1cZSjN znAQazk6a5M{thVIZ=f6T75eBiT-y@sQyMJCz<>VHl+?8Rk(7yP2R4;~0=!`o4J+EWm zQzfDsr&q0_j{C5Kx0Ltsi|o|HEFM4=4+I{LfzDLLkD`>l$Av0>6IHzY0=#iN7c(Ie z;OtE5o2H=I1UjjNFX1m)q|^}5&LV{BKhLW_>aV5qpY$p#d!n8sG04UPfi67ZfFcKY zk)@9ln2(o8ouJP}V=U;M$+N#p>Czj1z)__)T)31sBwbJ5FpC81!CTO~7dfbSxA`*w z@P@1S2tzKBkHjKQx{CV2X6za9+QUb-x8&I8|H(VCw1?i~Aq0OmaBL!xT=FU$%p9he z_c1}EC+bCV4I8MxG9R3UM?_|Gj~LnBKOh|L4;OeS;e8H5?H&H`KK#Mh-r*0Qw!QO8 zxa{`+<7lXRiT1voaJ23HJ+-|@2EL0$GTQr0t`4>LM1-ilKci5g1QTV*8a8vJ%dO!n zmr@NE5bvHSs*XqJ@fv;%K-=DbPRYELj^5sR{Ze~>hnAZ?QPkes!jucO_d&J2+X+q7 zZbp`-z3cn)2u_UpbH+mnCJz4Z*YA^2Q1}w{`$NJRrG6iu!0UJ3Trx7%Zz@7mzkj`g z3MGuC3|YSk9O-iFchzF5-&*1=>i4&qAk}St0zg~8k5TgF)^9w-mG!$dOt~=i>mW2y zzs2)Lt=~WLEe)#QQ(|-DMGpS&*YBTc=5vYq?I)a3>i4B2UcV7s9jf1<*`j{OO`}2y zuOX_|@1H5`yWIM1yN2p_JMk9v`%Nm|ZT=R3wtm0Hsa|}%4z}Mch%4)NRhV*N>Nk_n zMEyE5QNMhyYvngYXjkbh?#*MVWj-;Tbi>@xvYy6!{=*F>F1C6)S*Eu8PjgD5z zIe?`T3OpUygRvTT({>R796av@Gb=tq6`WJ=C(Hmgd!am5v^1#VD`F`%gpUub@HdT^ z%P9savCzx7ASq^0UeS(RZJ^A(%0sDUbCZ6Hp+wHUZOCtLY zANrUjPQi=xl*4a)%wne;?!WDJ`iiIW!-o%C11ss>PDBcC=h`FA9kTZy zO+52F#&{G6+ViZ1%uc7^VM*Mbd^#`}VKVDm!h$$sHSCQJe2AIi@jwdZ^T*FzNq>Vh zdd!>A_wd!S>HIxl7Y{J7<9-O$~4_fhihG!m!yTb<13qA*(Tng!hReg}3waoWEzC=x5 zNfZ1k{W~j9;oITf8JrVlZYiGwt6_XWifiVe$JR)yvwsvSp__)bIH3vUtn`JZYMAq# z%)k?4d2P*b+I>+OR^D>#G4xo$fHyg}Z^&->MMBL~Y$E?P2M&5T^H8tp?0dobvOH?O z5t(-!eH?9w;Uw}-^J2sU$1rpR7GQaa&+SCz^qox(C?YZz8~Z5;o_+gHwKiql+d7vr 
z6lJ~_E&LW(*ZU^RSAI^FKTSxOi585lqJ-C{fWaG}fit#=h`&wo=Xu=g0k002Vhipz z?YE&Y_MQFLW7XC*Fl>K*z@B@M8ak|t{VMSpa)hjSUn4dbElK9DfK+&dq=YXZFNnB> zN0Jh51ANMC$6r$~d5!I+L;KAn^B1XQz`oc_>fcwk0gv%sW9}dRXc!+Zs58Hd0Jb`R z!9jfD_G=u)cxy)WzBqKq4c8Q;wpU;jw>(ymZ14FDKA1j;U^JF;op|}kF?)A%I@S>0 zRNoEprHlLBvG@Q&WIs9*Hx4i!%sl%l=J@#LJ2!gQ=hN92DHMxJiw2C~b(l&U`l8L= z`C{OSF*LnKn~2--Qu`SDS5n-Q)&5T4bS~Y56n5u}-Y9&-VceZh2P)8f0m=Zl*6138 zeL%{G<51l9qVJeJH|1&+?0W5b`}6of`Uk`9`2OV6fkkNV)KAb({UqcEu0TTW(L4I= zmLfbQg+7(KTC!bc;HT8=-#ay>@4ZvQs6%+4cx0fkn+^PzhX*KX%`# zz@{&JGR#rBBLmouAIA53J%QJONIa>~_dz)D8_Zu(@-8I6mC6GxKnUy+&K3{M8K4sQ zKA}%KgwS~UlYlgoIYy~vKm}!UfX?Oy{_%NaKL=xiRZPMT- z6Ykqw3EG~F3v_}}pbdZU{fqb)HZ(*CrpHM6y~=PCSzOfEApKv@*Ug$_qu|z zqV4CLt==ZzS$w5_`#%fXy^Te_Lve2x`G(@o6m0){Q>(M2VEfPtXKQ-_?qTO1E!cjp z)$44w+*cge-yM}+9Cr|Y5as(jjXS;z_z+usyTjg<&<$S08AI0(TbA}k(@~wJHPI2w zJ7cMfz+`jp`yUNsCb2c$4#$x`VEIwMC%MRHeqkbx|8xzQin`5DBY1WU(heeU$6wmh zw!g(JLl*3bCe<)^9I&E*^NfWvRB--~f|p1SC`Zg&5T(8rJQ3o{5VS46aou$|k%sS3 zKZg#9?$P{U9(fzi5q%#4%p-Yp;K0}@MvYRr36r8`N+ZqS2>JW5Q^ErOgyd*4WA;;n z_}yIe3|B+RzypYp$h3=b#Ux08+uhhJr`eYIAt2y}${0%SHg}{@jNhR*--?_g=5nME z*CXaqgvfJ_nAghCKDZ&XZ|$uuH^&m;(kMku#LPHho0^D%3vwVh zy}|%`zBudeLPo^5e_aR;b9iH$STwRmDLFBIs=J!+n znVU(HE77cC1_oDD<93dl* zj`BVIySe+RaRr_PBg`D|M*4z%^v00DEl8&5aLS!?NXhR7fQIur)Mugzp|c-OX`~#` zS)w{i4;?zO`#4g_VXzI|6QB%o8KOx6h1%nEPMW_|iPU%4yZQ5*BnVF|*teUw@;1d! z;RlYOCD?tU1g35^xVPv?!GH*eCyJ?2e|I+C41WRViRi8+P;5AkGGw_22Em~zp%;0bj1{Im&dMaN43=(ggSNqF{@#$}A*L}8F9K<{h!i*Zv)lYuuKEuKm>B@Wc=%V`3tP=?sqYCSb6_5+Nf3R0R(# zbhqc%d0PDKQ@c7+quM9>>`Kpe{|C))c3p_rV%h#avwhp-9V2HF0h%9hzf{)PeeXa> z1znF9x4rF;=iK+|f^W|U`NfCYz4k6=D%EM~a7sf4(tO2OY7}EVorxI6+=Udb5MjKJ zP{dDC9FMj1l;I<-e#M@PaBgv;J-6R3H^xC}79zPJR+!1*eNX1U<5niQzTN&FI(YLR zDT)?w^Pzpr#{Cw$|BgDi;f!a}N%vk8=6yZpqev6;qrXMrbU;^3RNzhClf^`!Jd8&E zelMuutO9(?{62bFf@e4+znATE+OzjTxSpj00B5mLK(8I~Mh~ahvye~>FXBS8G z-z+%KqDp-yyb}uWxwo=9N7Q?#N+VC$>jp|^ohYe0bb~KB(T-1;`Rtem7RH<}GDTXv zah68`yZl3?kprcmELgv6qmDjN?*Zyr-~x`itZ?k@+RSturiEd^!~!%1xk67=lydt zhHt)4jdzgJlu-$N#aOc+()AABVV0a;N`6+B`%)=}&eYQRgJrnox9>Zst66RAza{n1@<}iG@%d$S@{jZAipsxX1s*WM^kmad< zW~nq=fO{quh#{638S>G8(0kpnW(ItOAJ(vB4&hs5mP~vAEMe>lI(+`p`$w?azbP2Qvox^U7B>C;k#4zJKx;%p=u6k25E^q-+L2b5Ukr}$j#lUN)t z;0(bXH<)QCu~>Gz89|o8t`4{?eSnubMHcIXo=|W%3B?t>4WR~0NI~edp+4O~idOx; z0h-;6!VQ-7?=)E6{0lbm{_-*zpz4JN%O?wRI9C=27{Z)kV8vCj@QRpsNsOqp~?p^77 zfC&2{|2XoW!tV{>+#_*3|GW+<$1Qlde=iV$h1b?+F3R9ktDbIv=`9;@!V&>L6eIS8 z@G>A+RpLGORS2(Ndb)@duX+V(_>^ttw8J7!I{XRoGr(D3<3!0Yl@K-aH5iC8VI{K9 zc%J*xf)9Af%%!h$NS48;2>Q5eRZD*-GR?o@kW7@h=-VO;(O$qoQh^9rGpD&khQ>5i z#8+bmsXY?8)>rZZLhDN@{EtGt!q1C1B--$6DK05Tnr;X*Sd!6%%vS;j4K9X*w=C<2 zVIA&uXMfGDZy|+X6EuXA%S<@zecOXvA)cOmmf8oB;iS#*VeTLGIPy)6Jk$_MiOf3<9Q&9b2nN-ubb3!S;>CmfR7t{)Pv`{sFBF|PBx zcDx+plSR!OzOA!)Gp?&l7}0PZyax(uYK*`4cX_sJ7Qdm^(Dp)i3tV1p6${*;duE6@SxZ(Dz9b-|+F)Gv%3EpXzAf)Xy)lp#vVQ5AL6U8$Zy?s3abyUbSf577l-r!)- zaRM|zm>*U{VTTIz84P0s76l4c=5*W@pOwF|WVrvt1xhRFdOzyczgRK_)@TFEGGEbM zHmu*?x(3Ioi?Fz%Z43;|2-3%ND2T6q+=d=<;2gdMx{F@Tx&1ZFPHsnPA7<8_w=bnX zmK_$z(tl>s_A?l>fpP)6b5!D9dmzg| zADQPY>rdeKB7XQ@N#?Pw(plUn0N92BJuPJMyUS zME`54{qIkobujWoE-)*9xBc}`AQhoa9i+dVb}oVIL=pHLrIhBu7~jD5{!T7D*|q<* zNyz&nzG_0e`ZIW%A8A$S)jj7FWWX}(UI)^X_@N8&{2htw`oRz5!-l@IQU9=1_2K!C zqV#l_GJRS1j=P8mRo0m`n6z1@6F1CM@18<`@8N%2r)lRlp;=C7J~iwQrBVpbYw-os z+rHw;F24y2gp@94rG>C&Myk(A;$&6$ppS8zmd|C1&vOk7bC(p3g0U9>Z9G_!< z5q41cva$_FlfR8_0oQqQT`p zhufXwu=?Ajvco>jDCPeFN6?n{sO6u*GFVeyw!!-si#mbKl_c9ep*XkmVZ@Qg{kKEr zN&D3hzNWmN;uWFtJU{OS+L;vXDqmmTVZi4$tG|)o!lwGMQGT9E&nJ4ciY1UogwcEE zpUG44W!m=sQ&g-h{^Q}Gi=z{Ep%ZS5!9Ag#gu8$G3A|ka7Jnm7;QXsWHxJU-iM_Nl 
zFGBQ?y`{Y#5$;X$aeLh#igWpQgyK6%5KWxxn-|LGHcb!3^9jEQanRmF;e44V?cGcM z9raJ{_bv)wc$bH5ggaG)cTu`ugdG$3B}xMtPM2FO$Nh5>HhaGW22| zf9CKBUVn|mV>j9_(mC(DL^y_Mc8c&2;inIB{7%BB_6~fm_k(yMOr_1`QT-Pi+W3MD zz5+-etjA8yAa?Usw7~Hn`1?nAt4NP|<8fX;R_ve#Yqax=xIZ_s)K}I4S9jTc_{6NQ ztS0cSSLj6~e1r|*+Pg6Xcb9=rF@D$qD#P|nalY6Y#b+#v7060^lV_s?kK#1}BM1bU zxxg$F%qqdm5tyfEMq1$J+xC3SY5FYp^!xg?0wWJ6V|_7!J2f11_Q0Mn?!BxrVILD` z?LLFU2kR+(*=Modz_GaFfq%Y?84bQ8dc-^lrRl<<9`i8cTc)&6tX{MS4SowW{1$#H zejd*6jv&9kfrrZP52Nz?J>^LKUZn74>lexSr3CqXOT+K>QTa{N@SE|e_;{VsX-eHQ1pD9GkGYD<#a5@CCXhitF8zemCf z31cKYJ}m0zHVLaF%#$!e!e>4acn?dsTf%cvuJO@=&s7pGm#|#ITO@3jutUPHO89*V zACPdLgilGRNIR^RdU4~C{iGcA5>`mKOu}pl56F5uF5$}(zAfQ-31cPxWC>M1&0~aq z9TI*?!VMBGlQ2`l@e+PV!d?l}rT=G3sM5cUW*)e{DxsR*hvN`*JtCnaPQ+aj-Y3)V zmQXEst;|>B9kHUE8b2oSPfK{6%rBL2t%Qvd&Xn*j2_KU1fP@1Q4ocV{>AorB?@AaU z<%*Z^BdKq+%va^RO_rZ2;gG~TDeYJz%LiooD{8)k$0Y2N@Fx=9D`CBaRTA1HOqX!7 zgySTPlJK15e@e=GT*g(o-BR9sL;c!h`c4VUB~;7ZVu(k{e(Bv0W&bJRQxZNT;cf{# zC2WweT*7%0CQA6x2a=wIPf7TYgo@;wBjIuhcgy@85;jPrnB}|ntLc+j%0`D0K zep&MOiQNO-H%dqC<@Bl$im^S>|QmnC#dxIw}k2`5V! zE#Y7PDeWQQZzcS`gm+5lm9SDmyM!|(953NHX|LBM9F*{YgnK3Yri42rbV;cCapIG- z9gQW!q(~0cbbEw|mnJfiE@7gCiiB1PZ4#>GRoH0~bn+!VHT_a>rupk>B-hDb2)hwJ z#V@=S$rOsU+4HGN}ky;9rU z+*ViXZE9=91X`94i|2ONZc|#FTa{*K>n1O)QQ4Z-&8=-)TNP(Vozo>qP%rZNZ2T$OTtbGcS)EhA#;mzb_w@L z=#ucl=R|&tjPL%jh#UQ)@uwo)x=zTuOUmz9BjTjz^0qd%d{z0ng7PA^tf25l9zc3| z!OEglW!U7wk=R-{W6Rp>>$gz>Tt&`?+V*BITUNZfqI!8rg-B&=-Kuh3!t#<(Jfn`W z3g9ct3rb3hi>_6yhPd15QI;-M*4|jXdQD+r@#@u=%2#I3RyMXbl(aUqWtY{uT+Vuj z7lPu_@u_R6U)J8x;B>F{*1EmUdSiauW<9=@=*}!@soeyDTHCxzUAx6i>U$ z)kZY3XO85vrM9`LzSiqp-Sl~58DJXHB~kPg$OF@0nYnY7 z>$vbbyJkY!T4lykrM11e+2wU}4P8#Rr^(~x<`Q*6D(TCCG_uN^Ep6^?24yZHQRbGm zruupF=FTRzL5YOHbo6!h$fjyQZRBaqua}40i<@Qyu8!`O%@DcV`8h!n2H^#H&zh|o zHAqHQqy#!TRG(--?%GW)Fq6Rms@Xl+q-N&aEWxF)t-aN&h8t^JH^Bu}g!&*YE{pI2 zLn2(?>j@dHf;rJ+dolLZBh%&}X&_X*y@51XU6p9curus05(=QYiDXoonEN4B~wz>R35)Lumz;f7k{aJ z*`-ROUU~27WY=FnvDv0|j#$>t(*HBL@_OqlYcRJJmbjOdw(Oz^r~{gy$$T4+c0=Vl|d)5*nK z2_K+cYdlVO5zSuUq^<2O8=dX~kE0DUHD{Rl<0$1^0>7edbz6(m+lX}y&5>!IskGL% zIF+iIbCz5;J41E@}blsfq|^m0BWJsq7e%td|h*`&&vLtVMhI-$6{ zd{ue%+Je%OqJoN&RV%B-(xSS!vLa|EB8e%*$~C2>)pjgtN{d19U!#YKh-Y;m~TT%HBjA%rP+#Wt7Ib5qFrq12nRT|jiYz1x;I*KYDCv&nL0 z#bv9?*H=5rS2>EyE7n)rS7C{_wq$k5veM$}Wn%63zs|34mA$yUcx8oTsN)w-6S5lg zB2}phM#0ziQ6uddk`fA_<15Oyx3+tn^@OP`RYoJCET9GY>h=~%F$^7Edwk)-*EpmTiUikDnRteA z)cTX!F41Va2HODc(dt{Z4g?OGr?hRW?DUO%3^>Y2HhMnIZ62}A7K)4*7qNtIF4f~e zS$!#CZp<4l*Dq*w(GG8+6&uZE#7t*%IDgTb6+V)qm!B$eH6Q!A69lURgO#q{!s0FA z3x-SML1Qc|%HZLxkX!gUt&zkGj8>$V(-L%iLhViN8}ao=drQlO^aJxUfR1_ ziQOEh3`v)jP8)np&J$ zK`FDiPKc|!y9&l->=A9+mPN8EZ6vHV+l%5#re|(e5&SS%NKFmXzi=$x5Ow_|CK?wX zPebKgVh1QR(^5xvUHygpDI{0W`I@}W77yQXp`Kura-uIPr)`m7LB4#wG(L4qWwjmD z?JQffyn1!Xh7t3FkbK%cZ*+XIq@ukY-mQryr8S{p7s1%Rs7DB@A!dp?J#_hD*7u?I zAm&Hm#vk3RMeg5e8id;a(3rFdzPW|_X*uten%L(73v?rt*e%M&3_P3nJUV%?rM0}N zwZ1Gk`QQuklvTHFWaR&8x2NP}ApAbGxEGT|F zAAMnMd(qz4fPY+mef@j((HNIYa|!vi&Y_Y1-|@FxjR>m0>AD@qs#T@>NiR6-b@A5xsr5Ay*VdbA0l_{v>T>Bq+(1qvoUuM>odr$AlwgTM(Gtz9 zNSUvQd5JbrDyq{H8$S(Ao3bk!u@I>j)Ksp8d_LZa*(c4wLv!(sn4>q~pSZK3u=&WP z`$2TKNbH!>41r2CB2GJ-waBFq{cYj1S(@FeJNI<=L&a|bw!*|tU-o6ni&aKhL1jtV znzHJ31tnNkFT0_*uwr#^y`;`p)H^OBKFYjAJB6Uzm}^Q_R^U!880hOWG@sJ0FR*)P zwQBBuat(C$9=+T`axS`kXO%8=dF2)w*-5Jz{C|d+m6SU-;ZCk+)fT6_xz@#R)>^ID zuk)Z$;dV5pSi!MBiwm6H&}G?&m~nk5ptt8mdJ)e<_Ik{>x zf}!=K=nXtsqO0^Mwb-fV`;MM0>ZCkbtFWKK*D)ULVlv!Xv!cyg+pN$kRmp6}I{n1})5Z3Eh&Fa|jX}nH3~AGAl?>$SN9&y3)t(;ml=o#p;=_B^9!O z!pVh6=d_m1HLUlua~iTTF8-(Fsyyrpg#Lnps1K*dD;D{Myr<6kf61Vtd(1@Fro4U5C8CKUgiYT^xM7q9Ua34kW>EM2gW^h}Y(XKdXOAD2_G^4q>9eX&q 
zLBcsu9w$j-W-tYJh4{#-CS+%41CI}@6>Vj;o1JU;o~Og-aCS4pG zf3q^$V>R{{go1XIbQRfss`4q)E^4}JCq>#xk(Lp0jh$AvZ}il;oA_DF>Z`7TDfp%6 z(ii6D$_zs~w;dJGrw7pu=?jAlmSMc0lRkBrk?CiD^%zHJ1{eO0gl@cq2ej4kDS+H( z&!Af{bYKbx<2JR@h8UkzL%wC0R|?AN$Yx~=!`;7S#f)ZmC2S>Tir4bJrL8C*+y(Vy z*Lk+!tfJF{ct{rVmzNh86aJc&H?CZ@ZY9GO;qu~ge14>?U`27YnySW2OI8*aAXkmAa#R4Z z6mj)F6!k@vkDi9#f2J^9d})bpllM@3PfMhi;*2eDb;)ZyxGRWkv5P41WOJbT9#0S* z6(7WS0`+Xm=oFMd$GP5iT}r6DkPxFQ!??#V#u@2x1m1UQ2*N#i?Td*5W9pqJF~E}c zLMbk@Y6MW`ao*j+2JLPp6@|DWF<8%1Zim~6`|EBf6S_^c6?M0^y?JwOtD+yzaX7LN zQh5|NZos?i=?4A6tVNs>@49m_QnzPe_dzV|r48EL7+v)FMd&o#ZQC>m`ux!DA`ztj za5L>6L|kH?fKYajucS+Zl!_qr)YF%Yo3eI=y9P# zrk79f6J2tsI);HF_pvSv(y|II2E*UWJ(aS6SKouCS5;k87K%S(}U!koJ z>VSjR(c{q+hMX-_3m&SJAhwZik^W%mGAt!V5>_*Snjry49m_xeRo+I9u!6 z+$BYl%1An#-KFxW8yiIqUVbHJm$K>U`Ik1eGD&_ zXCVn*6t98Kk_aWk;*ni1hp)7MRwLx(!%4_+qtYcgb!S>cnc@zdQ0IPyJ1;ld%d3Ms zqSdv*RcnYm#=$ywTh`jL0-WMlwq`|jLFu}J^{WvnDk&_eC@zv4+`$qf@k>^$EW`N~ zX)j}bSwZ=Vl9g)h;QqLQU&+dniV_@i+E5JErK`Xbr=3;@*^R(2Dz3o3Sasp5^71v9 zgscK--hPa9SKu~C0k-GYR1{Z+sEy=bRrTh2w%@H~rlqhFoHhs%x%n7pR$70>nf zee$`Uy9C@Lp>>yt4^;~2l5w_P#1rop(DqFM9p4tP^9KU%`k8>P2Lw#~qJ$61{QnWK zPr_Z2PyPc!uA$F~{7$!k4v&C?63;JTpI4+0NtoC!;(Zb(ZV_>|Rlr?e5YTq3fT~=I zlux-`q<2c_mvFDN*B;5YQ^jQ6SWM@4$Cgnf^Rc;7w&Rr@+*xj~s;BjcSaebuf~AM07k?;QaLC7xqQ z#QicpB;lYeH}rRr&o&5{D52||h$}K~l`#8C-b`3__LUo(TCZelaYnk7-H6i(D{(To z46Da7wu+Ur^@tT9rxdY5gerJyHP)5|z;Ga)FY_2igVKWa#pTtdYgVE|s-``P`dW8= zwS2Y{N2HL$`(mK0l+i8LMcz`%$L5H=;jDL1+CG$VK;2#8Wp4 zP77x#> z(edhNj81(dt#I@{)9!3{V$q0s5s|{Q#?{zV=i$jXGXN1iJd;mm6>E^FQ7&F!U2T)M zq`uiXf>N*;Q<|`|-Q0wSLF$#QIOB!K`dSyzf$UnhLh_Bs*W^+D#&)lA(J?1;uF_a5 zo)+Bbbhav(!_v+sHbT9PwO%^Wh;=Jb5?7GkC}Yzl$TiWyV4_6ltLTARtbKW!P_DUl z8=j++tT?Cx0&Ztu2ve{L-;M`j8lZf=veDUCyQQhktu!@2P#CF&AJQRn&<26BaT}7I zc!-4_fdVD$haJG#I6ergb5CMms26of9dboE<>k*SZrLcRb@rBxJXtQU#KVhdaad(+ z!kpgg>=5P28%Cz`q8G{1Ire`}K6tA^HbaV(OMNC%eR7glL-5b1lBY?vSJUYB;`3pn zxipCeYZ-HN#`?7Nc~SXb9St+omp)+>YmH(b-dd=c^J2Al#608xJWo27bQhk)QNy_y z&oz`cnu*#OPcs7lJk%Aj<7zf=l>qggPhWu9Jdj>sqIk$}p%+usIt0FIrvb zmz1m9-3_&M&ca$(?Z&3&CU29|gH;B}p~uTokR*}0EyEQ2H~8bwWD6e7x*XOG9968V zgJ%uZ-bin6KCj z|E6lK4n{ku9%z(D`0(njwJtoT)zm8MJJ^Ih$S9-?W7}<9>D(GD6}0w<@tOQPh{n4d z)w(JR@?w=c^^t0rBVW|CTqUDE{}Fz0DxDbWBk~$OFK$mR{D}TguUq&YQO}rg=nQX& zu|Z#T{=*p>$E$k0Nu`nJ$Uz~L5O!oJ3gU~-ITRm#R4~YM(eWU}KD_Q2IaGlOA*8fw zY{2mR98E4KO{7ctd{j3GxpY}ZH>Pe?hhStHo?kh(d7W-Yt;eI|g$+=d9EMM*jF!jh z##$7~#^$m&+vD0YihcqMom0D5e5j3QwucuN8AhIb`lu}#hA(ZGbH1vH!|SW1qSN?d z{E4=<(znqdGXzGlrxW*=8r$4_C8?@4+oPtAsomJ@T&-e`Q1Cy+ zPk1YyrfBjs;^Di?=2wh|oaq=B=ApEuQAbS@Ek!)+iMdSX>?UKzMLT(;KL0xX5q!h; z>Reo9W`<`be<4rkH9WFX!Fcu8zy9!Ho6_>L*%N@b-(w1F3(|@d}=)r%pvamOPr$|I7R& zdo&co1WxL|$?2^YJ11}|&5a1v55wJEQccC7QGVfH!dTSx87-sw0Dmh~n0`r=47r_w zzB)hQ69`&HXNs3bQBY7L9}eb*=%_*)=*jto+#;l#Bf9>&PHaq4dKg(XrGku%^ra}c zO{m%8)Q|F#iP!@XY&KPwVNBDP39?XCxrqIjJJD0>G-;rekvfcEQ{3UKYsYgV%c-vo zGYtJ_^rK1&zsGCT(#TJ)FZgb?Tc@E%q>a?EQS5EIjifJPKdkxrS++JePK<<^1Y%KV z%n!pi43!Zwgx4=ra)e8USdZeB`m#obMt-V)hDt$Wy?=%{B1@=A^`|QlWMiaHTijTS zbBENCr5RVF&clPQN3P1JWY!LRxlnhtH(iw%j^rGkt7*dJ*Ie)V4T6G7UvC-b^>BoCM6<$KT*OTz* z&7Vb@_kWrZuhHO{5GT9{!25ec?+K+hC+|a?K4?LgiYH>Bzm#4BJVhoE{h)k5578s~ ze#9#!o=T7S3_TRWi$**Vc!~i}MuULL52BnRQ;2W1tK0g-uhOr@8#J%~!!Q4IJzxP# z_mOtvw`$KwAd$R&Bkuq?p^eFwF--hr=-vPus0qIilDE;N%gz)gu9i{-! 
zPDr>^Jc9RV@JRnXhz}4FE-l^?1D+l6odPY0N99lcI>eVk0mO}Xq{mbZUOeIs#50K@ zE|o9Qt9e{c&xQCP#|q(5J^2rW@M0(*c%5PBWohupE;hvd20Ws4uckhTULVB`coa3( zn+kPuy3ym|wk3-KBfNwDuRv|j^X zvfsUrhU6o=^dTP3F6kOP(!+-M5FlNu9whFAzY5VKygtN@^-K6g20RDiod)@cp8t^$ zJqisWZtRB$FI|I2eDe|a1Jb3+NAT1y_4NFR@8(z`JZ^RaULxW~eq|=+eq(tha$zNA?@U*Y;e9FNJn$@Kk!?@El?ANCxdiAKB#s;^FFT>N9%0y}(m|PnT8?BVJ<|Jfak(;YNAz48JXJnQPe!_WQOFtw&&ZeX_MD7lr({8uo)M4YmCuLlqRJQ6 zpV1B!Nl%N%H0^`RC2H`rdTMU|H3mG=J5__n?S!~uz$3nUH0_P(^&uXve>VC-Bk)vu zT03joB}r=4;Hh>Yz5~At=|Q0Y;+OJM(qr%Ok$j1+Rz6<8$1aUWa7uc?UzAs2=r{1foFw$mqRb}GU|=&G8D$I z$gb*o#`dOIr!)c@<)e0e`ZQ4y*LVQs&zN^oL&T-}6=76p@M!+$LcEU{;ZpI4Ufvra zdK?dNt-sL6vw5N5pU@grCI#21GJZ`E6@iy5M~(TxG&S$Wo5$g4O|d4g2K?xONcPd) zLM}Cr))dM3RsDA)tNOY=F9GRMNOB?1Wyl+cJl#dg|1VrhqWF%Bk#tPHIM1CZp)sFV z#)!oKy#%fLdnQD&jvv5(@Eh$S{9T&-J(@6;AsXrhUm2#b4L_o*y|jG$P-Y1E__mD} zMHePljoA~*R}dcfVJNO56D^gtmOs(gUMdcy1yIHXdiZ9J7DX4PoSmBdy$Fvc0~I8i zvUy?uCu9fW6Ye6brQR3H)5;e!F^UZ$FA%0|XG#=POKJHG8gMItr_$8o*?**#)=NcW zO7H!lJXdlQ-!CC7ZTT8Q9r%GqR}BA)b_i(0&#$3HS)`MCY3Y%k+A9%qtCtop-#{-B za`Zx%8l>+r$fTAhn2)@0eiJT@@JSlw$03=>hQ0WaRAdvb|UO0Ql801z< z5pJc1j#`%T70@~b-S7=16+H-t47#fJCj_m%jP_RvkdN9lbhUiRXUISB?IaaF2uUUt zSIdXw)LsLip|%4`C%ZZDYt-j`od_FkqT&)HnG-em ziY83?+Ipin$!o*USZ`XrwE7$M>OlQmu5W@(@FF=K8okwerFNy|XROzqD04BJ4NeMQ zZ=D(+Cm|Ytf_kLiC>Pn?p~+W$fzYjJ+m4@NerBMlB}L^MHThIVrJ>c2=xDEfY2o{W zeHvK^U!_Ii8qm%M9emS^qPojQGK5JVN)0bhpGIaPq~$|pC?DV6((%wn2tQM!(<$JG z?Ry9(3?0%%<*Bt3>8HEGl%;&Kdmv06r4MS-RUTA^e1`l3-x#B4*o(?{{w$PVxflI0 z=53>0gs*7uU6V)AOM^qSPJw1(7`j?{1EAFhS@G=TXzaNi_GMs462*2yW;~lXLLMz1$w2(@jNC=>dV$vnzIdi^1l}p6hqI50NOpHP)qT)~~4^wU<#=vaNb)%aOgcmr9$` zV!&$`=;9exEea1)o{FobNBG)nG#gXdof^7SSL&suNA;(@$Y$aAk{#8ne09<4AHKRt zQDX8|?m7RD#*UWd@6&itcx@HER;)^#9}~|%2aohr)59B8cxF;&i^CmAZFb@zx#}&R z9P7fXvt9KY1($39`q4EHzZHcQrE~+d2H|Xapd`J%2|-qaG{=}onbyM3llD#$doBX6 zr8ONdJsd=u4MK{v4wk4yvOHA2h;OSWTFe^BVhD#ms#lxNU)+Lq62I@7ZMD`lY{J|a zand!ru)HF@vao=PDTXvk%+#mxXR@tG%M6uWLuD)VcpE6KMweE?c{Cc*DA8p|D@0m{ zK8@4s)TKE%y`B0rj<-vfR>ARh>(e;i9;Ed)zy@yqYC*nEdoWIor}L){X{~%KTt?ue zfBy5Iuipssv5tqN{oLNV^pXP3qf?i*f%Dj@O{)-iyO4(J22GT$5O{l#Hh3-4I)p5) zGm-2;lNwL&06Lp`R^0oL^_oLzdGAE>b-PTKBq`MzfiC=fSkRvCL8)=c0TY@+ptn5M7@DLNu0`D&Q4>uT7Bi zLD~$#l>sG?Es|G-vK0AQYMFxDVD{SN7u|SM=-N@EW@``*?-|@|9K>vfil{- z$r8;h^)bwnH-=eq;#{WW7?xZ=mL*pJA}u*5(Pb)*WySSln5`n3C4ydJUJOgjiT9^1 z7{jJ}C$cGdDa>LT&!VsZa3#Pupx>9khvc$C7rM--FS<#=Lgx}2| z&Aj@ww5d@n0rV5jMZ4aG?Z^RV#6+-|`ecT0%Q6>!sl7rkZ9L6_Ix3H_lD&uSiexJl z!8Z;4acrK2#Xc*k`0p5qFGvfJoJxa(e4OdX9J)?a@(K}!5q+~>t{0s z-GW&>%31)ap8gAv@Jvzgi)E5W1$+RuBOWs{qFBc1G0a>a&HTX4OvhSU>YGHj{)gQoGs*B07!DK2c*lC5Dk0Bv4otl zePe2(9W$0kv*q3xHtyY6WPBLBZy(Z%7`uvD1^RAAVZn8$i4#oJR z2c!BTq@V4+Cz8!f67svGzPAFB{ddavALfeu8?!{a*@O2DP=1wFq^B*4Wm8SLEMX|l zKX!0TpV<{v6KRcbkRQ81|4Tq7`|JTdf~r0rpoin}Jkko|DIn3k6_C=oPkG^M^^wes zfiY=DEK4&@AHj$G(+)mVrdP^LJQiNf$+89Li-~R_AYGyMDtwi(F8t72IHm)sWN>9L1uh}fUK7*y_rL%O?6)bgVqCa_Xd|x7L zpLR_Go9~^$=F}@}R^D_r$&||ChhqIPgVB9zAD}>|1K>k?^+B)SqYcn?ZIMWeEf#S> zYd&br1FgBBHFE^53)t(8ko7PqZS=B)w=5U-S-(Qi`zIjT>qEPUf2Kt6y9bccO-dY7 zymOdRKbt9e8B8%LEO}_WpY$?K9m}SAr?aW`SFovh)8H4A8BeGF{%-hb`WQC8ej*!> z{%AbHnlLsI6C0B4m8xo!fa z>)U`NUmD66?#^J>?VQc7>6pV7HDN6ia`3T@;L`~{A3#Xrqewnk z5}w#7(!XCP=v6v}J|`MP{M}6=|E)%;=dvW0*O9`kjT2dx_bQfIzX-mThcgw~BluR5 zev+?E@+$>I604N)NBQiPu^(*~^b?vzd%vee#K*P?{uO{Q6-!%|0K47=yZT_)JE8B6 z5qz!SJ09IH>FbhwZk2p@0ur5jWct3(2|d4s84Ag@z$?b*bM1oP3wXyRU9aQimz*93 zGODSs^BEOmkIJ}d-B`8`Z7SKMFyTRwHPqjk63sYQmdbdKkNAwkQS5Nu6)fiTbmqU6 zv29;uOnFSSZG9Y4j=-TYWjZEpSLn*764o z{bCI4a*&kAmD?%wsoTz@>^4BsPtckUS}QhOz41m2H+B~m6oCfY&&o%Ett5%%@62Jj9l7kv#ymDZ&x^6*vuxZ@ygzm@rZ3uM zu2ILqq5pynAu8!?m9j1YB%SSm)Ql&>j#2a4~ 
z@vi|A?W8MX*&Nd}Hf1QyKWQ+fZ-Q%lO_DXyLGz0N$UrzXl7BPci?0j%3(gWZ#w_oM zxM^BEo93Osrqz#U(<+j%Zb@X*9!?vYHGcIUChI~TF5I<7)Lb2Y}B4z#DOXir;E z->wn$y$5o?F8!}l%KkrqWV`)L3ssPFUjey$g8LXqITyeCCm%D0=NtBDh`U>fT<4n9< zcA|+zBVL{);vY;9@-9sl>7pNB3cp(dzgrBs^G3*hYF8w?Tgu%j<+uxw`0oZJx$g%g z33~yF|E~e*T96{-oHjJDPd$UYz#%aF+IGxC!yfB2LjceD4JE2cxu8>pg zTa4B0TR>Li+3;h1LN;yObagDd8v7JESo@62i)G_-`0CJ=G&LIY_wg(>FNwuJ7sKb@ zHQ+DJt%xc8#j%MhHu2FzELw{aWq?u{i~G2*vo+54B1R3u@+b3BZI3qFBigtr&dlPLjq#fYsl{Slbz=;>@o+r$yioV=Mxm}_S?6a= z>>>O*4htXC#*0a`>sCFqHER~ftO^+-!1dIWd0uksh>$R;JGB;>a|)rvBCXKyUhQk z%%8kY&@u8;Bwiiz>DnjpOykXL{Kj#vgqnCq9PFs==kvc4$;vS?B)LpUV_1^c%#!M( zSW;djOB#aBxi3dB+Xnaxet{>2owaeXZ=1kk-O)8P&jbs(w-wJ4t4ypKKjkS=P8&Ct z%h#0aMVVpzDrGrs+?0a04u4L;7?VQnzC6h<_QtU<5GHo*EN7fZw~aj5cnHi(+x;XX0|k z_-7QjHA}XZo@D02y8(11TBg`h+iJ1L2qMxM41ZA5jL7w8-5)>m$uZz)25*B zz}|VhJH|zO!&DDJd!8Q0Se=Qv>P_r^$xj;>_JkcFR|@1!$$>pnW6^f-gFdOSRca33 ziF27I$Fj-Zv1~H#OH78XCj)*j_Dyu9)FeAF-g4RrpdAm|ai9%e?uoK)EvVaZsM`dVe0QRl^PvC3yNcO1{5l7O zO|)^-^myC@Nn+FM6WMgwX1Y7HQ_A=LzXYCH<5*T>6!69a54c!UO}|@%H!~J{BlwNi zOKVDxW$7q4@o*BGh%yuJ=BsqajKU~Z=(V7Z$1&?p6KlaQ`6$VT{vPSNG`dG6{mS1B zTVVoeqq-d%&Bnra81fkGtILD0=;Gv0-Z5+lWs`jHDY{p~eJq;CGm&;L=Z}3*ghO$X z$50NmwRDVi8u+&8@21`*>UA9;?>AE8;4AQx3iwIhn9zQ-3-;yr;WYEq`e=stWwSl_ zEjcFbVn{PhjmJDOflaLd#2jI2POv^)rtvY@=fw}cH-0G5KW;F-ZwBsBCVHdM*31m= z+-6Sv?&7qWA&y=gc+7xjNeI0-2W?>ipy-!^V=;U)_Lssh zR|1k>rp?59DlZj#v*Q{1f9#%NDu{lu6ZSHZe0do6<04tym(XLP?q+1fvkcUS0{c%v zy-hqkM)cto2@LP%X3O@Q*vnELZCvz~@$i4N1L%`lF8j){QEY6Ec?jNM6>V}Xv;M}! z3h~ST9qfTKUX##f%l#X2)QUIF`-8 zSTGefA6FjB=RYnazyheAYG>cDU?QiDSUF8v=i=wlYE?(9kgzLQj|-2 zN|d_>km!j0v$_q~gA%H;P}+n6L2v2rgdEj?@MN}A#vhjPlYn&1eOB^S zVwr+5X9~uggz~XNd?xCOs3Ch^*e&eVC;9P$qx^Xs^yvD_prkuD4*RInu(zAcCJrV0 z#}6jMqF)gF z2Lb6CJ|*Ou^P;GSWq@>515z!fWsGGLwD&DtV*XEUcMtOU8H0*RY!~ougm($jK@8%2`s%L3H!{6G~XB+=Z_zZ?Gt+z zm9l&y%4_vJ_=YThTF~nPq|ifvgg1lkL-M{Qj*a)mvILrkqWupd{{{Ts{RQ-(ehGaL z#jF2pV)he4H)C8|Che*?+@Dimvx%X7DYBUZ;{xspSt0Lzkb`7UAam|pLcU`GDQ^b8 zDMWgfhxDX5B-L?@)YU3&wFK}-XM|n}XGQrm(#so%eNfy-M;{^kWj~eoe@ky8@`>-E z97%8(kgj)ST--m8MV}_l<8WJ`zp=f8c`tsA&k0*-<0hQ{O*PG8NsdJ8j4APK%DdCp zq|;Zh2`8sB%VCAZd1tVw`k82anET*&ucV=kn~KMK3Wdsot<{7@Xd3yoo#a>lvTKG-GeoX|gQjv4pfZlHT_{2o2| zIdxDo(#K&h82iADGgp6`90 zx1XH+Po1hdRekEx-PJv(`pNy&(Kr=k6(Q&qlJ07JbL0hyAdpN3N zj66>?woPs0H2ZE_b?!c%5q6)+{Z?o0aan&j8KpWx$8>&sIO?1>>YV#|10nA`#HMG* zscovZtFsvZ-WptR)eIy|t0S&v9ZhjE?B>ulDi%Ycr?GeZK;8GXD9Nr>dQ07+2rj>y*k{=6^Urk_#GuEo>q7$e>LYq@ZPqJoQL1$|==xmeH1!4&O}%ZBbiIDy z8kMSbG@?UnDf8yjI^RK1jvgZ&M!!o0C5`c&%lFK~L(uoIPS-&`2Id$98u_GO(kBzr z;y9nqE!N8Ae3Y(>I=8tllFv}Wzt6ND_h?gZCMe_KW*vS~hkvLI>+gtG>0`|L+y%<9 z0~9@PYWWZ4uOWZxSTp~Ppd6b($^U|ucQLMWJyiEcn#Z$som)kIIj1;%j(xzj$)=wA z@uuFWiKf0{ml-y{2k6B203G=rpgrFM#GLXyfVziw%yC9_)MGl|ffQ5knKMj1K2VMu zb=X+T3deAN&ofP$`JB>aP)9W=x1H*r7O&17)l{~Fd?+%rvmLuZ-#rh;-# zU50fu);NaHUkA`%<8L*dPmFs`Y2Q`kl_P$(nLYr@(d%L>zqv+Y-V=3YPGnAQbx%~j zmi3-FX8Prz9B=4wud%$F&h5k)9H4rz?sM9A@z`mXk&Et@YU(*T*VNN{o~}n?N0rF@ zH=MpYl)gHUIk4BQJsP_;bTOVG>h*c_o^PhV0+eH^4jc1&ZX5c#HFmqGxJQh!YvlKE z4su+C&Bbo_fpYBE;V$xwMfvAp$G+I0o4Mv=tQZ*Mo^z06&jK@l{6aH-FJ}kUFE^I= z_3^3;&!Xnru;%X&;wd*ozS~GI<^JXpGv8WJ!e+f=Y-5z`uQ}%!^^WLr+blBEZv*9c zRfm5pf2z)3@hda`7oZ#i7n|WIu4BBfb9NchSq(Al*MqsDJ97p15~ls?xN|Y;lFPYd z{o2g;-KD0U9+#QnAG`jg>-KwJr%$@v%9;CD?chhprbmqE`@ttb0-}y1>eAaSgjXIt9>!Ajt-sn0-rdp)2 z>rY1ix?EGgU1wv!p}JntQfEsU?PpJ8t50g4LxmuFeOk)+R>7XemN%imp(aE2ye(x) zwFuktrWR5zWY61DM!B{$-?v^8hfU;oDQLFKSD;*Hp;Th;0 zo_+XTg7F-6h;Md>hRl4ofRe8klztg?Cf5pC4@&9~Jnh;TdF6-?o9UB5Ins65T)*nf z`jxqE6d%&-S7)_XXYmYdtZ#?(YHP4Vy73IeGl26dhl;t{p^m+4_Uo2m%4n6wc)xVN 
z@ilkT8f!nk`R}!_J>#Uis=LOa9=O(_QX2F)X%S{zkWQVFPRePS*LZg`&yn7*Pps;5 zfcMipYq!Uq^3JF1dWU)vs?lY%49h*J`99TpU)(ZY#wPLTvdg3TbyWRWukOcMLqFCU z`X!yycy>dtx}LQ?%=aY+xQ`5T|I7H%?@Jn^&~q06zrJZA=Ro^`CSgR4YJou!h4`=w7%A1dmi(ff9|K4hmCdL6=$gxd@pt# zYapfdjBnl_=9;!@&Z>55QfVyT5c1s%&(VgRcFFp_L$zZ)tk;-!+<#2w z*>r+x(_pUm%Ua8#d!yBB3{DxBPVOP)m;lOnP0?XNi5H|tsWec&t4i1LQ4i|*0F>iW zQ1bgKX%Fy9P|`mN%J)F^C3%=jKvX8v9Yy!Rb7jPK!pq1vXh&cd}S1x39Ewc)+M z6hrAaC_cciMRcBrpNT=#+5 zhW4=Wo#^hD`M!Fu8k+SI_IZ(Yjpti@Q|!b({Orm&|Dd+RHK3f|Dt-Q6J!aP5v%@_9 z#HY+~&u2_KTnkFfuop~u%uA+xASm)2Q0!}dx7s^ucvi2dp`+rXdPjAQYI;(`9>(|5 z)aQDJy`+!m@;B-FYzIZ(ZctjX4ix+E2gUw}Ksk=RY?fF5idm1uznS$|^{T_j7xS9g zF28x*Y`0yYwA(ZL%l6NM%yQqF)zD3Pq{8DD8Tg|m{xhHPW_tYdc+F05-?O?RxYaGKLNs2`D zrC$QS`p7)ruR%G=KQY7IiI;rF_f+%RrdoP?4Q!`+hRuM3!X(Hb^)Hjgz zeL#z_PE+=+L%j)EY3yP9@ABk#uh;*}YzMJ@m-sd+K7)4^#;?OzbE9oV{@A}A>O*KW zZ6wDT@w@}(eS17}xqi+JT4}qVaHtC*A87@<$n{y?^YX5$&phKDZ&TWvh~Ey~Z|RMp z-}P@UcO)ZVC-e&Of>A648o$M8%UB3GoM#xnoz%bSo7ch6D`#_)gYQS9qt!w32|f?i z+rq~CW%^B5-eY&|%Bny2J5gsusWUR@Q?bpz>x=3kbLbPyq1%l)G)lhrV4blO-}{MO zjQTJ@jO$wX41Tj1%i4H5#s|M&OzP1<67!nU73|47;@qz4%+fAgTLw#%`jq#`a+H5$ zq7l&l%URv3 z9I0mzJ+130`QkWlIg(qQgDK^=L-??f!>+&9G>w(SK+C6*J)M)^9kxEK@?t+}X)`OG z_(GAxuBUN`6>FlLPeZHoS@T7+zSYNBgBs0T_GzoxAy_1L2EZjBRk zeHUpAXcz>ZWO=GOay(i7~xlZGy8b@h#Xnci!Eyp7o*JwPV z>(#ZbS?(B((>2c1c)7+rjinlI*7#?QRT{Tzd|qRMw!=bwUPfj3ca^S3mc}%VlQmwT z@d@2-do}LY__4;XHMZC3duz1H$z#0AQL6EJjf*u-);L6CSB-aTtkF0?_x}qtTKcc! z`Xk598ZCJ(&xUer(U{6Omm{e0el5RMqY=r!@jBiLm-5cje!QsDAJFL1@v}58)R?QW zzsA)XH*0)C;~tH58ndU8_G9n*FGLt6fh z6|eC{jkOxrYh0@_OXH;)JsJmS?5(l0#u$x9bouqVzI%1ps&`1&H@Q{&dbE6%#(5g8 ze3!Qh$LM+K=)d&*squM@n>DV{Sf(*s<2;SO&={xj_;*^L#^*I|)|jBnP0~1B;~E{m zQe(Ep#X5eT4kv5usPW*pW_fu!-8LP*TjOu7c#R7+j@LLq9v!|wW4uO(#zWtj>0Z&eUgOOg!x}Hwn4+=2#yE|~zBcu|qj8VMYxH^V(dUt_ z%iX5q|E%%18bcZvYfRGETVq>|hriPH(703MpEdqoV_4%NjVT)YYwW7=h_=`J8tXJZ zp|Miqof=na3~IFc@$cEOnmuccU0O}gN%L&`y762{{NzAEmMsDPYuL0pKWj>1aGALY z8DBSB+gNLXvh9SeMQybvGia8j)a6Lo87dqqTxJxZ)bu7@reGOiYG}{aqKc)&PY?P1 zmd1O{^es1%Gb5Wf;ZskMYC!?JVCrneq9w1mD0j9m81!eE(s{mu9Dk82Gc}r9&7mW6 zQdU-o>v72vYd0*b?1-7NW!bh;tBIdcP~yuIuRi|M6oqfik|oHs!pjJ0>r8Ln3Y66= zG_C%dj?bGmNvOIi_jjWHIIj zYmhyRXj^}}vb05~wA`y+`AMt$md!*Cq}F(C(ZHOW zw>d|1Cs!9CH7(t$aa3oCF|A}pscB8APuV?Kon@w%8PlB9)WoLLr)!2QiEY|R)Ut)K zHLTsHm83ew{@Nz?z1YRBo6WRMz0|0N?1iq*k(%UZO%3^$&XBR}&*H;;d-*MA4y=_& ztl5xFU4ivGLkok)W^z`guz9AjPw2(?&NB*?G6$NeE!$1@g+c*;h`VXzoRN`kesKod z-W3*yj6Hk{i^D3NSlzCcP1FR@WNZ%FY)5n6Dj=rGhGun7vm2{Y+09z@#*a$PZ_;{+ zY^bzQlcSLYPp({S6JlN zMRb=Erxb@mWJt@Udt}X^S0~0`#n7@;e<-^!lrI~Wa;>_unC&B%Dd#D1#tyu6%Hz>< zio?O;@Ko7Kc|PYS88k0v$G19iYRQ< ztT|V-nv``#aZ%VzAyu1J$}hOpRDZT9l35u|3+7LoXPwC5wbG#EOjOiGVb9d53%s*4*q=J=4|_aj*+~nXT1}E?lG;cq(*uP;e}NhM zE77{Z8z{^u&h~gxr%j%kV$|klhuga-&tH%e&h`33p~8@re37J)`aW#dH`5pXkqXS8 z`0gdZ}BbhSk(-iyiK(QeZp&1hNk zB3eGRAW-nLttkqm=2)S3i;~Ue{PAkj{U$B^>2^=_7-#t%W1!$iy5Q-JJy{!j@>Hqz z$Y#TEdatoI-yD$6MjX9h0y%*7x zYDyb-X%XW~Z2YrEm#IQb>%EBHI9>E`?f8%NOg8nbuv>)rT&9X2?pu~UZi;cs<*(^H zeo|%`PxOq1Ot?*_qW&x+)4wF=KUPII&kiz_?W5mg&VX~ugU zZeQL~-?AcaK_QpX5|6%Qcxh6 z3gi^{^3)&Oc#RtgZ;(3)Z+6o?L@~1ncN>LEy;Fk0R#yI z7~%~SP+YiJsT)i=6 z>fBcp2Jnu3qnWa}z&MZf4tFO0w?oZ+>1RywYWDnvQ&JO0InDd3&dTeZQd(-kzmUCYDfdcL()EIVmE)My*E6G!n6{C(n zSrMZ=Z&A>n8OY|koon3GBIh?^RKsZhX;z`UBUkzUe6DD$uS}V@;Nr9yv!^xHe7VYI zZpc*K%#s6z-b@xAsA@^k(jfPl;cPGW#5|pPeVHK|yfDL?U0jfd^01_R>%Q!?8ZpT7xb{Ei=ks8esP_%u>P)kGfYCv(84rYL&`HD6uOPZ!Dg9gFaafqp29$fr9KpAmk4hhv;dfLYcmw@}fVRR6$aax=7Bz z7~G5}z4dCTXuzmbIPAYpRg;VLdOWz@H^k zfnWN870x$qYNf|WA1d%K%~idW7VR^minrWFiYM3;Ov+h!(}dMT`z#IG5XFd@-Ar)^)Yc!Cp~Dki?2H z)!1g<>sJeuSu&%}VA9O;vh%sC7f*9uZ_rD>59QD++BAo-P7i8r=9y?aQ$5`oZ7V0B 
z&(Sb^hL5S!Xa&RGW>O=iUu+=fN!xghqG>%T*%*jYlX&Tepf4C0o?kS4X`o;@797sh z5-1#=mpD9exH%2AikIP0w2TL@e0!qST0+dC^gDZ!R(e`Tx7JV`4&(*G%YHxuotGz8 zdxF+Fa{a#G55-G+|4_VX$rdSYjnVrn%%6A3EG7shL?*02iC^8;Du%V{kgPZr=f+%4lIWRJ7&m1*%tJozlNYmhXS+~95ct*||fQJyW0 z6}IYNgXM^9*4Pv21w5e24HWl5*@YgD zI$eB*FN<47E~6gfvC~lYz3DF!dqoJkw-|6aM=?i3Y zbMd1q5vH!w>0v?0_9*`S>D&XnGYX3`JSnYDmMo{!Yt_%J-A_B?)9I0Rk;iF!v8PLC zmG_g@RPiLHT&d}F=Mh5ok>3bxG|$fWhXoRBQrCy6GtV{ zL9<8uvA~aap`Wenl(RbBIP}VR(o_EF;uq%oCUaHb9becdJ9v2~7iVQH<08WGlhXgv zU*Hb~G9}0Nr011h$d@CF3O-pNn3R`O$O=wwzIVFJd;dW`Tai-DpIfeZa^Ih?r9EZ^ z3S<${^ApoIDJT8^)Fa3bOP+&_{pY}e{JuhX`JF{JBt9q0!PlWJBJYYEs)Y}NZ$Of! z0Zi^@@*eO$%}2n8y7N6gX=*@j`VBpTzl0>OeP1-$FYOm+@jJ5WXamQ>=fZoy9zFT~ z8a@I18WP{j@*|ji&|3Hg@R?pFUk6sOzu6Y#_Px$NK=yB_5xHQGb66eXw^<3G<6N_D z_WjI)$ZqY+?`%jTSOaRPYrlL^9lX%+YjYRda$rR`&3DKaNBuG zt%k1!UxVu58^9q0_#FzoVCR9<6JGFKDE8rK0|&y#!wUwrT+lm+pQ9kR?+qqfAvk{w;}||2e0(D33ttP~=%Ol*q6f70(X#KCCHrQz zck|5|X#~4NdGLbEp=;pfE;?y4wuSeAH&3IU@a5nw7x8;e;lX!j(JouCIoN)VQY+!( zz^|aS@O=KL4$o!30r*C+cs_j|z6?|uw8d7^fWrfvKfDuM%pOOL@abS*R!f50=vUxd z*O2}($^xzZf&~A09d<-67`vRWFyRH4K#Sl7KZo+*8^L>Tpq}s%u-oskA$$VZ;V+E6 z?bILiZ({ty2f-^JMepO(8O+*Aec*%OTQ74CPf#E5vo{z=@Qq+%1Ga+qfTf?(&z~eO zc-vvpz}JFbe2Goq)lq(%d`zjm@QvW+f78!)(8s_FWwqcb^nm@M@$(7q1aF3n`2;+y z`9?6kJ3dQEBiLmCzTkF7s{}CpeDpkx9NYm(-db?LFnpT9JHd^R$nE{TW zw?Zr7BjDK=nCTP1YcyX5?$>++IEB3xM1Lx{Nl5gA*8U5EOP~lk1(!ox;O+Y@$UY1g zLc5U*&VlOTQ^6M@(OC!1O)~j(@GZ?ZfM<@xFDvQodoReI49lU8wfNHn?}ZZJBVg(% z&PT>9cr_&Bu?&1?bd++ErUC3f#*{n3z3iWmg1in)9gm-Pc)^X3jQ1K)_5(KJW?fcAx#?%Rh); zeJ66k8=$H1f~%o4c){niybiSd^$T{HNuMK)U_5j?yx_S|4ZPjgzWC>_n2pVm3syq= z;RQEC$KdTg^@m&>rMAw&w)nvp+yM=M7u*GT;O)Nk#pix_F71k3@G2-5-tIqN{OTW_ z$NmM#1;2wT;S=UZsU%4Hj|Y4=4Ik>r8^9S0r~^LYQ^60RA@Fev@zD*X!k2*q@k<|s zcY@DCYvAj^Vfdl16*+i0R0kgcUxH*Ts9!~?9*~q90bhbdZudnmKI-p>j+0l=acPu_ z!&kfA@4Wbs>2BpIb z#$;eyc)PE7@frUqREAtoe81lgZ}$N&zTxBj^c&=YCE3^?z6^XZ58s#Yb>P54`UJca z>=$I7!Ec5WJb0zaH-h01eGhpVIK7Cz2cHT)9yasV7V~@R60_~%z;7Y3r&=1Net$LR zMf!5kbq#$I-tN;~e7%1S-Hu$a)3x+T_yn-~a{8h0U@5c@z6`9_d;>V~dd3m*2q?bT zkHg!2vWvg=h4^A0hR<}tx1p)Mm_oHt2yDt9MOQ9Ol z2tEMqh8KJT+6OQA7IX-{0bH?~HinOY&hjXg@FMdMX!%eVd>KkXE;#XzQR-57!M|u; zFzQxQ9tV!SlRA^e?k`>ZruSKcUgUy*g0{m89)$M53r@Yul&6Br@1t)ZF9XXrP)~Tl z!YalcKEv&P$?ZPN?f%TguepCCV~zBJ%b_xO!Q)UlykMtI)D7P5vs`?chaSR)$OTtJ z_3(DT<>J5mM(8MVLGeKzyO-+-X!#}=oL!=SWzGXN;OJ+y9Gtuxp7g1pdJcMpF$m6k9vi}^gR}pJe)v>y{;Tv8k%RsB zab5Tubp|aT)Pl3#zz)a-ebA@yLGU;KMW2AT`+621&>y@-o$I0vT)&_Cz}J8W-p4-h zjo{t`QK}K%?mOA;S6Tcmk2**{d^Os@H25-j!Ca^uJ_vq%h<*s)2oCuKo4`9k@k^ZW z8e6%)IY$_)@OHnz;xD-IU(`p^fSr$HD|ox#U-1Y0*$J-2$OXTF4#C@f`-;!s zoGAR(?29&VAN&yb2GH{7D|j1zM^car-VWu$N5IN<_#lC|``s0Pyq+^0YAUICQ>2Uhtf54z&tC0gUv(AIoO+gE#afFYP_^(8u=8;Az{i2{BOJy!tp%qgIaJ)6v=VuY8Q3?~fujNJ0OriYj_^V7p84p9kAS@vpkLB}7cIp9&s)?D+;a)( z;p@O7_>3%rSHE(oC5zDy9|Ys^>8SRT7rgK?^1^#S|K*$ud=Nb9qbzup;V^s`?t!=a zFtqzU6hB-getiGF%`Z#9>!4Zif_Fog!V9j0^56wGX?YD8pMxEcCxE|(_Q2cy6<&`l zE!UwwMJ|{N#k_+*3~(tF4_^j82@Qd-1>X;F4&2+={SS(-!p|TNX#}H|(7y13aZnz- z;7t4uN?yDFLGe{MJs^+)=Eqyg)1qg?uc-M^mQ_n!FV z`@`*=Kj{T)p&jsoL+@bTgSY$Av-{Z-|9d^}Wd0$I;IE;M??)Rbe)amm+x_c_pS=}- zawrdS!SQRTGrR{}fzP}wc)Nc*yYD>lsW;^w{0EXo@Upe|WrP>}7TN;Oht8_opB-v9 zd;<8T=GA@FC1Tnt0qk92j*A3v&^r1#>7C$hP|OF+OW-~0ITzu<(kh4Q4qpat-)PR~ zwV?Q!+m77sZ%+Ks9eI?tLoPV~F~;?QXq66bc!_ZhUjwdqnK4+y`GPawMGx{+@SOM2 z`yurQEgxutxBZiPA{TrYIs`A6a>${M!wW8fL>>f>LLCoclaH7yK0z0JBY4ke4z&h8 z0`@*^jw8FjG4Vro5ZX-|!IMzE==t2Cu7+gXm4TKoGQnBUA<_tDL&xER;Kz`pxBDRz z|76>sj{ju5fPaG$;O&0M#6Q`jFPRgN3vPm@!rOh4iEpyBuOQ@tJE1&yyALw)O_ul# zc1A9^7Al9g`ydnFWL@w*7C|m}4YUQ`?t@HxlYQ|WHbE|U>A$cEyx_CYA^6?kamYB| z2W1S5g>oC(KgDH^61$~;&0vn>E)fV&% 
z9)TpyQShNQl!LqmJOD|#f?L~4zl&De!J+M_JG}V&S^`NL@yYd)=2a|v1VD0+G!@K( z2JzeQU4!#%-=ci;Es*ET~_6tFMS_PuP~Zr3lpUuG>iao9Ed(ENGX_wIJNvtNAa^$Wjo#m^|;KIzuot_#2IzHR@} z6R!EkYR~v$#ctOh_WbUzx1>e8kKE?Cx+JvQb@r?)6Ske-&i(PjX*>5_w%fHb>vy|n zoNVvTS~kLQ=JegJXG;I{;G?f}cKh46{bI=ZyIr^4^g;FJSzX*iUf5bQsMBuOeII4q zw`fE+_tkg2|6a#WpLLycA~>qknLXT>CwlH{c;Q)B@$Dyyum9vMch`sZT=QV$Sy$<# z3qC2>9q)c%pP>=l=eRKJL68Ti1UU|E%lx z-}O%(I`&-mIpwPd?fP(+>%q*|=k`6>*S+<+!w+n#-sO7u+PemKdoRH~wC};vttGo$ z*Nl5`?(wJlxj%|`FZpo%E|>eRU%fD7ZGU&kwqIw3CFhqJSu9OPbhL$7l#ySvu)OrHZcE?GI){m7=_>hYPiuEFO5HdaUB3L&$=8z4cVBn(xo6g0_>?Q-kx=@tKR(}mL!^BEj^FHXmHhLr+B^O<%za7z zv#ax;ebO~&dnAzK8SeH+w}1Zc>Pgohz8pM%+kxTk-;L@Tb=LSNTxaZjZ%|<61@1=+ zKKWoq;p47eFD;vz(|3gX>NjR>`D0|e>(ENyb8EJaaDTC5*St=zJm%W;g4h95Rf3wZ?<{9s`y*J==U;5g^S^dx5=IZspUk|t*a=LTIJrz3P+3LFcjh=ZIA9lKb zJ9Edf+tRnVzTH-E`!mB5-Sry}c8|ICQCE+Y>v!Coo9Is3wEWjacRu18b=PehH&rCM zKRYA;t(b=%cKz4B{kL|lPju&cpS*2z9{$Hh*ClVpUSEGllDk9r)vD^xH@I#ZH0R&W zok{M62hQturK)sI>)G><>pnlB|EueOOGmo*{_?tyuAa8mwegXs zZg?{^(tShy_L5Cg*0{cOJUaRhza8m*{&!;r_I2Ivy7TR?<0t%Sr2FH?9NR;q%Uz}K z-M{vq4~%qASoYjMR;R6U_1V&O-5;t)y4Rn)d2iA9GS}|pu{RHTY@|DC=hI`hTo`nH zxbV&MAA547+Zy-pOxyX_2~Ul5k6Y%-cY3FKQoQ#zKA0Aq*gCwbqbqdw-haBkdi9dQ z$A|WqNRk5w#w>E*{oSFB-A+6(0Z~FI^oeWjde?U!ubeaC*op}o5<(fSh5||PmkS5? znJ{NV!uu1)2FK36_UPyf`7`tk-PURU?D^jD(@%~s-gVz=6V}y_A;rY;sowF|7Vq*- zc=@7-^BW4P#?5|xoS|n~()rJA`%Q;&Z{I#{!!0|nxNz0rN9X08y(hW$oN?|$c_ z6f69PiRlmRJp0$YMKzB_J?_5oyzsN0gctf}jA-28K`Ng{pVZOniznthZl`rszu0M~ zqTj_%uv4!#BgWWCCG;O2G*C1DGvOxkLhNCuU)Ol|Jz#%CO-$i;14 z3W?LxV-&AxPB|vHOY`#*O8BW9pMy;toOpqAZ~_i83bXhT(ZsLkqHe zdHlL;;^1ZeqQP!gr}h)@FX7M6$Xk|xf`X!ngZVMu_@c~Qe2x?i&ktmV3X2M}!^8Pu z)p%b~{skq8gA@3Xb|9OtJaIJCTpFqp5+;O0_@9`;XQWzN{~ysdvcHj!oJIc3Vm@J8 zro%{#-{=#_ZkE5qpO=s)|0fRi72&X}aEU)OIH5Q& zDN}yxGjVXXFR#cyIAKJSo(Utqzmy3hTGVO6h^CVx`-Bme-3Xl08mAFBx%WpLTNziG zP&uU1S(#kvsZ6O%t<0?qR_>{+tE{hVs615JSb3~cZHV0vw_(Tz=Z54B+c(s1*t4N- zL;Z$^4Tm-~ZaB6#G~8 z4^=l-AFJkPQ~VN?BN7{li^N9~B10n1NOHszNr|LJ7Ddt{xshO`G*T8>6)BIbi9{lm zk($W%NNr?Kq%Kk)X^0$(G)9g^R7Gq>Tt$3ELdB2@XGL;_ry`{ywPI04dPQzUu%fi0 ztYTF~dBvKFNJV8uO~v+#+KN3Dbrtm$4Hbtf8Y?KjoHZt(a&ztGJ)2?J+c;UO^_SYe z9QsSb1HlKjKhW?%+`2{U%GT{!7rWlMK7D=NgN>Ejv2rXH4Pv=NRq`;|N4e%C&83m3)r{rrA^ zynL9P+dliU_PVdN_lbPB!{v6lT>1QL+vQruQU3YW-!J}CnCEhhKJLlUuD^|Z{`h5i z;pdN^THi3o*Esv;nX|9H(Rc0DH{En|)OXEwzS*&xd<{4Gf)`Hl-FWki>rO8!I;1pJ z_3;Y}PP*=`S+Vr*hMy0{ZsGcDvp!mzy7-&e=)MX%<#Z0zEg96x^Qd(rDT zUia4bV%NMmE9N$@QP17xy|EeGcbg4SIbPgwZN0Q{pb^!CT&@{+j&R+6;-4>1m+5zX z-F4`&(VFY~!(6U^I(PfO$GJzH`x(A-&aL6j3jP*F6FsP-cSGQ80P97 zk-pgXczX1@rYN`1YD$e$#-18Sy0p*bsylu5jH{zpyIl1@<1Tf&9^`lj$DDtDx_7!$ z%(d(=9#jC6Q5=sFjLZJ>yIh^8H#!B?7#XXpgy$#iQ_g?Y#lgVTfXme@0B_`ZA;<6R zTkiDPb7o)5BLzEP=#s8Y*k8HpZn&9(0!tS#Rps=3%AL>C|6l&olSMmY!(3Z$s2%2d zj(=QVHka!wve!?@bvS!{j~aaA~2Q zm|E%yCxWFVVJ>}BF1b{XZ>!MFoq94_TKj}dh3!4Hl^V@W)|0kvhbq_TiD;=WY<*|9AIa|Ka+}`gC)%ZZ6l8QwkY#;w~ym z94cKrL{D76EuH?w7fAIh=%O#TgSvSU1qz;$ciJ}UT4;H0?*%?rlokp&e3UvvN&%Hb zp80P*-bJk^QtOTtdC{ULg+6q1z4h~prOXwjfUjE`yK1lck_8P?OLstV7d;V>=1*$p zRw7R~yM~sQ`7e0lPUj&F>h_6$ZE+Ao%^uI=+p6^LV5x5pVAD;bP;XlkJz6*O>>5vp z-qsn_!lvsxBn`hjJ^8>=DU~$qq^4_Y@@e|d3Y4u!)u_tj>cEp= za-p0C6Rk&uIa+5IsTb=xx4qZEb#(Jt1!($v8<(lR$|2nz^EFbsdX8>R@#y6*=nHlU zznxtYHUooteEG>nd5@ly)4}8B9c`FB*7+9=b5(Zg3AdgY55|u7I8$kRpU8zsy8eqw z1SjIVJhM-emh9zU*ue49z#BC(FXNqg=^kIA0glyfL259@_Fk--2?~KIqH$;>1A6d8 z7n-Qi&0V_r5%+6(A9xmAmYTl>T>@a+`{U+et`2#wW+i#02Wkp2G9jdOz)v3Q=5YO5 zU8bO1y`6t%>Q3^<0{O($Gd~l|ECbNCw_d%GEU0S}L~*Ykf4gh-_*-NWZ11@|)4Ss> z(ii^eX2Gs<#*`QzWvKPet(PfjeIj$%))@nCPSfsnbSjXn8D@LmyIpl@wcxd`n=(kD zbU~vW`VbmiYyAQEfoi%rXnW7*z1Gg?;n}8yE)-D+h9qvQcPK^ffGQmnPt9Nai591C 
zo1@HMp70#ot|^HR+R?GPY05*}+cAHbtF80aLzl@zXPN+`nyw7SN&iYNaoE}Bi1aHV zFN1no>yw;R?ttuLBO`5{!Q_Pes5@w`w!Jnu+t#VIJs>3$x67EH)y==livTyz-#+IS z+xzBiK(@_}P7Ws(N<>Tj;Uu#vNKmUMrZYxbVw` zkCPgHwaifMsi_%)T_sejx+$GV%u(=Cz`t^ZfUZW3x8lkyLCbhO(r}4lT88N_G|Ya3 z3Wa5*p$ZENmR3aPTYoylB{MWvH}96wNw+JQZ;%F5s0*7DOZ4Q#M%|oDofk%pajsY? zMcbvQd4C57BT)$PniCmFA%~iTM>j8w%7`k2R2f0{cu@&}<0KwkQCcPV<4+?2rwpOn zt!6M&658fPRahZQW;Pvsqc1m<0!9Ro8f4hKs0v=u7%;aoRvDqq&Ywjr1t6wwB zHM$n=3!AH~vg@USiM3((fAsR#PAOwhe*M~2dS0HqfRI-Zkm|;S%@sh~kD@&*EO1e1&X%DSAy&FJzk{T1CA=w(K6usy$Ubr9RNuPCHFhR~0kY+Z6FZziWvsQEl8HSOdu zuY{#|mPRcmZufg%e#^Z+kyPWp3+ix+OlCr``RD zbV~}4qYA3k+Qe2$Hzh4h)57H&!e;2~NcHlFcK!zGQ~G)23}n$>E)Ys_a-6F<8=F3?deg6GQU!d?L9}xg$Ki>T;ChoW79`${XSiZ z54yFs8#s|MXKU1+F4=lHcda)k4s!`-icHiipR=M|g_94KDjsiMUSgQH`3&=|=|hMa z{7&mzg0;whg}7ifvg}%q?tVe0eWdnSo+FiY)2-zG?FgBgDsvDliP3Q`GtbSJDPh<6 z#IVTR&$7oM`i5##v@O*t9YD5@E1Yvie3u<9)E*1i)yrdW>hU!;6NIR>3oAS0wygyR zrS=?EF@CIAmYI;Tr3AfbEU*qn!B{5Mqelsd6#)I( z)4(f^_4)+(WLI}-q4j3h*1z$xj$)+y+iw>INYU(+sN8B&ge&?}KrU+)`HB+dm&+PO z#I`puTM_T~Al^Gp1dC&J+S6eXf~r;9TfA3`eIb!Rx>I9$fAqMpqcG~UQyVr{D*Zvt+(F! z$hIB3)+5NmofYF{^g=X4?AomUuhcH9iQMeD0WbZ#G=A%EI)5+Afv$tG%r>$D z1QZb=xK}F}S7m!g0vjl2blk$8k6Xi{=Y6BI@7sE(k%@Tm$9ND7}m22J(q z!PwfC+ir1b_jdLTTZ%zX^@2Mmp6@`fiHWGorEvv6QQFj?Q1{X*`sUd1O4}UhJHZ+W zNK-aXrQOEM9eymZF;mJ|ViLo3EQujT*NW%- z0!zQ(UzMYmTp?r_Ed{(_6Be}7AFWN;!;>ch+Flg+nHmsOL z*es~5*t)f}*+JK=lSLsi2URcsdZV&IhfC4E(T>eY*;1!$n0ZC7J<4&I?AKr7XzR7Y zQ|g#K|5Ync?IDERJ6Qn*G*Li`ehYK4y<2EI`$(Oo^!pn!rVAb$(faZXb1PJiE5y?A zd_YXqFKBljF2tv}5jZekuil{D6;ky0g21yvf8l){qOb0mTL4z{9jYhKIz+DqJhy5=s>+Sl(#bMuU&9JO$1Dt&FSp*mOzvm`%>P1f{Zg0C zH^t*}GxZzE{A0t3v-QNpLQcOGc8muZTw)?rudJOL(sq*VeU5p14)bO1$>=|o8>Ls~ znn-zOTWP^ecomCVs}Q>V9MccOX4>M^%oX=y5TMaS6Q zPo$(20iu2>0>QizLquIcb(~dWZAx=!sY|3Nx<>Td&Qn-jBZzIidag7ks*_!###gT= zj;WSDIo({WIwP)4zj$BeZkcsYpJ;mC)861+bz?_ZkWui@H|>(ZRApS64IW*os0zKz z6)50u=;au|AJLCIZQ-#Xn_!Y2HgCZzk^A3Pn0PAASTJ$9gNYQGu-5voZ_`B>@2z^W3B~liUgl`9 zZ;admW<6GsJU0gcsvS<;;lX0n6IYhNJdF4r&+H3y^H^MG*0q-kE?trrY%e`yb?}Tl zM0Cod2-ioQM!QC5PL5Za^*ky@TYc900MPc8*xnekimmpNHI{0!RV2QnSSbA$yaWnB zK;{N%gYkXEmD5*Pw?RHRG@jOnsXgtF-ich%_wh%6Lf{Hff0{@Q$5BF+Ox{p`uJVzh zBIh|mW*_&h_X{6}dIN9~lql{Tv5KT7L32DMXKOKY5yAC~%^lY2`mI6>c-KOCA9Q@zdS z5jEva2&EDbkIUiFGM2=x9{maMLjJYriOD5;VuDXk;CD$(hOga$LUV_oAG*3IA+~pi z^1y7ez217d{eV8OTW{!bt^HZG-mqL9)UD)LszwvxR~A5Io)&Op?#iE1`QxLm8mK~i zZ-zM)_K#_DD7Hwv)5mG-Ke(=^YL|9?qRkgghFbI|5tLwIvW0`~?WGqTqUIp7L%rd| zb(Wrp^~()99vXBsr$mSs+T;c96*@{$BIJ>isodGXYn!OL)NyJDk{9JwuCcu#alz2P zS{}DxfNr2@JI7v*G?}h%QM$gxw7f(vN-3~ZPS8q1%jCqzNoNDcZ?^vR(1Kal9{H_c zgC*yI1*_B*ZX#cx>w^)&N-I6uy0FPK-Co)lHU~oEpk(dNI3xI2^Tax}vgV~htt_;d zKamdpOluL-v;KPRxS-25KAC@dP%EBR!JmkqKcO0}xVi3QtvJ@02sPoBsL;)q!_{`w zW4WFNDDiHOv{VlR6Ygx8&j_Z_w5rswSIf-lu*%sFSK8Nia8Vkl=FXf$1Wun{3qR_s z+!$C^gKLGE*+4g~0LHkUyid-NKo^a1yR3(s;c#ZMS4z2)mK+k3t8}xi1k428bWT0a zsb6WKw)d17BF#}H5oBT05{_0EDgV&u`aD0<_Cd7CNZwJ{5kJorJ31BdtK|i`J~VZ$ zb8UOS$5f6R!Oa4?V|y#Psd*+2YNv#JA~++U^Gu4sy$^jt(98CEC?a$|aWFB$u)76B zk?PN9->Z58Q?Krij@6E9rL4;ph~H7@ay&9de6@#Xv2CgT-vVrmr~dFi>ffnh*f8V8 z%^t4)x7IpaB$c_mg9#3xbl0GV+#cVW7o9FH;Mu_jT=Wi(bP0}x8eVX&mOB_TwbW;n zzi1#?2HvL@qib*&`ib7P^}MjV%jkN4M8NDaDwkJoGzR+d#P%JJUw|<_7b!>mSY-@Y zde^RDJTSUGJkPM#h20<9-mlYL6$J?}e4%MO+%UKoP1|Aj;G|^!+;H-&RWf=RPnF?D zq*B}YcsM(wSa%zk^eYZy($Dw@UNY>uiwmuXpUDHVPb=ROk(Sq_F==C8zG3SOp}bqq zn}FMQg0DGWF{5!4D%2U+8B)0&u4Ir^#CxJsrEZ-#qnh`vGKC_v zyR7qc_Xa&V=3>3;O}AlorWFa*QpEHR8s(iC7ac8)la@rCU5wLJtc9dV2eJCj;D$KS zL?RjHu5(*sweJZ=7ID5zNx zcSC7AzMv+M9m&?CB%4`a+02%RrBGeOd_zL`bt+=8HjEyJz9+n(SFlSlVDnEF=Mvj% z7*#4%ut1SS!$<0TwGc!gaaO6hy2~0-*=3lcD>n|DX_%7>&490Z!|c)J8v=Ic`DSnd 
zS93lVx)#NlKaKLIiC(nuYY!xw^5BCvn1QJyDd3c=kRpg4&@9MvLJC^zo%E(V)FF8& zytjzoVtz~cE#uc2PDGY;hnBYRIAUIpA8YMZf}6<7t#uC0BP;v&qO0Aan=2_JvJ0W% zmo}BPUm^BqJ=J49}Cvt z_4RR8%CC&yY3LSW5Ch3-Tf46_5jv|oR8z>E=+SrF5VzfTjE>v+v1__RRl*1RF5{i5 zz9|i>1<*=xHS}cRA4rTU?ba_2z7? z`)VofE*)-DzS_tOmU;|CTVZp70rU$E+-oJyIqu{MCGje_h&bc9@b!naF4%d<5~8e? z&=cfd*D!3quu2@_K-%(WW*1``HFOj19_UF3-#rFl@9wBljt12`6u?H3RZ2^5y}9V&pRux74yylA%f zKBuygQrVeQ*3k!POk-%%_Fs(=4^d5#s>U#%w~nDE8M_GzN9u$}99qE)Em$ceohE&Z zxQw4&#(_dXJXf>|UkDLoeiTj|7C=fRsARKY4u)aSg~5hn1y*5mE!x9^6^@i!A-4hx zIvpuDJODE@M?%+u$`u1-6p%+@>J?@lWPOx&T#Mk3Q1@~w*SmTP%=|(~{DYABZ=>t2 zg0P#mh|B`Ziw%=Z8??)B1soKv+?Zk_U77hx42P4B(1n9c@)+fl3iW}H3^%XAz76qN z%D7NLFyt85ihwzOY@m94Nz9v=n4cK2_hWU93g5?`y_*f9LdUs7Ee`sStiPSmMi(Y3&;v(2<1KJ}!#M|!D-D^dusgB&Q#hXbV zJOCUN1Qv;%oaw0~e$3E;L+k`{kN zDW~({dN@3H;VDb=qYrC^PfzrPO~^nm3|Hg6o=)Ll!?RL-3b*&xR;SH0(2p`57XQn@ z)-O|Ab;JojN6k!x?gpJ6zrdVY;<1kWeV!{^9b2z0+$MNz&O=eVPIU|++bVuNM}X{? z)h9x~k^x9K-aYN^ef8Fd$(yz2{UxKfZu=XJmB-eH6S4Wac@-~4?$+I~EqIY=peUce z3ejYIe4%A~U&kAxWc}yTpD*Y}Io%kl;AX*-qw3-l?K>n3IW-^4shasvEd5H_>)3iJ zGZ3lXrnLqSW-83*jPmDoCAAUUrylfFj8#)%xL;!?-jggitK)ZSB5ZBpvrI(ToXae-9?F{|R+N`z>2+dE4&$jtvB)!=?z5hCP~@Dt07>WP>XMnX_PJkA-g8{;Nc zG<$eZkqdF%UM<25*F@}h+CqD`xSrmI2rHHWYFEG3h1|ceN~L?jCv9)RG_hHg{|zTD zR-@K>mCTO0Dq=pb9p^`weu~@Kyr_ogZ7lwNUi6aquDsYRYVijfMt%Vi6-Urg@E#9f z2!FA`r(^IP6)9h@N~g?(k990P$tZGv999H#79CZXIz`|om>O>A-CHXSyL%zQxkhp< zO%Iu_=6t5B=FgewS_Q=rKbV@D{5?|pfhJ$fyOjpmZz%H)AHL5}IL}#yGuNw9a~&iM zWyDum4*Vk3+q3f(_y-e!LiPuQScJ5;%S92ZF&K6(Y9Gi*>fXypnvao$p`XfQz(^Vc zHen>Kw{9aigT5mo^C0w{lxOdT(kBqJk~hy};G`ZDiAi>HWPQZ^2yMN|I`&)Qh6t^% zeB}v*45yJTutk)oihx!;Jk)AOl9LN5uw4svDQ%3~lRb!?ipow3;~f%4_7Zo+Qy2Oj zMM)wFJtD=hT0)Og=s1qgNC~WoP&e}QSCl$KWLKFKtI-qF??;7#+sa<@YBOi*{wH$7Zk+NlcHK)a2@+%CsSXFat<@W2MA0*ML8h|n`M zJ(JiCi=_vK)9soL| zQV*4W#KI8UJ4wAjJVuoBn@YuiCkz8bklu5AKqEJLtMa=pRo-U-Mz?>7QUMdSg?geM zwY`y-K8i@3=zWE+dXF{f*Vq!3ub9t@|E)&H@kV|^cSc_`OfoI<*XedwBk(G2HASxyoKM%3ngiA2jx=A!5#j9}KA>UuqzJXw9OAG>8H3tQ6uYqyyA~Zz_McMxXU@ zlSFOY9}&XTTJMscilbFHsoTw+tT!Np(dIhwUsiy$0 zh{{fLN{InYh(pM$9p^IFV*ZR9wID`-Lo#P(e<-yO-aDKMUfx3mPd-F5@f85^^;ldv zb;USoBu-I79*B-gQiY%byn`}2WimnkDs!4&5b@Ug@?62Hn>~gW z{1269OV?34$OPhCxuNvjjKL&5sg(=`zpO5jt}6coJ(-FkVHO4Gv=#knp38Y7R1q=X z2`6s!2h7ib|R$Dh%-g>3+a7h zPvGJcNcFqXsahE>FF5&d!UiqE(LDqaIvgHf7Fq1@__EMax#Ooyr`)Nb z%m%qb1i=&Ob>st%?k4oIs=gxyk>NE2gd-K!um37a;B|iKupZx@r?uTIQbyu3#pYa;>J8Yc;c6rzVy=umo7G`23uC zB~GN~bV)~|<$H1mu$}P6*Nm<`Ogw@{Ccgg}rKF|kOObP(l40wtKZr37w#VK{ z-Pk1rhlKf-F>4Q)u!s1Ho}3Q`TnYwckoer43{q2rl=Ks;6?KNU1OsAK&&&{53GsI{ ztQZmb`J})9r1a)rlyBN=@}IZv_>WBfyP3&Xw4EkxrM;=iM_p5s@6dIctg}?j9VQ=0 zW{Fy5c|g?9J(8aMG*N$#$rrYcIEB(Hj4z%1S-Z~FmwEp@@s00~`SV0$;JtA9F5`vm zVfQYhYx{8Y0o~jK?rW`&F=*qdG-jmydqX`7frialHwGP`aVHoOPUaUA1!KlKof+#i zS17+>7f!f=esirpYiE<;-m}*%JT|~AB!2C%5@un~!yrTQ(k9rvvOv~b%L&K;8m4cL zbQJ&@inYy1K|#%uVXl#$d?|-Xj`@kv_2#f#XuKIN-)Ow>e%QT{u-$N2OQdb1)>sldjuVB>DaCfYFAxG7?;OY;}U@sjd3)oH5|Gf0 z=%V7AoQvHfDjHfoddr+D>>hx>Z;UQw3+068l)jf6pj$F1YW9L{8g>n*q7Eb)uM`3Zl_ybQXO%aGxrRY^RkGc9&OxsI4uh-^>d^m$EZ0~jk zg1#l1*AS&nF6-n^y+ky0t(bV6gfwc&U1$ewcmPs_7Qv@)$DN6_^IK{r7v<`H&J zyrHV4$maOy5bi_iCqX zU_r0#rlO*N-4j^Q<3fN-Ayw{>?$DBq&bkymaT&iJejE60lCT7kJ_4G+0e%tcmobRL z+9xMI9K$MWvz6cP?J&->QX8~qoX3UwJ^ezmXPe=oSvo?1tz`kX!K{A zKv6)|wtn?{5f)<62u2u6SLnkRA5J_aD1)GO*k$?i#*=A2Z=eM3M;w1UgGwjfa`b#bnG+h{@Q6L=cm4N7`h3 zSSq*+i-*h~u^J=Q1KPqSH5|ZgX9pQ6$BJK$I}-1pXDF0qWpFamZ+Vy$6g+rNtlxMd zJ>evgM$e6ue{K-c8Q5;PUmnUEarMbOrsW&{1wGfluy*zb0rS| z)%?PD!J5;Ft1BnzzRv93x}>==NoY?F6X&be)NfU*?>%AlR2%OPCj}8mGwU@SQctAr~Qyy@5`;% zxTSq}-SxV52TRV1j*zje;a=q{k+!#^#~SA6fvkt6cN;@@JS;}_tFzy!+@bWayCEeI 
z&w-bq9~$=I)(Pk?nQcnbE(U9him4L$5gXFRvi~Hp?FyZY9H6JdG#RSdACFXO z#Hu0AuZVf$pg~4-JnPCfq+{X=Sn6?9hYeD>c{#Diw91g9C|pI7a83l-i65(U)f-9& zQ4|V!xw2DCC?z0SGC^V_nBQo=E{CST)1Mqyvz%<1<9X7-Z&7!s-49AZWG%|r!Xa2= zuBL1iS^^a*zm7PZYn2n(&*a30AG$&cKsJYOgG7=de4#ERD~T>4U9hrBk(oGE>a7Wi zH#T#hg67Nkb*xW*2pT3sOyrNr$yk40iWxsiyeq|U1D(8J=6O7#li$5=2kJLz351EA%_s(%43hy#E= zCL)VzFGPEcGPK0H=f~2I_0rW6P1sCxture6c(&?$!D30HOM z<B0hv1|wl;Sl|pd|&E3Mr+g$uDQR{E_K%$qDLGTW_i$ z8dFk|ykwBdO;!h}j*=1FNZa95Hkf&zl;Md>p1de*g4Re`10i3*B0ZNYKV6muqX*Z- z<6Eidko`6B3whE=6V!Va^{TWbY3|&S(uad>;1a3k_i)aA#&E7Y(Q?NiBQPT_6-H;e-^2&a?@2NVLC1jgLi}0cr+urYv%jPFcmKww|3rhS-D|`KVL%U;h2vSda zFStl*pIm4?u%0D<6Y}$9UBKi*+q;udc1V4w($sg2Q{PBQnc8zUkD%o`A;Vv{3&p-{ zHQXic>drJZWbA@oXu;ywk2c{KEa?tyYT?KTgya`%m#*O$S=t?XP{7$8S|m;-CRx#m z(1Y@gG4nzX5^$vDMc5FX;QXlfn8>Sy;7zBPd7sMSF^fKamnW-0;&@~cyW$*FZBNTP z#rv!6VRsc{s-vYSK|* z6fI$3GT8iNBRRaclVFtAcNxZp6njjIIPqzyEYMndg+9H`y3_;{tG7fCPbC75kQ8;n zd%bV0!$Td5%x}H+Q=vBctXdK8ZR{5kk+{M^N;tWM=9j4F>nqxAiVNUpB-!Q8x#~)HK`!*LUEo?+?o=Ir9h^`;E|u^6tKe;UG80 z-l6=o(R1;OO~9E2)Hs_<6mXrj0c5#eoAbKDK~%}k=qTkA7Us60?+}XE-hATz1R(9` zIY#*wJ^og|`1ls=R*TJuUamlc-6t2Jop&&z$oF4wZ5s}*nn^`Bc!Qd`6mQT5kP4N~ zhc}3>+(t<0+A5{qr8K*YPPbRQPZk?Nvz-W0y}PZBT14o>y9)^hgHme8kE)Hd>5wZ0 zFe^0FA#3*k;xKvYpPNRyy4yrPr1@c+=opZ;$2s_%Lz#1^kVCgJ8Dv+$?lyV6kcs1k zDyN9wIq1%z#yJSlbhn+$U;azsP0pd$IVi;-k>a0i4r4rK^BV%HIRpoVoh;Rpx0Rq9 zRgIU}U^kj1ckYww@FUyzq@sgY2~oGz%iTmvUFQ~ok#=!?gU+dkeDj~ULhq`+DnOG|`kYSP48D_4yW?=m4?MgA3v zS+Ol!Nbtb5Orj6UgF}27^H+bmI6vT#|6B(eo>v}tCkH>9J{vSG{BPKKBh0QaKr~ky z1HIynCu!>iOLt48Z=~MP*)N%j<@k6zJ*201q+}x+KuW{s1kgK9v_auzHNuCmK#LCP zW%SeUgdWG?VTvAx18oprR+{xy^+|fkAOhO)MaAz)4MLCi5wZ$`)`ppnJ-8)URb{Z4 z(2pANdRC1%{LHKxF`@z^F14KkzN;1h$)VjjbU24a&S9}SJi3HKz#I@e^&60BHePF( zt)+_;JV#+lklTiczE3K&Yy9?VbDdGWZuTo#<>MV%PAB&*z|Yhn9-j)mdLVj>V~>9U z{J}M1phQ9Pfwg2ZtK~nsn?+7zUs4H+%1B@SI&i#?!Qj>MsCQIa-R7}SOMJUWuig{; zS<0z(V`rX12CeNsDt)&%7a6{+Kb@e&|66=0%*6-Tkb@71r5t?N^%~=dwC3VN;Xe2P zA5Y=K9Gp=Ksk8VHcv@B^1k4?J@+z4AM36^(Xup<vd`oJ43Z8f^Kxg+kk zwbmDz(=7ctV@%HR1fCYlts_eZPrgETl5L_72;C?r9bWb$9WoH-b!?*=b|87>$jp4F z##@y;UVnPLRYS&mnKban1w+SMwa<8~GUMIwqns&<-6x~g+Nxn(!Q`AVxM**H|EAj4 zbK9>;w~zlLhd?&}oAytmeL^H>W+6_PB?n0!fobZQ9!yU6pzr<+_u`bzlsQ|&4p-P z8ltsBAbQtk#_{j@xg=LR1ezj(YBSJuCDZdMPS)Ksw1uaOay5k6oF!F`5GpozE1c_1 zF`F$8fBnBU{`%bUH>Ss5KXm+?Wc+BrS!}A`XZ-cq@&9nh_{XHipW-_E<2NTs13N+4 zEWZII{;$EHDF+5^EwGC;7@CHFq2*&Wg5n8GM$d zw;?>zz(~roA@FFF7FXP^7|q!ZG`#4dQZ5%DeGj23tBTVyc}pS%jlTA@EQ%hFLYE_f z#z@`o3CU$75Vo{hwW#=mS@eBM*jHi3yXj95u4rCVRKt`So zfy^az#`^tj3NmUzaFsre#mP~SMODNaKB5*VIAjKHJyRh|ssRjFs}o>tJzDOv1_}M+ zRh15Al)p~6Enk13FM5VylF{->ZoDijyZ`-9+WwOeYjQrz0}*F$>0NJO<-Q)S{v>)- zxcn^@n!>7FXT5sZFxRAH{#klCG|Rk6lC*A zm(psFa^|%y+pCwIa-Hg>JR8!>8tLUjScQ^0n!*4f5Gk13!#E(Y%$` zCnJ}FLs$bDhw5gAQLZFiF!j>(OYHLDLM zC-+-j5}d&Db{`kbX(jGkG}{*uZQ{98+J%UAPhD)=P?>H_D8MJ)l6E7B3s1!l z7js;~Zz)y6U_BHPqZ@J~xR*eAO@vT@QM=eqpgR;44~9@8*XiUUIa3-03qCI~3M}tCiyrZ)=PA?9#rWh;sOr(M|OXhXf9&+pl;qu}v z0Wer@5^CbiC!=Oq0=wk2W%TI&b@b?c7)}@m1P&Ko%TR4|g_9;s6OtxeW*xQ=;+NEK z$@G>8IuVruy?RBg03TA{U4}WrNM7itL;k9Yp%I)#$7NkFKRi}kFOMu%vlydaF(x?1 z1jm?AcKA-*SRXXsl`uKLUw1w3@Vh_?&^GH{ndZm060lbWSn0@ml-IJ}mAjmVRA}b} z8p0a9u1u1r74&rB8-a%P2k>tVGKCQquI=kzAp_>0SvtMud4TrKd63c&W9QaG9X?`I ze`w@bJqUz(WBwN+%-;)`*OY_sAo2CRXKDVNgKYkq+~)BS#_s)s z$@mZ6vGinYJ^ElL<0K~IDxCz)vW_#?@D@~ z2H3Kr1Yo~AFil@qFWE`NVb&dxo*iH5bF;T(srd<~2?eZaG|>^R-pitP}mxTKPHksr}!29kKafX+QU6|F=PP z{70#)SomDQ|13+1(6*%{!osQR;M5=VFed*ugy|IRJdyK%E<&n)Q4%ZCv{Qv(LOn2U z8gir>N0_c7Rl7rHIf6Emy8#O2%2o5280|VS7PSD&pB1C&1z6mYB-p*hXbFw@_~9Z9 
zm55~Yt1@XZ3XX;<%xUoWXRV!ANjzni#8W;`iBYGp6Hq}xca^CGm9P@+$oM+dZBAZ1&`^k^5gG^(uJViwLwR&H^qLqx60%3UmP>uHjzkt-4~ zWq~)MR>V<>h+ChEps+1+sNE;POiCg^O14&I2v3Q7s8aTYGK?$sfBDdpw-OIc_~p%hHuKx*X(mdHDX!lxWt+%t)$C;l|J7o zt!=)6)()bTyqs3>HRygBSj)kJ=V)o_rh{zir?(Cr&_RRm(N(lkc#y4B?9~c%d2o|- zp|tWF^z(xO+uEj_u{hMN$@|xwF80I-?J@S9o#-b)vy~5GZi|fyUR>hRpv6*JVGT=X31I{fR3!zek^9BS4~6&*kb9s=1|hVEG&q5eP?g9>IRYiJewF;24aCF(1jh-Ck#6%I^g#14}Mp>;5_saKsR8ACl8 z>u3YKa}z)Kj4VGu(07M=9e!@5_<0?->-jbEYa*O2q}Lrn?$NOn^W|8<4;CeflE?gR z`4Dc7j*t^FSIcQdZRpA6<`U~ogbMk2_?5u63x%84=5X@}hDE5(oCceJ);jIG`!I8- zti#MhWiJ1NuHeY#QpQuIy4oG8kRxcjgnE|H0UvnvSr@Z@#^p>?;>JY;^B1WV^wPJY z%{;QbzIQ(%^WIFz+(*b9`enaRG1clBkPmR^)?Hw~QN1U6m{3~1kr<8wQ^-;! zkHh!tB@0Eihr4R7u$JZ$>7MSMu1n&HZ?Y>p25n&&7mDPus(f7Bi(#|5(8-l?>o07N zHj77N;PtFMIdpu^@YG&$h`*_m*&rKx$(ek3MrC|tUNJ^zfz1`n80%VaQ<1mE_bgriG@j_p?degj5 z>9E#%q+on}B{b1(l&^%pj$pRkJW?A9>)OOFXm2Up-`R9OfCEVKKxJ!R+3z8eoM%_= z$l{Z%yfj}D-`;oCb0ZnCjTd_(Zq}1Y`9Q;(EIH+%ThQ#0Pu)#LqOkob+4_|mjOrKX zZ3rK_+^OQthbX@7xU9(5>+*#k8zH3w?An>n5 zy@ol8MX&iUvEN0ho*4C#8U)TbHTvYlJ$f|@tlP=~l0aC2Rl(Hub{D14@7>W~jDD3w zF#9K&V?MR=r$Y?(YD^MA$k~0icOjLi8UD4Z@d!6+;#|(6~BON1c(Sw)a2cB~Fs) zrS=ovBE-lU4e9jd$reyvPpb8Ews)f{dS8dS7pb$>Uc=zd0g2TI%pX8u-z07`-E-7O!hApJ$gGDd1ndJRM*rtdPV<~O$80j@Nu z?^b?OK3XZ8M1J{Fna4q8{L9QasgQ5Wt}fHsjdEEeD+_M^1!NGSE7R**j&TM4M^;0T9{S%c;4EmF~l>BFsu{i#t z)W?xLXi@3*-UCYXznuM3+R2l`11=vRCY7(9f$YZ#ZY=?TDxXr-_>^qPkL+?5)SynK zz11+zc77fF7NHKcIL`0(r^@{VdPtdzJHX=?%RSu-Gp${mT=x#F2c>p(M+ z1VTS9yF89MkaR6m_Bg|ls%u1!)j%UvRPi0D;ugn3_y*_8r1)J6Bx;*dz8+4*{zlcE zqS4gw`ZB!!B@}WD?VxzKN#RRgIYY%)vhqW1H-zQftB4qk-$cI`i%BA2TKzcf9*MS; z@=t~~WsGE^G-@^Lrc4Ox*9{_SX~l^{iEr)Ksp_2|EhB?06j|psa#dRpGdlh1W_JDWD;L%TDy`16ch)HVDzoTL^|1Ivh& z3&;}b$}9wv%$5h&jFp6hb0iLx(zNcJ44;X`~#?9p;%*f2)`>j#Fi?v{t~9lh!(F-+>No zarTcYUy=PxQpxS7V*D27*!SRDnQ%Gt2vVl$6b-i(=F7B-6$HAry7-~3$n4+R-)Y1 zDDbL!rSdNpTEkB^4}Ee+#QeO2{)!V%`g=V6l|6I_gLf2EmqP?_{!VI>QD@AnU< zZ1pa7p)db8-`Zh;o#pzNGVU$%MWEf`a+}4$kf9wt-q9zipHx`H%@J1DRDs9ph}1_2 z_8mPrheVX|GC^ns9}eNG7dquVuN2)ikg<8@BR2^?vwh4xM=>Jb&}uf^h3F5D(FXtUe02N zJuH0K5v`EuASoe0CsRL>Wf^pe8T=G)h1DRcp_4x%IXLVBk&90xk;Bo%m#es8q$C#? 
zE|K?t&je-@#!7~8TuGQK$x!hbpqpa0qPE5!b|PfSd7e-*_Kj4&DQh#B5EZcB`4=jG zPUt3+z$pQHPrr1xyC1{{FGzb~Pd2)pUhpA?RB|zBZQXJ)!b&Yq|Akyj#J$u=UP@d( z2j22|!bJYFtd!T27egoO+21bN)+^;1aGB)L`}lMST@Z{mi)KitMx2!LMorL7XNeP5 z*O1l3IH#Pi{iJrR+s8R2^TwKwXRY1;WH@vU57rhZe`m|kIr?}hEv^!J6W08e_@%4p z$Sd4lCf2Y+$|(v4x19N^NY7VU?tJ~X)V1>xnXfeMmhD5kWf|I?e~ru+G_A+cv-4uV zq1|hx-9v}8OI&J2;nN_g>w2MQ5%ywoTqN>e@*QN7-lH2mB5k3nm#lXuF{|8=^M7-W z)*`3tIdz1wF!&rUmj;gRetjr#{`Ck0T5?$yIKN#+Hyjw|ZIe1Yx`9J?+aR42 zI~VN@Ji>CJudK=7n2%EB))F(N7G81F4+`{d&3V zG>BVrXgY?@PJ?^xRrs@EYUbBjc`wRnWoHwml>TI+z~sSF(Jc?`lN5b)$o?*cjytkb zYUyrkp~ESSR5tTd`@6_yev5Ro;G$oq)zTKm!^Wjy^9}iOe7$)T4v~QQyl$S)++R$} z_;RuVoc-6`Lq69-0>-9{i?SEB&Ehx64{E_NNKS}Al%>VX>4&rGMHY?87YzL9hHBBH z<$i%RMSo1@6Z1vn)C;dAM=Zn3m}|r`xdGPf8>GrR2}zInzYc`O!eq5>K!)Z zS9p&pdNlY{1?P1y?N*%9hl4KIbvf3dj`To1@ksE{vt zbPX1WFa2vqQujbl{RF`kXRa>x0~< z+}NNL`e9C?oTjL=w{I?QNWtqnC^~O+PF>skeiSla3ZC$C?BXZbatTk=$Gh`fPs*$+ zm;N@cuo?e#2)LCjI=ukO7FDvLn)EJGt6=6SEUEU*;Qr=1aKw#PAV@0rG~H zq>f9~Q5Na3_EOm{e$I4$Vky+}0}q2z2r^v+A7N`n96d!nf1ve0j2ALul{)hc)))O% zZoW%t*!c|GJN7dnI=1Gh{+yEoZOxH9m$qf0BR*EipQ)qGlGG+3;*(XbdBkFBMH)_a z3eBKURfGD(!9Lou+KnAr^O=|A<3p&o{y7QZR+w z!Nd~zTXOG>;~p{)u`}v&z3Ytv-5u!^zQ8t<#chkF`r@aSs>7q59EdasE5(t*YFV{T z4-?K8aIZ|X^{U5eseh#UL%w2}yl=58y3Pt>Y~i)`h|9O8?^tNhd7{2Gs9Uj1nM|BH zIInn4a!;`O`h0EScR)CCZ=uOB8{%@ShgHSz@VMs27$^A848QPub|W@dI&u)_yg7l* z|C{TDh9rMz)E2%=w38v)D|jM%4fh3}5KIm@cxO&)k=WEL6mi=1%>Ey-O2IQGLG>rw zx^xB~5_{T7%)FdWGVRP4SXHqiS!?ZLup*zq5EfXnbBj^VraHax>g+eH?xh*IPJhFy zm22Dk-%P#@gM#Bs3SXk;`!uPmGB%J^I*roQ<2>e*8Itj}P&~{EwrW&H_3G%@jJ_CelFwiL z3u43di{89C_JQqvSyjotIXh_ofbMoT``fHH65>>KR```IffWCKKCdYF)9QQ!*cCR8#yx4^$tB(aj4OrGQe(`&2h5&kpAFLY1#j9pYYzyigW)e~;e< zVCea}+|W6xk&_xus^kO-a=u?q{02Nad2yW}D4$MUSO)|o(~WJf`816<^*@)oNkv`b z+~ZSK#=uUwS0?e)YjD49;QLT}?|`14-DoxSnWzE@Go>_8g0ix2&d|?9jpqB?pi+nJ z{VP)lmGoG%*K#6T5b4sEU+}3y8jHWtuWH+wUc=nAPi^?>R#FZ6>tq>fCAI)Y>B2#Tp= zCUE8p$`%G`7HK?*_KJ<<{{wY*vm@X-o+bt z^ShtlulW5gXm$@toD4SX6sJ&NLAUxmm)XsNRQa+_q|o^|7y6*7msJ`J}E0UWNH0tstKf=Hnxg>aN&GzP5GO*8c&(Lz*8F7aB}OANlC6S`cfn z3!7--iD`HB%?X-lKW0rysCsSe5JK_y@VmS3iXpWP4f%!2*U}+19@$!ykM?X#OuIYx zLqEA+$qJe~MYOA3NS&hhgsWeVmLiDnlC79MeD8&wm`OJZ{SGhu1Rqu_S_NQN&d9rD zRU-PS&-xT&HH@jnl=IM8!`cW&g~g>J^TxJ-^Z2wAEIv|VsDP<_rY3SX$_OYUQ#I{Z ziRPR2zsnYh@=qj}E^OT&h&OrA6gHA@*`YpV!G5N;_Z<8tNHU`wvXu|E;GNH5B!)TN zW!~n~wMTYk9hCaOdwTgU;>5apm(J#{5%cv31CY&K|IHkU0Iv!;3AU+E#RP^-)du#0 zNj0fL^ILF#@5=ZAYGC4RU|=PC6JJj`2r2y8BgfYk3EIV zoy2k<37X^6AA3@tep36g29o(6h+EIv7Y2%)`Z&g5<*zb4W1w#+0c)*~1E&a4H+jhC9@Z*dmEfJ)R<{S<@r;p_ zz@K^DRT3I#+sIeIlT&&zPwuj&id-rz^hhODZl8b@P=VA72S7QC|rv;j=(x>a0v{ zBa%BPKPt>jbNn_2W_9%r)7|5H^}K+dfzg@DQL`_Mg5#>9$7TVOd7s)+b=0bz>sPQe z;l^jQh4Y2Y3!l#A43YV^x9|g&phj+HB~s*OhZBWd)w%~*LMM@V+QKu~pPtoHYKA`y zn{2pk!$s4Z*=wy&2_H$$%eydn*5mv>#a?2+LLA^oewh!A2Ausb1%8q?6K=5f{Ya3- zHtqcpJoBmD?du+7E-EL;T<~PjT4G2w>A84vTdLB{Qu~2&apWUGt8}J9S5v{qcuRCO z6`Y~$+FIMiAi6joeUkC_Oj@c*Fl2zS@@d;WjCHkOGyAt|t;!~4wHO;nKgQ;=EShXl zbS(Qnr^kC-x@+G_cWvKK;O^!#0OEko+0DQ4=Ztqe`7)BK#CVtKx#L~imm2R@sr`$_ z%QtTiG~OM#aBBasw*+f@93j5{3^b?q36-@uZ)1sY|N;y?j z{3`jht-y3I`Jb*@IOmtRvey7i<;t$0X2R&A5APjD&+I6M|I*og$oU)%YO#;-x@<5O z-&RNH!SRCl{}D{oQRikh725=2%eE@!Ap{gFN~k<+!hXck!o0XxeKj(nmvDqK*o^v| z_hmjT+j|)k!;&PQ;}U;B^miZbg5Nwqv?h6>M<4iHWoGMAJ&cTxl5K%$^-aS%)XhmH ziNIL%{87#s@1<&gb!)2j-88dL?E{}vGhNx0s(A|M{Z>8RCo~`UpL!X)`Dy8+qBk_* z2K7mG*`h)r4_Z(|w<1C=KgY*lG<2#sAo3oQW?ZbH0JDyU!|awdwdfsZDxvx`zT0pGr-W#2&c}@!bvjkF@J0Wl4|&b!I#>U$o$UX9$uD z+ah+8&szO$nZf2>$8|y&rU!eJNSHG2i)!Qr`c!3&t`_Wos^n3P_E=u~0^1gNN^)#b zS*l!(_3k-RZW^%}$(%NeMyH@GeoXmT_-1k$M&^lGWs_YfHemZrH$nCY>INPa| 
z`kt1Dzjhu<0I1hmsU8;nmIqi2z2xiH=Hoq8%AD#3qYYJMNBqxoRb^@*n43j6@S==S zw@yvIYLUu<=Jk6T;#faKhm5x*GlH$LGQ@PefdzGwGC+j}cCh4o*QzjH9(KO%Ti)6Sno zO^73oiy;CzE~#~nOVt;Z%z{r_)4>(-URUxh)a_z|Lp$I56xuoQI<*M4f|q#o9=9KA zX7g42_v~*j274g>Szhcr%K8@EheShAXsCprPn2##EDpLQr-3;|uiXpaJf{l{iM5|B zno4ykLQ<8aiDxkt3?ZNteVN@*ro(K7GMhJbhsXvLa%$c{Uxvko3n9sthj2~F*Tbp* zq=y1Q_H$3F{pqYQlhF|*VL18dQl5M)?&qcai7e*Nv=07EZ;_kVZQ@U?hd(zi6Gu5n z7xI&AE#Dpg6esiaCjQ*lE|qJ=$weHWXylLe@Z*Amo2b>V70a`F4}Thc+-O7%U(G@u zwz2o)`fh8XJg0uPMe9(fdJT6Mb9#M18`GI_Wc%xcpz8zi=h>$#T&mmN@$V>)s#=D* z{jEIL>QFzjMx+aottS;LqF@bikGHc0SF3`(6y%Yfh**5YxfXxypL5Fm#wjBeg8v17 z5S4?r{W9J4!iMV{>Q8!`ZUez-q>=AAQvpyVMRzb?3-uu2hO=)fHId9%>xz|jh%dE9 znx*TtIjxXS;0**Ds1ln zm374L5YI0Zj4N7JP!$iylSSVf`8BazMB?v(_#Th8P(CB3Cx0OhT=ku*${nVg4I4_5 zIv?qNh}zZVNOgHHm$vsY8p!c?&*!G&?+!T&r4ZwPrAUsa`wHn~V$NWqGQ)#^<}CNy z_+v=ny3^<}Xc}QR@x9UX`A3y!AFL+(u&A68+QK%KESp&he*{JL7kk~yKZuiXQYU|= zEalJTi}`acYdgwjweaV!jgWSZ19AEa+j}Vh?Z^?nocf1EZ*M5=MdbQ`F%HCbaUkl6 z2TPl{4x2m8%L)~aTFWYN+{kH#HT?{Egvb^98*jzx!obsh;2ttRbogs-{3+*?z~=6* z?IS7&mpxAx2Hxv>e;6*6^1(pj+){S8oQNw0@yXeRKCKu$l#jwptiY`jYLPF-=Z}=7 zaLLJoh=>ls;WAMm;0nA%LWS`DNC%U!_*YI(QsB^M#3Z$N%N@66oJ9accsb_FZO{m^^{?6SzU&3>$ zQaE0eBZVS$RQr3Rg&jN2DgKxgf8Z>cT(}%LKu;l7YzuSZ?pf4-Ujc-$IB6Z6=*S$3)0{Uf^mIwPg){gO^GjIN78Y)%>E@vHLqkt*k- zQcp?(30bznUCJ^4a=)E8)K{*v`=}JjStfM|l!dCj-n!*~g-?(-q&AqSJ@|}<9s>H* zu+BPz8g$b^U#QN*Y*C6lz7&-2RVd%G82C6?U+ZB*QrSP)Vj&gs;uJsf;uKxJ zL|7wVBCK7+pSpJ4EciilO47Efb)bB2UN=9IRz1kCkap?YpM6!N39jdqA-L8fZ(7xNN zhQ`TeIoZI;MmgEU$+L3O%gIWN9HgFPC!6CDQ^-k9s^o;txk;g~kp?gP(*aJ24qbisDUe#-rPhO?+$y39@Ej852G(gj?Tr|BF zXYF&=^N&b%KU6a>`rve~g>ef$p%3C2pToCrhXi%t&HFHI_;l*CsiGfVA{Z%&d3twC zM-E2HZq{Iu6-_IcAV%3eEhS-x^1t;tF#>v~s{!sw` zKZldIN)v*Uf<55m1`Y~~Iyu_jQvsylq+AJZ^{cz1xGTsdSIpimXhj*E6guWR@xt$T zPp;%Wsn+`7Ou4vMT?kH!?azKFtND ziSkWmB;3S8p@Rx@Vm=4t-&ByC59B2a{vYbjJwB@HTKp3xfl-k&R$`+fOa(O>wNZQ| zj@6lD2F`&Q4I+vUBvrIn)W#$OK|v#v2y>64*lP9m+FN?tTeY>-_O=$a+5{vC50OVz z@EO}$PmEgBmWQ^^@4NOnXC4Uw?EU?7`7oI?=j^lh+H0-7_S$Q$z4lw^De(D^l@;g( zYs}3Kp9>}m_+0RH0DC8HeO>tcoyTlG7lsl(Pw-p#T(}fIKTsYoJnKU=kV z)-)ICusrsZeqm~u>NwlevC{9SpNq#GCw?D}Nf~PYW}EoNm@iq;+c6auxy)@>zlTsqTzJ`n`NN zKo(DG)(VVc;-53Rp9hYN@q78pzuRGm)ssY-n^iA5j7I6MXdV2pd$_jr6@Hm*OMct~ zG~QL2UJ8#Ddk|-zSf`xAP4XZ~{e8dS4-XG>ANrk#etAf`TX{IrefY`A1ieKclxG}3 za5`W8H}kyMeZDpKynh-GyzYaic_2-~S)Ts$cv9p(Sz*5-58Nup@_dh2u>{!Gg6DH6 z7BfE)gyiKT0tEp+&jhegG&JEcJg=!eEo@H6!?z zp-m)Cda}0{=lSmbLnT1Y z$7AbiDaJ}VmY&R7jg5Xi*80_QYgW0?c5wg;vg}11t0%3ZW)1ojC61+wljx$_b9Sa& zuotUz3>S3TZ(BiXXKFm zTO!5SAfDI1{#B}fKGi>D#Ttn)sZsqqTKY%M`gD{XZ@Y`;AKiZrJ-Y?yeXaP=(_N8>+FKV(Mj}a>gX#ug5V3(NUr;%mI+JH1983M6&rsJ*FkIqx= zFUwW2G9*<{m3-R`evt|<8eE~r?m>rqfAI>7_EI4|&s8L_O3H!*?eY^}v%BmvsJ7Ha zu|)FjAH`DYi(n+=w3mls2_sfu^PTI+fp*}p=MJKo#}jZW4Ezff{^#B*T#;9i4eN>n zMseJ!&`3{GGC*7U^dOc_&05I_?jRe5@bOSQ-5uQ^kG}}MHw6pPlWWLFO;jjfU!=m@ zhpf<^1~+`sL3iqq6$Tv8fKo0Glq23a$L?~DrKiZ4%k^A_-ab$)kG}{;ZgkpnSvs#F z=nU{4zP?C>V}`B}@=Jw$eUS>+S5qF5w-*Q@ztkj^a@l;uKtY)#`%?d*SqXUuHL>Tb zFuqJUmroD#Pl)-alKDqh^AA_K`A6JIJ!<~B)tP_vl`_fj?{*@Db#3ytJk4WoGm-Cy z$Qnlh*(@m7y8`m|3Ym7IPvp%+Q=XAqvwGA_)PcKRyxncfgug@ysa1cEl?ht2(=w+^ zFI47$_!HKs@syg!%4K#xK$VVj@9#X8``62slve*};I+)FELUAJD7NAk+)`Egut z>lN$t&3(F;sF1$lq7=_$KCc&I_KS{pzx(<_*_uUJR#t2ClfwDFm)=<5y90bb0ls?_ zzPZZbdlUG!6uuY!i}-%<2RVGt^$M=`s_@B~&ft4f9=>12o~|HFE$(&Hz4H`~$0>Es zptoPOd%MZ{#aD!=B_{+D_1hFV0&H(86^2LfKa7tXfy;JT(` zST+>_NG~SSO|lF`<&AE-uDjTGvZsG!_?IU`n@v`R2w-!@3cLgNpChJ(`!>~mkHv~S znn1mH&3JdQB`?p~r1qo80jBrTHt{bU`pLGMnmUR%Sdagl_Spn&|5|X3)J76Bu?v<0 zf?C!DVk}p%^DX39(B%qH;A%WgnPK9a@|LDY%F>VZVZI4QTe_!+^?v+6G4YbN0$1xZ 
zaI8mL@)^nymG!8#u2b0)`>pJMq%x1HjQkpzVyCk6lv#cThV2|=mHVyiMyc$7zAunH zZ{WL9eq~i-ZbhaoxmukVKEsz^_m2J6T_$y3s_I^2B>(B%TivnrHMkT${zM_7JimVb ze(V1|NVIl6#Ohw0;hx0BR~IGH8NK{p)r6>m~bZ zqy5!we|5+gqd>)dV>6jS+_$~%-@kdUjdwR#AO45HneG57V)eM4pJMgc1QD=$RAJpi zb-$GzGJkUI%ClBqxBMDX(X{gUKA&g#yv%0@AOF)-Q<%S@ir!)`SV4t|q}IKd3o5?h znhTZ-6iCJvQ3RFOQV_>gNdD;Zr!LC2V+bYz*jNmZph(fmZ@e&G@;!ulsDSS}rd&3t zbjb@1IgZr3SIK-s8mBTq35ay;ilTk0;Cq<~0!ltjN80=r+XutkXN@{W)p&-~*nc`t z((aSLjj^AI#?xQMll9x3cV2a#&}I6p+Kiijao1}!zl{%qYcX#n7#)iPN`7;dlixi+ z*sJ9CjYF>2-1RvNwRKkCzY8Jl^?A#{_4${ik2r&RtW^~2CwvLqusbxjNI(A9yq=oX zD~@Srtu}CpzW!BS_0!R(n`||CrK=+ej|%RsG{C z*S|;TUx(^n^N{`9n-(xEubU4JT&Cx``5N{dx0{dTb@P10Al>Ysn=21srM?@Is-C{f z1wDPl`ur$pDl7F?MO_%IO0CqNa|^s71>nIdS*gd(&&lAI*v$vx5R{WnJOVyC6h64h z;llzS9)*wF{>Ay)dH@g|2529+QqN(glVUDrtUSydC!Qo_T3Ni?R*~`+>((m{9$w@E zJXqGwBk3mc&9>$lyPmgK=%+JIoaK6A4qaLY=)LoCt5s)!l&-gM)v4SGHOuVu)tzDTNG4%cn%$w*eT^{Uag?P#x&pm zTCLySTdmM7OFxED6#4e;Xx3QTAZ$1tCrcr|YlsTwRSM^iIfTj)=Z$i_|+IxNq z=nNehLY#TMLU>2f^b_toG(~p|aN8m2H9XiiNiVfu<*(fhurw3{79d;FDlC)inu`5e zu)_|*w{6IHPz)OX8orO+YpnqX(7?Y~>xPQG!Meh!bwz%yTHEaXQn79z6>m`WD=NA} z5gTwm-Gmbr-3ZReqnj)_9lBl3gMHJD3<~lpfBE{ck|GB({0hm|DOS|HVUovNz#p~@6+IeC^xC^4siQroKX#iZn_@*&AD(~;CF((Mb%-W&E&E9@k-^5qa} z(S8YaBPGl!qrHgx&3#~(GeFrA)~P=1iHLbOg}vJhlBY1m#DnX#B{#?m>s10qeb+~1 z;XD}%h!$OC$0w<|5O-RfS!?lbG!P37#G^D2u5ud4GQ|3d|CI)E-8~3XM*}%{pv54k zft*jVexhn*)5)?t4dh3N4K$Dy=w=T(W`kRl*yFh8RdpjVv0ngLV8ou;k z_x3?@yNgP|YAjuxLM4f$N2WU8E?zmVy?@%Sw>#e}VYb&XA1nj8D+IZ-mSxQc7rlu2 z;P2QQl=%Qf0B1)73*4@f+RNc)u_9EeoA*AG$#j*R%FXPt;{i#gQK{?*lAQJTZ`ziG z75Qsk=A~>f$KEXoNxEct?{+N-CEAj|*hU0Tem$6!_ES%d)bmVQ>JcNtu<&yr1(QUX zz0&WI)uSN08Xq_=4@>(ah^)zZG=cWO;P!TvhN z{yKs$*7TbF%V&QT*1}A&LVb=35Em*&&51sM?{Qlo(lkeeonUerOg4kIRwXj^X zwT)I8`FxN{QHDF=(#~06DA`LW#4VQn(GVXN{dNwL?&!I6f=5Gz5tzrEBZdTZJoP?~ zPEd&v2A-V|pp|ky_%BH|0^&n?*|rHd=601iqf)X*J^MMc-oDOGn3?f%q7u29k^1zV z1PV^nu?izT^#1DzBkW;|g2ec?;E8_%^`4avu1S9sH9xvZPD*%$KpD14NYVufjuS%% zn=n)^64ED%0c5W1s0fhHg~&Lbihs99tf<%?cLob50@jaZMW-skBys?iY`B(6#!(5mdBM!$^1Y?vFE2PJAz+=amW@t%)tFdc z_Yd)GY3x^r(AZv%Nf_A3t7Y`W1&T%#-yA?ApU$C?_yIPOC~PDonhq{@F))RHA5f%U zTqPt!wA=ktbWqzngbr2K_+bauokQ|kGR)!XxPnPoLOPNS26&O0!4piQL=y+B53&vf;hZ$gznG>0C;@ zJM4xJl7^3=;pO4$pU>O4-D_tqc;3!jut9C#Pxg+>baeI}3^+Y5g7(F-S6u^betFP= zVQl*mIR>!vNX`?n#XPH9Z~Rq!SQ5%@r{7*qmYQ(uxnn(vog$=k5W=u1c@uGp3h-N) zN0xlV{*S@H5JNhXtYdBS7-nR(Re_DfnbJMP`-|YF)SK$^(8sT4%Nt=Xc$GMxf#~9J znKHqtbY=(1@eJxk9mJ75Iv{x}Z*=xghOgfs__|~8dGNjD#Eb9sJaYEx=bcA>I*nT; zGBOL!%QBb+RKihp9=R>#4jnFK|NS3?y9>DI7!^C^tOv0Vk}8>XOC#y@ z_gTl#%?Q%|1@ zEbjtKM$zXCJ$>N`Zp2RTMHRI32Kt_@h&PWaaNCW}l%}zOZV)dOD0gGxO$6|_zij$nspH>iim&wZPs-d$`7;Y}%|++E_}A|6k=8NO<@mEnBeEM@JB zf%ra8;aj;WcJCjd7XY9Lut5l2gS}EUpte@-m46f(vB%`HAB|IJ{EkB787eEb%jTf* zf*cw*Dl~p~$i2L5;TpEp$fzj-3d5A3(K`2mXuW2#zDEo3Y%M4qs`w>nydR$e(dQO2n0MU=?0 zUk0n`y>7XlLTe&*P!cOWhd3nuY8O*{UO8Bt45C*azW%X)eBA^zd*$zU9?$lNt-}Y% z-{g7amn3&9pg>ZwVLFd`Xt`x`O; z3}+_uIRO&CcmQiivE&x)a;B9lQP7glS6;O`X08)C8c@lAH(I|?s$KReJy7~XZW7SV zo1x}*z2dZ|zVQ(J-}$-MdSMZaNmmx@=@x>P-|c1UL;hm9JX|s}laSpy83=S{Koo-0 zawOmP1a+#A{O3}4x|n&9jA!iIoh$L31v5i!^9L0;u>e{-2G>rFw1 z&aop4*i^BLzy^ix)AZD3Oy-EOjn?_!6?BjF^vM*cKPzh%{t<`uhPLz_xn-``lWR3S z_`-s5N@HE4Z+=g&SQQKISTJfzy7+Y2YZL6zwHgcE-3=E%Ar~&*V_kl$Lb=v7jOhHYNaQ{RGvHd^=JZ^NkIKZOa^NMBO4 zPli81$T3#xOr33S0c2(+ja10qgew}#?C@2yL2e3e-e9d(;>e~)XRl=9GVa)|bSeNK zhoQtf!NZQuyWmThh737gtw%`I_nTW}TkZK$Q{g||B;SUvW}pVWJM@TSF+t^PC{r~1 z_^CQ@VpUNAsd-d(EsOz@U5os(q&i|(b%5WM{AP!jzgD(uMwQHqxE=WQ+5mRKiy3 zdQaj++a4|Em6_5m@cZ&W`tyG~1?~ORb7d$V^0Y#gu}}a(->ef-^0L&M_v)j(s&4ku z7-KtA`mlg5X*wD(dx7?*0E-WM9T3VtTrT;3NIC0;NO9BBO>*2L@()v)?a5NYLZpHz 
zXm@co!65r2*a2}GcH3CG9$XvD4o;${&5RNy_*OAE=93Zf`Tj!?4F?;+=MAj}u0Y5N z`(QT~`+T`1Zwz+KAL(lErkuKShn@9tqI)wvg)JECls-DVh2BAKfBtxynCTyrDg7Ijz-v z#X1kWuoRP9q#9yR@hCm`s@C4YZ{TPqO0A1gk>?N6{6*Sf#VAzm2ix_|iBC!iznf;z#2AfT-R z4uQo<-jnT`_eF_>RRcJ3B-jk5ZUq6W>Cvl+zP@21Q%k#GO(y;?!;c%Rx6ecY`oGJ6 zcP579zq6&AHviR2sV~ldk`J2&>D|~I{x9&~lgyw-ff+}fVTLgc#{L>(SD#U_E^cCa`q)^>sF#1k3p0UHY)~>QR2i*} zxe-pdkkPn?N<=Hc_ti+sf2M=MZ=f*(2@%qwH*T6=!eHw^Si+@)tj259Vj87*;9=X~`7mvgE0A!>fk)$>CsJtlPBMdKwJTcdVXN9kv zEm<=b{!qB%6Z|tupT2?Q%n&Oee46GFGN0P?>au4?J=g_(Iiqm5X!0SW^W9>jyHkXxoMgxR z$V9rCJa7&kpskt_*_61}`dfoT+ik)S18BQNv`pwPW%6h%N0UQaMbn`uH`dHj&Yz>4 zkGo>ovDXNf4xF&|x`($$$dgha;L8!32r1Cx#_64Z;hdOHgghZVwE#K^;RcfEO3_!h z|3K(7{Sb#ftcEUe*8@sLF<(_G%O~JA4jNHcVigQm!6E-N)@e#7p#mB%LWAf04r6942T=>*CtBtBQ% zBXxS}JiYTBNKA!_;y2M1wK|)`C`3Q8Mh8dmz7;dm` zRH-$4DK66iS+xsHUZ!aT{xjSWBLjyW0M!RHka$;^Zwi-V|onUZ8eLIVX+$x$P7A$ z`AS(Tg%|JO`+N*w+|Hc98XND^-4pcv@ zyxw6a-(J|su@sGzGI?`2`q}(}`kCltC}@g1#$xH(yG|CtpCr%F$;xsw`>E>g-gUA( z9H5gG+Fkd?9j;Dx-&~tJ6sTWZCwm8jt`kIG_YU&?BZ`YwEgqYGeM=4$)hdvf3K^b$vQ&X1yOK->aB?W9H3%oftp$?{*UX#yPoy z=?s%oaFw=Xl8^@K*6HT;W$IL$c>3HUSQXpG*!2sTpI(ymz>XGeac)$)_$0A*D`V_7 zwSyKNjZmREsQY#nk5iRd)?O^Ba;%AdcP*WiOdfYBx)iEq&X{{p%Ghd|=rav66{l|V zUywfcY&~^VnV8U%|MWMK(-?>5R_h&HFLCpEXBq#BIE>OWifX%xPrwJ|B)K8AuUf+B z725&TUc#WGlNo2dbuG>HAImvm`0*vKz`C!X!aMn8SB6?8Cz?wKnW!-af-sq~1IX8M zS=3UkxE9?l%lBi^ z-Hb!oE&SgUUbZ)j?p=1FgTzMMN+Gf6wleqZ+oF332vio`1g{rbbc0l4TXatl`&Yg_ zw<-^y(QdK9_S}Wn47TTfE$`Ls_Xs*M^7^@{?76Qfz@htKlWs_;L--r(aof}`>nY(W zBOr8lQgRNro5Vn8az3Key+Nu3dkQh%pvC3t_V?|UupwV#UrBX%qOw6GL$uEkg&K>cbDjFZam@cS{$iQeN=t-hD;gebGAq=g5lE&4Ld? zHbyBP1|injS1Wi2!X})X<##ci_l=@pA-@j^zl$$ra(|h50)*t@>mfmri?6eR&&Jn{ za$C`8qJ1?GCe@y@vRAtFsZ;bBh_63UXSV<8e0;slFZao_T-*3Mg!bIK539SA8E3FT z%)GEFV#*M^R2O9Z^Z`NQT~x2!336*!y@DFG7utrhv=#gfiJyV+PRxV%gaUX^%Y*mO zEWB6bmEVOC$bs&$(*(K?DVBxqG8ei+&J*Z1Y>-6k%nl?8o-TRx@ZL)DyYvm5zb6uD zk9G7&hAz_x(+zt%R;!U=bLA_#iGh0UF#J}E*VLh4nvRd_`6`ZIxyM2M2iG zΝ7nRL)zRRL)+H`;9BN?g%mI*?_LGTw3A#po;3eEA&c_`B03NB>+ZCP1TT=c*#) zGhvRL2Rys5wGA80Y0Nd@25cL4E&K@N*T?U&g5M&~&7w@5MBHijbpUvI!E^Ht$y=Gd zrJHvI_^nqEqUR0!aFB+}m51WK4pnC5Zs?OOy5H5-GqSyS0} zF&BzH=BkNX`X)KQZ2xpXrhoXc&^nxORmkI$tN^Kkzgb9*=c2!$g92 z5htr@I-?xCWjNO*;POW00|yS@4c1`zRw^Hr z17DTEH+-M)`DCH%m#&v7O{!9RlCxAlvMN!J4beY1QQ=n5acPnD&v&J{T2b(Qp=BJI z(^Wj;Ryphzl|_UDF_A^q%=hHINxU)g3f7czTbbcl=_4^@`JW!WO#Df>UWcVPoq!2| z@HRF6a`6U(;aLGA4S?syJb2WwLCN#DfL)=$5{SB>;=cxw1CJc&2a?KEq>$40Ke7%J zzAi-1n}4y<16GdhnuN2M$lt?S7nA(u5T#shw9W;-0WUr3yd+41ZEu)R<-ATo!qG)E z#OZtrELXlA7q@9yUWrv~e z$b53#!@^Dgfh7OyMw%wlj#|1ZQ)IpKC!5@G z?!gQ+6^>n_`WR~C0P+UX-9>gtz*~9203K#Nre0KCJjLmvI$wL61F^!HYD^4?S+JF? 
zkUs)Z-q73`9#>?CfiPEVZ6wS9fh@;SRV(y#je=`5$MThlgNQM?nK?U5z&Kgyj}mid z_hdYzyd%3a?~n_I8``hR*r)8@t&6-AONWXQf5%)}ri{H*J_anYZ?i*!(GLngB~mCV zw!xREd(_kNQ#W78ly1CJ#67;Mt>UfcD3wE%YfRcmE$|CzAX0YBOH9$=d-x}8%>>Gn z#K-5)D5U5`M_FerP}|Rln0iq+sg}?lEAmGH0jo{A^fXzXnCrWWhacNjTV7;6`kAnH zLFpsh(snV{G$*5gP}-wCIpY}J9KFf<)2H&*dh3wEZEbry+tv%L`|{iR+TgZ+>Xy!e z#2br!)~{6V9BA~4^;?t#cK&hbpY3L(+56Pf0-(R{R+KKiYCyAH3fO|u6W!9eW_L!% z6%o>H&hT3$ywLXuWK+knSjP`;?UP=+lPDPtq{DbxCT}&33tnU9L_sa5TKTP?%I-aT zLhrH_j;_{oE0w~rQ(Pvu;lbrhr_pCyE2z>a&^5ZO_rE0wEP#BW+cKQe;80PJY_$$X zZrmvg*)Ipmd{X>s&mgjob4%xNLfSE_T-AFBgt&XBP4puMxAdpCvPAa{Xh|R3(r=v7 zF8GqIg&sVo=p-#oprz#Eh^P6Ra{niT>DQEN?=g&d`90ev>{5ucs9QR}XA$dyYEn&FntHH9+hw`SQ*iAn8hr>!Z2+xoKmcDnSw0c|ZB z+*aS~4iXC5DziRP7(kwEwRW=8Hq5EcMcSR)ilp>LYwKYAtZ*wzm!2}9*^J-fiixLQnnJJA^HtZU#INbJia@s7bgeKz}c z={LVqNP}Ys!+MWXx{$<%gNl0xSASKm`p+Q~q( z=+{iQbU|CXRiA6?TH3Nn{F&QYj>Ki|`z&(vN!$wS*e`Vre(6sH$=LF};3Zq#&S~FD zgY7$unhNHV?E2D;JW+=PeuQ^o;+8l8CZ=Uw1jrJ@_dSqXux=@`zL9OabOJVAN{Pe| zcTGmSTMq>X6t8lM=amEWt9;h^s%j!ywpx+D+O3^DxV1ebe0OHD`53zLYALd24Q_3n zQ#`Mn$9f5>ai9(gvi0hRc1x?&t#LA{U2DTVxt3nX9GO=`k@bl}M?vv=r?}gBdBK`z zw0pb$w88(D+IVw)^f5ag1N-bYRBLAJwwAbYpK7EzT)ziEZhS28dV}GaFCfB zf5nPvhxN$^LWYf2Pyt*po_slg-@(dCDI3EWIIJ5KpA;0|j!{7PBd?st`m^d7J%z*28Bv%??4=TmT4!O{_ZC#l>$_YYc>fht^j`s5 z&>^1#2n2hFWl&zf=)a*jxVYyh=+ZM&5#!h7E4%Te+5W zvowVh&m|1!yE^ooH@KCmTq~yxY~>;KT0wsf%C(ZKeze=4m#ib{k4?nK2e-9htAmUK zCgM?WTaJi(^B2egr?@yv9uXxeh5B=Et4CTfT4bGn7)x=r&iAnt&um&wn5(%C{|`pd z<$8K*rDZVfaaFQMz3vU|>}J*X^naW4)6#ZO`d49V$nvbAk{ceo6l z4oId22+(IGzX;ZpaVCHGdGUmDt6eRqd(_s)>RTHUhpBy#we@;;ZG*hrkKLeluIh|@ z={`ME+rUQQxp;ERZ4;`PS0x66ohxBt^YAB`tM^wbvnWh|;nA+pD7vQ>cclDUc( zWzf_SAB0!9&>=Hmd}Mb*>B)r+pcETQxl&lCahPq#=^bHnTh!bUHFt;E&LR7#|BFUc zoc_2ruEbb>xjz6&%9}#m(d9;0PNV~7A4ObwSS{rjKg)Rz&O^z4X!m*%T+I^kD zPJ0goB@#!oJqWu>v?%7?qmJdzdvWantHfnUG~uGZS`q)K_o zsGj0WEYi)3e7bkL)GQFTf0IRb%)CcX6$`$QxRTc;$H3{j`9Lf8H2<*cw5Dyo>vZk5 z&g8;!PvRUnsf?&+NhOSbV-?XZokp;;k>G$At;Y`bc<9R!dT_QcK~HZFBo2-Q7nCK6 zpD5}dc6!H9a@q9>z^saBglCS%HzE+|-Yx8|*tA>6EVX)~?H0)n0HH%s;-KWhvZ93d ziK5k~4|v`^OxOn(0!|!!EUB7exF4H3TRTW!g27A0GIr?QH34OONY(gd3ug^{S;LN_ znkseI4A+bCYWj7{lP%@ovaYO|GsUL`46T+w!q}z`dW}kM@F!(d_K)Rxl{~NFIigrS z*X6m+b7rNG*uxT!yB&9WSAu*0gw$`!C?LUaifc1N|G`xb2yq0*`7I7Qi6X9JTANw{6``JDb1mr9>x0K0> zU>X6M_Dc^8e(BeRFD-Una@Z&9C{dn~z7f>q;7^TL<)4g*rxM}fA8-QW9yyKT5UJy zx~(FHgQ(fY4SMi%E%_9caae3DJ3ZA=8wZa!>?jk5VN`WBye`TUg+o5vEsz4!+NA82i#(`(|d zled?9n&JCP1Fg4=A!Gh(Znxf2h2>KaMsKMl@v!u%9*pptnioF~1|9}ca(kvACLR5& zboRG$182Ppy<7%0 zyJ*~e&<;Q8>D$?#hf5Ts32)8RGq4XiqXcUXTOox17i{%Ou^=un)?q1F$z~ZDP3`mP zW#9i`DbIFeC*9cbM3Ho)uY{+rz#lLl=k)cEA(N9~e#8_k3Q*ttYs1gkTsA2nQkz`Y zB6Cxd5Y}}8v!*P$JJUQmJ+dtLV$-2JBHAw-%bE}3UgG`JJNhd$|0I&pl_F0& zQOQT9Hx=T4Q#r6rDogzE3G^!oiy8Ht z7f_A|61HO+VmGD>HB+zFwirr^VZA<*o{wFOel=fin9~%?C+dXVo2NJ(sIfZ`bUH9X zb)f%btz0Sw{z?I!v$$1~F7pLz!$J7Fy6A!&g%^;FLgjHH!xap)ow!%B6VV5J^~f=< zTcl?V%qexs$vSBM&H9%VDQ4K4yTX_1=KFTg!R0i95B3~_4K+VAR(eKn*xVhf5aY#1 zF|(;GUa?C%G1?oi_=r54mDW`D0ZB|gX-rg3jOa|h!O^_3)UD;@gwfi*C#)}8TbW^w zoS>(#DmT0@aGDYabUJzp6ZnkXo1#(b{a=#5G+4!MF6`=WG7B+adsS|7AZN_(Kr&J1Ms`K}5i7kSM|#;T9kA|z zH?T&Cuo7k21(EFq0%8R+CJ)4JlAZ0Q6yi7vn2XUC^FW->LhS%S41N(1--P;jAJcj! 
zbpbBEOmj(UQW=@IPP_@NVR5Br{_u3Th+7iq7CNAmeGpkkgHZhJa5K}1cn_;I*$-n> z*9Dpnk2CUQmXZhThep#GSdk{KT5 zJ#20gz4v_GOto^y$&bMP;v8EkG4d7BL&>`6Z)DG1MRADXa&1_c5b>mhaAWkf{S@6;)nG4t)1Y44JbnDzcGF-D-Eq(&bT1^GBjBg4Q{$^ILi_=2M!roaEeWj2VHuE z(;=H6&4>4YO@0WLef_6%ymm?AWMbFoQj2uyCGVpBkz2 zZA852ux(sIy9e^_^IF@(JlD-J4(G;MHjRHvRH+>IvP2rdy+&|r;>P`Q?h`JivYcD# zaIP?{Bt^?Hs@8w3!>6z3@#&^`@OiDh5tOJGU1_20$DMk`B){%mqX#FIElAjXl?-Gu zM00|Th!3CiWDx^1mY$V?1!CUq@$uW23+=~;SjcZn%nqA%Lhwj%%WWm*Du$!b`ztoR zdj|Hxg>=Cfd(kf@Sbx68$}nsGvmp}$OIoId`%ze5UVdmrk!Sr$MV?MRd-!9`}bUtA|Q~7+8kK9x87@LMzL9koo zqo9s86AB8;>J0V`lQn9-?)UAX(|2ko5<3%4EiA`hVx$%ZBAmdI zK|m-06AqldR+QYz98OS_nPD!*T!%U4;Obk-wI|nxwMdWVPk5v*GDl9WZzesCSk^?0 z>xgmW6`CcJWT?8O94nJjayh^_vO_E9l{RGCCKyoWf2EvOqG;LrsdXVz1r>{r5N5lw zIjtQ4jv`UGEHf3si{-L_L?~|c7`l)Wc_iZI+?YAN4r^NTB@9vsS|Go+ z{aZB3MJjx?;Fh6=<6&p@%K>=c6}(ty3WoT_W<%1DT`4{jwDB8QSjfa8ONEa`|c69LE zL=*PDCT&YoRfxiE0zf4#1!j>Ci%?(IiDKP_;f;LjV28ww5vUU^uy^DSv1)m4+dHs% zyo$YpjnmjWfWn3027w{?S<_g(;w?g($TP+sX0~B>{;61^$-JYJRWORjuRC;!jTFu*rx!R!qDdxy3!<;-96HkerT;%h6n!DAO zY5V66PxDHt@)-3-2ZM>1RwMaKu*pHyMLtS=H*PLqvY6Z;o9|f+!4uCfxPu!lY=+jB zHZy-u{2)4Ui-4uIKSAFnW;is`UGmyhOTAOf(JZdoC?I@1#XoT^su&qH5oP zcnpqJ8o^O<5(q6hf@_{MpNfmX7=wSHxp-m+^OqS0415GTRs3#^&!`Ujw023fu6wP1 z^hX3qrkhOw@$_h~I#?+9X5+sJ(BYNkv>#2+#P+;fYySgX^>S3=4J-mrl_F6 zB)gcG{&G^o)q+FDW_7;Lg1R)}B)`pey{ef=!F83M(3gpAqKN&Cxha6saf* z*my0A*=MtfUxL<=Cy}3oa?q}x8Ss}pk5Vbk3Z|T9X=1&!Ml*!63+@KBl5N56lieRJ^0D2bs=zEBi|vaFm|2ua zs#xR$*hM~|#XqVrB$CP;>C+`EbM2mNp5LtIXkQE^Z$*o+WiCd#g+2y=;(})d%@Vl% zsuo$JMfwcyhsx2wE?7h_s@!6a?QtUq8hfCSW*GaGXDyPN?u(l1VyWqVK9zj*X!>%L zv*Kv_YH4TT^6!Tc1~7%M{eP1)O>xV3`@|z zjeL~BSDGeVXpWqyH0RzD)=(^edPM9bZg)fuWg0(>1z&19DI+O(UotAT=ta}ZWBz5g0#K>=qYE!Mu6>-1Nu2bqw#9v?cE#{QAyU;vy3Rj27UGYmwK%*UL?( z4aQg2KVZx29Rj^AuX&;IJ*mP*-YW-&yjKKq0MMsv3Ys(J-b(dN~jU zODG_nQAk4DpJu}&;+c2JkQ|WWi9V_2cn~T1L!hRR5o!)eNtx{H;}whwjyZ+FR#HNQN_kkx3Q?~sVQcJlNDRhkAEwsy&ATJ% zh1;f>V=6B+FFw<)rPq8`I9mjSKR{4 zb27*U5QcYR-c&PoCSx0WMYpa+$ZcZFp_qB4uXiP@R!{#_0pUjgtfV{ZeK7;9ZYq5FK5v#WpV$#c->R3e8AQuaN@BQCT) zsX}$#sIZKp>%s8#f?@uot`ErQV|JTeajQ`kzH0nT!sL2UocW+!QaG z1_GxE0{KJ8MIi#|v9<(TU#4%o)B+G%+X7YJCYgr~@0OVPr30w12Yn8L45I_ul8g+& z!N6t*0~eA?)hAn_-Qi$6L^j(>lUQ?O=6j-pr0$xq0xGFFJ!;m}i@ISp&Ou{|kOS3r z7)3Tu#6peZ>vzftpiD33t_#cw{)m}LPnM%u&66VO3(LdF&zT~ZQ6Cm5hDwL|ic&Ld zwq&9jQ^Wy!%~$ipof~`N-gWrTWB7BK`A&>WMkt4yIPX-rsg>$DDXu#4t*oejsqwEy z@HIU>i5SG!5MK<)9Wk5`%PARts$9yOcE1esqPa2it%&0h0Kj31Z!(_&fvqF4zYAVQ z7gHn0kBWt|BCDYOX#&uLdZx|pv<(#?u;%g*%HV+=`lF4C&}_!zyL07$*EAd zPo1tJ7$)}1X)Cy`X`b|5AcTwUzPIv2`fmjP&~(Z^`meQbVnk;}#vdB{^`PydQOz@4 zWST8N1-*@#zg7fJd>t33EIpcLR@-K08&Uam5F*GdS3U@e<=|gyzfZNFo8I126bQbV zZT2pODsrMIr*hm&Nbs)}QQ{To)Eh~IVy)i72BF*Lz$QYhEM1%xjObPPn>wEs48eoK zT2Ka!6yx)`Fi)oWN=ih`kI^?jig|aEoxABo!#qOv;)`Z!iy&LRYV8#YV0P(?{CT7C zJtLTDIxKFo8@xMK@uGlp-%GOCU^Llq%BZRg|K8@HtLEy#e>Pp6w9(}WE7424yJ(m7 zSre(X2efNTJzuU>S;oC5wUDq150BgGSX$N1f4Z%Lk6~=mRO&HapcV98H;>jc>kHaf zl!9w^lc<>a<1^%+VSaf)$8+*e*0Y<7gkeZqF8au4$Ug;&Uh!7}1;z&f-}!?b1^R;Z zD4UTDa~4hKBDvQ*DxgS=R9r&piIJM2Vx&%a%#M+I!D`yeTlW;cMUa$w3q2-h#2~Rk z-@89UPz3*=B`2yi;wmF!6LEiIOb^2A?CoV8*yop5l{)e*xf(cH*X?h#zNsxK;l;l5 zKCc9E9Y1BFh^EP95M%in zCb0q*v5w{nvr|?sJ+8WiB&dp+H~4yaI~GLC(Uz)^Hz_}H3+~{pzx^C8USpVl*LT@D zzV(WaK^ZRK=cG|FX#9qYSo#X^$BNMg&)BtA-hU!zT>-*LJTW*+7zj2yjU zPBF%>Hc%*VJ6(%yi-n`@`aLMZch>iNl{#HnP%V?ejC{l74$w@!% zA1}|v{rJcJLol*AUf!1gHL=Vs?-)B|akL>i@y~H{WOWz|K7k8so6iyq!JcbT0hv0L zxI~YURkv5x-@(u4AAvP7Ng^mmuTg1D4D%m0UhY?T!M35)B{PPH803HvjJ-0yj(@ZL z1LV%9_WH0`6aOOq@ZNYj)GI!5oaU_OjPcCiTN6|9#$ntT;EijwF|ymmi0f1AKg{+U 
zYIk12l-Ylf__*8ZxTIf+{}4ZZhtLXv<$X-Ck)C3SK9``4dY0?;sR+XC{Fgso@u6)?Oc!JHptt~S=^w?y_@KmUl)H$`#s>5s?nDM? zcRoY0{(~_Sz@U$FEe!fSmqCpRHWjGItvtpwrDDOhquba6KYg>0HD<38kV~(l2pth; zw)qFTgWg17u*;GY)FWBgItcdyJ39Wp?9Ogf5 z1s6e1w%KinLL8I~37L#=+o4mA(1T}?_II>*ehdekr}=C#;yZyX8`xp)1hRC>C2pp$ z_j$r)TD0U7+%PiYgu?KxEMJ--9<$Z>ojV&J2259KOF}+}3hbw09|K^nllDah=;dRfn)oozF`@9X0~6lmcI4Vhz%FFsNaT3WkUstogD4p_i_e5M_9 zTn1cr%<(9$;C64@gjUMgP=88%yg+T3kt1!eZJ6W15sg3W|Eep8XP{T~k8)#;wDu^! z)zE%VbcIX#Wp~P%$0WyKgEGJq^QcmFF=Qi~+~IF{iF>|SJi{kVNsk6UYZ5rQUuvwB zb!_q*fYiQL>9sk*t5);MJr!h3FRi5vRokpAqJT|r>SdE_k+kVsGWxEqsuF$tXJ}~c zYqOOhyDspKl=<6WT{)iR{O-ghVRB~;oS?L&ugRVPB?e{=-uMGsW~Ii*f}W;hlIKqFC~7_}!x4PQ;G$1No1W~6SIqN2Q536SZ(lq$&)0Vi zH7);y?Mpp;p5^lfAB&IN+hG)WLVRB2vz^boeB|DL;cq>a&jdbq^Ldbu+`Bhsm%owk z7CuY)$i2lE7I{|1i#)&P^L4Jj#YgVl&x|bfH}icPpS$>|d(*{X53_{%^hJqX<_wt4 z5)X3_k;SMI!nN-ocUz?X9VLVSO~cAfr}J3L#<>gDT5G z#u3r#MMZd~M;A4Rv2OT9u!vRXND<^Jet10r&{6G|4Nd@bzXU+bpo^q#A=le2euJ18 zBU96$q9cS3i_05-Vwmf!&r3WWP>d<3P9(6dj5^tLn3B?se8TjPgm^~%#1CTTG}fJ@ z0w-WvO+wPEnJBm_V;^Vyd_PAMm9=7%{kr1BdX%by1V>mHc1yP6D6qVeQX|AqG7axhq z>@>4e;;6q#NNlr;q<*)5Y`8mAMq3`EJLH#dJ%a;Rnc^px$-&{EBl%%b;(>^HVSV}n zU2stjVD22dJ8q6XUtvVzPq139W#WB1I&tm9otnhQ0^hj;Up2`8S{A&Cp{mBJ=2H+Y zLa5^$A{}D`XM@(?KPoI|I;9#gBTjCyK{1l=6(t@+Ztdx)*~w;cBy0G>0|qA$fDM&)#5%Y;|_$6?@5`?e%w0nuChy zk?atnl!##&CVG`w_$}53x;YP#`$6J3(zr?-L9wc=)~?K{h!|7d->1|Tb2`0SBbJFN zX3XC^#T)ZmvEbUq6D19-y#M@v2r$n3WX8_|Qw}iY{(ch4UNCoRy7)BF!=?m_4_i

R<>Wgq&N*rP3Q+WsfMs|;N&d4aF-4%L z%*8Xr)5S$jJdsSI)uWHh3nA)JAw+8%UsYCr^1B1iHz8eP&HUQ_^HhI^dVdljNR#&V z{z<&QU|5q^7FJYuv`ha?Z3~+e@i0nY-Fj<+BCtYj0qyqY-!HTCZDl#~sSq!ld!7sM zB`yT-XQF#~F(e8vct6S1iU0p}QKetBDlmP^75Q`I>H|GBkI8OcLu%d}zJN(K*9b3O zH4E4J^tt%p8x(zGr0$YwLMP=rhn&6KbgJX16PaTq7kT`i1fF~%^Ke{dm~^JkHIf|x zTwDzjbFy8+pT4?WlJUz6Y89GXRJ6mBI7)@{i6G;U6<9EZgkcL-gJ;1OzFIuD(vGU66eJ}3;d0}$tI4u8^{NF@^Eka-$f*-V@hNb{OU`cptptP z$=iLyge5KwAY03xJhE@^whlpAW_-%a_jR|`c?~nSfKIXOec{7$vtDSl#6(x zqok^{)AG^dgOqD3lAf|TlCIgN^74TbPkKsuru3|{IawCJl!ODs;8h0Ysc-gNJ{BvkmQQ}~uZSybKfD1lcPNcrxF z$ZRJl;=5A*RLP%^b^HjmN44!&GEPuVD)s85q}A2hS2%az&7>ehYrmU2=JWvC*TwK# z7zvaPd)bVOg>$vg_$;Di1D?b;<@zLoI%DbL^~n}aMV*N}B&>ODVz^Nqs_HM+tHXgr zt=KZT_eMCgIud-f@$uxRW1Ef_S6Z6D4LQhDf=w3g_9Tu}uIIy5)P$c>jh0gA>#ksr zI#}Uba6k061isOntOdfVJc&bP7l$~k;f@)m3?i!XvRH_jm-!}k#40Ys zgiCyl+TorePEWS8r)r={jn0Q4P-pjgZf|>t`@C-OVz}H|0e!_ycw9Kc0-N&yiY_mU zS6?oh)N9zJUZcl@msjC$_o@1 zS)I!=86kNa)-Plc;cQsv5*Uh6VwrfvwNVXQ^QKywHvgszmMaA_#hw{YN^Rcq@?*vo@qk<{fpxSR*Kl#vzy(Ew6!4G=Pj3?}_T z>hf|Ub%~EHxKbChN-Q-ypmGkRE-lZXhulIK$$20Z_3TcOEaFWylJDc|{<*T7T|A*h zMt{9pcdHe7c1c_%b_xa$G*rknr?bmrt+acnJ1?Y0cL}bU<9KLOU`v9tAW7OC@0Hwe!OVmMr478dJ1_Yf3DI<4rih!|5#2^>X}~As6N?r)d zf^Esq?c`6Z#JQ-Op9+>52;PbWjewSXhF-*~YyDdLFQq@`bQQnZz6B#5BaSs>m3=3iE4d0y zzm(1FYD>*!Xe1D1k5Vrspv`=d2(;v_LdMBu2{J201Z!8&NegQ?ftRZqi}_t9DY7LP zah;UqJd(^(H63Q|5q`n)2RKWGSt6X^E7AVlpiV?dT`awzie(yFMa;WHh!|Q=X*hQfD3?VqM`WE^M9xHzb0R zja%e_ns|x`44dm}$1Lj#6%0YY zC<)B?W}njMLOyXd-zn@y!t@hV5aIXv#kM?}ds*L15C$exmUAyyzW_Ly?fZMssd$P$ zTO@m)S7j!l-d!SbJ(DU)5kdW{_#KenNHjhbpEgg;O!7$>bJRnv)1Bc7Gy;R43dxxSY;2aJ)L5c5`<3p?ILDUMO81 zPgfJu6~hhaCTb2exT36oGir?>A!d55Fa1((${>LM$$4=kH}wh}JDswXng3wuW$S%a zY6##M4-R|<`Gf)tp_xdNRCpw^gdg>h6egyW;TOtWfN+!}eA=+!aRKL^E3;_25hmQ( zJWeg1=@Od4sIXE))N(4C4v&D{{7<8wd7hF5R;Kg`_Jg8sqPe6?t~A)&xw2Qy$M}{? zUgtM_ng`LFMkTR7(PMI>eSiX?lOiwlO+^csdg=a-S)|>`fuAXY#4%GnY>TpO(H3z}Xk>u%WOgCm(lmCsW#dVh(pClX8HeO2JqP zn~x-p%E5bM0lfBo#we>d0`*%3hE%NpXahU|P}LVxm9oiT&y-CqP{h{ZNi4&pWu#E7 z!I|PVWonYCSw{XJAqUa>_QEBj!%AV@h`@O@+lK*3@JhzRq%L`?`GIZh*crVvCDjRK(! znb@3((T_ZEDoN=c?8u1`oYnGUL0dXpHt_@VjjE$`ExJuQS0$?-m>;QHf4n+U=pZ>O z>*C`uCE%GAHVk{aQj_P(Xdwi2G7SvxKQEU4MuRAx<+K}cPQ(-C7n_w~UP#XV2g(j- zQtqK`P_eUlgydA=%}8p&T(d9NW+bo8-U-s?8tHw#qanGQO`(b%tb1jiE+YgI%YT~$ zW5^^qVBewO1O{OuQ<*K0*n3p}U(5A=W)708=`p=ky}v|3GI?%rP2<>#HFQ4lWsV+b zpc!a{RgPV#nePVZ;PTx2x8%KlguE~P2uVK>zT%)#rWnOf&V(KjE_@YT8)1#-LWG#u zC`5pX0?zl`RG7nF2$EP9K104+mW9tm^J^72QirKiic;Y!SK4)j&qd+u3}s5CeGyZ& zWmOnj6gR0cbdL;1J+;&x1xyV0tWY=ZdBpyDg0Iv)E9Hm7c%iJumOJ$buBj8;0zs$<6V zyvXhJ4HH&kW zPh;JYDcuIH7>PcSLmMzm#?0>v_c&OHnC;S$n3)!)ZF`Npm2D`kN09X-YMv8LzWdkY zZB@mIn|1TMQVtzdYx{3Osd=Z(gBMFG$Mzn12CC{JA|=fB4u10!z+?@H2}ksM?1A^o zn23RH7S@DLMoEh)d+at^zdA_5PqenPum$!-Xh2~>((yga8;1FMtokP_xYF)Cfrr`g z;6gEaTRsrTa$OKM6daTlI0UePaGd18@r&U+Co(nopVF#>o-DEy138G)mZ*GZinC2s zn&G0gwes3>71QP*su09|;%Oi7{Tp-_JdG0g+F#ojV2L{vDG6_YT37j_=HDV!*PWSY zc1UTM8)VTpF>XHZ5Hpq@xd|6ZJZtDm<Sj8hD$=>6?ksf2;A!)RO| z?()hC)I$U@XlHsFAJ1oAO z-`n`TLNXy*WI>ES5hEdluqyV8yrJ|Il{-k(mS`f%G=+uS$RQ1L7L(doPo-`M-i3oDqUYpnUwNhoYJ(C~?W|VHimGM@^THjwG#NV2!n@K`0 zQOW8v9gMho99miuD};S|bu{>XU%e)XcK$x9$>j)SJ(*EExQb#&NY7}^)HUd4TI5WGoEn{d z-f-*Nh|y>(J2^aEHDmB>IP)J)wabb97uBMpcSK}a&DhAYTaIxA4vCBdu~cI@mdVP< zvMB`ahiQ_to8unqv+oc}S1O1DW#QkE-#61dJp4OR31r5H4VdxI1O2Y<{VLhIVT0#! 
zKZquj3uV7T0)|59OPnXImOzE-{#;Q>F(%3ZX%jPSSc3vYx5$zwWWE(`Bbdfs@zm#7 z;HD#U?OrYI8fK#WSz(iK>*BGV#iJs!!vGjJTHpCZ%ojpdwt{5pTI_3^#J7f9Z#&o1 zd=tq`SMaV8yr69U*JI}AEcmpxpUNHcyS8BN)~nB0)jTTG4gLxA_^3}p@nw%rr=Gr< zeJNc{VryI#tLV5OUA%^XL5`-H=Ckf95q5al_EMX0Edl+sB- z+O&vNp52m$`^#53 z@VOw;ch$_{`L<{$TXa^uxONaBhl$N6!a5wAu2)5H!?olX{QCM?WesJ%MUD{>Dp#a5 zly_bjYx11Mro(%fqrcrL>#tzITU3CBOQDYL% z1ytI4SmD9>9%u&Z*`D8X1U;amUkpNsXNR}K`udNh<|D`7%~wPdKSLHPat&INs9w&9!^z#5}TBfWj=Y7gvL9yTHE$mZtT6 zmN9ND0E-(iTRZjzN0nazT~!#O5xWz2Th1!YXs$XBz}>E*ZkAs&(}&0hlg0s%o$=KM zc2RA#1|u-+N$EL7{Q9VZYW;F5wbzvM1u}5A?)@Fv<+ck-)-J4Oaxgvb7+6y^z+TwM#f!W;R?VZxS<5jypyvzyED5| zouUKs2Tgw*3Tlld&)FSIoihlPS9-sT80oz41r1Jur^thUQ9zN4ivf z!>Z!R8KXK3D^4jNrfQCu%4EKqWEonHJ`Cp27P)ZD<`_7A2YpAYCYLtpJLvc~bfv3X z8n_d!UL-7a+Ui}|$)zPfjHb}q>}abv40<)HK<~qSYQc_3_lg-Mfz>aS!Oy?sDvDzX zOM)>jgVc00qeHbnqcUP{;Nzkot{e#(9qJBmU2+_FMG-j4fTbXGhgU_`+W!c6i`JNO zy*Iu#E_M3q@rC^Y{-g}1jW29n5jYggEtbIkz+`PoyVWo4hViAHUzA);Ti}=WTZ5c2 znD}rJI7SXC>whR9*0Z9fWXV+u){ZadrK`rbs+YC6z!!`!XLAv_T2&+1Z3Y9TRoi<_ zp`pxTQ|RsEadT}^E^1tjU+A9e$6-EE1g=6uD)iiZq32Vmq9WrsyS>Rg?y^QJjj}?K7rr`)XyauwiH2wV^<1tGs>Ln z*($f(FE>8E++P)eQ|_Dw266+tNS!B=wpBt#YVvX(>?g5nu~W5LL^thCiRo~crx8PT zc%LV3_*x&U`Y7uyD3?5^$HvJd^J4#+T zzO+X!ACHpBs4)fJIKH$W7J(a!*&W`66cFpVS0sckKwF?|mBG64IK8+i1B7T_!CK|H zXJL9C@OG^nH#$CAl!`{?QZW^2s&*gEv^YlUyj|4@thxq)z!LAk_-Z_LS-#m2@6{0R z0ydBz(BAAU$jv6~GYL=6-p#8f-DVQb!Qg5lLQXEtp`nseCBc|GBd*n7?Kq`8rnav*>`9f?S0Dl$sd>5m(o>TYc^ z6s&Uks7iJsy7myN)$Fnza|FsLW#lC*wfysB7lF)T;uzC4GRtyG7nC%YMuV}Fj%Yq< zlGX=tJNH7zfz=O@I+oet&%5*787Sd$>t2+ue9q3fEIR^g)+&Nh4$VaIPCLZK1xAGo4FV*{R6hV#hP61Cw~0XSbm%5OcmF47%AzbrZY)U6z7(^+))jQ-vTSF{ ze@wnZ-$8z8$?uo2q~KZ?VV-?4>t`SkY#D4XvlfqD?Xp7PAl?*df*ecl#6lHYXYzvs@2%6irt!GVkR))Zj zZrdhPRPX;`GG(cOWM22~pw|gh%e3P1eT-8+7;gnCmR@71*U-j+cT2|X@P)^&(hS-V za~q8QAbGQ07r7OG`VmdDHks?mnkyfSp$w9v`(*}sX|sFBNISa9tU^G0v}|0uPZsC} z>NRY_c^WwDpgavkk@43$IaYTM!!DjW`OetDpUM+u&!LG5IjNc5y5}TT_LfFH);Qf% zgaOdA&OE9-pJTCzXu(QUF5z3ErN5~y2_WuXQG0Hr^ryAuyf=$yL`!eRVY?cF8CjzA zhT0$jSuM85O21iKLm-Ca95N8>k>Z^<-t-biAOkV1QWdSw^(M_7wcVxy9Jkn>h0qs4 zG?)y&X=LOWH?QF3#?M1v1mPG6>mW#Gm9m<(rRYnvba_dn^vd#R>B7oHX@Y}#3u>rK zqNz*Xme6pb<HSmR>B!7)_90L3#=Qvt9gfO$zk{RCC z^po`6G_sMMK7LniBwOBI+u;k%@ z_82EjE1%OU#&OSY)o|DD${`5)QVyrL!e}y=WcPzmR<<~9`D18(9&?&HLW|h_WLE14 z!5q4dGbwkXRb|wVDs|7^N7Bj{h_n65X=GpJi$3uHYD%4=)uRC36ukWd~;S$`<5 zmKVKm=_F2I)Gosy2T!j}Zw9Wvn-;jXDtk4bqZFnun^qe=OyGK1>EN2NM%S)1+2wcO z`ddvTTXy1b)6C5*?pHb#z*XfDiB!C{Uhc-dUq6M+!q}@v(cimFsG_?K%5X&+Pkn{- zD0=LfW*Tn||Y0fE(;!>p+%f)^T4F}$^;y+2V=OsshYM5?qcSazDMvZZn7 z3;l6ry*YTtd@|9hnj;xl88wB^a1Dz)ClpJyqnCVjF`@w^6`zJd-TWHPrx|z9at-ah zI3hee!h{?2K5{Yd>@(maXZzj|C&@L=4u^90%G;|uFnrJ^DJX^~u>M}TMPsC3ILS^m zKb~YHfzbqaOu)*!IB>v(%N;;Q7XhPQCI;?^k7F~V%{n;hpR5(MyJ0;U7Z^hW*_M!T z!ZmbRN*6H&inQ^X_>A!vx2Hu?XPqY11`5|-HIPxJ`mSNDtzVGhY5v#C!KCIWN!tUl)G!fMCdU6~7MF+?ZKsZoL~Z5ck&h#l9Hc`gc0va}_hT+tfKY?+sw@xncbl@u-1H~lHF%D7y1 z=eMXb%)(4-I0_i>F1S=cU&1xZvw_vOniOx#?mf#NIb~OWCTFPqGviB@9CKYo#i?W5 zZLK>qb?jy}=7UPd`N~S2NOH?}#T(J3o3oVzObd8A9MpX|Po0@6e*m}dvhA&jHy+c) z2wNP&sY4JM$wB+#UHVVL0VhMt_RZ#$+NU|BHX`e9YvFylgUvI;9H!TlW+N*eP>j96EA^nWSBQ! 
zUDOZyi;*;kbx<=hX9w2&u&#E;eUjPXs3m|!~G&AgfkoYb+&aVyUMeoht>_hao6b3Ht{8AF!e)OHh>ss1VV={381 zoYW1qLlmE>{s|EdOkW{q+GpAW=k5EO6Zr7om~!+yqwQ~j&yQfn6ZF;FoS%HWv)ahD z8`yuP|Bb!K%_f^UAx1b+b^8WFT_4s3AF1lgP#LSZJrv;GuRcz#HUT#&;s^ZyNPDTwJS&XbTa2=4~c# z-R4c&&1G(M29FLmTF;~1jh@V-&5hRaXmz8tJX+jnhzC1z$MOif(Hb6Ho;a09y&Dbk zsB4A+h5~g@UBW#_f0Ug=s`^Vt|2?nL+yBk|nSE!Jm+BxPCTrE7FO__g6MfPk3oIFv zw=ceJR_`}hi5k&o5j_#8yTi=6@M?9m;nf!yr&3w{vt;I=cyP|NKV)p$j5GU}sLR8q zF0o1mht{y(w#f7Gqo>YdKl3^!%{OxZ5FBML`JRWHgG`#zHvL`NErUlQ@hchrqP z#nG^tI502;UtzMl&wB;u_qd1{yMGSS5;Cp?ec>A#gQ=U^nL6P}K{bUT9*UWH+76&(pWE9H6NhcC#jxc14&IZuFXNXyx_P7)OvqFpY%+DXh8wO9 zN>BlCNFd4EciYDv`|x-qZy&LNzQeI>=;wfz!ymT`MK(25$vD7F z-PFdy+Pj4~RUIoYeJYtiTvBoAIj!I_!?VqQw)@Wx|GCUQuNQ0dU&Sx|S20olRczIN zU7vpy1NLLZmHk(-YX7xA|2mj|g}oU7VGs3&5r)9Z9JbOGp;HRU`7QWf(ge7}yHDn8 zsqd)$j(d|(o%znFkd^w5+MkOn0dvZsMkX@BECD2MG9A^!Who_{vta{`w_k)HyoALP z%L#b8<4)z=E{~XDYu@C zy8CCjt25Gdh@ZGWO)C#a20ta?r4yZj=gXMwTthb!)KAbRlUjeo-Ti=AAy@|dYz86( z`{y6@iSrMytDU>CwC$#Me?_sFlj7@c4lWV3S(j{TPO7$-2Su8f*JNGqW2aw->Ha0j z%V#7nZwU8YK9ha?up!PXUsIL^wArKXc9K6)SMykJAL?R4>fYo})U$JwH)lj@Y67yZ zZNg!x1zahWU~e*t+@*GxB(@f$f>vbEo3lwnf`2QVBrBIQ+7rpUG}O8EgAQNeMEWMWlQ6t0&r~1&s!2k| zQ+SJ~D)yl-Wm#a(9cQY)NoiU^W2dB#PwHc;`d=V3R_2vn?+v=FvEl_A-II;Jis(e@ zdLcJv#}ZS`d^Q7aQZ2mOU(;IuE^)A!d#|E0)jh8uLwOGC_hp&tH3aOEd)oZ0g!qy# zAOIf2ZGNTqkc+@H#|ljI5tu$gGYbT! zZ`+)V!1Qr)T8rrztIS zz85lNemfIWfam^ZUaiLXovr&n0K(V0hS8bR1-}3>PUbT$6 zT0;e%ZPsq?Au97-jP@T?rCG=smGwJU;8p`>4<^)uI&XM8UflkEx+_dN9VhrK1OUr4 zldXG`nR?&7W*@UPGc$Om`aU#DlxJiDtX}Z)!Vw*KSu19 zJBg7cN0MSf*A#pq)wY)i{(({ppR~?5W!bcj>Ht>Sd8L2@y)x01k)4JS-kqhlm2}wb z?vVJ+^1-hB6$mRJIYfa5X-Yc=_EyMhmT-L^8%{8Z$iQDGL^!0=+;?tm$YTqk#2mh3 z{b`x`PfbZqiUmF!Q>Lsq`g>S#!}MKR&m&7AgUZ)T$a~;HoM-lTUIdqKp|+iqjDgtj zXR@_&xitm-j9=L>N}&T*kt$_tNzMFImLPl zUOC#xCw+g=AWklK!VSyYn2SEK84`Aw;6+6S3>T6$+R5s||XnNl2*zL{l%YsSAH|Gxb zEju1{etV=ZazZ2-(-o;0bl;|NmPqQ%1hsY-gdwV=Cog;hMNtJ((qX1|CZZzx5vOz9g2seD% z$V$nDoAPR**5{18WTc$B5!2hG0tE8`9!k-Oo(yetkk0y9(EbwK!|KkA>52bOn{o?)4%L?Pw8;&r1jQ zF@f29vy4*&+u*YxF&VS5AwC=OewN1O=zEmnUKWg_+Z1DRA4yvdB{^SLbg+1x_%SmY zdOHMa8*S=_3dp_TwF6f#9+<<%?3_v*FwLoqu71Aj;OOdSyDHdK8ZL|c9L0xUJ8m)J zb_yz%Wk|dlE~!Nuyo|lsEJG8-dSV3~@^+%M){=)+Wl7){>`ZKRAIUaCl|HN9R{Bo9 z=hnA=?c>E2XMe)twP&>S|9j1kG&S#>eV|%@@1v&HtIq-ZhEr!32rLdRXR~{8c+BDv zi{GaT$aRc=EBR*ezBfg##yXnsf>v^NJ>m%k&+79n$aHe56IA`baX#oyq z*GX^T-mt?+$nL}5?+{n_%|4-2+Z`MQM)KskT`Qzu`4O(s#tF({EZ8`+Jv2NqQ~h6i z>7exNAXtanS^4;X<^JW%WRPppnE7Gs32i&Un9HG26yGg72xh<3tQK|tyltw*oREl^ zXM>*6>CY#UrzDbBRq}^V<*RD=gWX+K&tI57CzfWTn14J?3a({MMO#?;ws>z3GQ`qn z7@ZSFDB`Vx%tG9_o(ONZ0b0|~yUk{eM1nDJBSd%`Eu4-8_;At-je!dj;q_U#wmjUj zVz{+}YqM}0^KgU3aC-#TVBw@(^UIYRi81BY6X69dTuUCVy%UsC^;`pz*{*}(b8{KO1>(0ZiD~8)BxFHKSl!x<*;Ytu7Oo=?*H;X;UU01zP6Cpj-|k|#5y914xZuPbT%G@#D>qDpS83te@^H(F;noUn z&;1538}o33#c+EBx5>hBWIS8H8viv{Zaop+S_{{bhm-g)hF-b_*KXm~=ixRL!wm^8 zY~e=oaFvxs`Gtt^YAjsX|0wz^hU*aA$QIMywRyM=#c=(C8?7;dfL+AQ40JltS0 z+#bO-Sh$j^T)8#=Yp&dSBD|o5Ystg47sGW6&f9F-yFL%MsTgiZaQzl;Bo9|P*;XM} zZionPorMegA4Pw~a2VKTs`ku`tx6N^=lK{ z=zwW&cOGtCG2BMM4OzIMJe=ph;&Y<$`{Jcb-WQO+F&FQk7W0Y1l>PW%`$%&M!0m^b|5Zw#C|uin-70yiUmEk3sTo6GtIcd%fp% zp_)t-UR?C2d`_8yoW7@=qVAuX8-J#12G%TP;>`PpYiBlkde7I}4&U zc0qK1?&Ms~v>;(~Hmc(YInQ&yxnHa-;eIxVB#igcz-kYPAeO?=F;$KQD27pGTl3{{ zKVl{KrEk-62%Q(MD{SejIe2R=L87I`%3-rhGiu`nUYMH z3WRM1!m0pH+LBHv-WaaOMuwjx9$;OE()n{8N_QMQGAEiI46OPXeVfO*3pS7)50a(u zhn@uIk<6CnrXL!dulhENf26etd*w)VfF0HB_Oto2b1z3ijVWu{0B$;88yPOs9bU;b zb$|galh-0>*}6JK&|%^d?jt?-sI-zLSX8m2dO|nvWD(5vw508( zlrg5|W7o6B35omzpkymHI_-I`T~2z!(!9iUGWR99Xs?*gQF;z?-)fXj*jaT3byT&y 
zHbyR$`U$!#wE)$k%t@`(kTou%jzTklX2v&`Md87|!Ka=u9c0GC$F#eP#m**b>vb&iq^8&hw6SACN`r6g)Pf;1UlTuEO|G zTa|O_SV?!k!K#|`pLmpVS8vyXwP(-7nG*xOaz2c44%bvzQ+jSkU@i1`=fOSKU^p0v zJ~_;C=;rNaIi$h5iyAoYH?&ZeXPDlvneZXSJ8kVmv(xrnmLu!O?zFXKsyBcH8O!S? zsMbnUfT5&}04}RZUIqeL#k&LD*99qEo9(me_yXW%jH%1Qe*8n631w_u*1}&ae{D|E zRtfyExp+UlWK*LdbW)$KbKGl!I_IbjhzFV|y^JjddzplTdDfg-XCA}wWFgnHdKl{d_kvgHcKu-V%x5^X&A_z9%mpVtl)>U;B$IA3!*mgi%5=6W{WMN^h^dcHTIG4e zzca~vCO@P^xcAMi|IK+5GbM20^gh@vPAHU5zWUkT;Wrh(2=nz25d7jgSX0zdz3 z4W22CDrR^hOsN??Y477)jvQ#_y+6`4(6uX?JUEt|%RrqQl(q=J*jS&=4ax9?cSwT` zo%{0DsZw>ia9l7qB+{Q%*NjGhJn|4z1kuU8SOc+P2 z=aLH>8Kq^@)G}_dF--;2s%IA>?TD45!W&I z{7k!KE&iW7FXWmZt~+vSx+cleol{8e;PhScb31^SHg^za*7fMe`|m4Atz;Z238RgC z*QBDDCpMPmHK2~!Z?+yYd=KX7@CK`!$3Ot91P+QbQv`w&0#m{p1Hrl2CprD>%FL4Y zm+;UffyIBUlx^X5C*3a+#?woj+nr<}x-8N30`9%E#l81cG2&*-7;hMmaAmx6|7N>2 zKEXcI8lSvqj8E>7EsPkY@w6eGl=MrLQlB1)q|f zYp*~HD{bl;DKea<`_ZO)W;=##;QOOJ&s6b-MHVL^TJR!d{4A8Bfb1h9raIX16*-UD z?0w^Z)VMd1ZW}Ip?nZP&&nJJ<4|FVf0v8=NUS6}LcCB$%z|}B-tOOlqyCBeuX(@+0 zIJ|I84eq0?*o%>FR_wjmZhJ8;9bsnC*M6VpH^UFZZ`>PYcV$k$>xBab$v&0LNq0%a zaVrGtG6>jVeZrlT3_81IK^9!+AL-MGVKakJ!#`q0Z3wf=R}Gx*;c+u=klo>8(^I%fi>~qGGsQ z(HxU-*9e37)0L+4qa0EzZ_rMmTeX?TsG@QzLV{WP(fWgig zPUFWGeRSzZ68OW?{nENBcpj$nqfozb((;#k)pL)=d`!6FX^M>u|+Ah7IMqu(2T5M~1-A@G)`Rm4+L>9L(hw%V)K(3qi|nhX>fM zV`FNPuAjyW%#b!c-T_q+x#}P5I4ErPPep5pW7pyz!+j~{8?58zAp()1B2TbH<7kNc z2hLbnA1fECRvbrxU8sTD?w`eGieY}K89VTn*qZ4)!%1B)L^jAIX}74snykM{`^eVF z(R+7*@6xMU^R)6&aGJ&MgM3}9=7g;ozAyy z4FzC=_L7;o*iHy`ufQzv;7xCekzW8W(?-M9+Guz#aOa%knaQRF?(7fTJu<0hbj4z5 zbPkhd$^JRVPv6J3=rj`Cqoll_z`Hr>HGLoWL^zyhw^+|(DvhzjVD?y{fQ!CxA zbB-@{NA`M#*IivVeP6TtXEteT3JGsn9x?ZTj4WdQYdeT>qwR!G!hub+J?>uIkp~?v z0KHzI&F;S8)_Cg54jmGzQ|$`<&n#d~JTF^8!_~X!^NUN;(RP}|S|Q75zEP0xN0qN6 z+B&S*j%5KOfoW+yCzn4MO9oih9Pd7$vspc_uUK5Me{RGF6QsIx070C+uQ!Tzt>*nS=ini3sQerso#5hjnk=j_FragL$e~~6p56Re#_X> zdid(>dbpiE<5xMybf?+RUpe6K=QA3giWp9VE!P+i0BfjYwzXevI16Jui!l_2l zSi3!u-(#;@;3)p*_v(J!teByLu$6^p$a|h^(*oG{oPD!7zGKYb_Tmq>xzBEnhq zU+zk2fV4&Z1TrQfSF=R7{s&UzX16%H##c8cozF_ZBx_T z)vpTs4$Et>g1%>25PQ!UrQiUy>(Z@u)y;B$hj->|#EY@MfokeVf(1pPZ=hFTp#r}) zG7fz2hsJ{Y&O83?bV2U${yJ-1io1$Z$QQ{^`~r@lfQXj)0Wb9FaVeJ%z>12q-gYL?2>NG!2xw^!br zbvrV5d|_WID2%I@7Rcp@nzln7DVMzqgMn2t7!W~24-=e$Nc%$!~cY@O+CNSVvpKP=-J)6`di2P&#uqFxg{(@lS zmxji_6jsy6lsO|`=84UHSJy#-AN#YavBUeWA-gfq6APemEM&kA8=vXZwYi4m`yAj4 zIVgG#M2-RYOc7?P>T}3AwBM(;v7NrdyUPrnF*R77&pPYc=qrD+_1*g^jTpqlfcL^D z$2IiKu?eXS7tq{bkmgPqkgOXM8o@et)ioW1KTxERsp} zO1GP5R=XV>XsBEAtZL!bJguBX53a=wP3tFfJoigi%LDQQT=0#%I6dMAfjc)^4vBjcnMAy8qlS%+P6# zjpOS7?V^OhrIe7)Giu2@xy2-wN#o9D(Q4aQGfe5HGne_p3i~FLiS;Q|FpBSnAPOeO z_tOj6$>ZMp%;e!uT0Qb#7x}#LKa<-e2rtM<|EZtW?!kfPrcjV|{~bS~cGLFYSKj&; zq~}|Rcd&+SGtb}Dw(IFu{L4RK6D?ONz1paec}{Y{#<=^D#}?teC8b5zR!xr$%Y(5% zGgk;i*WyZG9e?Zj+ko)C)|ht*EC+*XSUi^PC8?tM2{-ql>2^595b`cH^kME^vNt!2 z#%8=vl@W5vY{x4>tN3Gs9jjQ{dN+wY%wT0gKk#V97F3+J>v8(fl=fzmdb6mst)X0L zCu$PXg!kAp%3Kw!Go!XB$u|m;_>K8i|Do9=U2st(h-l+)wYd{m(^jdCkLK*>B?qcR zq#(y^B^Kr?@$XQfnRivAAAaHhjVLWj@7Mm|1C{m23AqMeqv^M$gnya=LHZaJpDZH% zJ1AC-Zaol=uFjW|L+j_H!82&xiHX*@o|svbL;{zb0hiH@H{Wm6i6-$3Qy%8ID*QB| z!$co=Ng-;NFbvYx z9jJg`Vq>|Zl_HVI`+$d04Z1C{Z+q6DIWV6Kc)4phc>i;_?V9s#JSX z0+sQb$FuEtz>ro^l2eP4P)238jK%|%QC^flWwhtYxa2@(>^y25u7cSz9x)9sVsz_G z0y8J)`eSOYj0p!CB3BkA$?=@p1JUB?dFUdZa~q^^W~v-}9UWlk>L1H+K6Iek{;fvM z8(Z5`9?N3=+>ak%EZkd^g!DecxeXSXWtW#8sI=Ajq|3*$%fHey)Au39N3&vl>znz^ zjy+Ijr@dolw~!gjw`kLCMy^=(pPZPLP!@N)Hp=HzNWxC#i~0P2bD&;%^vE1)iYogl z@<%-Zn>WWNQGeG+{mo{mx!YI!5xmE1>oe6~`?UdGxSj)@r8*A6CLX6cfIi$mMUGQ@ 
z8J6oH_914YGgEyfiF7K2#VJaAwC4-fIqsnM!ym#7%THjFhj(}C8vxb5uQp0UD*=cTq{AB^GYaIWn_PiliP`kT#!2xV)pN)ObtrmUn7NoD59-z z&VXKERO-^{&!;D`#?QEq++!Zw+2bsYG=4*CyCuuB_Dj9H zJHOy=j<~Du6N`LpDAK#Rt0nS&wuN`@UHQxc{!Qck1Jn68uy?}BXY^7w=nf2cO{VBYH@LJQW3Fxr zMA0{>HESGi{PP?wyK@yG@zu8#(jQY`p_*1RztLBEoU4rHM0bUh&!Q zck>SPwY$s$%51ebl1w92C#o4_sR*bXny3K~sbARLHT`@?{g^Q@1!6M57@G6mS{N9k?ON3Tna^|CYit1&CMFW3qg>yo#B$tO7!xVg5QhGl&U=(ui2mRwO`7VO{a z`l_QKC9|<(^~QxK$8(!H_VhUW0tE-iH&H~h`*Vxk{}6V$UzHkgC5omi4%DAY$Ujh@ zC`M`&$y6?v5^}k;&}ugsw{`<-2AE{18?4qT6Gl_9Dl6K!=I13t+9o`erOZEg!i0x$!?xG|1Syiujf+lPh$M|y*y2enX z_P|7yb|)q7o|^e0*X3nmjfN-#tM37AQTK;JTg?5LMcZ4btK=j%bwh18pEK2O5+RCI z$eH#o$YaB$7apqX$eH)T0Kwie?XOQ(!eB`zV6`=ZnMv-PxQNdt(_`d`CLV4Yg%Rt>^~u?*9#INAnn zX~U~l*=Xi|7blE2D&3-oWvo>eOP-$BCtX}nvr^S8+m~3ST2~t8qE)wPY{+~v%z9VMnE=Ck z`s11sjLJU5)*1F4I;nU+2kWo~X#Bood?4wj*uLceDJoVKN{Cx7qxY6W~(WQSrt znX34)rrhjC11(-ARry*~hCl~%Hj^NWRrnu|HusDw9<4Az9xavyY7ZqGvF^s`HXz$v(=BW*-cZ|5iE_~TyB=lF+)oe5TiKA}S8mzEW4v+5Rc!U?Iqz#5L3GZwAaXq4p#;IlSAgVt{HL%1`1^_viQb}ZJt3t#ool$+zN z-;y!x)UwhMfP_;mqYMV?)L zMvJo8`Zs06EX^#0$dY$uqN#eH!4~TM=c8b&z`siZsw=?Ooi;y%t+UB*tvNr73Cs2@ zt_{K`5O8CXiHUp6uq;9z|i8Qrs$2`GO?CV&1V{t1l_KmdLQ|$Wb?>Z>X*=98#$?? z8RQpuBOHP@3y|Mvt7DM=$WINaY5+0F|FS|k+-wHD26?yex^j!qt3i%K!koYa{tx7T zsc1;c{K?z-1k41Fnb}c|)COVtquTdAR6#v_X^BxiiI6|4|5s!>3FKxG{pL@|!;@>^@IN?H&n-)$^wLVa)e!7I#5c|Sx45+k4c zaUo(}AoPD?ch6J%EM}fjCJ@qr#K;e9Q30^KlV^=(cQ;%~Pe$tyQZe>|2vitEeqSB?E{y5W4^7ER`H^7Fz@Y@ZJ9%7tGP+K)2F%M<$% zvQ=xCQ`&iHIy%@=ev7Q6_#bL`hqg7gY=ek!_==LjA)kb)Ns zX1*hDFx;Uz%59NB-Jw~DhA$CnT(~8I)otU#ejWqExFv`NGv9H5jefj8`tfb@24EyG zsz+Nc65lYJ-uU-Gf*|XlXq|wJxFtD;Duo?iP{e)7dbMnsaSEun4tdLyu*EIS&9xG@+VoP% z3+vM6TCF;D>6L>)8z&K(Yio%>{-P?pFab47Z(&xZHlI2-+D$mecAH%xqI$}J8o-zt z{q+7!#IC&*&QzZ#o{=9k{j`+!nL%>_`-E%Vg?(}N$_>NiszLAL%Px+)6U~JoJG=}2 zsD9y6+xI8jLrt9`$w;$23?GVXX&>lOPFSERILi;#-OFa!^k);v4>mV^DVPW}zXi?u z#F+U`8>A6WesK7GG8>I(?NtKE{_!B6kiID4XhQhISFE&W2F}VY8c%s?&)gHKv8R8A z-{tKg3)g~$OXopW3r2v!=g6+EgRm72tlm#eIK&ojG%M%8>VCq_?yn@w#Iyd`{&bt- z_DGoN<8!9^St7F1er(!UMFr$%6%|G4|mFj_RI94VLAG4=W=p{EZW5tfeuY z*^d8|de%&-GGEM8FNc5&diKvYL({XX$dFIZcOI3s(FM(@dwc_3b%V5c=-7;axEQOm zETco#f2YM-0e^E+SJdu>yNOphZpGQnS#E~O?{PPu%M!aEHRVz?ZIIOp)@Ct|r6r@^ zaZ-yp(Sm#Qtqrpo`)5|hIsKCPMmrS}ANv<|F0PVsRsGs-^BJei^1Ki|1|o~}q$?P1 zuknY76`j;DyR`FiUhakO;JqpOBmQpZ?`i)2!rzMw)*ry}5KI=_MflNl^scjDb?N92 z!~oOL9b$Z$Z*)_7&+=c0!96X0*0cQf5;3_K#nr^wSJJ0(fL028zL?)(P1cI()=AQyx`id zAiPz#GKzXL%eYu{5*ahLynFwkgMOT5Y)hjJTxX~geuom9V1a}2&Y8H6Vl2+r z`wxDdABbNB)kg}Qu|V@H;6C#WIsA$zKg2-1COZ)GAo~hHPK)TqQriiP$bd#<^Ab9J zA13hu>~28i%ji!Im`|{=Ci5jKeqn^I< z>^L-w4-{a|m<7lfrHLlf8eN?L-TcwmHOBBNv@@_~7_^uhGSwJtD9X83;5FHKHf`qO z0S-fq@)&WNUh*#hO3Q&pM_GUI!R*{ zH38n~&JM z%So+zB{>d>o;qxu&iNn5#*GEBlY!=gp643O2sCRl_bf6zsCc`9t%kMa+`x46fS&kI zyag&SdAuWjpMUT8MP#K>K7Jb%GWe=&-xlHDUaht67!^p|YSQ@3bp{02o0yDWmOHU>R`W_ftTzUQj&}6&- z7=Hl9Gr&2Y5$~_p6%46P4$z_IIQSTOk!N>?+utLb(BH81^d%5uyo}U%FO})-XQE*_ z>I~>$QSM$`rs2*Ob%J#otL-K7b|3KmXDPzVdvp)R{Ctb>B3A-E#!YCBABWG$&Q(1F z+*eU4ut6*No+y5!|F`f_G|~S544?W82{(s&$GkkyB_j;HsEAtJNfUF z61MnIi>($;vj*63q~UPS4`~%N$8q1()orpL(j{A|6(dG1?oLI}Lf^it6R>T%EXD4I zYtCSb7{*Z_H|(ss*^1m6llu3JER2CF^-A#`B0(m(DZ?j~5HMVmq81cGx$67+E2 z1sF?Y(CZo$v2xX_CxFY@dQXFd3|2hs^t??ozHLg`I%4fNiMn=LAgJYvvfvP?$-<psHo|wK<^gFStF`V5>dNEpbjV~Z1kZyZ7A~2f$0e< z$kWpZwL%7wQq*+eSbEwQxN#R4$kI~?;t)M8_US1kD9pM$%M1??@l*%B~a9% z@utc^kK|H1i7Km%+3ZR=$)-{<#Pmk(bB6TJvJTc08J<+stIR~?+?vTvW zIAQT&bCXj|+RQfl_1gcQn>==hs(8~y8hzaM6u1E&*0kmR(TKfqYualznx#Kv1Rhx6 zF9ZdT#s=OfV~2D4HkQ35iPUE^v7K+?!05XFTYF+n4_>>AAGK$*ZO@~Ad*NX{)n0uFxXk~sME6_Vw8`b 
z$i46eYPNhdF%3?zABHJHG9`5_n0bou^L4jVD5s&()--42LC~g0q%Ea&JJ;J-r^$-N;K)A)cb>XRyWv6etP)g znEM3&VBok=&TwT88=b+`{hawqCinzh4G%v__@kx(`jo#i4;+qE2@h}j$Ig6_danA)si^ZEOX(jZg)i*Ja~ zuM0`U(e*9}RNOW~X>~H!^=7=RQHL!-Sw_<{wWl(2O{9Gl8IiR(sR$I+-+8k8NP#~e zu4{4kqK!Bkvv>EANV1B4BY|L>yhYDwXwe*$71*n#o3{=zI4EZx`F{qZM_OiXJuPDe zIBUeMOwOfqRq333>Cb{LztsUe46%{c3%hHtk6&&_#bYx>_-Sfx}}>R z_Eu?Rr{wB`19~qnJ_QAup`Y1I%v{#u!m5nOnP?hVI%QG7Nu56eh=pFX+i7}qF&EPy zatU?<4%RA{9b0(`J|;6i4SRK}Ykl}nFYbiC6v z)LDgC5C|U3K>;HIkcVW6ru3qrc+>VJ&52Y5=>$S&)RE9=H4cd*Q!G7dQ7w#dB-XSW zKSd^!LjqIEBuK=1_Hv`4hkE9vMNDhkyQY(0=QVz56A;S)3`{wgM5?q6OZqT1=nwAY zm?IO=0J2LOf5a3LZ+fuv&_Iyfg9lMUX<*7Bc#&ZMZyw3zwzrB@fnX&td?)ArAfN~B zebq=aMk<8>u4SMknK)Q4#FC#u=GhX8_w@(md7QEb|}_$YmH z5nW51#^9numL5c=> zhCk9jlA!Bm#ZqUD7RnQjD}`I!Ju==jh4d$gS2<0Ob)pVCgbS7&oIz>c>-lSCf{xos z9_qNQol8|v6$Lr&;ZE`prb$|5iZfv|I}%6Pd*|AYV-nhmD=zX^$HC6<$uK3BJjh8N z9)rt38~MEU9$|(ZSm3j1K`c3CZ>fp&mlEnU4M;oc5a#ubP#s zG~7kUv(=Fa);QDXv%i{-OwBOs>sTs08ZT21`?F%*byvxGixFPw@;{ez2e-cxXLHRd zLzEn3l0`(D=)H1~#>Vz}sq)jDX?QIEgZJNxNiD<&{nKm*LIe*dQc*(Ny+OVpM4BbR zL*CtljEv2_@rIsQC_LodN^Bx^U1hB4g+R|gGw^cb?=zz@E~H_??f$BrZ#yUAKx5Hx zIrGZCeTHmlN9ZE-etJK4Qc)pjqMOG?s@soE#9iT_^JCUb=UUW9JUyS!Nv-Qh0PFWbS zVQk_Pxq4Tl0Ed);Sdy)pLsVgn8&z?Y#O*lq`5ILI5C==H7~5T=Yq4O5uEum16~6Yp zRd|8v>qJRmb34x}s4;b=(Tv0KH6F#EN6!n4-B-Cq8&fpldqq>Ac3Y7AeOj!&`}3PJ{h0Vhk8i>-T;Z){IDlYosT;KB6KMO@Gz~ z6Fb6H_>hzzNkVgA7|3m=66ChG)-ZHfN_nl5M1%#6$*S7DlT9=`-eM8lDb9(|Wj>e4 zZX4xoc{GW`*~t|~`v9sBqv<_}i)6$W5De=AH~t8DGoEU}VWI9JW#-f>$4N@)eIv#N zyD|6J!xE|T!_e{{GruKVG+Yk4hPjnX4i_5uGwmRCrTIt(NRVV4SfkZ1WdlM@l>?I; zG-z^glml1#7*z&Wl1po`rSiUnq-i<`Q7GFBSv2v9ddIRAQzwuvXzH!J-x)_H=bW@R zPTwJ+FQhBX34p55 zTcYEsSuDjz6Zr7u)<EGW6o)qQu4BI zcKpO$2(_Q_LlN1jGjQW{MvkUWGjKGAB1$n9Ml^WFPNwAnrcf9JS63gixGIsv`h@Oc zQbYoudT6ZiX!hrl<%c5Tn3Bq3s4gY~7EeMB3=M`ZS`2`vB$|$1Toq3qMh1K_aZ`^> z_!%6@hk^{^$)m|2=59-{kx2&Ap0Z=W)Uq^zSfdJ={197^(Z~{Gs43LP3;-5_vM-pD z5Mh;?I>nO*C6c6@kdTxdOf*eVZE&wiTc{1yKPb^SW%1cmPtk_)#u`f(M-aObl6#5fSVEi-&{W8f5BBIQa#S_UW4v>-1OG9|8D&!x)e>&ELyvQ8%nznE1S2D3XnZza{)QCMc4SooEh3&!4k;$HkorimA9{!hl6-oEz7vMne40%!|a z!ya?#G*ie^#ZeZrI6B7>-g&*?`xS&JSp>Qql$cxDz0A~n8#Vt3*NT0V^_!V}ss90` zwcXMJ7|W>S5lp^DX>f!xQD&eZn^D)_&**HMIF{rpor%hVsWfosS;}RXsm?CHI;u#4 zNKmNzxX-W7p$;=LvVhi4zD_DDoLlS_u!y+(1SUHh=*pJ^!7r5ab7^IuZp+J&!1>`| zpza~PI3c2siUdwt>;>u`R!oqHFIN$B&F+^2twV&q5{w2;nmrh(+iH^5zZ~Uqw<_Y! 
z+{Dj08~FL$I)1*`*Bm(MirNsrtjYPESxXK(1@0V6(x6kT_x5uAw$+&2d26ZExA0WO zF00dT>&g1dvh#lE4ybFrIDQA}x*hPS( zG($-m3X(i%lLRj{^#F9hrVD;ysuCkZ)X{dEn0HE2X~8cat0D`K*E*C#snS&i>K+xA zz@l&`(5DrCGMl6}k47M}X(_W6bWb^(et9$tF+&MyLF*s&GV13rtN;ye-R&nf9r_Fr zlsDzGQ~lKY1f~|ZzD2daV0$9a`Zj?}>#~)h^Ye93mG`TOC#xc$Wy)7+Z15}C9#SRK zil8!m(k99hLjgKe$oMAN^cMX8)TA8B{MJBHwy|f|sc8^`ZB|1fb&M*w55%B@F$e&k zIXbtk6IFecCI-J!U)-?fKpo`!zigh(rJ#AE(LmkoL2@zOwu=f7an1%+XPCFot>aU+ z?{-imql8ae`1Hj-y>VGt-I<%RwV+E4()_Me^GL(^0zi16Lk*$rhe#KD>N&79<*PFZ zUb7ojWC1@w-d^UolM|=trpyoZXz4q-gMi$7ho<*yA5{hx(*HSZCg@qGpL+cmL$`1w zbr#|{#;p2j;{;L(=}G8s-eTOEsg5#vkn<6ynD$i!dbTW6HNp!p-*-|!tqp16!f`K2 zDJ0JMYYS=<$gOzg7~i_E*P0tX#^%)}fvYQ?V=eh|U`1~~mE799kzb~#>aE&sPo)&} ze*7@E*|OTthCjS(e}*fBm8hHVQj$%HcEE_xbfpx-2@dU5>Z!{JO13Jm1szV$Z~F*w zr}}IHD7VAj)BE2>If=2Z^F@aV_SCC;S)3$0QO0H?Q(tePUCDE+W;*V-l$<-igNtT3 znavo|bx5XM`9=w)Ejkvp8Pj+Sg#;V&{iroZPguegy1%;)LnQAL^W~53>WcmUW?S2> zYDGIh%(n)!nVx1cHEn(m`F5IopPFa${h^<4zw-65*t2{Hml`@aCseIBU)J&~{9bj7 z9w`OAx!}{suL(ael*Mnf%_6tOA~(@?V9d5SJT3S-yyRi(uNcUev6S4E8MuRlXGAw*-d-xfHjSNUUd zDvR|gz3Ww2>RYBv(ORbZuh>AFt9Sz&NUWwGS}Mtww^rJKkPtf#udD|)(lh5;Xy7O z(8;gqjXY-bwcVeZUBg!yl(384by^R%BYqrOnt)jZiu6o%Dcwjb0=+MrwpSmN{oo0C z`tX?jP?P|>BCg@b=eQM*bf|PPE~(B8`))%e__|n zxa^8LFx8KJNbDLR9(G+ZR99MZys(jsZWIrPPe(UFh#Png>FjHCH#8?sy)1AZ>^$1d zUmt&K`CG@|dXdgD0kF}-?w{U5>F5Tvoii-`C1vJ0NKM2Tm;~U~GhosR#cK`kwljuo z64r8i6CNTwM0gM3J%mRHv*Ga9(yspm5p^E~Evbr^z=sO13lWQwoeVCR)viTOeqZnC%=t{dTxsaEIAvqXnBlq8f^Gx-2jX*a# znomcxKw1I$x!!fcCKWYOM~L*iENsYBf17k(BR)~$aJ2Spf)Fk7UgkOy#WKm-k`7kv zlf;5XbEstH92I)AJ&m<;i^kSMmiySRw^lOZHms9j!OG{_Ab5_vdV!Oc?PmQF9^}W_ z#81lxep=V@)22nvhGmjq+={t}Q(favZ-^YH?@IRud5UK+V~PXn%1iI_pV4WpbT5@M z)IbB*iuPJ~p_7d?6l*rgHl>GZzQ9DEWZ-+MJ7PJ8rh6sA@t?$D|1Eh-h&MCv+n|68as$7EP-}MJ z(~5=N-jG2y*I36>RFF#R3tXFIqPM0<>D!@thcH2jveVByjdk<_em^5|#JD5l%a_YXvRHdm6{RuX43;;xOS?^pVV`BC=6#F(-k-%Q~5 zey#)d!&UtzLjnTdPi#SEO3ylB#aVmEp&;LEmdcHS$ktTU&vICc`xR%6L{`2bbOu)8 zLN()*8L1;Pu<~^qxfdP2$hd=!^2t~SunZZ0=NxtX{uJy$wynM_fl|j;V7j^G)XQTQC6zFJ9!S7 zXf|3 z*7CgQCX4-bbp3k%ByVlBE92-O??aHhcRcIj=q6R3nEp)lM{V9EChxNivZLJw#oiqc z=onWzTCWu-&?2d2{59|w=C75%Hpi{FmOOcdSXz{CI(n`h4gNI->8J)C(HgO8$6Lp$ z>XRpvOjp~a7Sv~|`wSjaLx4!6zcfs*r_G`Jdq|pyetyo` z$j|52^Yg{E{9N9RV!_OxR?(f>u;~pmf7U}Rq6cz^k)ZL`HAEdjC%KFbgSHTDsHvh2 zX2#UTy75%Sd^O6g?mteC$>&q$O!Xn)ibJK%D9>ktYB3kq)YUgzm{SeRUx0x?XL@Ts zVF>qh(^w0**aB7=z- zQpYEyo?tcaRzIk>oXJ9IS9NpaiP)PlLh4U8T>t~X`U<1^rk$Nf^)#K_HLaVPIOh(rm`t=OrT=yPE1eRif%ay!#Vih)yjRBj!y z!;_kmw`nelCodj}rxLJxhRV0pHv0NkxaZ1LyH_bt{mRia31d5M2kqzEE#}*ftMzTc zh?A`TJ>NxX4L+rz&Y!%@0Bt4%h_%7{k48gcUnN#l4Mg?#m8&~>z5)EMDe1T$6QFJ1 z(_n0UmN2 zz+<<4?6D8eK1S?g)IQA0FWIY`z)iiUoTh`iPc%2uy1<$w`O01q)kekbHL7aWkdsU) z-*m6C6}I75R3zE^Ea7CuU&)p2+9BH+Y6~qsnaE7_ zR$9zGw9K!8RXUGArbcYHYQA=Iqo1KMlwM?EUUhgQB_QvQr({2`(r5RVy0YAHZ&5PL z!a5I6m+1rJvz#HR*x_0~6RTD#t8zI|@^i-}kGsEB#xWN= zls7wP`1Uf~43X+xM!e21O#bwdYJK;WI1cQ+mdFy2(pBa3q+thdc0G(QWx41&J%Nq* z!2StyQ&WEl3|Xefs2vmTiqY7#bfMaD!DVR1xN*b#%_kX|r+$`lARkINl=Nn7quELg z65)L_CiMNDSM`%y@ikzRy-yH!D^{8JJ-nxTpY8cJn2dYt2(iZE*EYF{=w$BR30H|Gp`5$ zK91$l#kiusIE<*|sBGR_`D^E|Ba0GSpP{Ph8Fk?H7E zuW2^lw0O|G22QQ6Vp((Y;%=~dKg}{&MSKRWOVAM@cU@{~5u^r)LKlb7N&aV_(k6o5 zvwwz6qy3uqOeb1$chRD7^_lhv4CN>Gh~(>~IHfzdgL=qAExSloea~x8>!J@eyw^psj11{e*4(XBi*}& zUxNY`LXcUn5&u(@U%p=OC|SMq~|dUovmU@>XNs_V9Z+f9kScEoyj-*oQJr_r5~F(uZN< zeAW_&Qi&9yLVu=8OtS_)$nR9*O@>T$$}nUfbq1i8Un)-)3iDqOeQKv$Q9`%5gX(`L z@L{&Hr}VDngFE@TDF}h*ZBy(vVpHEF7KWno`Iqj!*}Qe1?$4rR*e-}@{SkaN)Yt&| zh52hqN1IKVDO+ZKAwomiNCZr)2GWs!%EE!v&c{B{??oEFSEQp*XZPunl-0ZR&PCEg zN4wL#-2{bTHO)TO+Q&LQ80I`w4XVzsa-r($dEv2vN4obn)J6PWEQ(k0oxeel42<4e 
z2?KWrkF*)cM*~Mm;#NF1+Q%mQ5Mv^8hzD8S!6V(PPb5|;y!q%*DFF5xh!DR=0g)4m z>xl3^H%n<|_-WA3`EN96rvFMA&Ag_2hsgF=lS5>aK|w0Dn<(!{lgG2X3S~k%Ps;U( zA+$%4kf05Tm95dvfNie$d$JQ^Sk zHuQuIg$Yr4$3nL7U=HNhC^k|R+YJ;-19dX9v{`%pKla`|uBs~iA72MJh`{lHaTL7d z(V#HV5U?;a4+?rHP)4z^EI3Auz ztgx%y>GG+gbRoNFW$}Bz*IJviIb!ZtJnMO$^{i)I_u6Yq?O?Bq ztz;ORn}f?$6Xd&nBiZ_FIWqVk6*^cRP;E<-fxkl_x+^qhcyHH*p?8#f5?&EG;nUUH zay|guKNZHITFE15hofW@3qqLjEOwq8p%h8zzd*?C8=DB3pqQZq!I3&X=bMXY^KiY+ z#*F4ec%Z)tk1>xy8hC0V2XAi!d9jSK&XqCP%g6JJwxZ;q1nU|G z;u}?HZDkeoS!YoaU5ioJDL`a3Hdn^}UJV4K{6=qvLX&nS?HNmVKI?s;k80_&-YXsJ zP;p^V%)&?nV1&Nk)YILJxqY8wKGeexU-_(ea;`tg+D5=lh%@%vuXl4sYT#1VtVWLFmkH?T_;0DKEBj*=oQDa`id7$*XnRKI+Ej2s82ul(`1VlM3Zy zi8A&<1Vg9o1VKYfAGqJNc>fW;P`c#Ci!U08*?q zWm2r~8Rv7fXp~d%bmtDZ1JNB^)R}gLivC(hGh-VK3WrN2h5ZU;s|jV5fs(A4+@(;? zHlbW#pwuXoQ4(eB9}$ea-%Jq9I~G>@^BZp@>>zuewVbFjzpd8G;C8LUr{BKmG64Cm>@^MW9$-yv46 zcX@`eG5aU6=|4z?M^0e&v>u#{T?g9D2yc^=F|E%TQa89fgx}F*!PUJ zl0zBQj-rv_n_6Y=HtT6XU{ixZDt=TZivkXO3mB0j`XlkiWEnRGloZVP5HC1je=nEA zPkX%s?FUYZ$9~xe8Ezo8>8ETR(#cV<_CwF`$8-s7A#|Ti=ng#+=m3%L#fzz+D-vbPW}hSx}ja{P-99I*_?Ou661 z4%7mlBrW?-v{eWL2lki%Wd!PR8GN!g#Ufl${-pP~{?mFF;|p4!tR@=IY3Al!4Cm3X z8YJ4->p~wDQD}9z39Ji>j$7I-eg?=W8 zKATsPxCOtgT-dti=d&I+VlI&}Sr8VL%nglnBNKsp|0Z`a-9-;~*=Xd+5_t%au_}v| z*ypWOV@^jlyF(~50!e?HP513|%Z(ksf!t(@B|piOOMbT_U7urxVv2|EZAZw|j-e)c z?0?gPx2(yO$kh`0Tp;K8-u2nyzftu*d$qS}Bi;h`4sDqkfv~>D3JRLwYPtHg2(fwr zMl7~(BVM!E^PX$9y^Wa$g-~-twABrfm<(WX&K51HCsP#Lqrk#ZatS``BXG4~eb<6- zT(?^P2A^6_tG@-iBCS)d=2#hrWfZ7|e8{V;wiEVCGf{v;QuqQ2$Wf;rZnmx$NYmQ( z3ZizeCm#NxHBdx?ZW+pCB{_h>D#!(65vWCR*c~!rkcRe zWi&>Z<9p>p$IsVLKs=9Qlj2g1QFUO$)}Q*QE0^&!01-ah2jMKTy(JI`xEx^J2i873 z$SHINzQI%oZSpnFLf?x9EuZZ{ndJ9Kl7koO(VBVKUc2l!bV^~T-3aytZIjypjQ8_! zvR+MZ1cWUNKa7(Yc|gGJIV=>s=E>r7Pse*xY!EYz2GP?~W^NFx1AA~fw`A5c5B&lu zEK(FuGk8#eqEOI^cRy8=>ggFdkiv}yg=ef+Ko(2i?ituXZoj@%4$QQ9%KitP6;~9O zxP67``IBA@_B^Vai7t?jiwjBZZz6MGgEszs;HtY>6S$Qr@t^o^+ z=qW>EdIvDkQl-LqwQ{C6&$Pf;gg2t4&>1G%U%46x88uS{xY7s_(iM=cj<3+gS8&@nwULjm z6$R_r4CS^gpY1f6{8iWzb}Aq`N>f%>2G}isp>`x1yA5e`v5-{YlMhg^MS+Mu z+naq7xdSG zEUO-`8FEjM;#^&f_VApy`uYMpUT5*%)z0*CZ>f~Pv{teSF)Hj%xZ)&U9CYA>OO|1x z(wtLjE*ZbXMlIP$(7HYqA&}yih7`9mF$DSjPgLM35M*~=7v@O-Z1w@z?gP9nf!t3r z9s!b96T(}l(Uv2obF`oBIK_^2Of$DA^P=;nZkDF7PjMJ9d5}pPr2tV*?kQ+~f zh|=Qk--JG|2rV?XEdSHV0SitEy&MyaC^kxyUT}B(<9>OasrmrCJcSqb)l{P`VIj^K z!Ncrl20Ua1r57srA>o0G700Ysl);dm{hk(o6uJ19B`_JEn!}X&;~!9MvE^K?6l?|t z>S3SbJ=h602y`MANWVA=pi$NvC|4_#X^itZZXl*cS!SRNRVcqyRQ{+@xZYrgJMf3U zvt66nqM{3Q^e99tzP9lo6!`it5^J);`rL%|k_@u_fJu1Z1%>sZ!m8F-T)TJ$(K1gv zpp|PL#H3*8)_j^*kO}#WOpQ3%qG+yVFSbM<|R3c+7aW@h$iCnKwfy?^I-Y z5gcf{t_A@5Dxl|dpqQcJFOh<6p|y;0t=4ja=`$Y!@M7rTNP+_??_sddmMK%RG3awV zCV9==AY&X~BF0dv%@F4Sh4L!nd}DvY2nS0c-V5w;a}qIWvBzDQDo?cId3?^myv5ve zjOV9S_J;jYRc`yWevUNo@*|!zVIr=M>%|`S)fa+J>&yp9&Sz_vRQGu8d@18ArMH`9 zl(mwgfy6lE<8Cb<)%CoV5AFNRRwrDC)e1}BQjLub2Eiwv2 zZm!fcVH16s+(a+e->{qaf#f0#g|IZkiih_4-cJDJ8caKM=(9F4d7ER^V?XxcrD;5a zaB``-<$ViB%0+#8M{}#SNU~Y?Y*S7+1j59??OXS3_Uv$sE3x2gy9)~t48|)@6DXI@ z=0k|2!Gq~gR+Fd~=+T97OBr`~m7R%j*-@#zT5wk5#>d*61xSii-_BkIY~8ZAWv{`W6DtgG z0mvSxRd&hnHK{;10oJj}Uw2tPYbF@uNm8>!xr@UZj$UlFa`Ynm=+?V9T2WhuCoyk6 zeoM?+FC|u-+H`ukJ4k86;CdG&QkzXxwa9>tp>qEb$vH2B=K36;BbUh9gR7Ko%Y4=Z zQg6FmAhA8w_Aw$b$!-RBJ{}~v7%$yfKSwy949WmM9~aVg~C?Gy353jP-9NA)QKy^BE4KC!UbJ~3}&pmi5_ zqFa}daGP~B1!XSKVUtgG=1cfZr_rR+?2rLi$@9o`pX1MY6nu|QKET7nSdn$tB^P24!+akda`$AOi`O0TOay7*GGq9nQW@hr4E_@_4j{+iB{Vjj471Re5Y&TdOvCMow2M*0St$^xTo~Ts7oeMkhFzuv1z?J81Q_>Yzc7en?@1R&WcV)2?z|{f zy63nloYhI*ax!MS3=>eG_z`-Ka>_$-^I#Y9K`VB($Xzd3I+8nIux`Z$K|X7iu zEQ={2Tm(=cUfK$Pjcf}Vaj2Co>?eA2=GheMd(Saad&bKEmeO7t?eBn1A0?`pBs(kajT4&b)z 
za+}^(#rvW>Ke!ApO+skFfj2L0ANSe=UV#9c{DSCk^Q3 zfMW;W{Iora6$JL9TX9H8q8D0L2L{Js4lcXNe(HSTE8oN}zvaki`?^VC7?`yjDYma~ zgnT^8&jTJyE(X!oN0OVt+>LKvYyjkQ(h@G`$aQr$ziH|Uq;LPRFYxQgpZWrio`au% z;VrfMQ6eQyvnr#q^XFtHSY%k15 ze&2x7_)!TQap(990r|eYfxqtm5&A>vH|0D9onw8<>!7Yy6<8#}ZEIl!KnSDKx zIZx64@EH}oUoG>qy!D@PL#p9_%l9-BMl)e_TS<72_3`T=^F`PkbRC@ z=O9GdW}9O!MpJ>`yiOZHTwwPKuE5c3+Z>MDh)Z16-s6NNPNC5l4Kr+zy6u4lq z%EfP_c+rF7V3#76LWvt6+F*&hecA#LgLjWvN>#1Z{Wi_grbKAHVi{ zEbR*W4XH5twclsJxEgJN891G~-wa6xUpIKqg$aYY*ab!L)SNi^lHq+AA+c(q%3uF3 z0IPi7JXV&NSzw4QNsl3aFiY!)Xwm}k>x#(`$OD4yYn88kto-E0`E@T&*6R< zH{W3T3il7KdEtGI`ys?OK>|$v9-bc{0It`uHZ!7Jif|oxxZ4W(-If>8bqZt!&Zp42 z9qx0S3v}#Zen+C5)0#I6>cRdo_@Sh2LcAE)nm5PYdd)d~j>C}M?X#wX9Evud#F66? zTF{Gea=6B)O9*mP7L*F=em3&ZtlI#>RPA>X_BkE_Nj^6NQ$#wUGO~rRLpb>TCt^`W zMzvgHACA&RDB3v+Azd5jnm+T&&(J156}aRZ9DK|E$5%Ly&ECN{N_l58i-FtkF=I=V zTkB~qX>4X_ueY^DC_2*p0==Ak%mxLdIv*_wkB(D4Q1 zKSCewt?HrVQ9L@_XTMPf)%-|Pb+7H?dK2D*Z(DGNd$%9;5XXPM=(f-83}ASHvg+hy zVHhU4XW&TPZg;EuNKXRJt%u_9MbAEO6c;1@#|}&_EDpg6$7!`(FmJsO&2J_@d=!TR zcsXQzA<&DNup0~Ptqn*?BvdMwTrb7|jJYh`8OV6?$3ES^IOVrgZigS+ijKsK!l2FAsRqVyU^+ZEqXy(Ug z8Kc-=euzYOekk>5v<@WFLXW)q!R~~UFMO^+Q7~o0_IQapA zwrW)OeH?-mx%VBECyn8ALY=EoL{Im`zye{8Pvhf4Pt%rN1#Rc8hhd^Adp&% z4}%k+@yVgLa(ofk^zmG4#KQ%?d8pkZ3up8;S_>xa=oyQTxdH}e$;V2FYnyMuA=Ef5 z*~jxhQxWX;*||1|b(xE?Hn`8-^1)F@X%oyP;L;bC_9 z{0bpxgYd=Fh%IbxO4!2SxWlETXiI-M7iAuJ`dM8IbbaTiA26_WqL;ddD--{r< zvfVAgVfUn7H(G3zm@EZ;=(i(}aTg1O+qCTrj^Nj_(Kx2E!XXoQ{30VA3-<@*AJt30#rHfu{A@ zkBRakCUqTfv4H-R%n;jsj6iANi!F}-mPp$}%4Cc@AcY5|z;sRP(|?~%oyL=QTg0ES zPINhww^~a9P%`Iph=q>9datKv;S6B=4Gk<``I#0}n{oLw-mu^J2!aZ5mNd#YJAYpQM;GQ#@}v1@?R&W=IzPP2 zJ>z8*FNQP7Gs^f?<_Awf^Hn#SKVu&&pC2ohGi7fz&CMQQMknfCXW&*0#bpscibU=< z4~qC>6!F|6u*UB{qTIF~fSw8LJ8;}}XP}HJe709*WG~QOW_(yCz8{C;kNCV`($ik& zeEr27>bNtoe56crk4!T2F9egmV~c_Z5$LNoPLREW{it25k_-tmqoXp4A<`jf^H zxStc`w)5VDfqnKNoUHGrWZuV=r!C?Q71e5ehG@znUVcVe#Ig1L14)YYy<56$?@JJ@ z@34pOU=N}79rp0Qt?y4WFGeeT!uq~G62hOLzLyinT;C5H^*ucBYlp1w(K0)%??*mR z_5GhtCbAuX*C^r7==Pm#2|qTCB|M(I+ag#Te`4}h>z@J4_5BV>_2=umEMM05y$O;W z>-)unqzl&fE~CDu6HS%wH@i&r9ped8dw%eJV9EL(kS^N+3I3li-@iscuoIN;TH+k1 zeE)cgEZ^ZWJ1k!T7u)j}INJ8vzU(G_)t-Mt_eq!U>zORy9P(D>`!^B8DeI+`DJ`f`ps9N!ASfv5W6&IrbA!z2(1o(nWvB;MF&62Owy7BEY{bf@Cu#LR9h zF#~up8=2>ezjBV?a}*Ju-w)uM^48h(K}t--Q-)1};_xJX2fvBKG0+iL{cQWTl+bDy?7Kit1-=Z_+8G>%Ae?)vaY5j}d>_G_=>cLJPG|8(7Od_$n| z-KURS1%ez!v>b8PUo2_#4xoWULh2yG!8ja1(l!4pdtDeDo%4Bi2ub18K}G8Io+N&# zpkT%huR|WD8-|m1fq+~@Be3y<{*~O|`OEGZwj>CDs1YZ2Z;z`wj2XhA#;`*>-P`xX z9e5pmIA)6dbHpAy)p{0Y(a;ngA3hhD$Ta|XK2aKSxBSrOjSTc*B)B(_gmL=b1DX8o zr_pAek%bgot+3!TM&9Uc80Coo7Q(TBhsAEH8T#E-(CWsHE*C~_MRAMpO5_e(vm zKa-2aBJjz^DQTCJmzwYMkYgMH90gAD>lQ_lS1(Qh#yN-FFT?{@X1wc7%AeFn!X8Dq z&v7s4;?0VlcY3gBv=m!|eV5{ega$n8moFp8XP^fj$D$~7fPBHGz{uldzObs&&o8~u z_gx{*g7zKK{7Skvl&nsA|{5%5&xGQz%7E90J{_?Uv zYP}SZ>-Rj4IwG(&b%XUn_yaGXYY0rmL=mSBM`ZYW5(3G}GZfD@lHlw)c#~0@y6j!e zGvkm{_};Lu-vZe3oyC6RgKz}KD+yCkL$Oq6JM=QbJ_HTy!Hk3dd-`9Nes2SKH9-|s zdxY5PBDEbY-Lo(&Yw7HBzuf80+`)DGVYz$)zgUvB*f4y;7q zEkSJD+UZ4kP;hBS3>w_3={bpw#c0LtkL4t|+kSx$_;kUE6WL;$@#@aA?$(54%n`ha zz6)XtW^AvI#tHD@+tElc3AS;@}X`Xo%Lc~pPcq4H1@vxho3zVSx0&p%VbIm$w zv;mHu-Ln4ro^@wB<{@Fr`?K6HJ+`W3e6I`)CM!3`Fo8%|w3!;lOr`Pm%Iw1i+LQ2Cf8x{Ei5e5ev-mpc42K(SHED zz_NfK;2`RAP#f(Tl=RkpN zNQk})fd|%r5ZeLNTeQjgouqV?SsxbH@r|%`XG08E#<#8wST_osi@=-q`)tp2sS301 zG{mmo(|Eyd>w^e_2U&J(K(B=S)Bps!Ap-J;mx2&?;0=W^Pay~*Yz4w@>y=0iYZN_E zu177B7YZD6=Dxsxao&bjwk2D5PvvxRgi`Qu`RJo43`?OtJUw_|GH6|4!o{aqI|B!0 zCb&Et6KDpbKplQ?n0KID!u^41fc(y)?{#1ZJOo8)wnBl>^J3S!ZyT+FA3!tnC2V!N z9DIE{dy-h*3S&m|)yYl)oN=In24Btpu)YuF;!~cgI6gtsv{5=UuR1~(OMzbDtXFW2|%+Ao*q6y~ly 
[GIT binary patch data (base85-encoded) for the rebuilt binaries continues here; it is not human-readable and has been collapsed in this listing. The diff of the regenerated Doxygen source page follows.]

vk_mem_alloc.h

-Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1618 /*
1619 Define this macro to 0/1 to disable/enable support for recording functionality,
1620 available through VmaAllocatorCreateInfo::pRecordSettings.
1621 */
1622 #ifndef VMA_RECORDING_ENABLED
1623  #ifdef _WIN32
1624  #define VMA_RECORDING_ENABLED 1
1625  #else
1626  #define VMA_RECORDING_ENABLED 0
1627  #endif
1628 #endif
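// [Editor's illustrative sketch - not part of the original file.] When
// VMA_RECORDING_ENABLED is 1, recording is enabled per allocator by pointing
// VmaAllocatorCreateInfo::pRecordSettings (both declared further below) at a filled
// VmaRecordSettings. The file path is an arbitrary example value.
//
//     VmaRecordSettings recordSettings = {};
//     recordSettings.pFilePath = "vma_recording.csv";
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     // ... fill physicalDevice, device, etc. ...
//     allocatorInfo.pRecordSettings = &recordSettings;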
1629 
1630 #ifndef NOMINMAX
1631  #define NOMINMAX // For windows.h
1632 #endif
1633 
1634 #ifndef VULKAN_H_
1635  #include <vulkan/vulkan.h>
1636 #endif
1637 
1638 #if VMA_RECORDING_ENABLED
1639  #include <windows.h>
1640 #endif
1641 
1642 #if !defined(VMA_DEDICATED_ALLOCATION)
1643  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1644  #define VMA_DEDICATED_ALLOCATION 1
1645  #else
1646  #define VMA_DEDICATED_ALLOCATION 0
1647  #endif
1648 #endif
1649 
1659 VK_DEFINE_HANDLE(VmaAllocator)
1660 
1661 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1663  VmaAllocator allocator,
1664  uint32_t memoryType,
1665  VkDeviceMemory memory,
1666  VkDeviceSize size);
1668 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1669  VmaAllocator allocator,
1670  uint32_t memoryType,
1671  VkDeviceMemory memory,
1672  VkDeviceSize size);
1673 
1687 
1717 
1720 typedef VkFlags VmaAllocatorCreateFlags;
1721 
1726 typedef struct VmaVulkanFunctions {
1727  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1728  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1729  PFN_vkAllocateMemory vkAllocateMemory;
1730  PFN_vkFreeMemory vkFreeMemory;
1731  PFN_vkMapMemory vkMapMemory;
1732  PFN_vkUnmapMemory vkUnmapMemory;
1733  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1734  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1735  PFN_vkBindBufferMemory vkBindBufferMemory;
1736  PFN_vkBindImageMemory vkBindImageMemory;
1737  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1738  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1739  PFN_vkCreateBuffer vkCreateBuffer;
1740  PFN_vkDestroyBuffer vkDestroyBuffer;
1741  PFN_vkCreateImage vkCreateImage;
1742  PFN_vkDestroyImage vkDestroyImage;
1743  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1744 #if VMA_DEDICATED_ALLOCATION
1745  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1746  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1747 #endif
1749 
1751 typedef enum VmaRecordFlagBits {
1758 
1761 typedef VkFlags VmaRecordFlags;
1762 
1764 typedef struct VmaRecordSettings
1765 {
1775  const char* pFilePath;
1777 
1780 {
1784 
1785  VkPhysicalDevice physicalDevice;
1787 
1788  VkDevice device;
1790 
1793 
1794  const VkAllocationCallbacks* pAllocationCallbacks;
1796 
1836  const VkDeviceSize* pHeapSizeLimit;
1857 
1859 VkResult vmaCreateAllocator(
1860  const VmaAllocatorCreateInfo* pCreateInfo,
1861  VmaAllocator* pAllocator);
1862 
1864 void vmaDestroyAllocator(
1865  VmaAllocator allocator);
1866 
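// [Editor's illustrative sketch - not part of the original file.] Minimal creation and
// destruction of an allocator using the declarations above; the VkPhysicalDevice and
// VkDevice are assumed to have been created by the application already.
static VkResult ExampleCreateAllocator(
    VkPhysicalDevice physicalDevice,
    VkDevice device,
    VmaAllocator* pOutAllocator)
{
    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    return vmaCreateAllocator(&allocatorInfo, pOutAllocator);
}
// When the application shuts down (after freeing all allocations):
//     vmaDestroyAllocator(allocator);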
1872  VmaAllocator allocator,
1873  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1874 
1880  VmaAllocator allocator,
1881  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1882 
1890  VmaAllocator allocator,
1891  uint32_t memoryTypeIndex,
1892  VkMemoryPropertyFlags* pFlags);
1893 
1903  VmaAllocator allocator,
1904  uint32_t frameIndex);
1905 
1908 typedef struct VmaStatInfo
1909 {
1911  uint32_t blockCount;
1917  VkDeviceSize usedBytes;
1919  VkDeviceSize unusedBytes;
1922 } VmaStatInfo;
1923 
1925 typedef struct VmaStats
1926 {
1927  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1928  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1930 } VmaStats;
1931 
1933 void vmaCalculateStats(
1934  VmaAllocator allocator,
1935  VmaStats* pStats);
1936 
1937 #define VMA_STATS_STRING_ENABLED 1
1938 
1939 #if VMA_STATS_STRING_ENABLED
1940 
1942 
1944 void vmaBuildStatsString(
1945  VmaAllocator allocator,
1946  char** ppStatsString,
1947  VkBool32 detailedMap);
1948 
1949 void vmaFreeStatsString(
1950  VmaAllocator allocator,
1951  char* pStatsString);
1952 
1953 #endif // #if VMA_STATS_STRING_ENABLED
1954 
1963 VK_DEFINE_HANDLE(VmaPool)
1964 
1965 typedef enum VmaMemoryUsage
1966 {
2015 } VmaMemoryUsage;
2016 
2031 
2086 
2102 
2112 
2119 
2123 
2125 {
2138  VkMemoryPropertyFlags requiredFlags;
2143  VkMemoryPropertyFlags preferredFlags;
2151  uint32_t memoryTypeBits;
2164  void* pUserData;
2166 
2183 VkResult vmaFindMemoryTypeIndex(
2184  VmaAllocator allocator,
2185  uint32_t memoryTypeBits,
2186  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2187  uint32_t* pMemoryTypeIndex);
2188 
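// [Editor's illustrative sketch - not part of the original file.] Choosing a memory type
// for a host-visible, host-coherent allocation. memoryTypeBits would normally come from
// the VkMemoryRequirements of a concrete buffer or image.
static VkResult ExampleFindHostVisibleMemoryType(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    uint32_t* pMemoryTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.requiredFlags =
        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    return vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, pMemoryTypeIndex);
}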
2202  VmaAllocator allocator,
2203  const VkBufferCreateInfo* pBufferCreateInfo,
2204  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2205  uint32_t* pMemoryTypeIndex);
2206 
2220  VmaAllocator allocator,
2221  const VkImageCreateInfo* pImageCreateInfo,
2222  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2223  uint32_t* pMemoryTypeIndex);
2224 
2245 
2262 
2273 
2279 
2282 typedef VkFlags VmaPoolCreateFlags;
2283 
2286 typedef struct VmaPoolCreateInfo {
2301  VkDeviceSize blockSize;
2330 
2333 typedef struct VmaPoolStats {
2336  VkDeviceSize size;
2339  VkDeviceSize unusedSize;
2352  VkDeviceSize unusedRangeSizeMax;
2355  size_t blockCount;
2356 } VmaPoolStats;
2357 
2364 VkResult vmaCreatePool(
2365  VmaAllocator allocator,
2366  const VmaPoolCreateInfo* pCreateInfo,
2367  VmaPool* pPool);
2368 
2371 void vmaDestroyPool(
2372  VmaAllocator allocator,
2373  VmaPool pool);
2374 
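// [Editor's illustrative sketch - not part of the original file.] Creating a custom pool.
// Only blockSize is visible in this listing; VmaPoolCreateInfo::memoryTypeIndex is assumed
// here as in the regular VMA API (e.g. obtained from vmaFindMemoryTypeIndex()).
static VkResult ExampleCreatePool(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VmaPool* pOutPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex; // assumed member, see note above
    poolCreateInfo.blockSize = 64ull * 1024 * 1024;   // 64 MiB per block, arbitrary example value
    return vmaCreatePool(allocator, &poolCreateInfo, pOutPool);
}
// When no longer needed (after its allocations are freed):
//     vmaDestroyPool(allocator, pool);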
2381 void vmaGetPoolStats(
2382  VmaAllocator allocator,
2383  VmaPool pool,
2384  VmaPoolStats* pPoolStats);
2385 
2393  VmaAllocator allocator,
2394  VmaPool pool,
2395  size_t* pLostAllocationCount);
2396 
2411 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2412 
2437 VK_DEFINE_HANDLE(VmaAllocation)
2438 
2439 
2441 typedef struct VmaAllocationInfo {
2446  uint32_t memoryType;
2455  VkDeviceMemory deviceMemory;
2460  VkDeviceSize offset;
2465  VkDeviceSize size;
2479  void* pUserData;
2481 
2492 VkResult vmaAllocateMemory(
2493  VmaAllocator allocator,
2494  const VkMemoryRequirements* pVkMemoryRequirements,
2495  const VmaAllocationCreateInfo* pCreateInfo,
2496  VmaAllocation* pAllocation,
2497  VmaAllocationInfo* pAllocationInfo);
2498 
2506  VmaAllocator allocator,
2507  VkBuffer buffer,
2508  const VmaAllocationCreateInfo* pCreateInfo,
2509  VmaAllocation* pAllocation,
2510  VmaAllocationInfo* pAllocationInfo);
2511 
2513 VkResult vmaAllocateMemoryForImage(
2514  VmaAllocator allocator,
2515  VkImage image,
2516  const VmaAllocationCreateInfo* pCreateInfo,
2517  VmaAllocation* pAllocation,
2518  VmaAllocationInfo* pAllocationInfo);
2519 
2521 void vmaFreeMemory(
2522  VmaAllocator allocator,
2523  VmaAllocation allocation);
2524 
2545 VkResult vmaResizeAllocation(
2546  VmaAllocator allocator,
2547  VmaAllocation allocation,
2548  VkDeviceSize newSize);
2549 
2567  VmaAllocator allocator,
2568  VmaAllocation allocation,
2569  VmaAllocationInfo* pAllocationInfo);
2570 
2585 VkBool32 vmaTouchAllocation(
2586  VmaAllocator allocator,
2587  VmaAllocation allocation);
2588 
2603  VmaAllocator allocator,
2604  VmaAllocation allocation,
2605  void* pUserData);
2606 
2618  VmaAllocator allocator,
2619  VmaAllocation* pAllocation);
2620 
2655 VkResult vmaMapMemory(
2656  VmaAllocator allocator,
2657  VmaAllocation allocation,
2658  void** ppData);
2659 
2664 void vmaUnmapMemory(
2665  VmaAllocator allocator,
2666  VmaAllocation allocation);
2667 
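// [Editor's illustrative sketch - not part of the original file.] Writing to a host-visible
// allocation through a temporary mapping; assumes <cstring> is available for memcpy. The
// flush is only strictly needed for memory types without VK_MEMORY_PROPERTY_HOST_COHERENT_BIT.
static VkResult ExampleUploadData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    const void* pSrcData,
    VkDeviceSize size)
{
    void* pMappedData = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &pMappedData);
    if(res != VK_SUCCESS)
    {
        return res;
    }
    memcpy(pMappedData, pSrcData, (size_t)size);
    vmaFlushAllocation(allocator, allocation, 0, size);
    vmaUnmapMemory(allocator, allocation);
    return VK_SUCCESS;
}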
2680 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2681 
2694 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2695 
2712 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2713 
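// [Editor's illustrative sketch - not part of the original file.] How corruption detection
// is typically wired up; the macro names and values are assumptions based on the
// configuration section further below (VMA_DEBUG_MARGIN, VMA_DEBUG_DETECT_CORRUPTION).
//
//     // Before including the implementation:
//     #define VMA_DEBUG_MARGIN 16
//     #define VMA_DEBUG_DETECT_CORRUPTION 1
//     #define VMA_IMPLEMENTATION
//     #include "vk_mem_alloc.h"
//
//     // At run time, e.g. once per frame in a debug build, check all memory types:
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);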
2720 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2721 
2722 typedef enum VmaDefragmentationFlagBits {
2726 typedef VkFlags VmaDefragmentationFlags;
2727 
2732 typedef struct VmaDefragmentationInfo2 {
2756  uint32_t poolCount;
2777  VkDeviceSize maxCpuBytesToMove;
2787  VkDeviceSize maxGpuBytesToMove;
2801  VkCommandBuffer commandBuffer;
2803 
2808 typedef struct VmaDefragmentationInfo {
2813  VkDeviceSize maxBytesToMove;
2820 
2822 typedef struct VmaDefragmentationStats {
2824  VkDeviceSize bytesMoved;
2826  VkDeviceSize bytesFreed;
2832 
2859 VkResult vmaDefragmentationBegin(
2860  VmaAllocator allocator,
2861  const VmaDefragmentationInfo2* pInfo,
2862  VmaDefragmentationStats* pStats,
2863  VmaDefragmentationContext *pContext);
2864 
2870 VkResult vmaDefragmentationEnd(
2871  VmaAllocator allocator,
2872  VmaDefragmentationContext context);
2873 
2914 VkResult vmaDefragment(
2915  VmaAllocator allocator,
2916  VmaAllocation* pAllocations,
2917  size_t allocationCount,
2918  VkBool32* pAllocationsChanged,
2919  const VmaDefragmentationInfo *pDefragmentationInfo,
2920  VmaDefragmentationStats* pDefragmentationStats);
2921 
2934 VkResult vmaBindBufferMemory(
2935  VmaAllocator allocator,
2936  VmaAllocation allocation,
2937  VkBuffer buffer);
2938 
2951 VkResult vmaBindImageMemory(
2952  VmaAllocator allocator,
2953  VmaAllocation allocation,
2954  VkImage image);
2955 
2982 VkResult vmaCreateBuffer(
2983  VmaAllocator allocator,
2984  const VkBufferCreateInfo* pBufferCreateInfo,
2985  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2986  VkBuffer* pBuffer,
2987  VmaAllocation* pAllocation,
2988  VmaAllocationInfo* pAllocationInfo);
2989 
3001 void vmaDestroyBuffer(
3002  VmaAllocator allocator,
3003  VkBuffer buffer,
3004  VmaAllocation allocation);
3005 
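// [Editor's illustrative sketch - not part of the original file.] The typical create/destroy
// pair for a buffer whose memory is allocated by VMA; buffer size and usage are arbitrary
// example values.
static VkResult ExampleCreateVertexBuffer(
    VmaAllocator allocator,
    VkBuffer* pOutBuffer,
    VmaAllocation* pOutAllocation)
{
    VkBufferCreateInfo bufferCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferCreateInfo.size = 65536;
    bufferCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    return vmaCreateBuffer(allocator, &bufferCreateInfo, &allocCreateInfo,
        pOutBuffer, pOutAllocation, /*pAllocationInfo=*/ nullptr);
}
// And when the buffer is no longer needed:
//     vmaDestroyBuffer(allocator, buffer, allocation);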
3007 VkResult vmaCreateImage(
3008  VmaAllocator allocator,
3009  const VkImageCreateInfo* pImageCreateInfo,
3010  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3011  VkImage* pImage,
3012  VmaAllocation* pAllocation,
3013  VmaAllocationInfo* pAllocationInfo);
3014 
3026 void vmaDestroyImage(
3027  VmaAllocator allocator,
3028  VkImage image,
3029  VmaAllocation allocation);
3030 
3031 #ifdef __cplusplus
3032 }
3033 #endif
3034 
3035 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3036 
3037 // For Visual Studio IntelliSense.
3038 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3039 #define VMA_IMPLEMENTATION
3040 #endif
3041 
3042 #ifdef VMA_IMPLEMENTATION
3043 #undef VMA_IMPLEMENTATION
3044 
3045 #include <cstdint>
3046 #include <cstdlib>
3047 #include <cstring>
3048 
3049 /*******************************************************************************
3050 CONFIGURATION SECTION
3051 
3052 Define some of these macros before each #include of this header or change them
3053 here if you need behavior other than the default, depending on your environment.
3054 */
3055 
3056 /*
3057 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3058 internally, like:
3059 
3060  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3061 
3062 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3063 VmaAllocatorCreateInfo::pVulkanFunctions.
3064 */
3065 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3066 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3067 #endif
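// [Editor's illustrative sketch - not part of the original file.] With
// VMA_STATIC_VULKAN_FUNCTIONS defined to 0, the application fills VmaVulkanFunctions
// itself (e.g. with pointers obtained via vkGetInstanceProcAddr/vkGetDeviceProcAddr or a
// loader library) and passes the struct via VmaAllocatorCreateInfo::pVulkanFunctions.
// The "fetched..." names below are hypothetical pointers owned by the application.
//
//     VmaVulkanFunctions vulkanFunctions = {};
//     vulkanFunctions.vkGetPhysicalDeviceProperties = fetchedGetPhysicalDeviceProperties;
//     vulkanFunctions.vkAllocateMemory = fetchedAllocateMemory;
//     vulkanFunctions.vkFreeMemory = fetchedFreeMemory;
//     // ... assign the remaining members declared in VmaVulkanFunctions likewise ...
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.pVulkanFunctions = &vulkanFunctions;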
3068 
3069 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3070 //#define VMA_USE_STL_CONTAINERS 1
3071 
3072 /* Set this macro to 1 to make the library include and use STL containers:
3073 std::pair, std::vector, std::list, std::unordered_map.
3074 
3075 Set it to 0 or undefined to make the library use its own implementation of
3076 the containers.
3077 */
3078 #if VMA_USE_STL_CONTAINERS
3079  #define VMA_USE_STL_VECTOR 1
3080  #define VMA_USE_STL_UNORDERED_MAP 1
3081  #define VMA_USE_STL_LIST 1
3082 #endif
3083 
3084 #ifndef VMA_USE_STL_SHARED_MUTEX
3085  // Minimum Visual Studio 2015 Update 2
3086  #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918
3087  #define VMA_USE_STL_SHARED_MUTEX 1
3088  #endif
3089 #endif
3090 
3091 #if VMA_USE_STL_VECTOR
3092  #include <vector>
3093 #endif
3094 
3095 #if VMA_USE_STL_UNORDERED_MAP
3096  #include <unordered_map>
3097 #endif
3098 
3099 #if VMA_USE_STL_LIST
3100  #include <list>
3101 #endif
3102 
3103 /*
3104 Following headers are used in this CONFIGURATION section only, so feel free to
3105 remove them if not needed.
3106 */
3107 #include <cassert> // for assert
3108 #include <algorithm> // for min, max
3109 #include <mutex>
3110 #include <atomic> // for std::atomic
3111 
3112 #ifndef VMA_NULL
3113  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3114  #define VMA_NULL nullptr
3115 #endif
3116 
3117 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3118 #include <cstdlib>
3119 void *aligned_alloc(size_t alignment, size_t size)
3120 {
3121  // alignment must be >= sizeof(void*)
3122  if(alignment < sizeof(void*))
3123  {
3124  alignment = sizeof(void*);
3125  }
3126 
3127  return memalign(alignment, size);
3128 }
3129 #elif defined(__APPLE__) || defined(__ANDROID__)
3130 #include <cstdlib>
3131 void *aligned_alloc(size_t alignment, size_t size)
3132 {
3133  // alignment must be >= sizeof(void*)
3134  if(alignment < sizeof(void*))
3135  {
3136  alignment = sizeof(void*);
3137  }
3138 
3139  void *pointer;
3140  if(posix_memalign(&pointer, alignment, size) == 0)
3141  return pointer;
3142  return VMA_NULL;
3143 }
3144 #endif
3145 
3146 // If your compiler is not compatible with C++11 and the definition of the
3147 // aligned_alloc() function is missing, uncommenting the following line may help:
3148 
3149 //#include <malloc.h>
3150 
3151 // Normal assert to check for programmer's errors, especially in Debug configuration.
3152 #ifndef VMA_ASSERT
3153  #ifdef _DEBUG
3154  #define VMA_ASSERT(expr) assert(expr)
3155  #else
3156  #define VMA_ASSERT(expr)
3157  #endif
3158 #endif
3159 
3160 // Assert that is called very often, e.g. inside data structures such as operator[].
3161 // Making it non-empty can make the program slow.
3162 #ifndef VMA_HEAVY_ASSERT
3163  #ifdef _DEBUG
3164  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3165  #else
3166  #define VMA_HEAVY_ASSERT(expr)
3167  #endif
3168 #endif
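// [Editor's illustrative sketch - not part of the original file.] Because of the #ifndef
// guards above, both assert macros can be overridden before including this header, e.g.
// to route failures to a custom logger:
//
//     #include <cstdio>
//     #include <cstdlib>
//     #define VMA_ASSERT(expr) \
//         do { if(!(expr)) { fprintf(stderr, "VMA assertion failed: %s\n", #expr); abort(); } } while(false)
//     #define VMA_IMPLEMENTATION
//     #include "vk_mem_alloc.h"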
3169 
3170 #ifndef VMA_ALIGN_OF
3171  #define VMA_ALIGN_OF(type) (__alignof(type))
3172 #endif
3173 
3174 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3175  #if defined(_WIN32)
3176  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3177  #else
3178  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3179  #endif
3180 #endif
3181 
3182 #ifndef VMA_SYSTEM_FREE
3183  #if defined(_WIN32)
3184  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3185  #else
3186  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3187  #endif
3188 #endif
3189 
3190 #ifndef VMA_MIN
3191  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3192 #endif
3193 
3194 #ifndef VMA_MAX
3195  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3196 #endif
3197 
3198 #ifndef VMA_SWAP
3199  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3200 #endif
3201 
3202 #ifndef VMA_SORT
3203  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3204 #endif
3205 
3206 #ifndef VMA_DEBUG_LOG
3207  #define VMA_DEBUG_LOG(format, ...)
3208  /*
3209  #define VMA_DEBUG_LOG(format, ...) do { \
3210  printf(format, __VA_ARGS__); \
3211  printf("\n"); \
3212  } while(false)
3213  */
3214 #endif
3215 
3216 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3217 #if VMA_STATS_STRING_ENABLED
3218  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3219  {
3220  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3221  }
3222  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3223  {
3224  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3225  }
3226  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3227  {
3228  snprintf(outStr, strLen, "%p", ptr);
3229  }
3230 #endif
3231 
3232 #ifndef VMA_MUTEX
3233  class VmaMutex
3234  {
3235  public:
3236  void Lock() { m_Mutex.lock(); }
3237  void Unlock() { m_Mutex.unlock(); }
3238  private:
3239  std::mutex m_Mutex;
3240  };
3241  #define VMA_MUTEX VmaMutex
3242 #endif
3243 
3244 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3245 #ifndef VMA_RW_MUTEX
3246  #if VMA_USE_STL_SHARED_MUTEX
3247  // Use std::shared_mutex from C++17.
3248  #include <shared_mutex>
3249  class VmaRWMutex
3250  {
3251  public:
3252  void LockRead() { m_Mutex.lock_shared(); }
3253  void UnlockRead() { m_Mutex.unlock_shared(); }
3254  void LockWrite() { m_Mutex.lock(); }
3255  void UnlockWrite() { m_Mutex.unlock(); }
3256  private:
3257  std::shared_mutex m_Mutex;
3258  };
3259  #define VMA_RW_MUTEX VmaRWMutex
3260  #elif defined(_WIN32)
3261  // Use SRWLOCK from WinAPI.
3262  class VmaRWMutex
3263  {
3264  public:
3265  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3266  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3267  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3268  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3269  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3270  private:
3271  SRWLOCK m_Lock;
3272  };
3273  #define VMA_RW_MUTEX VmaRWMutex
3274  #else
3275  // Less efficient fallback: Use normal mutex.
3276  class VmaRWMutex
3277  {
3278  public:
3279  void LockRead() { m_Mutex.Lock(); }
3280  void UnlockRead() { m_Mutex.Unlock(); }
3281  void LockWrite() { m_Mutex.Lock(); }
3282  void UnlockWrite() { m_Mutex.Unlock(); }
3283  private:
3284  VMA_MUTEX m_Mutex;
3285  };
3286  #define VMA_RW_MUTEX VmaRWMutex
3287  #endif // #if VMA_USE_STL_SHARED_MUTEX
3288 #endif // #ifndef VMA_RW_MUTEX
3289 
3290 /*
3291 If providing your own implementation, you need to implement a subset of std::atomic:
3292 
3293 - Constructor(uint32_t desired)
3294 - uint32_t load() const
3295 - void store(uint32_t desired)
3296 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3297 */
3298 #ifndef VMA_ATOMIC_UINT32
3299  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3300 #endif
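// [Editor's illustrative sketch - not part of the original file.] A mutex-based replacement
// that provides exactly the subset listed above; such a macro would normally be defined
// before including this header. The default std::atomic<uint32_t> is preferable - this only
// demonstrates the required interface, so it is left disabled.
#if 0
#include <mutex>
class MyAtomicUint32
{
public:
    MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
    uint32_t load() const { std::lock_guard<std::mutex> lock(m_Mutex); return m_Value; }
    void store(uint32_t desired) { std::lock_guard<std::mutex> lock(m_Mutex); m_Value = desired; }
    bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    {
        std::lock_guard<std::mutex> lock(m_Mutex);
        if(m_Value == expected) { m_Value = desired; return true; }
        expected = m_Value;
        return false;
    }
private:
    mutable std::mutex m_Mutex;
    uint32_t m_Value;
};
#define VMA_ATOMIC_UINT32 MyAtomicUint32
#endif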
3301 
3302 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3303 
3307  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3308 #endif
3309 
3310 #ifndef VMA_DEBUG_ALIGNMENT
3311 
3315  #define VMA_DEBUG_ALIGNMENT (1)
3316 #endif
3317 
3318 #ifndef VMA_DEBUG_MARGIN
3319 
3323  #define VMA_DEBUG_MARGIN (0)
3324 #endif
3325 
3326 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3327 
3331  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3332 #endif
3333 
3334 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3335 
3340  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3341 #endif
3342 
3343 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3344 
3348  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3349 #endif
3350 
3351 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3352 
3356  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3357 #endif
3358 
3359 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3360  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3362 #endif
3363 
3364 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3365  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3367 #endif
3368 
3369 #ifndef VMA_CLASS_NO_COPY
3370  #define VMA_CLASS_NO_COPY(className) \
3371  private: \
3372  className(const className&) = delete; \
3373  className& operator=(const className&) = delete;
3374 #endif
3375 
3376 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3377 
3378 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3379 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3380 
3381 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3382 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3383 
3384 /*******************************************************************************
3385 END OF CONFIGURATION
3386 */
3387 
3388 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3389 
3390 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3391  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3392 
3393 // Returns number of bits set to 1 in (v).
3394 static inline uint32_t VmaCountBitsSet(uint32_t v)
3395 {
3396  uint32_t c = v - ((v >> 1) & 0x55555555);
3397  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3398  c = ((c >> 4) + c) & 0x0F0F0F0F;
3399  c = ((c >> 8) + c) & 0x00FF00FF;
3400  c = ((c >> 16) + c) & 0x0000FFFF;
3401  return c;
3402 }
3403 
3404 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3405 // Use types like uint32_t, uint64_t as T.
3406 template <typename T>
3407 static inline T VmaAlignUp(T val, T align)
3408 {
3409  return (val + align - 1) / align * align;
3410 }
3411 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3412 // Use types like uint32_t, uint64_t as T.
3413 template <typename T>
3414 static inline T VmaAlignDown(T val, T align)
3415 {
3416  return val / align * align;
3417 }
3418 
3419 // Division with mathematical rounding to nearest number.
3420 template <typename T>
3421 static inline T VmaRoundDiv(T x, T y)
3422 {
3423  return (x + (y / (T)2)) / y;
3424 }
3425 
3426 /*
3427 Returns true if given number is a power of two.
3428 T must be an unsigned integer, or a signed integer whose value is always nonnegative.
3429 For 0 returns true.
3430 */
3431 template <typename T>
3432 inline bool VmaIsPow2(T x)
3433 {
3434  return (x & (x-1)) == 0;
3435 }
3436 
3437 // Returns the smallest power of 2 greater than or equal to v.
3438 static inline uint32_t VmaNextPow2(uint32_t v)
3439 {
3440  v--;
3441  v |= v >> 1;
3442  v |= v >> 2;
3443  v |= v >> 4;
3444  v |= v >> 8;
3445  v |= v >> 16;
3446  v++;
3447  return v;
3448 }
3449 static inline uint64_t VmaNextPow2(uint64_t v)
3450 {
3451  v--;
3452  v |= v >> 1;
3453  v |= v >> 2;
3454  v |= v >> 4;
3455  v |= v >> 8;
3456  v |= v >> 16;
3457  v |= v >> 32;
3458  v++;
3459  return v;
3460 }
3461 
3462 // Returns the largest power of 2 less than or equal to v.
3463 static inline uint32_t VmaPrevPow2(uint32_t v)
3464 {
3465  v |= v >> 1;
3466  v |= v >> 2;
3467  v |= v >> 4;
3468  v |= v >> 8;
3469  v |= v >> 16;
3470  v = v ^ (v >> 1);
3471  return v;
3472 }
3473 static inline uint64_t VmaPrevPow2(uint64_t v)
3474 {
3475  v |= v >> 1;
3476  v |= v >> 2;
3477  v |= v >> 4;
3478  v |= v >> 8;
3479  v |= v >> 16;
3480  v |= v >> 32;
3481  v = v ^ (v >> 1);
3482  return v;
3483 }
3484 
3485 static inline bool VmaStrIsEmpty(const char* pStr)
3486 {
3487  return pStr == VMA_NULL || *pStr == '\0';
3488 }
3489 
3490 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3491 {
3492  switch(algorithm)
3493  {
3495  return "Linear";
3497  return "Buddy";
3498  case 0:
3499  return "Default";
3500  default:
3501  VMA_ASSERT(0);
3502  return "";
3503  }
3504 }
3505 
3506 #ifndef VMA_SORT
3507 
3508 template<typename Iterator, typename Compare>
3509 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3510 {
3511  Iterator centerValue = end; --centerValue;
3512  Iterator insertIndex = beg;
3513  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3514  {
3515  if(cmp(*memTypeIndex, *centerValue))
3516  {
3517  if(insertIndex != memTypeIndex)
3518  {
3519  VMA_SWAP(*memTypeIndex, *insertIndex);
3520  }
3521  ++insertIndex;
3522  }
3523  }
3524  if(insertIndex != centerValue)
3525  {
3526  VMA_SWAP(*insertIndex, *centerValue);
3527  }
3528  return insertIndex;
3529 }
3530 
3531 template<typename Iterator, typename Compare>
3532 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3533 {
3534  if(beg < end)
3535  {
3536  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3537  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3538  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3539  }
3540 }
3541 
3542 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3543 
3544 #endif // #ifndef VMA_SORT
3545 
3546 /*
3547 Returns true if two memory blocks occupy overlapping pages.
3548 ResourceA must be at a lower memory offset than ResourceB.
3549 
3550 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3551 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3552 */
3553 static inline bool VmaBlocksOnSamePage(
3554  VkDeviceSize resourceAOffset,
3555  VkDeviceSize resourceASize,
3556  VkDeviceSize resourceBOffset,
3557  VkDeviceSize pageSize)
3558 {
3559  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3560  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3561  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3562  VkDeviceSize resourceBStart = resourceBOffset;
3563  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3564  return resourceAEndPage == resourceBStartPage;
3565 }
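// [Editor's worked example - not part of the original file.] With pageSize =
// bufferImageGranularity = 4096:
//   resourceAOffset = 0, resourceASize = 1000 -> resourceAEnd = 999, resourceAEndPage = 0
//   resourceBOffset = 2048 -> resourceBStartPage = 0    -> returns true (same page)
//   resourceBOffset = 4096 -> resourceBStartPage = 4096 -> returns false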
3566 
3567 enum VmaSuballocationType
3568 {
3569  VMA_SUBALLOCATION_TYPE_FREE = 0,
3570  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3571  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3572  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3573  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3574  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3575  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3576 };
3577 
3578 /*
3579 Returns true if the given suballocation types could conflict and must respect
3580 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3581 or a linear image and the other is an optimal image. If a type is unknown, behave
3582 conservatively.
3583 */
3584 static inline bool VmaIsBufferImageGranularityConflict(
3585  VmaSuballocationType suballocType1,
3586  VmaSuballocationType suballocType2)
3587 {
3588  if(suballocType1 > suballocType2)
3589  {
3590  VMA_SWAP(suballocType1, suballocType2);
3591  }
3592 
3593  switch(suballocType1)
3594  {
3595  case VMA_SUBALLOCATION_TYPE_FREE:
3596  return false;
3597  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3598  return true;
3599  case VMA_SUBALLOCATION_TYPE_BUFFER:
3600  return
3601  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3602  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3603  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3604  return
3605  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3606  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3607  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3608  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3609  return
3610  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3611  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3612  return false;
3613  default:
3614  VMA_ASSERT(0);
3615  return true;
3616  }
3617 }
3618 
3619 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3620 {
3621  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3622  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3623  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3624  {
3625  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3626  }
3627 }
3628 
3629 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3630 {
3631  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3632  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3633  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3634  {
3635  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3636  {
3637  return false;
3638  }
3639  }
3640  return true;
3641 }
3642 
3643 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3644 struct VmaMutexLock
3645 {
3646  VMA_CLASS_NO_COPY(VmaMutexLock)
3647 public:
3648  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3649  m_pMutex(useMutex ? &mutex : VMA_NULL)
3650  { if(m_pMutex) { m_pMutex->Lock(); } }
3651  ~VmaMutexLock()
3652  { if(m_pMutex) { m_pMutex->Unlock(); } }
3653 private:
3654  VMA_MUTEX* m_pMutex;
3655 };
3656 
3657 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3658 struct VmaMutexLockRead
3659 {
3660  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3661 public:
3662  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3663  m_pMutex(useMutex ? &mutex : VMA_NULL)
3664  { if(m_pMutex) { m_pMutex->LockRead(); } }
3665  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3666 private:
3667  VMA_RW_MUTEX* m_pMutex;
3668 };
3669 
3670 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3671 struct VmaMutexLockWrite
3672 {
3673  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3674 public:
3675  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3676  m_pMutex(useMutex ? &mutex : VMA_NULL)
3677  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3678  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3679 private:
3680  VMA_RW_MUTEX* m_pMutex;
3681 };
3682 
3683 #if VMA_DEBUG_GLOBAL_MUTEX
3684  static VMA_MUTEX gDebugGlobalMutex;
3685  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3686 #else
3687  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3688 #endif
3689 
3690 // Minimum size of a free suballocation to register it in the free suballocation collection.
3691 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3692 
3693 /*
3694 Performs binary search and returns an iterator to the first element that is not
3695 less than (key), according to comparison (cmp).
3696 
3697 Cmp should return true if its first argument is less than its second argument.
3698 
3699 The returned iterator points to the found element if it is present in the
3700 collection, or to the place where a new element with value (key) should be inserted.
3701 */
3702 template <typename CmpLess, typename IterT, typename KeyT>
3703 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3704 {
3705  size_t down = 0, up = (end - beg);
3706  while(down < up)
3707  {
3708  const size_t mid = (down + up) / 2;
3709  if(cmp(*(beg+mid), key))
3710  {
3711  down = mid + 1;
3712  }
3713  else
3714  {
3715  up = mid;
3716  }
3717  }
3718  return beg + down;
3719 }
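// [Editor's illustrative sketch - not part of the original file.] Using the helper above on
// a sorted plain array with a simple less-than functor; for the key 6 it returns a pointer
// to the 7, i.e. the first element that is not less than the key.
struct VmaExampleUintLess
{
    bool operator()(uint32_t a, uint32_t b) const { return a < b; }
};
static void VmaExampleBinaryFind()
{
    const uint32_t sorted[] = { 1, 3, 5, 7, 9 };
    const uint32_t* it = VmaBinaryFindFirstNotLess(sorted, sorted + 5, 6u, VmaExampleUintLess());
    (void)it; // it == sorted + 3
}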
3720 
3722 // Memory allocation
3723 
3724 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3725 {
3726  if((pAllocationCallbacks != VMA_NULL) &&
3727  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3728  {
3729  return (*pAllocationCallbacks->pfnAllocation)(
3730  pAllocationCallbacks->pUserData,
3731  size,
3732  alignment,
3733  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3734  }
3735  else
3736  {
3737  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3738  }
3739 }
3740 
3741 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3742 {
3743  if((pAllocationCallbacks != VMA_NULL) &&
3744  (pAllocationCallbacks->pfnFree != VMA_NULL))
3745  {
3746  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3747  }
3748  else
3749  {
3750  VMA_SYSTEM_FREE(ptr);
3751  }
3752 }
3753 
3754 template<typename T>
3755 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3756 {
3757  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3758 }
3759 
3760 template<typename T>
3761 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3762 {
3763  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3764 }
3765 
3766 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3767 
3768 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3769 
3770 template<typename T>
3771 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3772 {
3773  ptr->~T();
3774  VmaFree(pAllocationCallbacks, ptr);
3775 }
3776 
3777 template<typename T>
3778 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3779 {
3780  if(ptr != VMA_NULL)
3781  {
3782  for(size_t i = count; i--; )
3783  {
3784  ptr[i].~T();
3785  }
3786  VmaFree(pAllocationCallbacks, ptr);
3787  }
3788 }
3789 
3790 // STL-compatible allocator.
3791 template<typename T>
3792 class VmaStlAllocator
3793 {
3794 public:
3795  const VkAllocationCallbacks* const m_pCallbacks;
3796  typedef T value_type;
3797 
3798  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3799  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3800 
3801  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3802  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3803 
3804  template<typename U>
3805  bool operator==(const VmaStlAllocator<U>& rhs) const
3806  {
3807  return m_pCallbacks == rhs.m_pCallbacks;
3808  }
3809  template<typename U>
3810  bool operator!=(const VmaStlAllocator<U>& rhs) const
3811  {
3812  return m_pCallbacks != rhs.m_pCallbacks;
3813  }
3814 
3815  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3816 };
3817 
3818 #if VMA_USE_STL_VECTOR
3819 
3820 #define VmaVector std::vector
3821 
3822 template<typename T, typename allocatorT>
3823 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3824 {
3825  vec.insert(vec.begin() + index, item);
3826 }
3827 
3828 template<typename T, typename allocatorT>
3829 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3830 {
3831  vec.erase(vec.begin() + index);
3832 }
3833 
3834 #else // #if VMA_USE_STL_VECTOR
3835 
3836 /* Class with interface compatible with subset of std::vector.
3837 T must be POD because constructors and destructors are not called and memcpy is
3838 used for these objects. */
3839 template<typename T, typename AllocatorT>
3840 class VmaVector
3841 {
3842 public:
3843  typedef T value_type;
3844 
3845  VmaVector(const AllocatorT& allocator) :
3846  m_Allocator(allocator),
3847  m_pArray(VMA_NULL),
3848  m_Count(0),
3849  m_Capacity(0)
3850  {
3851  }
3852 
3853  VmaVector(size_t count, const AllocatorT& allocator) :
3854  m_Allocator(allocator),
3855  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3856  m_Count(count),
3857  m_Capacity(count)
3858  {
3859  }
3860 
3861  VmaVector(const VmaVector<T, AllocatorT>& src) :
3862  m_Allocator(src.m_Allocator),
3863  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3864  m_Count(src.m_Count),
3865  m_Capacity(src.m_Count)
3866  {
3867  if(m_Count != 0)
3868  {
3869  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3870  }
3871  }
3872 
3873  ~VmaVector()
3874  {
3875  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3876  }
3877 
3878  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3879  {
3880  if(&rhs != this)
3881  {
3882  resize(rhs.m_Count);
3883  if(m_Count != 0)
3884  {
3885  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3886  }
3887  }
3888  return *this;
3889  }
3890 
3891  bool empty() const { return m_Count == 0; }
3892  size_t size() const { return m_Count; }
3893  T* data() { return m_pArray; }
3894  const T* data() const { return m_pArray; }
3895 
3896  T& operator[](size_t index)
3897  {
3898  VMA_HEAVY_ASSERT(index < m_Count);
3899  return m_pArray[index];
3900  }
3901  const T& operator[](size_t index) const
3902  {
3903  VMA_HEAVY_ASSERT(index < m_Count);
3904  return m_pArray[index];
3905  }
3906 
3907  T& front()
3908  {
3909  VMA_HEAVY_ASSERT(m_Count > 0);
3910  return m_pArray[0];
3911  }
3912  const T& front() const
3913  {
3914  VMA_HEAVY_ASSERT(m_Count > 0);
3915  return m_pArray[0];
3916  }
3917  T& back()
3918  {
3919  VMA_HEAVY_ASSERT(m_Count > 0);
3920  return m_pArray[m_Count - 1];
3921  }
3922  const T& back() const
3923  {
3924  VMA_HEAVY_ASSERT(m_Count > 0);
3925  return m_pArray[m_Count - 1];
3926  }
3927 
3928  void reserve(size_t newCapacity, bool freeMemory = false)
3929  {
3930  newCapacity = VMA_MAX(newCapacity, m_Count);
3931 
3932  if((newCapacity < m_Capacity) && !freeMemory)
3933  {
3934  newCapacity = m_Capacity;
3935  }
3936 
3937  if(newCapacity != m_Capacity)
3938  {
3939  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3940  if(m_Count != 0)
3941  {
3942  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3943  }
3944  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3945  m_Capacity = newCapacity;
3946  m_pArray = newArray;
3947  }
3948  }
3949 
3950  void resize(size_t newCount, bool freeMemory = false)
3951  {
3952  size_t newCapacity = m_Capacity;
3953  if(newCount > m_Capacity)
3954  {
3955  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3956  }
3957  else if(freeMemory)
3958  {
3959  newCapacity = newCount;
3960  }
3961 
3962  if(newCapacity != m_Capacity)
3963  {
3964  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3965  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3966  if(elementsToCopy != 0)
3967  {
3968  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3969  }
3970  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3971  m_Capacity = newCapacity;
3972  m_pArray = newArray;
3973  }
3974 
3975  m_Count = newCount;
3976  }
3977 
3978  void clear(bool freeMemory = false)
3979  {
3980  resize(0, freeMemory);
3981  }
3982 
3983  void insert(size_t index, const T& src)
3984  {
3985  VMA_HEAVY_ASSERT(index <= m_Count);
3986  const size_t oldCount = size();
3987  resize(oldCount + 1);
3988  if(index < oldCount)
3989  {
3990  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3991  }
3992  m_pArray[index] = src;
3993  }
3994 
3995  void remove(size_t index)
3996  {
3997  VMA_HEAVY_ASSERT(index < m_Count);
3998  const size_t oldCount = size();
3999  if(index < oldCount - 1)
4000  {
4001  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4002  }
4003  resize(oldCount - 1);
4004  }
4005 
4006  void push_back(const T& src)
4007  {
4008  const size_t newIndex = size();
4009  resize(newIndex + 1);
4010  m_pArray[newIndex] = src;
4011  }
4012 
4013  void pop_back()
4014  {
4015  VMA_HEAVY_ASSERT(m_Count > 0);
4016  resize(size() - 1);
4017  }
4018 
4019  void push_front(const T& src)
4020  {
4021  insert(0, src);
4022  }
4023 
4024  void pop_front()
4025  {
4026  VMA_HEAVY_ASSERT(m_Count > 0);
4027  remove(0);
4028  }
4029 
4030  typedef T* iterator;
4031 
4032  iterator begin() { return m_pArray; }
4033  iterator end() { return m_pArray + m_Count; }
4034 
4035 private:
4036  AllocatorT m_Allocator;
4037  T* m_pArray;
4038  size_t m_Count;
4039  size_t m_Capacity;
4040 };
4041 
4042 template<typename T, typename allocatorT>
4043 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4044 {
4045  vec.insert(index, item);
4046 }
4047 
4048 template<typename T, typename allocatorT>
4049 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4050 {
4051  vec.remove(index);
4052 }
4053 
4054 #endif // #if VMA_USE_STL_VECTOR
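// [Editor's illustrative sketch - not part of the original file.] VmaStlAllocator carries the
// VkAllocationCallbacks, so containers built on it honor user-provided allocation callbacks.
// This mirrors how the library itself instantiates VmaVector, and works with either branch
// of VMA_USE_STL_VECTOR because only the common helpers are used.
static void VmaExampleVectorUsage(const VkAllocationCallbacks* pAllocationCallbacks)
{
    const VmaStlAllocator<uint32_t> alloc(pAllocationCallbacks);
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > vec(alloc);
    vec.push_back(42);           // { 42 }
    VmaVectorInsert(vec, 0, 7u); // { 7, 42 }
    VmaVectorRemove(vec, 1);     // { 7 }
}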
4055 
4056 template<typename CmpLess, typename VectorT>
4057 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4058 {
4059  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4060  vector.data(),
4061  vector.data() + vector.size(),
4062  value,
4063  CmpLess()) - vector.data();
4064  VmaVectorInsert(vector, indexToInsert, value);
4065  return indexToInsert;
4066 }
4067 
4068 template<typename CmpLess, typename VectorT>
4069 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4070 {
4071  CmpLess comparator;
4072  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4073  vector.begin(),
4074  vector.end(),
4075  value,
4076  comparator);
4077  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4078  {
4079  size_t indexToRemove = it - vector.begin();
4080  VmaVectorRemove(vector, indexToRemove);
4081  return true;
4082  }
4083  return false;
4084 }
4085 
4086 template<typename CmpLess, typename IterT, typename KeyT>
4087 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4088 {
4089  CmpLess comparator;
4090  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4091  beg, end, value, comparator);
4092  if(it == end ||
4093  (!comparator(*it, value) && !comparator(value, *it)))
4094  {
4095  return it;
4096  }
4097  return end;
4098 }
4099 
4101 // class VmaPoolAllocator
4102 
4103 /*
4104 Allocator for objects of type T using a list of arrays (pools) to speed up
4105 allocation. The number of elements that can be allocated is not bounded because
4106 the allocator can create multiple blocks.
4107 */
4108 template<typename T>
4109 class VmaPoolAllocator
4110 {
4111  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4112 public:
4113  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
4114  ~VmaPoolAllocator();
4115  void Clear();
4116  T* Alloc();
4117  void Free(T* ptr);
4118 
4119 private:
4120  union Item
4121  {
4122  uint32_t NextFreeIndex;
4123  T Value;
4124  };
4125 
4126  struct ItemBlock
4127  {
4128  Item* pItems;
4129  uint32_t FirstFreeIndex;
4130  };
4131 
4132  const VkAllocationCallbacks* m_pAllocationCallbacks;
4133  size_t m_ItemsPerBlock;
4134  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4135 
4136  ItemBlock& CreateNewBlock();
4137 };
4138 
4139 template<typename T>
4140 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
4141  m_pAllocationCallbacks(pAllocationCallbacks),
4142  m_ItemsPerBlock(itemsPerBlock),
4143  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4144 {
4145  VMA_ASSERT(itemsPerBlock > 0);
4146 }
4147 
4148 template<typename T>
4149 VmaPoolAllocator<T>::~VmaPoolAllocator()
4150 {
4151  Clear();
4152 }
4153 
4154 template<typename T>
4155 void VmaPoolAllocator<T>::Clear()
4156 {
4157  for(size_t i = m_ItemBlocks.size(); i--; )
4158  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
4159  m_ItemBlocks.clear();
4160 }
4161 
4162 template<typename T>
4163 T* VmaPoolAllocator<T>::Alloc()
4164 {
4165  for(size_t i = m_ItemBlocks.size(); i--; )
4166  {
4167  ItemBlock& block = m_ItemBlocks[i];
4168  // This block has some free items: Use first one.
4169  if(block.FirstFreeIndex != UINT32_MAX)
4170  {
4171  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4172  block.FirstFreeIndex = pItem->NextFreeIndex;
4173  return &pItem->Value;
4174  }
4175  }
4176 
4177  // No block has a free item: Create a new one and use it.
4178  ItemBlock& newBlock = CreateNewBlock();
4179  Item* const pItem = &newBlock.pItems[0];
4180  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4181  return &pItem->Value;
4182 }
4183 
4184 template<typename T>
4185 void VmaPoolAllocator<T>::Free(T* ptr)
4186 {
4187  // Search all memory blocks to find ptr.
4188  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
4189  {
4190  ItemBlock& block = m_ItemBlocks[i];
4191 
4192  // Convert ptr (T*) to a pointer to the enclosing union Item; memcpy avoids an explicit reinterpret_cast.
4193  Item* pItemPtr;
4194  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4195 
4196  // Check if pItemPtr is in address range of this block.
4197  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
4198  {
4199  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4200  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4201  block.FirstFreeIndex = index;
4202  return;
4203  }
4204  }
4205  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4206 }
4207 
4208 template<typename T>
4209 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4210 {
4211  ItemBlock newBlock = {
4212  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
4213 
4214  m_ItemBlocks.push_back(newBlock);
4215 
4216  // Setup singly-linked list of all free items in this block.
4217  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
4218  newBlock.pItems[i].NextFreeIndex = i + 1;
4219  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
4220  return m_ItemBlocks.back();
4221 }
4222 
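// Editor's sketch (not part of the library): a minimal illustration of how the pool
// allocator above is meant to be used by the code that follows (e.g. VmaRawList
// allocates its list items from one). `pAllocationCallbacks` is assumed to be a
// VkAllocationCallbacks pointer obtained elsewhere; it may be null.
/*
    VmaPoolAllocator<uint32_t> pool(pAllocationCallbacks, 128); // 128 items per block.
    uint32_t* p = pool.Alloc(); // Reuses a free slot if any block has one, otherwise creates a new block.
    *p = 42;
    pool.Free(p);               // Returns the slot to its block's singly-linked free list.
    pool.Clear();               // Releases all blocks at once; outstanding pointers become invalid.
*/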
4224 // class VmaRawList, VmaList
4225 
4226 #if VMA_USE_STL_LIST
4227 
4228 #define VmaList std::list
4229 
4230 #else // #if VMA_USE_STL_LIST
4231 
4232 template<typename T>
4233 struct VmaListItem
4234 {
4235  VmaListItem* pPrev;
4236  VmaListItem* pNext;
4237  T Value;
4238 };
4239 
4240 // Doubly linked list.
4241 template<typename T>
4242 class VmaRawList
4243 {
4244  VMA_CLASS_NO_COPY(VmaRawList)
4245 public:
4246  typedef VmaListItem<T> ItemType;
4247 
4248  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4249  ~VmaRawList();
4250  void Clear();
4251 
4252  size_t GetCount() const { return m_Count; }
4253  bool IsEmpty() const { return m_Count == 0; }
4254 
4255  ItemType* Front() { return m_pFront; }
4256  const ItemType* Front() const { return m_pFront; }
4257  ItemType* Back() { return m_pBack; }
4258  const ItemType* Back() const { return m_pBack; }
4259 
4260  ItemType* PushBack();
4261  ItemType* PushFront();
4262  ItemType* PushBack(const T& value);
4263  ItemType* PushFront(const T& value);
4264  void PopBack();
4265  void PopFront();
4266 
4267  // Item can be null - it means PushBack.
4268  ItemType* InsertBefore(ItemType* pItem);
4269  // Item can be null - it means PushFront.
4270  ItemType* InsertAfter(ItemType* pItem);
4271 
4272  ItemType* InsertBefore(ItemType* pItem, const T& value);
4273  ItemType* InsertAfter(ItemType* pItem, const T& value);
4274 
4275  void Remove(ItemType* pItem);
4276 
4277 private:
4278  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4279  VmaPoolAllocator<ItemType> m_ItemAllocator;
4280  ItemType* m_pFront;
4281  ItemType* m_pBack;
4282  size_t m_Count;
4283 };
4284 
4285 template<typename T>
4286 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4287  m_pAllocationCallbacks(pAllocationCallbacks),
4288  m_ItemAllocator(pAllocationCallbacks, 128),
4289  m_pFront(VMA_NULL),
4290  m_pBack(VMA_NULL),
4291  m_Count(0)
4292 {
4293 }
4294 
4295 template<typename T>
4296 VmaRawList<T>::~VmaRawList()
4297 {
4298  // Intentionally not calling Clear, because that would waste time on unnecessary
4299  // computation returning all items to m_ItemAllocator as free.
4300 }
4301 
4302 template<typename T>
4303 void VmaRawList<T>::Clear()
4304 {
4305  if(IsEmpty() == false)
4306  {
4307  ItemType* pItem = m_pBack;
4308  while(pItem != VMA_NULL)
4309  {
4310  ItemType* const pPrevItem = pItem->pPrev;
4311  m_ItemAllocator.Free(pItem);
4312  pItem = pPrevItem;
4313  }
4314  m_pFront = VMA_NULL;
4315  m_pBack = VMA_NULL;
4316  m_Count = 0;
4317  }
4318 }
4319 
4320 template<typename T>
4321 VmaListItem<T>* VmaRawList<T>::PushBack()
4322 {
4323  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4324  pNewItem->pNext = VMA_NULL;
4325  if(IsEmpty())
4326  {
4327  pNewItem->pPrev = VMA_NULL;
4328  m_pFront = pNewItem;
4329  m_pBack = pNewItem;
4330  m_Count = 1;
4331  }
4332  else
4333  {
4334  pNewItem->pPrev = m_pBack;
4335  m_pBack->pNext = pNewItem;
4336  m_pBack = pNewItem;
4337  ++m_Count;
4338  }
4339  return pNewItem;
4340 }
4341 
4342 template<typename T>
4343 VmaListItem<T>* VmaRawList<T>::PushFront()
4344 {
4345  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4346  pNewItem->pPrev = VMA_NULL;
4347  if(IsEmpty())
4348  {
4349  pNewItem->pNext = VMA_NULL;
4350  m_pFront = pNewItem;
4351  m_pBack = pNewItem;
4352  m_Count = 1;
4353  }
4354  else
4355  {
4356  pNewItem->pNext = m_pFront;
4357  m_pFront->pPrev = pNewItem;
4358  m_pFront = pNewItem;
4359  ++m_Count;
4360  }
4361  return pNewItem;
4362 }
4363 
4364 template<typename T>
4365 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4366 {
4367  ItemType* const pNewItem = PushBack();
4368  pNewItem->Value = value;
4369  return pNewItem;
4370 }
4371 
4372 template<typename T>
4373 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4374 {
4375  ItemType* const pNewItem = PushFront();
4376  pNewItem->Value = value;
4377  return pNewItem;
4378 }
4379 
4380 template<typename T>
4381 void VmaRawList<T>::PopBack()
4382 {
4383  VMA_HEAVY_ASSERT(m_Count > 0);
4384  ItemType* const pBackItem = m_pBack;
4385  ItemType* const pPrevItem = pBackItem->pPrev;
4386  if(pPrevItem != VMA_NULL)
4387  {
4388  pPrevItem->pNext = VMA_NULL;
4389  }
4390  m_pBack = pPrevItem;
4391  m_ItemAllocator.Free(pBackItem);
4392  --m_Count;
4393 }
4394 
4395 template<typename T>
4396 void VmaRawList<T>::PopFront()
4397 {
4398  VMA_HEAVY_ASSERT(m_Count > 0);
4399  ItemType* const pFrontItem = m_pFront;
4400  ItemType* const pNextItem = pFrontItem->pNext;
4401  if(pNextItem != VMA_NULL)
4402  {
4403  pNextItem->pPrev = VMA_NULL;
4404  }
4405  m_pFront = pNextItem;
4406  m_ItemAllocator.Free(pFrontItem);
4407  --m_Count;
4408 }
4409 
4410 template<typename T>
4411 void VmaRawList<T>::Remove(ItemType* pItem)
4412 {
4413  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4414  VMA_HEAVY_ASSERT(m_Count > 0);
4415 
4416  if(pItem->pPrev != VMA_NULL)
4417  {
4418  pItem->pPrev->pNext = pItem->pNext;
4419  }
4420  else
4421  {
4422  VMA_HEAVY_ASSERT(m_pFront == pItem);
4423  m_pFront = pItem->pNext;
4424  }
4425 
4426  if(pItem->pNext != VMA_NULL)
4427  {
4428  pItem->pNext->pPrev = pItem->pPrev;
4429  }
4430  else
4431  {
4432  VMA_HEAVY_ASSERT(m_pBack == pItem);
4433  m_pBack = pItem->pPrev;
4434  }
4435 
4436  m_ItemAllocator.Free(pItem);
4437  --m_Count;
4438 }
4439 
4440 template<typename T>
4441 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4442 {
4443  if(pItem != VMA_NULL)
4444  {
4445  ItemType* const prevItem = pItem->pPrev;
4446  ItemType* const newItem = m_ItemAllocator.Alloc();
4447  newItem->pPrev = prevItem;
4448  newItem->pNext = pItem;
4449  pItem->pPrev = newItem;
4450  if(prevItem != VMA_NULL)
4451  {
4452  prevItem->pNext = newItem;
4453  }
4454  else
4455  {
4456  VMA_HEAVY_ASSERT(m_pFront == pItem);
4457  m_pFront = newItem;
4458  }
4459  ++m_Count;
4460  return newItem;
4461  }
4462  else
4463  return PushBack();
4464 }
4465 
4466 template<typename T>
4467 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4468 {
4469  if(pItem != VMA_NULL)
4470  {
4471  ItemType* const nextItem = pItem->pNext;
4472  ItemType* const newItem = m_ItemAllocator.Alloc();
4473  newItem->pNext = nextItem;
4474  newItem->pPrev = pItem;
4475  pItem->pNext = newItem;
4476  if(nextItem != VMA_NULL)
4477  {
4478  nextItem->pPrev = newItem;
4479  }
4480  else
4481  {
4482  VMA_HEAVY_ASSERT(m_pBack == pItem);
4483  m_pBack = newItem;
4484  }
4485  ++m_Count;
4486  return newItem;
4487  }
4488  else
4489  return PushFront();
4490 }
4491 
4492 template<typename T>
4493 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4494 {
4495  ItemType* const newItem = InsertBefore(pItem);
4496  newItem->Value = value;
4497  return newItem;
4498 }
4499 
4500 template<typename T>
4501 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4502 {
4503  ItemType* const newItem = InsertAfter(pItem);
4504  newItem->Value = value;
4505  return newItem;
4506 }
4507 
4508 template<typename T, typename AllocatorT>
4509 class VmaList
4510 {
4511  VMA_CLASS_NO_COPY(VmaList)
4512 public:
4513  class iterator
4514  {
4515  public:
4516  iterator() :
4517  m_pList(VMA_NULL),
4518  m_pItem(VMA_NULL)
4519  {
4520  }
4521 
4522  T& operator*() const
4523  {
4524  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4525  return m_pItem->Value;
4526  }
4527  T* operator->() const
4528  {
4529  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4530  return &m_pItem->Value;
4531  }
4532 
4533  iterator& operator++()
4534  {
4535  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4536  m_pItem = m_pItem->pNext;
4537  return *this;
4538  }
4539  iterator& operator--()
4540  {
4541  if(m_pItem != VMA_NULL)
4542  {
4543  m_pItem = m_pItem->pPrev;
4544  }
4545  else
4546  {
4547  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4548  m_pItem = m_pList->Back();
4549  }
4550  return *this;
4551  }
4552 
4553  iterator operator++(int)
4554  {
4555  iterator result = *this;
4556  ++*this;
4557  return result;
4558  }
4559  iterator operator--(int)
4560  {
4561  iterator result = *this;
4562  --*this;
4563  return result;
4564  }
4565 
4566  bool operator==(const iterator& rhs) const
4567  {
4568  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4569  return m_pItem == rhs.m_pItem;
4570  }
4571  bool operator!=(const iterator& rhs) const
4572  {
4573  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4574  return m_pItem != rhs.m_pItem;
4575  }
4576 
4577  private:
4578  VmaRawList<T>* m_pList;
4579  VmaListItem<T>* m_pItem;
4580 
4581  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4582  m_pList(pList),
4583  m_pItem(pItem)
4584  {
4585  }
4586 
4587  friend class VmaList<T, AllocatorT>;
4588  };
4589 
4590  class const_iterator
4591  {
4592  public:
4593  const_iterator() :
4594  m_pList(VMA_NULL),
4595  m_pItem(VMA_NULL)
4596  {
4597  }
4598 
4599  const_iterator(const iterator& src) :
4600  m_pList(src.m_pList),
4601  m_pItem(src.m_pItem)
4602  {
4603  }
4604 
4605  const T& operator*() const
4606  {
4607  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4608  return m_pItem->Value;
4609  }
4610  const T* operator->() const
4611  {
4612  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4613  return &m_pItem->Value;
4614  }
4615 
4616  const_iterator& operator++()
4617  {
4618  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4619  m_pItem = m_pItem->pNext;
4620  return *this;
4621  }
4622  const_iterator& operator--()
4623  {
4624  if(m_pItem != VMA_NULL)
4625  {
4626  m_pItem = m_pItem->pPrev;
4627  }
4628  else
4629  {
4630  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4631  m_pItem = m_pList->Back();
4632  }
4633  return *this;
4634  }
4635 
4636  const_iterator operator++(int)
4637  {
4638  const_iterator result = *this;
4639  ++*this;
4640  return result;
4641  }
4642  const_iterator operator--(int)
4643  {
4644  const_iterator result = *this;
4645  --*this;
4646  return result;
4647  }
4648 
4649  bool operator==(const const_iterator& rhs) const
4650  {
4651  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4652  return m_pItem == rhs.m_pItem;
4653  }
4654  bool operator!=(const const_iterator& rhs) const
4655  {
4656  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4657  return m_pItem != rhs.m_pItem;
4658  }
4659 
4660  private:
4661  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4662  m_pList(pList),
4663  m_pItem(pItem)
4664  {
4665  }
4666 
4667  const VmaRawList<T>* m_pList;
4668  const VmaListItem<T>* m_pItem;
4669 
4670  friend class VmaList<T, AllocatorT>;
4671  };
4672 
4673  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4674 
4675  bool empty() const { return m_RawList.IsEmpty(); }
4676  size_t size() const { return m_RawList.GetCount(); }
4677 
4678  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4679  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4680 
4681  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4682  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4683 
4684  void clear() { m_RawList.Clear(); }
4685  void push_back(const T& value) { m_RawList.PushBack(value); }
4686  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4687  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4688 
4689 private:
4690  VmaRawList<T> m_RawList;
4691 };
4692 
4693 #endif // #if VMA_USE_STL_LIST
4694 
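// Editor's sketch (not part of the library): a minimal illustration of the VmaList
// interface above. VmaStlAllocator is declared earlier in this header and merely
// carries the VkAllocationCallbacks down to the underlying VmaPoolAllocator of list
// items; `pAllocationCallbacks` is assumed to come from the caller.
/*
    typedef VmaList< uint32_t, VmaStlAllocator<uint32_t> > MyList;
    MyList list(VmaStlAllocator<uint32_t>(pAllocationCallbacks));
    list.push_back(1);
    list.push_back(2);
    for(MyList::iterator it = list.begin(); it != list.end(); ++it)
        ++(*it); // Iterators expose the stored T by reference, like std::list.
    list.clear();
*/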
4696 // class VmaMap
4697 
4698 // Unused in this version.
4699 #if 0
4700 
4701 #if VMA_USE_STL_UNORDERED_MAP
4702 
4703 #define VmaPair std::pair
4704 
4705 #define VMA_MAP_TYPE(KeyT, ValueT) \
4706  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4707 
4708 #else // #if VMA_USE_STL_UNORDERED_MAP
4709 
4710 template<typename T1, typename T2>
4711 struct VmaPair
4712 {
4713  T1 first;
4714  T2 second;
4715 
4716  VmaPair() : first(), second() { }
4717  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4718 };
4719 
4720 /* Class compatible with subset of interface of std::unordered_map.
4721 KeyT, ValueT must be POD because they will be stored in VmaVector.
4722 */
4723 template<typename KeyT, typename ValueT>
4724 class VmaMap
4725 {
4726 public:
4727  typedef VmaPair<KeyT, ValueT> PairType;
4728  typedef PairType* iterator;
4729 
4730  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4731 
4732  iterator begin() { return m_Vector.begin(); }
4733  iterator end() { return m_Vector.end(); }
4734 
4735  void insert(const PairType& pair);
4736  iterator find(const KeyT& key);
4737  void erase(iterator it);
4738 
4739 private:
4740  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4741 };
4742 
4743 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4744 
4745 template<typename FirstT, typename SecondT>
4746 struct VmaPairFirstLess
4747 {
4748  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4749  {
4750  return lhs.first < rhs.first;
4751  }
4752  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4753  {
4754  return lhs.first < rhsFirst;
4755  }
4756 };
4757 
4758 template<typename KeyT, typename ValueT>
4759 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4760 {
4761  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4762  m_Vector.data(),
4763  m_Vector.data() + m_Vector.size(),
4764  pair,
4765  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4766  VmaVectorInsert(m_Vector, indexToInsert, pair);
4767 }
4768 
4769 template<typename KeyT, typename ValueT>
4770 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4771 {
4772  PairType* it = VmaBinaryFindFirstNotLess(
4773  m_Vector.data(),
4774  m_Vector.data() + m_Vector.size(),
4775  key,
4776  VmaPairFirstLess<KeyT, ValueT>());
4777  if((it != m_Vector.end()) && (it->first == key))
4778  {
4779  return it;
4780  }
4781  else
4782  {
4783  return m_Vector.end();
4784  }
4785 }
4786 
4787 template<typename KeyT, typename ValueT>
4788 void VmaMap<KeyT, ValueT>::erase(iterator it)
4789 {
4790  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4791 }
4792 
4793 #endif // #if VMA_USE_STL_UNORDERED_MAP
4794 
4795 #endif // #if 0
4796 
4798 
4799 class VmaDeviceMemoryBlock;
4800 
4801 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4802 
4803 struct VmaAllocation_T
4804 {
4805  VMA_CLASS_NO_COPY(VmaAllocation_T)
4806 private:
4807  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4808 
4809  enum FLAGS
4810  {
4811  FLAG_USER_DATA_STRING = 0x01,
4812  };
4813 
4814 public:
4815  enum ALLOCATION_TYPE
4816  {
4817  ALLOCATION_TYPE_NONE,
4818  ALLOCATION_TYPE_BLOCK,
4819  ALLOCATION_TYPE_DEDICATED,
4820  };
4821 
4822  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4823  m_Alignment(1),
4824  m_Size(0),
4825  m_pUserData(VMA_NULL),
4826  m_LastUseFrameIndex(currentFrameIndex),
4827  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4828  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4829  m_MapCount(0),
4830  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4831  {
4832 #if VMA_STATS_STRING_ENABLED
4833  m_CreationFrameIndex = currentFrameIndex;
4834  m_BufferImageUsage = 0;
4835 #endif
4836  }
4837 
4838  ~VmaAllocation_T()
4839  {
4840  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4841 
4842  // Check if owned string was freed.
4843  VMA_ASSERT(m_pUserData == VMA_NULL);
4844  }
4845 
4846  void InitBlockAllocation(
4847  VmaPool hPool,
4848  VmaDeviceMemoryBlock* block,
4849  VkDeviceSize offset,
4850  VkDeviceSize alignment,
4851  VkDeviceSize size,
4852  VmaSuballocationType suballocationType,
4853  bool mapped,
4854  bool canBecomeLost)
4855  {
4856  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4857  VMA_ASSERT(block != VMA_NULL);
4858  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4859  m_Alignment = alignment;
4860  m_Size = size;
4861  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4862  m_SuballocationType = (uint8_t)suballocationType;
4863  m_BlockAllocation.m_hPool = hPool;
4864  m_BlockAllocation.m_Block = block;
4865  m_BlockAllocation.m_Offset = offset;
4866  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4867  }
4868 
4869  void InitLost()
4870  {
4871  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4872  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4873  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4874  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4875  m_BlockAllocation.m_Block = VMA_NULL;
4876  m_BlockAllocation.m_Offset = 0;
4877  m_BlockAllocation.m_CanBecomeLost = true;
4878  }
4879 
4880  void ChangeBlockAllocation(
4881  VmaAllocator hAllocator,
4882  VmaDeviceMemoryBlock* block,
4883  VkDeviceSize offset);
4884 
4885  void ChangeSize(VkDeviceSize newSize);
4886  void ChangeOffset(VkDeviceSize newOffset);
4887 
4888  // pMappedData not null means allocation is created with MAPPED flag.
4889  void InitDedicatedAllocation(
4890  uint32_t memoryTypeIndex,
4891  VkDeviceMemory hMemory,
4892  VmaSuballocationType suballocationType,
4893  void* pMappedData,
4894  VkDeviceSize size)
4895  {
4896  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4897  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4898  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4899  m_Alignment = 0;
4900  m_Size = size;
4901  m_SuballocationType = (uint8_t)suballocationType;
4902  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4903  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4904  m_DedicatedAllocation.m_hMemory = hMemory;
4905  m_DedicatedAllocation.m_pMappedData = pMappedData;
4906  }
4907 
4908  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4909  VkDeviceSize GetAlignment() const { return m_Alignment; }
4910  VkDeviceSize GetSize() const { return m_Size; }
4911  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4912  void* GetUserData() const { return m_pUserData; }
4913  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4914  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4915 
4916  VmaDeviceMemoryBlock* GetBlock() const
4917  {
4918  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4919  return m_BlockAllocation.m_Block;
4920  }
4921  VkDeviceSize GetOffset() const;
4922  VkDeviceMemory GetMemory() const;
4923  uint32_t GetMemoryTypeIndex() const;
4924  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4925  void* GetMappedData() const;
4926  bool CanBecomeLost() const;
4927  VmaPool GetPool() const;
4928 
4929  uint32_t GetLastUseFrameIndex() const
4930  {
4931  return m_LastUseFrameIndex.load();
4932  }
4933  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4934  {
4935  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4936  }
4937  /*
4938  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4939  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4940  - Else, returns false.
4941 
4942  If hAllocation is already lost, assert - you should not call it then.
4943  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
4944  */
4945  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4946 
4947  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4948  {
4949  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4950  outInfo.blockCount = 1;
4951  outInfo.allocationCount = 1;
4952  outInfo.unusedRangeCount = 0;
4953  outInfo.usedBytes = m_Size;
4954  outInfo.unusedBytes = 0;
4955  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4956  outInfo.unusedRangeSizeMin = UINT64_MAX;
4957  outInfo.unusedRangeSizeMax = 0;
4958  }
4959 
4960  void BlockAllocMap();
4961  void BlockAllocUnmap();
4962  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4963  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4964 
4965 #if VMA_STATS_STRING_ENABLED
4966  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4967  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4968 
4969  void InitBufferImageUsage(uint32_t bufferImageUsage)
4970  {
4971  VMA_ASSERT(m_BufferImageUsage == 0);
4972  m_BufferImageUsage = bufferImageUsage;
4973  }
4974 
4975  void PrintParameters(class VmaJsonWriter& json) const;
4976 #endif
4977 
4978 private:
4979  VkDeviceSize m_Alignment;
4980  VkDeviceSize m_Size;
4981  void* m_pUserData;
4982  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4983  uint8_t m_Type; // ALLOCATION_TYPE
4984  uint8_t m_SuballocationType; // VmaSuballocationType
4985  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4986  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
4987  uint8_t m_MapCount;
4988  uint8_t m_Flags; // enum FLAGS
4989 
4990  // Allocation out of VmaDeviceMemoryBlock.
4991  struct BlockAllocation
4992  {
4993  VmaPool m_hPool; // Null if belongs to general memory.
4994  VmaDeviceMemoryBlock* m_Block;
4995  VkDeviceSize m_Offset;
4996  bool m_CanBecomeLost;
4997  };
4998 
4999  // Allocation for an object that has its own private VkDeviceMemory.
5000  struct DedicatedAllocation
5001  {
5002  uint32_t m_MemoryTypeIndex;
5003  VkDeviceMemory m_hMemory;
5004  void* m_pMappedData; // Not null means memory is mapped.
5005  };
5006 
5007  union
5008  {
5009  // Allocation out of VmaDeviceMemoryBlock.
5010  BlockAllocation m_BlockAllocation;
5011  // Allocation for an object that has its own private VkDeviceMemory.
5012  DedicatedAllocation m_DedicatedAllocation;
5013  };
5014 
5015 #if VMA_STATS_STRING_ENABLED
5016  uint32_t m_CreationFrameIndex;
5017  uint32_t m_BufferImageUsage; // 0 if unknown.
5018 #endif
5019 
5020  void FreeUserDataString(VmaAllocator hAllocator);
5021 };
5022 
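// Editor's note (illustrative, not part of the library): the m_MapCount byte above
// packs two pieces of information, matching IsPersistentMap() and the flag comments:
//   - created with VMA_ALLOCATION_CREATE_MAPPED_BIT, never mapped manually -> 0x80
//   - the same allocation after two extra vmaMapMemory() calls             -> 0x82
//   - a plain allocation currently mapped once via vmaMapMemory()          -> 0x01
// so the reference count is (m_MapCount & 0x7F) and persistence is bit 0x80.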
5023 /*
5024 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an allocation
5025 and returned as an allocated memory block, or free.
5026 */
5027 struct VmaSuballocation
5028 {
5029  VkDeviceSize offset;
5030  VkDeviceSize size;
5031  VmaAllocation hAllocation;
5032  VmaSuballocationType type;
5033 };
5034 
5035 // Comparator for offsets.
5036 struct VmaSuballocationOffsetLess
5037 {
5038  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5039  {
5040  return lhs.offset < rhs.offset;
5041  }
5042 };
5043 struct VmaSuballocationOffsetGreater
5044 {
5045  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5046  {
5047  return lhs.offset > rhs.offset;
5048  }
5049 };
5050 
5051 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5052 
5053 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5054 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5055 
5056 /*
5057 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5058 
5059 If canMakeOtherLost was false:
5060 - item points to a FREE suballocation.
5061 - itemsToMakeLostCount is 0.
5062 
5063 If canMakeOtherLost was true:
5064 - item points to first of sequence of suballocations, which are either FREE,
5065  or point to VmaAllocations that can become lost.
5066 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5067  the requested allocation to succeed.
5068 */
5069 struct VmaAllocationRequest
5070 {
5071  VkDeviceSize offset;
5072  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5073  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5074  VmaSuballocationList::iterator item;
5075  size_t itemsToMakeLostCount;
5076  void* customData;
5077 
5078  VkDeviceSize CalcCost() const
5079  {
5080  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5081  }
5082 };
5083 
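// Editor's note (illustrative arithmetic, not part of the library): for a request
// that overlaps 192 bytes of existing allocations (sumItemSize) and needs to make
// 2 of them lost, CalcCost() == 192 + 2 * VMA_LOST_ALLOCATION_COST
// == 192 + 2 * 1048576 == 2097344 bytes, so candidate requests that sacrifice
// fewer live allocations compare as cheaper.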
5084 /*
5085 Data structure used for bookkeeping of allocations and unused ranges of memory
5086 in a single VkDeviceMemory block.
5087 */
5088 class VmaBlockMetadata
5089 {
5090 public:
5091  VmaBlockMetadata(VmaAllocator hAllocator);
5092  virtual ~VmaBlockMetadata() { }
5093  virtual void Init(VkDeviceSize size) { m_Size = size; }
5094 
5095  // Validates all data structures inside this object. If not valid, returns false.
5096  virtual bool Validate() const = 0;
5097  VkDeviceSize GetSize() const { return m_Size; }
5098  virtual size_t GetAllocationCount() const = 0;
5099  virtual VkDeviceSize GetSumFreeSize() const = 0;
5100  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5101  // Returns true if this block is empty - contains only a single free suballocation.
5102  virtual bool IsEmpty() const = 0;
5103 
5104  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5105  // Shouldn't modify blockCount.
5106  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5107 
5108 #if VMA_STATS_STRING_ENABLED
5109  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5110 #endif
5111 
5112  // Tries to find a place for suballocation with given parameters inside this block.
5113  // If succeeded, fills pAllocationRequest and returns true.
5114  // If failed, returns false.
5115  virtual bool CreateAllocationRequest(
5116  uint32_t currentFrameIndex,
5117  uint32_t frameInUseCount,
5118  VkDeviceSize bufferImageGranularity,
5119  VkDeviceSize allocSize,
5120  VkDeviceSize allocAlignment,
5121  bool upperAddress,
5122  VmaSuballocationType allocType,
5123  bool canMakeOtherLost,
5124  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5125  uint32_t strategy,
5126  VmaAllocationRequest* pAllocationRequest) = 0;
5127 
5128  virtual bool MakeRequestedAllocationsLost(
5129  uint32_t currentFrameIndex,
5130  uint32_t frameInUseCount,
5131  VmaAllocationRequest* pAllocationRequest) = 0;
5132 
5133  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5134 
5135  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5136 
5137  // Makes actual allocation based on request. Request must already be checked and valid.
5138  virtual void Alloc(
5139  const VmaAllocationRequest& request,
5140  VmaSuballocationType type,
5141  VkDeviceSize allocSize,
5142  bool upperAddress,
5143  VmaAllocation hAllocation) = 0;
5144 
5145  // Frees suballocation assigned to given memory region.
5146  virtual void Free(const VmaAllocation allocation) = 0;
5147  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5148 
5149  // Tries to resize (grow or shrink) space for given allocation, in place.
5150  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5151 
5152 protected:
5153  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5154 
5155 #if VMA_STATS_STRING_ENABLED
5156  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5157  VkDeviceSize unusedBytes,
5158  size_t allocationCount,
5159  size_t unusedRangeCount) const;
5160  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5161  VkDeviceSize offset,
5162  VmaAllocation hAllocation) const;
5163  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5164  VkDeviceSize offset,
5165  VkDeviceSize size) const;
5166  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5167 #endif
5168 
5169 private:
5170  VkDeviceSize m_Size;
5171  const VkAllocationCallbacks* m_pAllocationCallbacks;
5172 };
5173 
5174 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5175  VMA_ASSERT(0 && "Validation failed: " #cond); \
5176  return false; \
5177  } } while(false)
5178 
5179 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5180 {
5181  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5182 public:
5183  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5184  virtual ~VmaBlockMetadata_Generic();
5185  virtual void Init(VkDeviceSize size);
5186 
5187  virtual bool Validate() const;
5188  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5189  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5190  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5191  virtual bool IsEmpty() const;
5192 
5193  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5194  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5195 
5196 #if VMA_STATS_STRING_ENABLED
5197  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5198 #endif
5199 
5200  virtual bool CreateAllocationRequest(
5201  uint32_t currentFrameIndex,
5202  uint32_t frameInUseCount,
5203  VkDeviceSize bufferImageGranularity,
5204  VkDeviceSize allocSize,
5205  VkDeviceSize allocAlignment,
5206  bool upperAddress,
5207  VmaSuballocationType allocType,
5208  bool canMakeOtherLost,
5209  uint32_t strategy,
5210  VmaAllocationRequest* pAllocationRequest);
5211 
5212  virtual bool MakeRequestedAllocationsLost(
5213  uint32_t currentFrameIndex,
5214  uint32_t frameInUseCount,
5215  VmaAllocationRequest* pAllocationRequest);
5216 
5217  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5218 
5219  virtual VkResult CheckCorruption(const void* pBlockData);
5220 
5221  virtual void Alloc(
5222  const VmaAllocationRequest& request,
5223  VmaSuballocationType type,
5224  VkDeviceSize allocSize,
5225  bool upperAddress,
5226  VmaAllocation hAllocation);
5227 
5228  virtual void Free(const VmaAllocation allocation);
5229  virtual void FreeAtOffset(VkDeviceSize offset);
5230 
5231  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5232 
5234  // For defragmentation
5235 
5236  bool IsBufferImageGranularityConflictPossible(
5237  VkDeviceSize bufferImageGranularity,
5238  VmaSuballocationType& inOutPrevSuballocType) const;
5239 
5240 private:
5241  friend class VmaDefragmentationAlgorithm_Generic;
5242  friend class VmaDefragmentationAlgorithm_Fast;
5243 
5244  uint32_t m_FreeCount;
5245  VkDeviceSize m_SumFreeSize;
5246  VmaSuballocationList m_Suballocations;
5247  // Suballocations that are free and have size greater than a certain threshold.
5248  // Sorted by size, ascending.
5249  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5250 
5251  bool ValidateFreeSuballocationList() const;
5252 
5253  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
5254  // If yes, fills pOffset and returns true. If no, returns false.
5255  bool CheckAllocation(
5256  uint32_t currentFrameIndex,
5257  uint32_t frameInUseCount,
5258  VkDeviceSize bufferImageGranularity,
5259  VkDeviceSize allocSize,
5260  VkDeviceSize allocAlignment,
5261  VmaSuballocationType allocType,
5262  VmaSuballocationList::const_iterator suballocItem,
5263  bool canMakeOtherLost,
5264  VkDeviceSize* pOffset,
5265  size_t* itemsToMakeLostCount,
5266  VkDeviceSize* pSumFreeSize,
5267  VkDeviceSize* pSumItemSize) const;
5268  // Given a free suballocation, merges it with the following one, which must also be free.
5269  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5270  // Releases given suballocation, making it free.
5271  // Merges it with adjacent free suballocations if applicable.
5272  // Returns iterator to new free suballocation at this place.
5273  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5274  // Given a free suballocation, inserts it into the sorted list of
5275  // m_FreeSuballocationsBySize if it's suitable.
5276  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5277  // Given a free suballocation, removes it from the sorted list of
5278  // m_FreeSuballocationsBySize if it's suitable.
5279  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5280 };
5281 
5282 /*
5283 Allocations and their references in internal data structure look like this:
5284 
5285 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5286 
5287  0 +-------+
5288  | |
5289  | |
5290  | |
5291  +-------+
5292  | Alloc | 1st[m_1stNullItemsBeginCount]
5293  +-------+
5294  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5295  +-------+
5296  | ... |
5297  +-------+
5298  | Alloc | 1st[1st.size() - 1]
5299  +-------+
5300  | |
5301  | |
5302  | |
5303 GetSize() +-------+
5304 
5305 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5306 
5307  0 +-------+
5308  | Alloc | 2nd[0]
5309  +-------+
5310  | Alloc | 2nd[1]
5311  +-------+
5312  | ... |
5313  +-------+
5314  | Alloc | 2nd[2nd.size() - 1]
5315  +-------+
5316  | |
5317  | |
5318  | |
5319  +-------+
5320  | Alloc | 1st[m_1stNullItemsBeginCount]
5321  +-------+
5322  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5323  +-------+
5324  | ... |
5325  +-------+
5326  | Alloc | 1st[1st.size() - 1]
5327  +-------+
5328  | |
5329 GetSize() +-------+
5330 
5331 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5332 
5333  0 +-------+
5334  | |
5335  | |
5336  | |
5337  +-------+
5338  | Alloc | 1st[m_1stNullItemsBeginCount]
5339  +-------+
5340  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5341  +-------+
5342  | ... |
5343  +-------+
5344  | Alloc | 1st[1st.size() - 1]
5345  +-------+
5346  | |
5347  | |
5348  | |
5349  +-------+
5350  | Alloc | 2nd[2nd.size() - 1]
5351  +-------+
5352  | ... |
5353  +-------+
5354  | Alloc | 2nd[1]
5355  +-------+
5356  | Alloc | 2nd[0]
5357 GetSize() +-------+
5358 
5359 */
5360 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5361 {
5362  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5363 public:
5364  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5365  virtual ~VmaBlockMetadata_Linear();
5366  virtual void Init(VkDeviceSize size);
5367 
5368  virtual bool Validate() const;
5369  virtual size_t GetAllocationCount() const;
5370  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5371  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5372  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5373 
5374  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5375  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5376 
5377 #if VMA_STATS_STRING_ENABLED
5378  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5379 #endif
5380 
5381  virtual bool CreateAllocationRequest(
5382  uint32_t currentFrameIndex,
5383  uint32_t frameInUseCount,
5384  VkDeviceSize bufferImageGranularity,
5385  VkDeviceSize allocSize,
5386  VkDeviceSize allocAlignment,
5387  bool upperAddress,
5388  VmaSuballocationType allocType,
5389  bool canMakeOtherLost,
5390  uint32_t strategy,
5391  VmaAllocationRequest* pAllocationRequest);
5392 
5393  virtual bool MakeRequestedAllocationsLost(
5394  uint32_t currentFrameIndex,
5395  uint32_t frameInUseCount,
5396  VmaAllocationRequest* pAllocationRequest);
5397 
5398  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5399 
5400  virtual VkResult CheckCorruption(const void* pBlockData);
5401 
5402  virtual void Alloc(
5403  const VmaAllocationRequest& request,
5404  VmaSuballocationType type,
5405  VkDeviceSize allocSize,
5406  bool upperAddress,
5407  VmaAllocation hAllocation);
5408 
5409  virtual void Free(const VmaAllocation allocation);
5410  virtual void FreeAtOffset(VkDeviceSize offset);
5411 
5412 private:
5413  /*
5414  There are two suballocation vectors, used in a ping-pong fashion.
5415  The one with index m_1stVectorIndex is called 1st.
5416  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5417  2nd can be non-empty only when 1st is not empty.
5418  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5419  */
5420  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5421 
5422  enum SECOND_VECTOR_MODE
5423  {
5424  SECOND_VECTOR_EMPTY,
5425  /*
5426  Suballocations in 2nd vector are created later than the ones in 1st, but they
5427  all have smaller offsets.
5428  */
5429  SECOND_VECTOR_RING_BUFFER,
5430  /*
5431  Suballocations in 2nd vector are the upper side of a double stack.
5432  They all have offsets higher than those in 1st vector.
5433  The top of this stack means smaller offsets, but higher indices in this vector.
5434  */
5435  SECOND_VECTOR_DOUBLE_STACK,
5436  };
5437 
5438  VkDeviceSize m_SumFreeSize;
5439  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5440  uint32_t m_1stVectorIndex;
5441  SECOND_VECTOR_MODE m_2ndVectorMode;
5442 
5443  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5444  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5445  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5446  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5447 
5448  // Number of items in 1st vector with hAllocation = null at the beginning.
5449  size_t m_1stNullItemsBeginCount;
5450  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5451  size_t m_1stNullItemsMiddleCount;
5452  // Number of items in 2nd vector with hAllocation = null.
5453  size_t m_2ndNullItemsCount;
5454 
5455  bool ShouldCompact1st() const;
5456  void CleanupAfterFree();
5457 };
5458 
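// Editor's sketch (not part of the library): VmaBlockMetadata_Linear above backs
// custom pools created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT (public API declared
// earlier in this header). Freeing old allocations while appending new ones drives it
// into the SECOND_VECTOR_RING_BUFFER state from the diagram. `allocator` and
// `memTypeIndex` are assumed to be set up by the caller.
/*
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolInfo.blockSize = 64ull * 1024 * 1024; // Single fixed-size block.
    poolInfo.maxBlockCount = 1;               // The linear algorithm operates within one block.
    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
*/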
5459 /*
5460 - GetSize() is the original size of allocated memory block.
5461 - m_UsableSize is this size aligned down to a power of two.
5462  All allocations and calculations happen relative to m_UsableSize.
5463 - GetUnusableSize() is the difference between them.
5464  It is reported as a separate, unused range, not available for allocations.
5465 
5466 Node at level 0 has size = m_UsableSize.
5467 Each successive level contains nodes half the size of those on the previous level.
5468 m_LevelCount is the maximum number of levels to use in the current object.
5469 */
5470 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5471 {
5472  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5473 public:
5474  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5475  virtual ~VmaBlockMetadata_Buddy();
5476  virtual void Init(VkDeviceSize size);
5477 
5478  virtual bool Validate() const;
5479  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5480  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5481  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5482  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5483 
5484  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5485  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5486 
5487 #if VMA_STATS_STRING_ENABLED
5488  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5489 #endif
5490 
5491  virtual bool CreateAllocationRequest(
5492  uint32_t currentFrameIndex,
5493  uint32_t frameInUseCount,
5494  VkDeviceSize bufferImageGranularity,
5495  VkDeviceSize allocSize,
5496  VkDeviceSize allocAlignment,
5497  bool upperAddress,
5498  VmaSuballocationType allocType,
5499  bool canMakeOtherLost,
5500  uint32_t strategy,
5501  VmaAllocationRequest* pAllocationRequest);
5502 
5503  virtual bool MakeRequestedAllocationsLost(
5504  uint32_t currentFrameIndex,
5505  uint32_t frameInUseCount,
5506  VmaAllocationRequest* pAllocationRequest);
5507 
5508  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5509 
5510  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5511 
5512  virtual void Alloc(
5513  const VmaAllocationRequest& request,
5514  VmaSuballocationType type,
5515  VkDeviceSize allocSize,
5516  bool upperAddress,
5517  VmaAllocation hAllocation);
5518 
5519  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5520  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5521 
5522 private:
5523  static const VkDeviceSize MIN_NODE_SIZE = 32;
5524  static const size_t MAX_LEVELS = 30;
5525 
5526  struct ValidationContext
5527  {
5528  size_t calculatedAllocationCount;
5529  size_t calculatedFreeCount;
5530  VkDeviceSize calculatedSumFreeSize;
5531 
5532  ValidationContext() :
5533  calculatedAllocationCount(0),
5534  calculatedFreeCount(0),
5535  calculatedSumFreeSize(0) { }
5536  };
5537 
5538  struct Node
5539  {
5540  VkDeviceSize offset;
5541  enum TYPE
5542  {
5543  TYPE_FREE,
5544  TYPE_ALLOCATION,
5545  TYPE_SPLIT,
5546  TYPE_COUNT
5547  } type;
5548  Node* parent;
5549  Node* buddy;
5550 
5551  union
5552  {
5553  struct
5554  {
5555  Node* prev;
5556  Node* next;
5557  } free;
5558  struct
5559  {
5560  VmaAllocation alloc;
5561  } allocation;
5562  struct
5563  {
5564  Node* leftChild;
5565  } split;
5566  };
5567  };
5568 
5569  // Size of the memory block aligned down to a power of two.
5570  VkDeviceSize m_UsableSize;
5571  uint32_t m_LevelCount;
5572 
5573  Node* m_Root;
5574  struct {
5575  Node* front;
5576  Node* back;
5577  } m_FreeList[MAX_LEVELS];
5578  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5579  size_t m_AllocationCount;
5580  // Number of nodes in the tree with type == TYPE_FREE.
5581  size_t m_FreeCount;
5582  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5583  VkDeviceSize m_SumFreeSize;
5584 
5585  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5586  void DeleteNode(Node* node);
5587  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5588  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5589  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5590  // Alloc passed just for validation. Can be null.
5591  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5592  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5593  // Adds node to the front of FreeList at given level.
5594  // node->type must be FREE.
5595  // node->free.prev, next can be undefined.
5596  void AddToFreeListFront(uint32_t level, Node* node);
5597  // Removes node from FreeList at given level.
5598  // node->type must be FREE.
5599  // node->free.prev, next stay untouched.
5600  void RemoveFromFreeList(uint32_t level, Node* node);
5601 
5602 #if VMA_STATS_STRING_ENABLED
5603  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5604 #endif
5605 };
5606 
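// Editor's note (illustrative arithmetic, not part of the library): for a block of
// 100 MiB, m_UsableSize is aligned down to the nearest power of two, 64 MiB; level 0
// is then a single 64 MiB node, level 1 holds 32 MiB nodes, and in general
// LevelToNodeSize(level) == m_UsableSize >> level, never smaller than MIN_NODE_SIZE.
// The remaining 36 MiB is GetUnusableSize(), reported as one unused range.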
5607 /*
5608 Represents a single block of device memory (`VkDeviceMemory`) with all the
5609 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5610 
5611 Thread-safety: This class must be externally synchronized.
5612 */
5613 class VmaDeviceMemoryBlock
5614 {
5615  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5616 public:
5617  VmaBlockMetadata* m_pMetadata;
5618 
5619  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5620 
5621  ~VmaDeviceMemoryBlock()
5622  {
5623  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5624  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5625  }
5626 
5627  // Always call after construction.
5628  void Init(
5629  VmaAllocator hAllocator,
5630  uint32_t newMemoryTypeIndex,
5631  VkDeviceMemory newMemory,
5632  VkDeviceSize newSize,
5633  uint32_t id,
5634  uint32_t algorithm);
5635  // Always call before destruction.
5636  void Destroy(VmaAllocator allocator);
5637 
5638  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5639  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5640  uint32_t GetId() const { return m_Id; }
5641  void* GetMappedData() const { return m_pMappedData; }
5642 
5643  // Validates all data structures inside this object. If not valid, returns false.
5644  bool Validate() const;
5645 
5646  VkResult CheckCorruption(VmaAllocator hAllocator);
5647 
5648  // ppData can be null.
5649  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5650  void Unmap(VmaAllocator hAllocator, uint32_t count);
5651 
5652  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5653  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5654 
5655  VkResult BindBufferMemory(
5656  const VmaAllocator hAllocator,
5657  const VmaAllocation hAllocation,
5658  VkBuffer hBuffer);
5659  VkResult BindImageMemory(
5660  const VmaAllocator hAllocator,
5661  const VmaAllocation hAllocation,
5662  VkImage hImage);
5663 
5664 private:
5665  uint32_t m_MemoryTypeIndex;
5666  uint32_t m_Id;
5667  VkDeviceMemory m_hMemory;
5668 
5669  /*
5670  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5671  Also protects m_MapCount, m_pMappedData.
5672  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5673  */
5674  VMA_MUTEX m_Mutex;
5675  uint32_t m_MapCount;
5676  void* m_pMappedData;
5677 };
5678 
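// Editor's note (illustrative, not part of the library; inferred from the declarations
// above): Map()/Unmap() take a count so several logical mappings can be registered in
// one call; the block keeps m_MapCount under m_Mutex, and only the transitions to and
// from zero are expected to touch vkMapMemory/vkUnmapMemory, which is what allows
// concurrent vmaMapMemory() calls on allocations from the same block.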
5679 struct VmaPointerLess
5680 {
5681  bool operator()(const void* lhs, const void* rhs) const
5682  {
5683  return lhs < rhs;
5684  }
5685 };
5686 
5687 struct VmaDefragmentationMove
5688 {
5689  size_t srcBlockIndex;
5690  size_t dstBlockIndex;
5691  VkDeviceSize srcOffset;
5692  VkDeviceSize dstOffset;
5693  VkDeviceSize size;
5694 };
5695 
5696 class VmaDefragmentationAlgorithm;
5697 
5698 /*
5699 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5700 Vulkan memory type.
5701 
5702 Synchronized internally with a mutex.
5703 */
5704 struct VmaBlockVector
5705 {
5706  VMA_CLASS_NO_COPY(VmaBlockVector)
5707 public:
5708  VmaBlockVector(
5709  VmaAllocator hAllocator,
5710  uint32_t memoryTypeIndex,
5711  VkDeviceSize preferredBlockSize,
5712  size_t minBlockCount,
5713  size_t maxBlockCount,
5714  VkDeviceSize bufferImageGranularity,
5715  uint32_t frameInUseCount,
5716  bool isCustomPool,
5717  bool explicitBlockSize,
5718  uint32_t algorithm);
5719  ~VmaBlockVector();
5720 
5721  VkResult CreateMinBlocks();
5722 
5723  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5724  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5725  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5726  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5727  uint32_t GetAlgorithm() const { return m_Algorithm; }
5728 
5729  void GetPoolStats(VmaPoolStats* pStats);
5730 
5731  bool IsEmpty() const { return m_Blocks.empty(); }
5732  bool IsCorruptionDetectionEnabled() const;
5733 
5734  VkResult Allocate(
5735  VmaPool hCurrentPool,
5736  uint32_t currentFrameIndex,
5737  VkDeviceSize size,
5738  VkDeviceSize alignment,
5739  const VmaAllocationCreateInfo& createInfo,
5740  VmaSuballocationType suballocType,
5741  VmaAllocation* pAllocation);
5742 
5743  void Free(
5744  VmaAllocation hAllocation);
5745 
5746  // Adds statistics of this BlockVector to pStats.
5747  void AddStats(VmaStats* pStats);
5748 
5749 #if VMA_STATS_STRING_ENABLED
5750  void PrintDetailedMap(class VmaJsonWriter& json);
5751 #endif
5752 
5753  void MakePoolAllocationsLost(
5754  uint32_t currentFrameIndex,
5755  size_t* pLostAllocationCount);
5756  VkResult CheckCorruption();
5757 
5758  // Saves results in pCtx->res.
5759  void Defragment(
5760  class VmaBlockVectorDefragmentationContext* pCtx,
5761  VmaDefragmentationStats* pStats,
5762  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5763  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5764  VkCommandBuffer commandBuffer);
5765  void DefragmentationEnd(
5766  class VmaBlockVectorDefragmentationContext* pCtx,
5767  VmaDefragmentationStats* pStats);
5768 
5770  // To be used only while the m_Mutex is locked. Used during defragmentation.
5771 
5772  size_t GetBlockCount() const { return m_Blocks.size(); }
5773  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5774  size_t CalcAllocationCount() const;
5775  bool IsBufferImageGranularityConflictPossible() const;
5776 
5777 private:
5778  friend class VmaDefragmentationAlgorithm_Generic;
5779 
5780  const VmaAllocator m_hAllocator;
5781  const uint32_t m_MemoryTypeIndex;
5782  const VkDeviceSize m_PreferredBlockSize;
5783  const size_t m_MinBlockCount;
5784  const size_t m_MaxBlockCount;
5785  const VkDeviceSize m_BufferImageGranularity;
5786  const uint32_t m_FrameInUseCount;
5787  const bool m_IsCustomPool;
5788  const bool m_ExplicitBlockSize;
5789  const uint32_t m_Algorithm;
5790  /* There can be at most one block that is completely empty - a
5791  hysteresis to avoid the pessimistic case of alternating creation and destruction
5792  of a VkDeviceMemory. */
5793  bool m_HasEmptyBlock;
5794  VMA_RW_MUTEX m_Mutex;
5795  // Incrementally sorted by sumFreeSize, ascending.
5796  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5797  uint32_t m_NextBlockId;
5798 
5799  VkDeviceSize CalcMaxBlockSize() const;
5800 
5801  // Finds and removes given block from vector.
5802  void Remove(VmaDeviceMemoryBlock* pBlock);
5803 
5804  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5805  // after this call.
5806  void IncrementallySortBlocks();
5807 
5808  // To be used only without CAN_MAKE_OTHER_LOST flag.
5809  VkResult AllocateFromBlock(
5810  VmaDeviceMemoryBlock* pBlock,
5811  VmaPool hCurrentPool,
5812  uint32_t currentFrameIndex,
5813  VkDeviceSize size,
5814  VkDeviceSize alignment,
5815  VmaAllocationCreateFlags allocFlags,
5816  void* pUserData,
5817  VmaSuballocationType suballocType,
5818  uint32_t strategy,
5819  VmaAllocation* pAllocation);
5820 
5821  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5822 
5823  // Saves result to pCtx->res.
5824  void ApplyDefragmentationMovesCpu(
5825  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5826  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5827  // Saves result to pCtx->res.
5828  void ApplyDefragmentationMovesGpu(
5829  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5830  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5831  VkCommandBuffer commandBuffer);
5832 
5833  /*
5834  Used during defragmentation. pDefragmentationStats is optional. It's in/out
5835  - updated with new data.
5836  */
5837  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5838 };
5839 
5840 struct VmaPool_T
5841 {
5842  VMA_CLASS_NO_COPY(VmaPool_T)
5843 public:
5844  VmaBlockVector m_BlockVector;
5845 
5846  VmaPool_T(
5847  VmaAllocator hAllocator,
5848  const VmaPoolCreateInfo& createInfo,
5849  VkDeviceSize preferredBlockSize);
5850  ~VmaPool_T();
5851 
5852  uint32_t GetId() const { return m_Id; }
5853  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5854 
5855 #if VMA_STATS_STRING_ENABLED
5856  //void PrintDetailedMap(class VmaStringBuilder& sb);
5857 #endif
5858 
5859 private:
5860  uint32_t m_Id;
5861 };
5862 
5863 /*
5864 Performs defragmentation:
5865 
5866 - Updates `pBlockVector->m_pMetadata`.
5867 - Updates allocations by calling ChangeBlockAllocation().
5868 - Does not move actual data, only returns requested moves as `moves`.
5869 */
5870 class VmaDefragmentationAlgorithm
5871 {
5872  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
5873 public:
5874  VmaDefragmentationAlgorithm(
5875  VmaAllocator hAllocator,
5876  VmaBlockVector* pBlockVector,
5877  uint32_t currentFrameIndex) :
5878  m_hAllocator(hAllocator),
5879  m_pBlockVector(pBlockVector),
5880  m_CurrentFrameIndex(currentFrameIndex)
5881  {
5882  }
5883  virtual ~VmaDefragmentationAlgorithm()
5884  {
5885  }
5886 
5887  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
5888  virtual void AddAll() = 0;
5889 
5890  virtual VkResult Defragment(
5891  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5892  VkDeviceSize maxBytesToMove,
5893  uint32_t maxAllocationsToMove) = 0;
5894 
5895  virtual VkDeviceSize GetBytesMoved() const = 0;
5896  virtual uint32_t GetAllocationsMoved() const = 0;
5897 
5898 protected:
5899  VmaAllocator const m_hAllocator;
5900  VmaBlockVector* const m_pBlockVector;
5901  const uint32_t m_CurrentFrameIndex;
5902 
5903  struct AllocationInfo
5904  {
5905  VmaAllocation m_hAllocation;
5906  VkBool32* m_pChanged;
5907 
5908  AllocationInfo() :
5909  m_hAllocation(VK_NULL_HANDLE),
5910  m_pChanged(VMA_NULL)
5911  {
5912  }
5913  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
5914  m_hAllocation(hAlloc),
5915  m_pChanged(pChanged)
5916  {
5917  }
5918  };
5919 };
5920 
5921 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
5922 {
5923  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
5924 public:
5925  VmaDefragmentationAlgorithm_Generic(
5926  VmaAllocator hAllocator,
5927  VmaBlockVector* pBlockVector,
5928  uint32_t currentFrameIndex,
5929  bool overlappingMoveSupported);
5930  virtual ~VmaDefragmentationAlgorithm_Generic();
5931 
5932  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5933  virtual void AddAll() { m_AllAllocations = true; }
5934 
5935  virtual VkResult Defragment(
5936  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5937  VkDeviceSize maxBytesToMove,
5938  uint32_t maxAllocationsToMove);
5939 
5940  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5941  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5942 
5943 private:
5944  uint32_t m_AllocationCount;
5945  bool m_AllAllocations;
5946 
5947  VkDeviceSize m_BytesMoved;
5948  uint32_t m_AllocationsMoved;
5949 
5950  struct AllocationInfoSizeGreater
5951  {
5952  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5953  {
5954  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5955  }
5956  };
5957 
5958  struct AllocationInfoOffsetGreater
5959  {
5960  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5961  {
5962  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
5963  }
5964  };
5965 
5966  struct BlockInfo
5967  {
5968  size_t m_OriginalBlockIndex;
5969  VmaDeviceMemoryBlock* m_pBlock;
5970  bool m_HasNonMovableAllocations;
5971  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5972 
5973  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5974  m_OriginalBlockIndex(SIZE_MAX),
5975  m_pBlock(VMA_NULL),
5976  m_HasNonMovableAllocations(true),
5977  m_Allocations(pAllocationCallbacks)
5978  {
5979  }
5980 
5981  void CalcHasNonMovableAllocations()
5982  {
5983  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5984  const size_t defragmentAllocCount = m_Allocations.size();
5985  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5986  }
5987 
5988  void SortAllocationsBySizeDescending()
5989  {
5990  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5991  }
5992 
5993  void SortAllocationsByOffsetDescending()
5994  {
5995  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
5996  }
5997  };
5998 
5999  struct BlockPointerLess
6000  {
6001  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6002  {
6003  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6004  }
6005  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6006  {
6007  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6008  }
6009  };
6010 
6011  // 1. Blocks with some non-movable allocations go first.
6012  // 2. Blocks with smaller sumFreeSize go first.
6013  struct BlockInfoCompareMoveDestination
6014  {
6015  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6016  {
6017  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6018  {
6019  return true;
6020  }
6021  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6022  {
6023  return false;
6024  }
6025  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6026  {
6027  return true;
6028  }
6029  return false;
6030  }
6031  };
6032 
6033  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6034  BlockInfoVector m_Blocks;
6035 
6036  VkResult DefragmentRound(
6037  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6038  VkDeviceSize maxBytesToMove,
6039  uint32_t maxAllocationsToMove);
6040 
6041  size_t CalcBlocksWithNonMovableCount() const;
6042 
6043  static bool MoveMakesSense(
6044  size_t dstBlockIndex, VkDeviceSize dstOffset,
6045  size_t srcBlockIndex, VkDeviceSize srcOffset);
6046 };
6047 
6048 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6049 {
6050  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6051 public:
6052  VmaDefragmentationAlgorithm_Fast(
6053  VmaAllocator hAllocator,
6054  VmaBlockVector* pBlockVector,
6055  uint32_t currentFrameIndex,
6056  bool overlappingMoveSupported);
6057  virtual ~VmaDefragmentationAlgorithm_Fast();
6058 
6059  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6060  virtual void AddAll() { m_AllAllocations = true; }
6061 
6062  virtual VkResult Defragment(
6063  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6064  VkDeviceSize maxBytesToMove,
6065  uint32_t maxAllocationsToMove);
6066 
6067  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6068  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6069 
6070 private:
6071  struct BlockInfo
6072  {
6073  size_t origBlockIndex;
6074  };
6075 
6076  class FreeSpaceDatabase
6077  {
6078  public:
6079  FreeSpaceDatabase()
6080  {
6081  FreeSpace s = {};
6082  s.blockInfoIndex = SIZE_MAX;
6083  for(size_t i = 0; i < MAX_COUNT; ++i)
6084  {
6085  m_FreeSpaces[i] = s;
6086  }
6087  }
6088 
6089  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6090  {
6091  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6092  {
6093  return;
6094  }
6095 
6096  // Find the first empty (invalid) slot, or else the smallest registered free space that is smaller than the new one.
6097  size_t bestIndex = SIZE_MAX;
6098  for(size_t i = 0; i < MAX_COUNT; ++i)
6099  {
6100  // Empty structure.
6101  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6102  {
6103  bestIndex = i;
6104  break;
6105  }
6106  if(m_FreeSpaces[i].size < size &&
6107  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6108  {
6109  bestIndex = i;
6110  }
6111  }
6112 
6113  if(bestIndex != SIZE_MAX)
6114  {
6115  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6116  m_FreeSpaces[bestIndex].offset = offset;
6117  m_FreeSpaces[bestIndex].size = size;
6118  }
6119  }
6120 
6121  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6122  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6123  {
6124  size_t bestIndex = SIZE_MAX;
6125  VkDeviceSize bestFreeSpaceAfter = 0;
6126  for(size_t i = 0; i < MAX_COUNT; ++i)
6127  {
6128  // Structure is valid.
6129  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6130  {
6131  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6132  // Allocation fits into this structure.
6133  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6134  {
6135  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6136  (dstOffset + size);
6137  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6138  {
6139  bestIndex = i;
6140  bestFreeSpaceAfter = freeSpaceAfter;
6141  }
6142  }
6143  }
6144  }
6145 
6146  if(bestIndex != SIZE_MAX)
6147  {
6148  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6149  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6150 
6151  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6152  {
6153  // Leave this structure for remaining empty space.
6154  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6155  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6156  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6157  }
6158  else
6159  {
6160  // This structure becomes invalid.
6161  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6162  }
6163 
6164  return true;
6165  }
6166 
6167  return false;
6168  }
6169 
6170  private:
6171  static const size_t MAX_COUNT = 4;
6172 
6173  struct FreeSpace
6174  {
6175  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6176  VkDeviceSize offset;
6177  VkDeviceSize size;
6178  } m_FreeSpaces[MAX_COUNT];
6179  };
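// Worked example (illustrative, assuming VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER <= 72):
// after Register(blockInfoIndex = 0, offset = 100, size = 300) the slot covers [100, 400).
// Fetch(alignment = 64, size = 200, ...) aligns 100 up to 128; since 128 + 200 = 328 <= 400,
// it succeeds with outDstOffset = 128 and the slot shrinks to the remaining [328, 400)
// (72 bytes) - or would be invalidated if that remainder fell below the registration threshold.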
6180 
6181  const bool m_OverlappingMoveSupported;
6182 
6183  uint32_t m_AllocationCount;
6184  bool m_AllAllocations;
6185 
6186  VkDeviceSize m_BytesMoved;
6187  uint32_t m_AllocationsMoved;
6188 
6189  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6190 
6191  void PreprocessMetadata();
6192  void PostprocessMetadata();
6193  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6194 };
6195 
6196 struct VmaBlockDefragmentationContext
6197 {
6198 private:
6199  VMA_CLASS_NO_COPY(VmaBlockDefragmentationContext)
6200 public:
6201  enum BLOCK_FLAG
6202  {
6203  BLOCK_FLAG_USED = 0x00000001,
6204  };
6205  uint32_t flags;
6206  VkBuffer hBuffer;
6207 
6208  VmaBlockDefragmentationContext() :
6209  flags(0),
6210  hBuffer(VK_NULL_HANDLE)
6211  {
6212  }
6213 };
6214 
6215 class VmaBlockVectorDefragmentationContext
6216 {
6217  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6218 public:
6219  VkResult res;
6220  bool mutexLocked;
6221  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6222 
6223  VmaBlockVectorDefragmentationContext(
6224  VmaAllocator hAllocator,
6225  VmaPool hCustomPool, // Optional.
6226  VmaBlockVector* pBlockVector,
6227  uint32_t currFrameIndex,
6228  uint32_t flags);
6229  ~VmaBlockVectorDefragmentationContext();
6230 
6231  VmaPool GetCustomPool() const { return m_hCustomPool; }
6232  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6233  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6234 
6235  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6236  void AddAll() { m_AllAllocations = true; }
6237 
6238  void Begin(bool overlappingMoveSupported);
6239 
6240 private:
6241  const VmaAllocator m_hAllocator;
6242  // Null if not from custom pool.
6243  const VmaPool m_hCustomPool;
6244  // Redundant, kept for convenience so it does not have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6245  VmaBlockVector* const m_pBlockVector;
6246  const uint32_t m_CurrFrameIndex;
6247  const uint32_t m_AlgorithmFlags;
6248  // Owner of this object.
6249  VmaDefragmentationAlgorithm* m_pAlgorithm;
6250 
6251  struct AllocInfo
6252  {
6253  VmaAllocation hAlloc;
6254  VkBool32* pChanged;
6255  };
6256  // Used between constructor and Begin.
6257  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6258  bool m_AllAllocations;
6259 };
6260 
6261 struct VmaDefragmentationContext_T
6262 {
6263 private:
6264  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6265 public:
6266  VmaDefragmentationContext_T(
6267  VmaAllocator hAllocator,
6268  uint32_t currFrameIndex,
6269  uint32_t flags,
6270  VmaDefragmentationStats* pStats);
6271  ~VmaDefragmentationContext_T();
6272 
6273  void AddPools(uint32_t poolCount, VmaPool* pPools);
6274  void AddAllocations(
6275  uint32_t allocationCount,
6276  VmaAllocation* pAllocations,
6277  VkBool32* pAllocationsChanged);
6278 
6279  /*
6280  Returns:
6281  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6282  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6283  - Negative value if error occurred and object can be destroyed immediately.
6284  */
6285  VkResult Defragment(
6286  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6287  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6288  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6289 
6290 private:
6291  const VmaAllocator m_hAllocator;
6292  const uint32_t m_CurrFrameIndex;
6293  const uint32_t m_Flags;
6294  VmaDefragmentationStats* const m_pStats;
6295  // Owner of these objects.
6296  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6297  // Owner of these objects.
6298  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6299 };
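// Illustrative sketch (not part of the library): the typical public-API flow that ends up
// creating and driving a VmaDefragmentationContext_T. `allocator` and `allocs` are assumed
// to exist already; the GPU path additionally needs a command buffer in the info structure.
//
//   VmaDefragmentationInfo2 info = {};
//   info.allocationCount = (uint32_t)allocs.size();
//   info.pAllocations = allocs.data();
//   VmaDefragmentationStats stats = {};
//   VmaDefragmentationContext ctx = VK_NULL_HANDLE;
//   VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
//   // res == VK_NOT_READY means moves were only recorded; finish them, then:
//   vmaDefragmentationEnd(allocator, ctx);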
6300 
6301 #if VMA_RECORDING_ENABLED
6302 
6303 class VmaRecorder
6304 {
6305 public:
6306  VmaRecorder();
6307  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6308  void WriteConfiguration(
6309  const VkPhysicalDeviceProperties& devProps,
6310  const VkPhysicalDeviceMemoryProperties& memProps,
6311  bool dedicatedAllocationExtensionEnabled);
6312  ~VmaRecorder();
6313 
6314  void RecordCreateAllocator(uint32_t frameIndex);
6315  void RecordDestroyAllocator(uint32_t frameIndex);
6316  void RecordCreatePool(uint32_t frameIndex,
6317  const VmaPoolCreateInfo& createInfo,
6318  VmaPool pool);
6319  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6320  void RecordAllocateMemory(uint32_t frameIndex,
6321  const VkMemoryRequirements& vkMemReq,
6322  const VmaAllocationCreateInfo& createInfo,
6323  VmaAllocation allocation);
6324  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6325  const VkMemoryRequirements& vkMemReq,
6326  bool requiresDedicatedAllocation,
6327  bool prefersDedicatedAllocation,
6328  const VmaAllocationCreateInfo& createInfo,
6329  VmaAllocation allocation);
6330  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6331  const VkMemoryRequirements& vkMemReq,
6332  bool requiresDedicatedAllocation,
6333  bool prefersDedicatedAllocation,
6334  const VmaAllocationCreateInfo& createInfo,
6335  VmaAllocation allocation);
6336  void RecordFreeMemory(uint32_t frameIndex,
6337  VmaAllocation allocation);
6338  void RecordResizeAllocation(
6339  uint32_t frameIndex,
6340  VmaAllocation allocation,
6341  VkDeviceSize newSize);
6342  void RecordSetAllocationUserData(uint32_t frameIndex,
6343  VmaAllocation allocation,
6344  const void* pUserData);
6345  void RecordCreateLostAllocation(uint32_t frameIndex,
6346  VmaAllocation allocation);
6347  void RecordMapMemory(uint32_t frameIndex,
6348  VmaAllocation allocation);
6349  void RecordUnmapMemory(uint32_t frameIndex,
6350  VmaAllocation allocation);
6351  void RecordFlushAllocation(uint32_t frameIndex,
6352  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6353  void RecordInvalidateAllocation(uint32_t frameIndex,
6354  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6355  void RecordCreateBuffer(uint32_t frameIndex,
6356  const VkBufferCreateInfo& bufCreateInfo,
6357  const VmaAllocationCreateInfo& allocCreateInfo,
6358  VmaAllocation allocation);
6359  void RecordCreateImage(uint32_t frameIndex,
6360  const VkImageCreateInfo& imageCreateInfo,
6361  const VmaAllocationCreateInfo& allocCreateInfo,
6362  VmaAllocation allocation);
6363  void RecordDestroyBuffer(uint32_t frameIndex,
6364  VmaAllocation allocation);
6365  void RecordDestroyImage(uint32_t frameIndex,
6366  VmaAllocation allocation);
6367  void RecordTouchAllocation(uint32_t frameIndex,
6368  VmaAllocation allocation);
6369  void RecordGetAllocationInfo(uint32_t frameIndex,
6370  VmaAllocation allocation);
6371  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6372  VmaPool pool);
6373 
6374 private:
6375  struct CallParams
6376  {
6377  uint32_t threadId;
6378  double time;
6379  };
6380 
6381  class UserDataString
6382  {
6383  public:
6384  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6385  const char* GetString() const { return m_Str; }
6386 
6387  private:
6388  char m_PtrStr[17];
6389  const char* m_Str;
6390  };
6391 
6392  bool m_UseMutex;
6393  VmaRecordFlags m_Flags;
6394  FILE* m_File;
6395  VMA_MUTEX m_FileMutex;
6396  int64_t m_Freq;
6397  int64_t m_StartCounter;
6398 
6399  void GetBasicParams(CallParams& outParams);
6400  void Flush();
6401 };
6402 
6403 #endif // #if VMA_RECORDING_ENABLED
6404 
6405 // Main allocator object.
6406 struct VmaAllocator_T
6407 {
6408  VMA_CLASS_NO_COPY(VmaAllocator_T)
6409 public:
6410  bool m_UseMutex;
6411  bool m_UseKhrDedicatedAllocation;
6412  VkDevice m_hDevice;
6413  bool m_AllocationCallbacksSpecified;
6414  VkAllocationCallbacks m_AllocationCallbacks;
6415  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6416 
6417  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
6418  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6419  VMA_MUTEX m_HeapSizeLimitMutex;
6420 
6421  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6422  VkPhysicalDeviceMemoryProperties m_MemProps;
6423 
6424  // Default pools.
6425  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6426 
6427  // Each vector is sorted by memory (handle value).
6428  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6429  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6430  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6431 
6432  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6433  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6434  ~VmaAllocator_T();
6435 
6436  const VkAllocationCallbacks* GetAllocationCallbacks() const
6437  {
6438  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6439  }
6440  const VmaVulkanFunctions& GetVulkanFunctions() const
6441  {
6442  return m_VulkanFunctions;
6443  }
6444 
6445  VkDeviceSize GetBufferImageGranularity() const
6446  {
6447  return VMA_MAX(
6448  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6449  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6450  }
6451 
6452  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6453  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6454 
6455  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6456  {
6457  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6458  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6459  }
6460  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6461  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6462  {
6463  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6464  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6465  }
6466  // Minimum alignment for all allocations in specific memory type.
6467  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6468  {
6469  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6470  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6471  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6472  }
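// Example (illustrative): for a memory type that is HOST_VISIBLE but not HOST_COHERENT on a
// device reporting nonCoherentAtomSize = 64, and with the default VMA_DEBUG_ALIGNMENT of 1,
// GetMemoryTypeMinAlignment() returns max(1, 64) = 64, so every suballocation can be
// flushed/invalidated on nonCoherentAtomSize boundaries without touching its neighbors.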
6473 
6474  bool IsIntegratedGpu() const
6475  {
6476  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6477  }
6478 
6479 #if VMA_RECORDING_ENABLED
6480  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6481 #endif
6482 
6483  void GetBufferMemoryRequirements(
6484  VkBuffer hBuffer,
6485  VkMemoryRequirements& memReq,
6486  bool& requiresDedicatedAllocation,
6487  bool& prefersDedicatedAllocation) const;
6488  void GetImageMemoryRequirements(
6489  VkImage hImage,
6490  VkMemoryRequirements& memReq,
6491  bool& requiresDedicatedAllocation,
6492  bool& prefersDedicatedAllocation) const;
6493 
6494  // Main allocation function.
6495  VkResult AllocateMemory(
6496  const VkMemoryRequirements& vkMemReq,
6497  bool requiresDedicatedAllocation,
6498  bool prefersDedicatedAllocation,
6499  VkBuffer dedicatedBuffer,
6500  VkImage dedicatedImage,
6501  const VmaAllocationCreateInfo& createInfo,
6502  VmaSuballocationType suballocType,
6503  VmaAllocation* pAllocation);
6504 
6505  // Main deallocation function.
6506  void FreeMemory(const VmaAllocation allocation);
6507 
6508  VkResult ResizeAllocation(
6509  const VmaAllocation alloc,
6510  VkDeviceSize newSize);
6511 
6512  void CalculateStats(VmaStats* pStats);
6513 
6514 #if VMA_STATS_STRING_ENABLED
6515  void PrintDetailedMap(class VmaJsonWriter& json);
6516 #endif
6517 
6518  VkResult DefragmentationBegin(
6519  const VmaDefragmentationInfo2& info,
6520  VmaDefragmentationStats* pStats,
6521  VmaDefragmentationContext* pContext);
6522  VkResult DefragmentationEnd(
6523  VmaDefragmentationContext context);
6524 
6525  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6526  bool TouchAllocation(VmaAllocation hAllocation);
6527 
6528  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6529  void DestroyPool(VmaPool pool);
6530  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6531 
6532  void SetCurrentFrameIndex(uint32_t frameIndex);
6533  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6534 
6535  void MakePoolAllocationsLost(
6536  VmaPool hPool,
6537  size_t* pLostAllocationCount);
6538  VkResult CheckPoolCorruption(VmaPool hPool);
6539  VkResult CheckCorruption(uint32_t memoryTypeBits);
6540 
6541  void CreateLostAllocation(VmaAllocation* pAllocation);
6542 
6543  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6544  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6545 
6546  VkResult Map(VmaAllocation hAllocation, void** ppData);
6547  void Unmap(VmaAllocation hAllocation);
6548 
6549  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6550  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6551 
6552  void FlushOrInvalidateAllocation(
6553  VmaAllocation hAllocation,
6554  VkDeviceSize offset, VkDeviceSize size,
6555  VMA_CACHE_OPERATION op);
6556 
6557  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6558 
6559 private:
6560  VkDeviceSize m_PreferredLargeHeapBlockSize;
6561 
6562  VkPhysicalDevice m_PhysicalDevice;
6563  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6564 
6565  VMA_RW_MUTEX m_PoolsMutex;
6566  // Protected by m_PoolsMutex. Sorted by pointer value.
6567  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6568  uint32_t m_NextPoolId;
6569 
6570  VmaVulkanFunctions m_VulkanFunctions;
6571 
6572 #if VMA_RECORDING_ENABLED
6573  VmaRecorder* m_pRecorder;
6574 #endif
6575 
6576  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6577 
6578  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6579 
6580  VkResult AllocateMemoryOfType(
6581  VkDeviceSize size,
6582  VkDeviceSize alignment,
6583  bool dedicatedAllocation,
6584  VkBuffer dedicatedBuffer,
6585  VkImage dedicatedImage,
6586  const VmaAllocationCreateInfo& createInfo,
6587  uint32_t memTypeIndex,
6588  VmaSuballocationType suballocType,
6589  VmaAllocation* pAllocation);
6590 
6591  // Allocates and registers new VkDeviceMemory specifically for single allocation.
6592  VkResult AllocateDedicatedMemory(
6593  VkDeviceSize size,
6594  VmaSuballocationType suballocType,
6595  uint32_t memTypeIndex,
6596  bool map,
6597  bool isUserDataString,
6598  void* pUserData,
6599  VkBuffer dedicatedBuffer,
6600  VkImage dedicatedImage,
6601  VmaAllocation* pAllocation);
6602 
6603  // Frees device memory of given allocation, which must have been created as a dedicated allocation.
6604  void FreeDedicatedMemory(VmaAllocation allocation);
6605 };
6606 
6608 // Memory allocation #2 after VmaAllocator_T definition
6609 
6610 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6611 {
6612  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6613 }
6614 
6615 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6616 {
6617  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6618 }
6619 
6620 template<typename T>
6621 static T* VmaAllocate(VmaAllocator hAllocator)
6622 {
6623  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6624 }
6625 
6626 template<typename T>
6627 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6628 {
6629  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6630 }
6631 
6632 template<typename T>
6633 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6634 {
6635  if(ptr != VMA_NULL)
6636  {
6637  ptr->~T();
6638  VmaFree(hAllocator, ptr);
6639  }
6640 }
6641 
6642 template<typename T>
6643 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6644 {
6645  if(ptr != VMA_NULL)
6646  {
6647  for(size_t i = count; i--; )
6648  ptr[i].~T();
6649  VmaFree(hAllocator, ptr);
6650  }
6651 }
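// Illustrative usage sketch (not part of the library): pairing these helpers with placement
// new; `Foo` and `hAllocator` are assumptions for the example.
//
//   Foo* p = new(VmaAllocate<Foo>(hAllocator)) Foo();
//   // ... use p ...
//   vma_delete(hAllocator, p); // calls ~Foo() and returns the memory via VmaFree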
6652 
6654 // VmaStringBuilder
6655 
6656 #if VMA_STATS_STRING_ENABLED
6657 
6658 class VmaStringBuilder
6659 {
6660 public:
6661  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6662  size_t GetLength() const { return m_Data.size(); }
6663  const char* GetData() const { return m_Data.data(); }
6664 
6665  void Add(char ch) { m_Data.push_back(ch); }
6666  void Add(const char* pStr);
6667  void AddNewLine() { Add('\n'); }
6668  void AddNumber(uint32_t num);
6669  void AddNumber(uint64_t num);
6670  void AddPointer(const void* ptr);
6671 
6672 private:
6673  VmaVector< char, VmaStlAllocator<char> > m_Data;
6674 };
6675 
6676 void VmaStringBuilder::Add(const char* pStr)
6677 {
6678  const size_t strLen = strlen(pStr);
6679  if(strLen > 0)
6680  {
6681  const size_t oldCount = m_Data.size();
6682  m_Data.resize(oldCount + strLen);
6683  memcpy(m_Data.data() + oldCount, pStr, strLen);
6684  }
6685 }
6686 
6687 void VmaStringBuilder::AddNumber(uint32_t num)
6688 {
6689  char buf[11];
6690  VmaUint32ToStr(buf, sizeof(buf), num);
6691  Add(buf);
6692 }
6693 
6694 void VmaStringBuilder::AddNumber(uint64_t num)
6695 {
6696  char buf[21];
6697  VmaUint64ToStr(buf, sizeof(buf), num);
6698  Add(buf);
6699 }
6700 
6701 void VmaStringBuilder::AddPointer(const void* ptr)
6702 {
6703  char buf[21];
6704  VmaPtrToStr(buf, sizeof(buf), ptr);
6705  Add(buf);
6706 }
6707 
6708 #endif // #if VMA_STATS_STRING_ENABLED
6709 
6711 // VmaJsonWriter
6712 
6713 #if VMA_STATS_STRING_ENABLED
6714 
6715 class VmaJsonWriter
6716 {
6717  VMA_CLASS_NO_COPY(VmaJsonWriter)
6718 public:
6719  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6720  ~VmaJsonWriter();
6721 
6722  void BeginObject(bool singleLine = false);
6723  void EndObject();
6724 
6725  void BeginArray(bool singleLine = false);
6726  void EndArray();
6727 
6728  void WriteString(const char* pStr);
6729  void BeginString(const char* pStr = VMA_NULL);
6730  void ContinueString(const char* pStr);
6731  void ContinueString(uint32_t n);
6732  void ContinueString(uint64_t n);
6733  void ContinueString_Pointer(const void* ptr);
6734  void EndString(const char* pStr = VMA_NULL);
6735 
6736  void WriteNumber(uint32_t n);
6737  void WriteNumber(uint64_t n);
6738  void WriteBool(bool b);
6739  void WriteNull();
6740 
6741 private:
6742  static const char* const INDENT;
6743 
6744  enum COLLECTION_TYPE
6745  {
6746  COLLECTION_TYPE_OBJECT,
6747  COLLECTION_TYPE_ARRAY,
6748  };
6749  struct StackItem
6750  {
6751  COLLECTION_TYPE type;
6752  uint32_t valueCount;
6753  bool singleLineMode;
6754  };
6755 
6756  VmaStringBuilder& m_SB;
6757  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6758  bool m_InsideString;
6759 
6760  void BeginValue(bool isString);
6761  void WriteIndent(bool oneLess = false);
6762 };
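// Illustrative usage sketch (not part of the library): inside an object the writer expects
// alternating name/value calls, where every name is a string. `hAllocator` is an assumed,
// already-created allocator handle.
//
//   VmaStringBuilder sb(hAllocator);
//   VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//   json.BeginObject();
//   json.WriteString("Count"); json.WriteNumber(3u);
//   json.WriteString("Valid"); json.WriteBool(true);
//   json.EndObject();
//   // sb.GetData() now holds roughly: { "Count": 3, "Valid": true } (with newlines/indentation)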
6763 
6764 const char* const VmaJsonWriter::INDENT = "  ";
6765 
6766 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6767  m_SB(sb),
6768  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6769  m_InsideString(false)
6770 {
6771 }
6772 
6773 VmaJsonWriter::~VmaJsonWriter()
6774 {
6775  VMA_ASSERT(!m_InsideString);
6776  VMA_ASSERT(m_Stack.empty());
6777 }
6778 
6779 void VmaJsonWriter::BeginObject(bool singleLine)
6780 {
6781  VMA_ASSERT(!m_InsideString);
6782 
6783  BeginValue(false);
6784  m_SB.Add('{');
6785 
6786  StackItem item;
6787  item.type = COLLECTION_TYPE_OBJECT;
6788  item.valueCount = 0;
6789  item.singleLineMode = singleLine;
6790  m_Stack.push_back(item);
6791 }
6792 
6793 void VmaJsonWriter::EndObject()
6794 {
6795  VMA_ASSERT(!m_InsideString);
6796 
6797  WriteIndent(true);
6798  m_SB.Add('}');
6799 
6800  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6801  m_Stack.pop_back();
6802 }
6803 
6804 void VmaJsonWriter::BeginArray(bool singleLine)
6805 {
6806  VMA_ASSERT(!m_InsideString);
6807 
6808  BeginValue(false);
6809  m_SB.Add('[');
6810 
6811  StackItem item;
6812  item.type = COLLECTION_TYPE_ARRAY;
6813  item.valueCount = 0;
6814  item.singleLineMode = singleLine;
6815  m_Stack.push_back(item);
6816 }
6817 
6818 void VmaJsonWriter::EndArray()
6819 {
6820  VMA_ASSERT(!m_InsideString);
6821 
6822  WriteIndent(true);
6823  m_SB.Add(']');
6824 
6825  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6826  m_Stack.pop_back();
6827 }
6828 
6829 void VmaJsonWriter::WriteString(const char* pStr)
6830 {
6831  BeginString(pStr);
6832  EndString();
6833 }
6834 
6835 void VmaJsonWriter::BeginString(const char* pStr)
6836 {
6837  VMA_ASSERT(!m_InsideString);
6838 
6839  BeginValue(true);
6840  m_SB.Add('"');
6841  m_InsideString = true;
6842  if(pStr != VMA_NULL && pStr[0] != '\0')
6843  {
6844  ContinueString(pStr);
6845  }
6846 }
6847 
6848 void VmaJsonWriter::ContinueString(const char* pStr)
6849 {
6850  VMA_ASSERT(m_InsideString);
6851 
6852  const size_t strLen = strlen(pStr);
6853  for(size_t i = 0; i < strLen; ++i)
6854  {
6855  char ch = pStr[i];
6856  if(ch == '\\')
6857  {
6858  m_SB.Add("\\\\");
6859  }
6860  else if(ch == '"')
6861  {
6862  m_SB.Add("\\\"");
6863  }
6864  else if(ch >= 32)
6865  {
6866  m_SB.Add(ch);
6867  }
6868  else switch(ch)
6869  {
6870  case '\b':
6871  m_SB.Add("\\b");
6872  break;
6873  case '\f':
6874  m_SB.Add("\\f");
6875  break;
6876  case '\n':
6877  m_SB.Add("\\n");
6878  break;
6879  case '\r':
6880  m_SB.Add("\\r");
6881  break;
6882  case '\t':
6883  m_SB.Add("\\t");
6884  break;
6885  default:
6886  VMA_ASSERT(0 && "Character not currently supported.");
6887  break;
6888  }
6889  }
6890 }
6891 
6892 void VmaJsonWriter::ContinueString(uint32_t n)
6893 {
6894  VMA_ASSERT(m_InsideString);
6895  m_SB.AddNumber(n);
6896 }
6897 
6898 void VmaJsonWriter::ContinueString(uint64_t n)
6899 {
6900  VMA_ASSERT(m_InsideString);
6901  m_SB.AddNumber(n);
6902 }
6903 
6904 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6905 {
6906  VMA_ASSERT(m_InsideString);
6907  m_SB.AddPointer(ptr);
6908 }
6909 
6910 void VmaJsonWriter::EndString(const char* pStr)
6911 {
6912  VMA_ASSERT(m_InsideString);
6913  if(pStr != VMA_NULL && pStr[0] != '\0')
6914  {
6915  ContinueString(pStr);
6916  }
6917  m_SB.Add('"');
6918  m_InsideString = false;
6919 }
6920 
6921 void VmaJsonWriter::WriteNumber(uint32_t n)
6922 {
6923  VMA_ASSERT(!m_InsideString);
6924  BeginValue(false);
6925  m_SB.AddNumber(n);
6926 }
6927 
6928 void VmaJsonWriter::WriteNumber(uint64_t n)
6929 {
6930  VMA_ASSERT(!m_InsideString);
6931  BeginValue(false);
6932  m_SB.AddNumber(n);
6933 }
6934 
6935 void VmaJsonWriter::WriteBool(bool b)
6936 {
6937  VMA_ASSERT(!m_InsideString);
6938  BeginValue(false);
6939  m_SB.Add(b ? "true" : "false");
6940 }
6941 
6942 void VmaJsonWriter::WriteNull()
6943 {
6944  VMA_ASSERT(!m_InsideString);
6945  BeginValue(false);
6946  m_SB.Add("null");
6947 }
6948 
6949 void VmaJsonWriter::BeginValue(bool isString)
6950 {
6951  if(!m_Stack.empty())
6952  {
6953  StackItem& currItem = m_Stack.back();
6954  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6955  currItem.valueCount % 2 == 0)
6956  {
6957  VMA_ASSERT(isString);
6958  }
6959 
6960  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6961  currItem.valueCount % 2 != 0)
6962  {
6963  m_SB.Add(": ");
6964  }
6965  else if(currItem.valueCount > 0)
6966  {
6967  m_SB.Add(", ");
6968  WriteIndent();
6969  }
6970  else
6971  {
6972  WriteIndent();
6973  }
6974  ++currItem.valueCount;
6975  }
6976 }
6977 
6978 void VmaJsonWriter::WriteIndent(bool oneLess)
6979 {
6980  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6981  {
6982  m_SB.AddNewLine();
6983 
6984  size_t count = m_Stack.size();
6985  if(count > 0 && oneLess)
6986  {
6987  --count;
6988  }
6989  for(size_t i = 0; i < count; ++i)
6990  {
6991  m_SB.Add(INDENT);
6992  }
6993  }
6994 }
6995 
6996 #endif // #if VMA_STATS_STRING_ENABLED
6997 
6999 // class VmaAllocation_T
7000 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7001 {
7002  if(IsUserDataString())
7003  {
7004  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7005 
7006  FreeUserDataString(hAllocator);
7007 
7008  if(pUserData != VMA_NULL)
7009  {
7010  const char* const newStrSrc = (char*)pUserData;
7011  const size_t newStrLen = strlen(newStrSrc);
7012  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7013  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7014  m_pUserData = newStrDst;
7015  }
7016  }
7017  else
7018  {
7019  m_pUserData = pUserData;
7020  }
7021 }
7022 
7023 void VmaAllocation_T::ChangeBlockAllocation(
7024  VmaAllocator hAllocator,
7025  VmaDeviceMemoryBlock* block,
7026  VkDeviceSize offset)
7027 {
7028  VMA_ASSERT(block != VMA_NULL);
7029  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7030 
7031  // Move mapping reference counter from old block to new block.
7032  if(block != m_BlockAllocation.m_Block)
7033  {
7034  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7035  if(IsPersistentMap())
7036  ++mapRefCount;
7037  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7038  block->Map(hAllocator, mapRefCount, VMA_NULL);
7039  }
7040 
7041  m_BlockAllocation.m_Block = block;
7042  m_BlockAllocation.m_Offset = offset;
7043 }
7044 
7045 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7046 {
7047  VMA_ASSERT(newSize > 0);
7048  m_Size = newSize;
7049 }
7050 
7051 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7052 {
7053  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7054  m_BlockAllocation.m_Offset = newOffset;
7055 }
7056 
7057 VkDeviceSize VmaAllocation_T::GetOffset() const
7058 {
7059  switch(m_Type)
7060  {
7061  case ALLOCATION_TYPE_BLOCK:
7062  return m_BlockAllocation.m_Offset;
7063  case ALLOCATION_TYPE_DEDICATED:
7064  return 0;
7065  default:
7066  VMA_ASSERT(0);
7067  return 0;
7068  }
7069 }
7070 
7071 VkDeviceMemory VmaAllocation_T::GetMemory() const
7072 {
7073  switch(m_Type)
7074  {
7075  case ALLOCATION_TYPE_BLOCK:
7076  return m_BlockAllocation.m_Block->GetDeviceMemory();
7077  case ALLOCATION_TYPE_DEDICATED:
7078  return m_DedicatedAllocation.m_hMemory;
7079  default:
7080  VMA_ASSERT(0);
7081  return VK_NULL_HANDLE;
7082  }
7083 }
7084 
7085 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7086 {
7087  switch(m_Type)
7088  {
7089  case ALLOCATION_TYPE_BLOCK:
7090  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7091  case ALLOCATION_TYPE_DEDICATED:
7092  return m_DedicatedAllocation.m_MemoryTypeIndex;
7093  default:
7094  VMA_ASSERT(0);
7095  return UINT32_MAX;
7096  }
7097 }
7098 
7099 void* VmaAllocation_T::GetMappedData() const
7100 {
7101  switch(m_Type)
7102  {
7103  case ALLOCATION_TYPE_BLOCK:
7104  if(m_MapCount != 0)
7105  {
7106  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7107  VMA_ASSERT(pBlockData != VMA_NULL);
7108  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7109  }
7110  else
7111  {
7112  return VMA_NULL;
7113  }
7114  break;
7115  case ALLOCATION_TYPE_DEDICATED:
7116  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7117  return m_DedicatedAllocation.m_pMappedData;
7118  default:
7119  VMA_ASSERT(0);
7120  return VMA_NULL;
7121  }
7122 }
7123 
7124 bool VmaAllocation_T::CanBecomeLost() const
7125 {
7126  switch(m_Type)
7127  {
7128  case ALLOCATION_TYPE_BLOCK:
7129  return m_BlockAllocation.m_CanBecomeLost;
7130  case ALLOCATION_TYPE_DEDICATED:
7131  return false;
7132  default:
7133  VMA_ASSERT(0);
7134  return false;
7135  }
7136 }
7137 
7138 VmaPool VmaAllocation_T::GetPool() const
7139 {
7140  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7141  return m_BlockAllocation.m_hPool;
7142 }
7143 
7144 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7145 {
7146  VMA_ASSERT(CanBecomeLost());
7147 
7148  /*
7149  Warning: This is a carefully designed algorithm.
7150  Do not modify unless you really know what you're doing :)
7151  */
7152  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7153  for(;;)
7154  {
7155  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7156  {
7157  VMA_ASSERT(0);
7158  return false;
7159  }
7160  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7161  {
7162  return false;
7163  }
7164  else // Last use time earlier than current time.
7165  {
7166  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7167  {
7168  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7169  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7170  return true;
7171  }
7172  }
7173  }
7174 }
7175 
7176 #if VMA_STATS_STRING_ENABLED
7177 
7178 // Correspond to values of enum VmaSuballocationType.
7179 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7180  "FREE",
7181  "UNKNOWN",
7182  "BUFFER",
7183  "IMAGE_UNKNOWN",
7184  "IMAGE_LINEAR",
7185  "IMAGE_OPTIMAL",
7186 };
7187 
7188 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7189 {
7190  json.WriteString("Type");
7191  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7192 
7193  json.WriteString("Size");
7194  json.WriteNumber(m_Size);
7195 
7196  if(m_pUserData != VMA_NULL)
7197  {
7198  json.WriteString("UserData");
7199  if(IsUserDataString())
7200  {
7201  json.WriteString((const char*)m_pUserData);
7202  }
7203  else
7204  {
7205  json.BeginString();
7206  json.ContinueString_Pointer(m_pUserData);
7207  json.EndString();
7208  }
7209  }
7210 
7211  json.WriteString("CreationFrameIndex");
7212  json.WriteNumber(m_CreationFrameIndex);
7213 
7214  json.WriteString("LastUseFrameIndex");
7215  json.WriteNumber(GetLastUseFrameIndex());
7216 
7217  if(m_BufferImageUsage != 0)
7218  {
7219  json.WriteString("Usage");
7220  json.WriteNumber(m_BufferImageUsage);
7221  }
7222 }
7223 
7224 #endif
7225 
7226 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7227 {
7228  VMA_ASSERT(IsUserDataString());
7229  if(m_pUserData != VMA_NULL)
7230  {
7231  char* const oldStr = (char*)m_pUserData;
7232  const size_t oldStrLen = strlen(oldStr);
7233  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7234  m_pUserData = VMA_NULL;
7235  }
7236 }
7237 
7238 void VmaAllocation_T::BlockAllocMap()
7239 {
7240  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7241 
7242  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7243  {
7244  ++m_MapCount;
7245  }
7246  else
7247  {
7248  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7249  }
7250 }
7251 
7252 void VmaAllocation_T::BlockAllocUnmap()
7253 {
7254  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7255 
7256  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7257  {
7258  --m_MapCount;
7259  }
7260  else
7261  {
7262  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7263  }
7264 }
7265 
7266 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7267 {
7268  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7269 
7270  if(m_MapCount != 0)
7271  {
7272  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7273  {
7274  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7275  *ppData = m_DedicatedAllocation.m_pMappedData;
7276  ++m_MapCount;
7277  return VK_SUCCESS;
7278  }
7279  else
7280  {
7281  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7282  return VK_ERROR_MEMORY_MAP_FAILED;
7283  }
7284  }
7285  else
7286  {
7287  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7288  hAllocator->m_hDevice,
7289  m_DedicatedAllocation.m_hMemory,
7290  0, // offset
7291  VK_WHOLE_SIZE,
7292  0, // flags
7293  ppData);
7294  if(result == VK_SUCCESS)
7295  {
7296  m_DedicatedAllocation.m_pMappedData = *ppData;
7297  m_MapCount = 1;
7298  }
7299  return result;
7300  }
7301 }
7302 
7303 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7304 {
7305  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7306 
7307  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7308  {
7309  --m_MapCount;
7310  if(m_MapCount == 0)
7311  {
7312  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7313  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7314  hAllocator->m_hDevice,
7315  m_DedicatedAllocation.m_hMemory);
7316  }
7317  }
7318  else
7319  {
7320  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7321  }
7322 }
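// Illustrative sketch (not part of the library): the mapping reference count manipulated by
// BlockAllocMap/Unmap and DedicatedAllocMap/Unmap is what the public vmaMapMemory /
// vmaUnmapMemory pair drives; `allocator`, `alloc`, `srcData` and `srcSize` are assumed.
//
//   void* pData = VMA_NULL;
//   VkResult res = vmaMapMemory(allocator, alloc, &pData);
//   if(res == VK_SUCCESS)
//   {
//       memcpy(pData, srcData, srcSize); // write through the mapping
//       vmaUnmapMemory(allocator, alloc);
//   }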
7323 
7324 #if VMA_STATS_STRING_ENABLED
7325 
7326 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7327 {
7328  json.BeginObject();
7329 
7330  json.WriteString("Blocks");
7331  json.WriteNumber(stat.blockCount);
7332 
7333  json.WriteString("Allocations");
7334  json.WriteNumber(stat.allocationCount);
7335 
7336  json.WriteString("UnusedRanges");
7337  json.WriteNumber(stat.unusedRangeCount);
7338 
7339  json.WriteString("UsedBytes");
7340  json.WriteNumber(stat.usedBytes);
7341 
7342  json.WriteString("UnusedBytes");
7343  json.WriteNumber(stat.unusedBytes);
7344 
7345  if(stat.allocationCount > 1)
7346  {
7347  json.WriteString("AllocationSize");
7348  json.BeginObject(true);
7349  json.WriteString("Min");
7350  json.WriteNumber(stat.allocationSizeMin);
7351  json.WriteString("Avg");
7352  json.WriteNumber(stat.allocationSizeAvg);
7353  json.WriteString("Max");
7354  json.WriteNumber(stat.allocationSizeMax);
7355  json.EndObject();
7356  }
7357 
7358  if(stat.unusedRangeCount > 1)
7359  {
7360  json.WriteString("UnusedRangeSize");
7361  json.BeginObject(true);
7362  json.WriteString("Min");
7363  json.WriteNumber(stat.unusedRangeSizeMin);
7364  json.WriteString("Avg");
7365  json.WriteNumber(stat.unusedRangeSizeAvg);
7366  json.WriteString("Max");
7367  json.WriteNumber(stat.unusedRangeSizeMax);
7368  json.EndObject();
7369  }
7370 
7371  json.EndObject();
7372 }
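// Example of the JSON fragment this produces (illustrative values):
//
//   {
//     "Blocks": 1, "Allocations": 4, "UnusedRanges": 2,
//     "UsedBytes": 1048576, "UnusedBytes": 65536,
//     "AllocationSize": { "Min": 4096, "Avg": 262144, "Max": 524288 },
//     "UnusedRangeSize": { "Min": 16384, "Avg": 32768, "Max": 49152 }
//   }
//
// The "AllocationSize" and "UnusedRangeSize" sub-objects appear only when the respective
// count is greater than 1.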
7373 
7374 #endif // #if VMA_STATS_STRING_ENABLED
7375 
7376 struct VmaSuballocationItemSizeLess
7377 {
7378  bool operator()(
7379  const VmaSuballocationList::iterator lhs,
7380  const VmaSuballocationList::iterator rhs) const
7381  {
7382  return lhs->size < rhs->size;
7383  }
7384  bool operator()(
7385  const VmaSuballocationList::iterator lhs,
7386  VkDeviceSize rhsSize) const
7387  {
7388  return lhs->size < rhsSize;
7389  }
7390 };
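// Illustrative example: the second operator() enables heterogeneous binary search over
// m_FreeSuballocationsBySize (sorted by size ascending), as done later in
// CreateAllocationRequest(). E.g. with registered free sizes {64, 256, 1024}, searching for
// a required size of 200 via VmaBinaryFindFirstNotLess() yields the entry of size 256 -
// the first candidate large enough to hold the request.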
7391 
7392 
7394 // class VmaBlockMetadata
7395 
7396 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7397  m_Size(0),
7398  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7399 {
7400 }
7401 
7402 #if VMA_STATS_STRING_ENABLED
7403 
7404 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7405  VkDeviceSize unusedBytes,
7406  size_t allocationCount,
7407  size_t unusedRangeCount) const
7408 {
7409  json.BeginObject();
7410 
7411  json.WriteString("TotalBytes");
7412  json.WriteNumber(GetSize());
7413 
7414  json.WriteString("UnusedBytes");
7415  json.WriteNumber(unusedBytes);
7416 
7417  json.WriteString("Allocations");
7418  json.WriteNumber((uint64_t)allocationCount);
7419 
7420  json.WriteString("UnusedRanges");
7421  json.WriteNumber((uint64_t)unusedRangeCount);
7422 
7423  json.WriteString("Suballocations");
7424  json.BeginArray();
7425 }
7426 
7427 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7428  VkDeviceSize offset,
7429  VmaAllocation hAllocation) const
7430 {
7431  json.BeginObject(true);
7432 
7433  json.WriteString("Offset");
7434  json.WriteNumber(offset);
7435 
7436  hAllocation->PrintParameters(json);
7437 
7438  json.EndObject();
7439 }
7440 
7441 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7442  VkDeviceSize offset,
7443  VkDeviceSize size) const
7444 {
7445  json.BeginObject(true);
7446 
7447  json.WriteString("Offset");
7448  json.WriteNumber(offset);
7449 
7450  json.WriteString("Type");
7451  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7452 
7453  json.WriteString("Size");
7454  json.WriteNumber(size);
7455 
7456  json.EndObject();
7457 }
7458 
7459 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7460 {
7461  json.EndArray();
7462  json.EndObject();
7463 }
7464 
7465 #endif // #if VMA_STATS_STRING_ENABLED
7466 
7468 // class VmaBlockMetadata_Generic
7469 
7470 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7471  VmaBlockMetadata(hAllocator),
7472  m_FreeCount(0),
7473  m_SumFreeSize(0),
7474  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7475  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7476 {
7477 }
7478 
7479 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7480 {
7481 }
7482 
7483 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7484 {
7485  VmaBlockMetadata::Init(size);
7486 
7487  m_FreeCount = 1;
7488  m_SumFreeSize = size;
7489 
7490  VmaSuballocation suballoc = {};
7491  suballoc.offset = 0;
7492  suballoc.size = size;
7493  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7494  suballoc.hAllocation = VK_NULL_HANDLE;
7495 
7496  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7497  m_Suballocations.push_back(suballoc);
7498  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7499  --suballocItem;
7500  m_FreeSuballocationsBySize.push_back(suballocItem);
7501 }
7502 
7503 bool VmaBlockMetadata_Generic::Validate() const
7504 {
7505  VMA_VALIDATE(!m_Suballocations.empty());
7506 
7507  // Expected offset of new suballocation as calculated from previous ones.
7508  VkDeviceSize calculatedOffset = 0;
7509  // Expected number of free suballocations as calculated from traversing their list.
7510  uint32_t calculatedFreeCount = 0;
7511  // Expected sum size of free suballocations as calculated from traversing their list.
7512  VkDeviceSize calculatedSumFreeSize = 0;
7513  // Expected number of free suballocations that should be registered in
7514  // m_FreeSuballocationsBySize calculated from traversing their list.
7515  size_t freeSuballocationsToRegister = 0;
7516  // True if previous visited suballocation was free.
7517  bool prevFree = false;
7518 
7519  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7520  suballocItem != m_Suballocations.cend();
7521  ++suballocItem)
7522  {
7523  const VmaSuballocation& subAlloc = *suballocItem;
7524 
7525  // Actual offset of this suballocation doesn't match expected one.
7526  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7527 
7528  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7529  // Two adjacent free suballocations are invalid. They should be merged.
7530  VMA_VALIDATE(!prevFree || !currFree);
7531 
7532  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7533 
7534  if(currFree)
7535  {
7536  calculatedSumFreeSize += subAlloc.size;
7537  ++calculatedFreeCount;
7538  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7539  {
7540  ++freeSuballocationsToRegister;
7541  }
7542 
7543  // Margin required between allocations - every free space must be at least that large.
7544  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7545  }
7546  else
7547  {
7548  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7549  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7550 
7551  // Margin required between allocations - previous allocation must be free.
7552  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7553  }
7554 
7555  calculatedOffset += subAlloc.size;
7556  prevFree = currFree;
7557  }
7558 
7559  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7560  // match expected one.
7561  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7562 
7563  VkDeviceSize lastSize = 0;
7564  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7565  {
7566  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7567 
7568  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7569  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7570  // They must be sorted by size ascending.
7571  VMA_VALIDATE(suballocItem->size >= lastSize);
7572 
7573  lastSize = suballocItem->size;
7574  }
7575 
7576  // Check if totals match calculated values.
7577  VMA_VALIDATE(ValidateFreeSuballocationList());
7578  VMA_VALIDATE(calculatedOffset == GetSize());
7579  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7580  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7581 
7582  return true;
7583 }
7584 
7585 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7586 {
7587  if(!m_FreeSuballocationsBySize.empty())
7588  {
7589  return m_FreeSuballocationsBySize.back()->size;
7590  }
7591  else
7592  {
7593  return 0;
7594  }
7595 }
7596 
7597 bool VmaBlockMetadata_Generic::IsEmpty() const
7598 {
7599  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7600 }
7601 
7602 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7603 {
7604  outInfo.blockCount = 1;
7605 
7606  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7607  outInfo.allocationCount = rangeCount - m_FreeCount;
7608  outInfo.unusedRangeCount = m_FreeCount;
7609 
7610  outInfo.unusedBytes = m_SumFreeSize;
7611  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7612 
7613  outInfo.allocationSizeMin = UINT64_MAX;
7614  outInfo.allocationSizeMax = 0;
7615  outInfo.unusedRangeSizeMin = UINT64_MAX;
7616  outInfo.unusedRangeSizeMax = 0;
7617 
7618  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7619  suballocItem != m_Suballocations.cend();
7620  ++suballocItem)
7621  {
7622  const VmaSuballocation& suballoc = *suballocItem;
7623  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7624  {
7625  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7626  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7627  }
7628  else
7629  {
7630  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7631  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7632  }
7633  }
7634 }
7635 
7636 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7637 {
7638  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7639 
7640  inoutStats.size += GetSize();
7641  inoutStats.unusedSize += m_SumFreeSize;
7642  inoutStats.allocationCount += rangeCount - m_FreeCount;
7643  inoutStats.unusedRangeCount += m_FreeCount;
7644  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7645 }
7646 
7647 #if VMA_STATS_STRING_ENABLED
7648 
7649 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7650 {
7651  PrintDetailedMap_Begin(json,
7652  m_SumFreeSize, // unusedBytes
7653  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7654  m_FreeCount); // unusedRangeCount
7655 
7656  size_t i = 0;
7657  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7658  suballocItem != m_Suballocations.cend();
7659  ++suballocItem, ++i)
7660  {
7661  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7662  {
7663  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7664  }
7665  else
7666  {
7667  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7668  }
7669  }
7670 
7671  PrintDetailedMap_End(json);
7672 }
7673 
7674 #endif // #if VMA_STATS_STRING_ENABLED
7675 
7676 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7677  uint32_t currentFrameIndex,
7678  uint32_t frameInUseCount,
7679  VkDeviceSize bufferImageGranularity,
7680  VkDeviceSize allocSize,
7681  VkDeviceSize allocAlignment,
7682  bool upperAddress,
7683  VmaSuballocationType allocType,
7684  bool canMakeOtherLost,
7685  uint32_t strategy,
7686  VmaAllocationRequest* pAllocationRequest)
7687 {
7688  VMA_ASSERT(allocSize > 0);
7689  VMA_ASSERT(!upperAddress);
7690  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7691  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7692  VMA_HEAVY_ASSERT(Validate());
7693 
7694  // There is not enough total free space in this block to fulfill the request: early return.
7695  if(canMakeOtherLost == false &&
7696  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7697  {
7698  return false;
7699  }
7700 
7701  // Efficient search over m_FreeSuballocationsBySize, as opposed to the brute-force fallback below.
7702  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7703  if(freeSuballocCount > 0)
7704  {
7705  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7706  {
7707  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7708  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7709  m_FreeSuballocationsBySize.data(),
7710  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7711  allocSize + 2 * VMA_DEBUG_MARGIN,
7712  VmaSuballocationItemSizeLess());
7713  size_t index = it - m_FreeSuballocationsBySize.data();
7714  for(; index < freeSuballocCount; ++index)
7715  {
7716  if(CheckAllocation(
7717  currentFrameIndex,
7718  frameInUseCount,
7719  bufferImageGranularity,
7720  allocSize,
7721  allocAlignment,
7722  allocType,
7723  m_FreeSuballocationsBySize[index],
7724  false, // canMakeOtherLost
7725  &pAllocationRequest->offset,
7726  &pAllocationRequest->itemsToMakeLostCount,
7727  &pAllocationRequest->sumFreeSize,
7728  &pAllocationRequest->sumItemSize))
7729  {
7730  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7731  return true;
7732  }
7733  }
7734  }
7735  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7736  {
7737  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7738  it != m_Suballocations.end();
7739  ++it)
7740  {
7741  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7742  currentFrameIndex,
7743  frameInUseCount,
7744  bufferImageGranularity,
7745  allocSize,
7746  allocAlignment,
7747  allocType,
7748  it,
7749  false, // canMakeOtherLost
7750  &pAllocationRequest->offset,
7751  &pAllocationRequest->itemsToMakeLostCount,
7752  &pAllocationRequest->sumFreeSize,
7753  &pAllocationRequest->sumItemSize))
7754  {
7755  pAllocationRequest->item = it;
7756  return true;
7757  }
7758  }
7759  }
7760  else // WORST_FIT, FIRST_FIT
7761  {
7762  // Search starting from biggest suballocations.
7763  for(size_t index = freeSuballocCount; index--; )
7764  {
7765  if(CheckAllocation(
7766  currentFrameIndex,
7767  frameInUseCount,
7768  bufferImageGranularity,
7769  allocSize,
7770  allocAlignment,
7771  allocType,
7772  m_FreeSuballocationsBySize[index],
7773  false, // canMakeOtherLost
7774  &pAllocationRequest->offset,
7775  &pAllocationRequest->itemsToMakeLostCount,
7776  &pAllocationRequest->sumFreeSize,
7777  &pAllocationRequest->sumItemSize))
7778  {
7779  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7780  return true;
7781  }
7782  }
7783  }
7784  }
7785 
7786  if(canMakeOtherLost)
7787  {
7788  // Brute-force algorithm. TODO: Come up with something better.
7789 
7790  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7791  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7792 
7793  VmaAllocationRequest tmpAllocRequest = {};
7794  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7795  suballocIt != m_Suballocations.end();
7796  ++suballocIt)
7797  {
7798  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7799  suballocIt->hAllocation->CanBecomeLost())
7800  {
7801  if(CheckAllocation(
7802  currentFrameIndex,
7803  frameInUseCount,
7804  bufferImageGranularity,
7805  allocSize,
7806  allocAlignment,
7807  allocType,
7808  suballocIt,
7809  canMakeOtherLost,
7810  &tmpAllocRequest.offset,
7811  &tmpAllocRequest.itemsToMakeLostCount,
7812  &tmpAllocRequest.sumFreeSize,
7813  &tmpAllocRequest.sumItemSize))
7814  {
7815  tmpAllocRequest.item = suballocIt;
7816 
7817  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7818  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7819  {
7820  *pAllocationRequest = tmpAllocRequest;
7821  }
7822  }
7823  }
7824  }
7825 
7826  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7827  {
7828  return true;
7829  }
7830  }
7831 
7832  return false;
7833 }
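// Summary of the search strategies above (illustrative): BEST_FIT binary-searches
// m_FreeSuballocationsBySize for the smallest free range of at least
// allocSize + 2 * VMA_DEBUG_MARGIN; MIN_OFFSET walks m_Suballocations in address order and
// takes the first free range that fits; WORST_FIT/FIRST_FIT iterate the by-size list from
// the largest range downward. For example, with free ranges of 96, 300 and 1000 bytes and a
// 200-byte request (VMA_DEBUG_MARGIN = 0), BEST_FIT picks the 300-byte range while
// WORST_FIT starts with the 1000-byte one.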
7834 
7835 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7836  uint32_t currentFrameIndex,
7837  uint32_t frameInUseCount,
7838  VmaAllocationRequest* pAllocationRequest)
7839 {
7840  while(pAllocationRequest->itemsToMakeLostCount > 0)
7841  {
7842  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7843  {
7844  ++pAllocationRequest->item;
7845  }
7846  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7847  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7848  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7849  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7850  {
7851  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7852  --pAllocationRequest->itemsToMakeLostCount;
7853  }
7854  else
7855  {
7856  return false;
7857  }
7858  }
7859 
7860  VMA_HEAVY_ASSERT(Validate());
7861  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7862  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7863 
7864  return true;
7865 }
7866 
7867 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7868 {
7869  uint32_t lostAllocationCount = 0;
7870  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7871  it != m_Suballocations.end();
7872  ++it)
7873  {
7874  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7875  it->hAllocation->CanBecomeLost() &&
7876  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7877  {
7878  it = FreeSuballocation(it);
7879  ++lostAllocationCount;
7880  }
7881  }
7882  return lostAllocationCount;
7883 }
7884 
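// Walks all used suballocations and validates the magic values expected in the
// VMA_DEBUG_MARGIN regions directly before (offset - VMA_DEBUG_MARGIN) and
// after (offset + size) each of them. A mismatch is treated as memory
// corruption and reported as VK_ERROR_VALIDATION_FAILED_EXT.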
7885 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7886 {
7887  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7888  it != m_Suballocations.end();
7889  ++it)
7890  {
7891  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7892  {
7893  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7894  {
7895  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7896  return VK_ERROR_VALIDATION_FAILED_EXT;
7897  }
7898  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7899  {
7900  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7901  return VK_ERROR_VALIDATION_FAILED_EXT;
7902  }
7903  }
7904  }
7905 
7906  return VK_SUCCESS;
7907 }
7908 
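// Converts the free suballocation selected in `request` into a used one.
// Any leftover space in front of the requested offset (alignment / debug
// margin padding) and behind the requested size is split off into new free
// suballocations, which are registered back in m_FreeSuballocationsBySize,
// and m_FreeCount / m_SumFreeSize are updated accordingly.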
7909 void VmaBlockMetadata_Generic::Alloc(
7910  const VmaAllocationRequest& request,
7911  VmaSuballocationType type,
7912  VkDeviceSize allocSize,
7913  bool upperAddress,
7914  VmaAllocation hAllocation)
7915 {
7916  VMA_ASSERT(!upperAddress);
7917  VMA_ASSERT(request.item != m_Suballocations.end());
7918  VmaSuballocation& suballoc = *request.item;
7919  // Given suballocation is a free block.
7920  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7921  // Given offset is inside this suballocation.
7922  VMA_ASSERT(request.offset >= suballoc.offset);
7923  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7924  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7925  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7926 
7927  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7928  // it to become used.
7929  UnregisterFreeSuballocation(request.item);
7930 
7931  suballoc.offset = request.offset;
7932  suballoc.size = allocSize;
7933  suballoc.type = type;
7934  suballoc.hAllocation = hAllocation;
7935 
7936  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7937  if(paddingEnd)
7938  {
7939  VmaSuballocation paddingSuballoc = {};
7940  paddingSuballoc.offset = request.offset + allocSize;
7941  paddingSuballoc.size = paddingEnd;
7942  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7943  VmaSuballocationList::iterator next = request.item;
7944  ++next;
7945  const VmaSuballocationList::iterator paddingEndItem =
7946  m_Suballocations.insert(next, paddingSuballoc);
7947  RegisterFreeSuballocation(paddingEndItem);
7948  }
7949 
7950  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7951  if(paddingBegin)
7952  {
7953  VmaSuballocation paddingSuballoc = {};
7954  paddingSuballoc.offset = request.offset - paddingBegin;
7955  paddingSuballoc.size = paddingBegin;
7956  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7957  const VmaSuballocationList::iterator paddingBeginItem =
7958  m_Suballocations.insert(request.item, paddingSuballoc);
7959  RegisterFreeSuballocation(paddingBeginItem);
7960  }
7961 
7962  // Update totals.
7963  m_FreeCount = m_FreeCount - 1;
7964  if(paddingBegin > 0)
7965  {
7966  ++m_FreeCount;
7967  }
7968  if(paddingEnd > 0)
7969  {
7970  ++m_FreeCount;
7971  }
7972  m_SumFreeSize -= allocSize;
7973 }
7974 
7975 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7976 {
7977  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7978  suballocItem != m_Suballocations.end();
7979  ++suballocItem)
7980  {
7981  VmaSuballocation& suballoc = *suballocItem;
7982  if(suballoc.hAllocation == allocation)
7983  {
7984  FreeSuballocation(suballocItem);
7985  VMA_HEAVY_ASSERT(Validate());
7986  return;
7987  }
7988  }
7989  VMA_ASSERT(0 && "Not found!");
7990 }
7991 
7992 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7993 {
7994  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7995  suballocItem != m_Suballocations.end();
7996  ++suballocItem)
7997  {
7998  VmaSuballocation& suballoc = *suballocItem;
7999  if(suballoc.offset == offset)
8000  {
8001  FreeSuballocation(suballocItem);
8002  return;
8003  }
8004  }
8005  VMA_ASSERT(0 && "Not found!");
8006 }
8007 
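// In-place resize of an existing allocation. Shrinking always succeeds: the
// freed tail either grows a following free suballocation backward or becomes a
// new free suballocation. Growing succeeds only if the very next suballocation
// is free and large enough (including VMA_DEBUG_MARGIN); otherwise false is
// returned and nothing is modified.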
8008 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8009 {
8010  typedef VmaSuballocationList::iterator iter_type;
8011  for(iter_type suballocItem = m_Suballocations.begin();
8012  suballocItem != m_Suballocations.end();
8013  ++suballocItem)
8014  {
8015  VmaSuballocation& suballoc = *suballocItem;
8016  if(suballoc.hAllocation == alloc)
8017  {
8018  iter_type nextItem = suballocItem;
8019  ++nextItem;
8020 
8021  // Should have been ensured on higher level.
8022  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8023 
8024  // Shrinking.
8025  if(newSize < alloc->GetSize())
8026  {
8027  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8028 
8029  // There is next item.
8030  if(nextItem != m_Suballocations.end())
8031  {
8032  // Next item is free.
8033  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8034  {
8035  // Grow this next item backward.
8036  UnregisterFreeSuballocation(nextItem);
8037  nextItem->offset -= sizeDiff;
8038  nextItem->size += sizeDiff;
8039  RegisterFreeSuballocation(nextItem);
8040  }
8041  // Next item is not free.
8042  else
8043  {
8044  // Create free item after current one.
8045  VmaSuballocation newFreeSuballoc;
8046  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8047  newFreeSuballoc.offset = suballoc.offset + newSize;
8048  newFreeSuballoc.size = sizeDiff;
8049  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8050  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8051  RegisterFreeSuballocation(newFreeSuballocIt);
8052 
8053  ++m_FreeCount;
8054  }
8055  }
8056  // This is the last item.
8057  else
8058  {
8059  // Create free item at the end.
8060  VmaSuballocation newFreeSuballoc;
8061  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8062  newFreeSuballoc.offset = suballoc.offset + newSize;
8063  newFreeSuballoc.size = sizeDiff;
8064  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8065  m_Suballocations.push_back(newFreeSuballoc);
8066 
8067  iter_type newFreeSuballocIt = m_Suballocations.end();
8068  RegisterFreeSuballocation(--newFreeSuballocIt);
8069 
8070  ++m_FreeCount;
8071  }
8072 
8073  suballoc.size = newSize;
8074  m_SumFreeSize += sizeDiff;
8075  }
8076  // Growing.
8077  else
8078  {
8079  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8080 
8081  // There is next item.
8082  if(nextItem != m_Suballocations.end())
8083  {
8084  // Next item is free.
8085  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8086  {
8087  // There is not enough free space, including margin.
8088  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8089  {
8090  return false;
8091  }
8092 
8093  // There is more free space than required.
8094  if(nextItem->size > sizeDiff)
8095  {
8096  // Move and shrink this next item.
8097  UnregisterFreeSuballocation(nextItem);
8098  nextItem->offset += sizeDiff;
8099  nextItem->size -= sizeDiff;
8100  RegisterFreeSuballocation(nextItem);
8101  }
8102  // There is exactly the amount of free space required.
8103  else
8104  {
8105  // Remove this next free item.
8106  UnregisterFreeSuballocation(nextItem);
8107  m_Suballocations.erase(nextItem);
8108  --m_FreeCount;
8109  }
8110  }
8111  // Next item is not free - there is no space to grow.
8112  else
8113  {
8114  return false;
8115  }
8116  }
8117  // This is the last item - there is no space to grow.
8118  else
8119  {
8120  return false;
8121  }
8122 
8123  suballoc.size = newSize;
8124  m_SumFreeSize -= sizeDiff;
8125  }
8126 
8127  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
8128  return true;
8129  }
8130  }
8131  VMA_ASSERT(0 && "Not found!");
8132  return false;
8133 }
8134 
8135 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8136 {
8137  VkDeviceSize lastSize = 0;
8138  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8139  {
8140  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8141 
8142  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8143  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8144  VMA_VALIDATE(it->size >= lastSize);
8145  lastSize = it->size;
8146  }
8147  return true;
8148 }
8149 
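// Checks whether an allocation of allocSize / allocAlignment / allocType can
// be placed starting at suballocItem. With canMakeOtherLost == false the item
// must be a single free suballocation big enough for the request; with
// canMakeOtherLost == true the request may also span following suballocations
// whose allocations can be made lost, and *itemsToMakeLostCount /
// *pSumItemSize report the price of doing so. BufferImageGranularity conflicts
// with neighboring suballocations either bump the alignment or reject the
// placement.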
8150 bool VmaBlockMetadata_Generic::CheckAllocation(
8151  uint32_t currentFrameIndex,
8152  uint32_t frameInUseCount,
8153  VkDeviceSize bufferImageGranularity,
8154  VkDeviceSize allocSize,
8155  VkDeviceSize allocAlignment,
8156  VmaSuballocationType allocType,
8157  VmaSuballocationList::const_iterator suballocItem,
8158  bool canMakeOtherLost,
8159  VkDeviceSize* pOffset,
8160  size_t* itemsToMakeLostCount,
8161  VkDeviceSize* pSumFreeSize,
8162  VkDeviceSize* pSumItemSize) const
8163 {
8164  VMA_ASSERT(allocSize > 0);
8165  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8166  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8167  VMA_ASSERT(pOffset != VMA_NULL);
8168 
8169  *itemsToMakeLostCount = 0;
8170  *pSumFreeSize = 0;
8171  *pSumItemSize = 0;
8172 
8173  if(canMakeOtherLost)
8174  {
8175  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8176  {
8177  *pSumFreeSize = suballocItem->size;
8178  }
8179  else
8180  {
8181  if(suballocItem->hAllocation->CanBecomeLost() &&
8182  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8183  {
8184  ++*itemsToMakeLostCount;
8185  *pSumItemSize = suballocItem->size;
8186  }
8187  else
8188  {
8189  return false;
8190  }
8191  }
8192 
8193  // Remaining size is too small for this request: Early return.
8194  if(GetSize() - suballocItem->offset < allocSize)
8195  {
8196  return false;
8197  }
8198 
8199  // Start from offset equal to beginning of this suballocation.
8200  *pOffset = suballocItem->offset;
8201 
8202  // Apply VMA_DEBUG_MARGIN at the beginning.
8203  if(VMA_DEBUG_MARGIN > 0)
8204  {
8205  *pOffset += VMA_DEBUG_MARGIN;
8206  }
8207 
8208  // Apply alignment.
8209  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8210 
8211  // Check previous suballocations for BufferImageGranularity conflicts.
8212  // Make bigger alignment if necessary.
8213  if(bufferImageGranularity > 1)
8214  {
8215  bool bufferImageGranularityConflict = false;
8216  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8217  while(prevSuballocItem != m_Suballocations.cbegin())
8218  {
8219  --prevSuballocItem;
8220  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8221  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8222  {
8223  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8224  {
8225  bufferImageGranularityConflict = true;
8226  break;
8227  }
8228  }
8229  else
8230  // Already on previous page.
8231  break;
8232  }
8233  if(bufferImageGranularityConflict)
8234  {
8235  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8236  }
8237  }
8238 
8239  // Now that we have final *pOffset, check if we are past suballocItem.
8240  // If yes, return false - this function should be called for another suballocItem as starting point.
8241  if(*pOffset >= suballocItem->offset + suballocItem->size)
8242  {
8243  return false;
8244  }
8245 
8246  // Calculate padding at the beginning based on current offset.
8247  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8248 
8249  // Calculate required margin at the end.
8250  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8251 
8252  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8253  // Another early return check.
8254  if(suballocItem->offset + totalSize > GetSize())
8255  {
8256  return false;
8257  }
8258 
8259  // Advance lastSuballocItem until desired size is reached.
8260  // Update itemsToMakeLostCount.
8261  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8262  if(totalSize > suballocItem->size)
8263  {
8264  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8265  while(remainingSize > 0)
8266  {
8267  ++lastSuballocItem;
8268  if(lastSuballocItem == m_Suballocations.cend())
8269  {
8270  return false;
8271  }
8272  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8273  {
8274  *pSumFreeSize += lastSuballocItem->size;
8275  }
8276  else
8277  {
8278  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8279  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8280  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8281  {
8282  ++*itemsToMakeLostCount;
8283  *pSumItemSize += lastSuballocItem->size;
8284  }
8285  else
8286  {
8287  return false;
8288  }
8289  }
8290  remainingSize = (lastSuballocItem->size < remainingSize) ?
8291  remainingSize - lastSuballocItem->size : 0;
8292  }
8293  }
8294 
8295  // Check next suballocations for BufferImageGranularity conflicts.
8296  // If conflict exists, we must mark more allocations lost or fail.
8297  if(bufferImageGranularity > 1)
8298  {
8299  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8300  ++nextSuballocItem;
8301  while(nextSuballocItem != m_Suballocations.cend())
8302  {
8303  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8304  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8305  {
8306  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8307  {
8308  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8309  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8310  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8311  {
8312  ++*itemsToMakeLostCount;
8313  }
8314  else
8315  {
8316  return false;
8317  }
8318  }
8319  }
8320  else
8321  {
8322  // Already on next page.
8323  break;
8324  }
8325  ++nextSuballocItem;
8326  }
8327  }
8328  }
8329  else
8330  {
8331  const VmaSuballocation& suballoc = *suballocItem;
8332  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8333 
8334  *pSumFreeSize = suballoc.size;
8335 
8336  // Size of this suballocation is too small for this request: Early return.
8337  if(suballoc.size < allocSize)
8338  {
8339  return false;
8340  }
8341 
8342  // Start from offset equal to beginning of this suballocation.
8343  *pOffset = suballoc.offset;
8344 
8345  // Apply VMA_DEBUG_MARGIN at the beginning.
8346  if(VMA_DEBUG_MARGIN > 0)
8347  {
8348  *pOffset += VMA_DEBUG_MARGIN;
8349  }
8350 
8351  // Apply alignment.
8352  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8353 
8354  // Check previous suballocations for BufferImageGranularity conflicts.
8355  // Make bigger alignment if necessary.
8356  if(bufferImageGranularity > 1)
8357  {
8358  bool bufferImageGranularityConflict = false;
8359  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8360  while(prevSuballocItem != m_Suballocations.cbegin())
8361  {
8362  --prevSuballocItem;
8363  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8364  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8365  {
8366  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8367  {
8368  bufferImageGranularityConflict = true;
8369  break;
8370  }
8371  }
8372  else
8373  // Already on previous page.
8374  break;
8375  }
8376  if(bufferImageGranularityConflict)
8377  {
8378  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8379  }
8380  }
8381 
8382  // Calculate padding at the beginning based on current offset.
8383  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8384 
8385  // Calculate required margin at the end.
8386  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8387 
8388  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8389  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8390  {
8391  return false;
8392  }
8393 
8394  // Check next suballocations for BufferImageGranularity conflicts.
8395  // If conflict exists, allocation cannot be made here.
8396  if(bufferImageGranularity > 1)
8397  {
8398  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8399  ++nextSuballocItem;
8400  while(nextSuballocItem != m_Suballocations.cend())
8401  {
8402  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8403  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8404  {
8405  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8406  {
8407  return false;
8408  }
8409  }
8410  else
8411  {
8412  // Already on next page.
8413  break;
8414  }
8415  ++nextSuballocItem;
8416  }
8417  }
8418  }
8419 
8420  // All tests passed: Success. pOffset is already filled.
8421  return true;
8422 }
8423 
8424 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8425 {
8426  VMA_ASSERT(item != m_Suballocations.end());
8427  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8428 
8429  VmaSuballocationList::iterator nextItem = item;
8430  ++nextItem;
8431  VMA_ASSERT(nextItem != m_Suballocations.end());
8432  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8433 
8434  item->size += nextItem->size;
8435  --m_FreeCount;
8436  m_Suballocations.erase(nextItem);
8437 }
8438 
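// Marks the given suballocation as free, updates m_FreeCount / m_SumFreeSize,
// merges it with a free neighbor on either side, and returns an iterator to
// the resulting free suballocation, already registered in
// m_FreeSuballocationsBySize.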
8439 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8440 {
8441  // Change this suballocation to be marked as free.
8442  VmaSuballocation& suballoc = *suballocItem;
8443  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8444  suballoc.hAllocation = VK_NULL_HANDLE;
8445 
8446  // Update totals.
8447  ++m_FreeCount;
8448  m_SumFreeSize += suballoc.size;
8449 
8450  // Merge with previous and/or next suballocation if it's also free.
8451  bool mergeWithNext = false;
8452  bool mergeWithPrev = false;
8453 
8454  VmaSuballocationList::iterator nextItem = suballocItem;
8455  ++nextItem;
8456  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8457  {
8458  mergeWithNext = true;
8459  }
8460 
8461  VmaSuballocationList::iterator prevItem = suballocItem;
8462  if(suballocItem != m_Suballocations.begin())
8463  {
8464  --prevItem;
8465  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8466  {
8467  mergeWithPrev = true;
8468  }
8469  }
8470 
8471  if(mergeWithNext)
8472  {
8473  UnregisterFreeSuballocation(nextItem);
8474  MergeFreeWithNext(suballocItem);
8475  }
8476 
8477  if(mergeWithPrev)
8478  {
8479  UnregisterFreeSuballocation(prevItem);
8480  MergeFreeWithNext(prevItem);
8481  RegisterFreeSuballocation(prevItem);
8482  return prevItem;
8483  }
8484  else
8485  {
8486  RegisterFreeSuballocation(suballocItem);
8487  return suballocItem;
8488  }
8489 }
8490 
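// Adds a free suballocation to m_FreeSuballocationsBySize, which is kept
// sorted by size. Items smaller than VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER
// are intentionally not tracked there; they are still present in
// m_Suballocations and still counted in m_SumFreeSize.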
8491 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8492 {
8493  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8494  VMA_ASSERT(item->size > 0);
8495 
8496  // You may want to enable this validation at the beginning or at the end of
8497  // this function, depending on what you want to check.
8498  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8499 
8500  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8501  {
8502  if(m_FreeSuballocationsBySize.empty())
8503  {
8504  m_FreeSuballocationsBySize.push_back(item);
8505  }
8506  else
8507  {
8508  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8509  }
8510  }
8511 
8512  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8513 }
8514 
8515 
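// Removes a free suballocation from m_FreeSuballocationsBySize. Several
// entries can share the same size, so the binary search only finds the first
// entry whose size is not less than item->size; the loop below then compares
// iterators to locate the exact element. For example, with registered sizes
// [16, 64, 64, 256] and item->size == 64, the search lands on the first 64 and
// the scan continues through equal-sized entries until the matching iterator
// is found.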
8516 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8517 {
8518  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8519  VMA_ASSERT(item->size > 0);
8520 
8521  // You may want to enable this validation at the beginning or at the end of
8522  // this function, depending on what you want to check.
8523  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8524 
8525  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8526  {
8527  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8528  m_FreeSuballocationsBySize.data(),
8529  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8530  item,
8531  VmaSuballocationItemSizeLess());
8532  for(size_t index = it - m_FreeSuballocationsBySize.data();
8533  index < m_FreeSuballocationsBySize.size();
8534  ++index)
8535  {
8536  if(m_FreeSuballocationsBySize[index] == item)
8537  {
8538  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8539  return;
8540  }
8541  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8542  }
8543  VMA_ASSERT(0 && "Not found.");
8544  }
8545 
8546  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8547 }
8548 
8549 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8550  VkDeviceSize bufferImageGranularity,
8551  VmaSuballocationType& inOutPrevSuballocType) const
8552 {
8553  if(bufferImageGranularity == 1 || IsEmpty())
8554  {
8555  return false;
8556  }
8557 
8558  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8559  bool typeConflictFound = false;
8560  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8561  it != m_Suballocations.cend();
8562  ++it)
8563  {
8564  const VmaSuballocationType suballocType = it->type;
8565  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8566  {
8567  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8568  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8569  {
8570  typeConflictFound = true;
8571  }
8572  inOutPrevSuballocType = suballocType;
8573  }
8574  }
8575 
8576  return typeConflictFound || minAlignment >= bufferImageGranularity;
8577 }
8578 
8580 // class VmaBlockMetadata_Linear
8581 
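// The linear metadata keeps suballocations in two vectors. m_Suballocations0/1
// alternate in the roles of "1st" and "2nd" vector (selected by
// m_1stVectorIndex, see AccessSuballocations1st/2nd). The 2nd vector is either
// empty, acts as a ring buffer wrapping around before the 1st vector, or holds
// a second stack growing down from the end of the block, as indicated by
// m_2ndVectorMode. The m_*NullItemsCount members count entries whose
// allocation has already been freed (hAllocation == VK_NULL_HANDLE) but which
// have not been physically removed from the vectors yet.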
8582 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8583  VmaBlockMetadata(hAllocator),
8584  m_SumFreeSize(0),
8585  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8586  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8587  m_1stVectorIndex(0),
8588  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8589  m_1stNullItemsBeginCount(0),
8590  m_1stNullItemsMiddleCount(0),
8591  m_2ndNullItemsCount(0)
8592 {
8593 }
8594 
8595 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8596 {
8597 }
8598 
8599 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8600 {
8601  VmaBlockMetadata::Init(size);
8602  m_SumFreeSize = size;
8603 }
8604 
8605 bool VmaBlockMetadata_Linear::Validate() const
8606 {
8607  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8608  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8609 
8610  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8611  VMA_VALIDATE(!suballocations1st.empty() ||
8612  suballocations2nd.empty() ||
8613  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8614 
8615  if(!suballocations1st.empty())
8616  {
8617  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
8618  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8619  // A null item at the end should have been removed by pop_back().
8620  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8621  }
8622  if(!suballocations2nd.empty())
8623  {
8624  // A null item at the end should have been removed by pop_back().
8625  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8626  }
8627 
8628  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8629  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8630 
8631  VkDeviceSize sumUsedSize = 0;
8632  const size_t suballoc1stCount = suballocations1st.size();
8633  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8634 
8635  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8636  {
8637  const size_t suballoc2ndCount = suballocations2nd.size();
8638  size_t nullItem2ndCount = 0;
8639  for(size_t i = 0; i < suballoc2ndCount; ++i)
8640  {
8641  const VmaSuballocation& suballoc = suballocations2nd[i];
8642  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8643 
8644  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8645  VMA_VALIDATE(suballoc.offset >= offset);
8646 
8647  if(!currFree)
8648  {
8649  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8650  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8651  sumUsedSize += suballoc.size;
8652  }
8653  else
8654  {
8655  ++nullItem2ndCount;
8656  }
8657 
8658  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8659  }
8660 
8661  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8662  }
8663 
8664  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8665  {
8666  const VmaSuballocation& suballoc = suballocations1st[i];
8667  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8668  suballoc.hAllocation == VK_NULL_HANDLE);
8669  }
8670 
8671  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8672 
8673  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8674  {
8675  const VmaSuballocation& suballoc = suballocations1st[i];
8676  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8677 
8678  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8679  VMA_VALIDATE(suballoc.offset >= offset);
8680  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8681 
8682  if(!currFree)
8683  {
8684  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8685  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8686  sumUsedSize += suballoc.size;
8687  }
8688  else
8689  {
8690  ++nullItem1stCount;
8691  }
8692 
8693  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8694  }
8695  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8696 
8697  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8698  {
8699  const size_t suballoc2ndCount = suballocations2nd.size();
8700  size_t nullItem2ndCount = 0;
8701  for(size_t i = suballoc2ndCount; i--; )
8702  {
8703  const VmaSuballocation& suballoc = suballocations2nd[i];
8704  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8705 
8706  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8707  VMA_VALIDATE(suballoc.offset >= offset);
8708 
8709  if(!currFree)
8710  {
8711  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8712  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8713  sumUsedSize += suballoc.size;
8714  }
8715  else
8716  {
8717  ++nullItem2ndCount;
8718  }
8719 
8720  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8721  }
8722 
8723  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8724  }
8725 
8726  VMA_VALIDATE(offset <= GetSize());
8727  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8728 
8729  return true;
8730 }
8731 
8732 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8733 {
8734  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8735  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8736 }
8737 
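// Largest contiguous region still available for a new allocation:
// - SECOND_VECTOR_EMPTY: the larger of the space before the first used
//   suballocation of 1st and the space after its last one.
// - SECOND_VECTOR_RING_BUFFER: the gap between the end of 2nd and the
//   beginning of 1st.
// - SECOND_VECTOR_DOUBLE_STACK: the gap between the end of 1st and the top of
//   the 2nd (upper) stack.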
8738 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8739 {
8740  const VkDeviceSize size = GetSize();
8741 
8742  /*
8743  We don't consider gaps inside allocation vectors with freed allocations because
8744  they are not suitable for reuse in linear allocator. We consider only space that
8745  is available for new allocations.
8746  */
8747  if(IsEmpty())
8748  {
8749  return size;
8750  }
8751 
8752  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8753 
8754  switch(m_2ndVectorMode)
8755  {
8756  case SECOND_VECTOR_EMPTY:
8757  /*
8758  Available space is after end of 1st, as well as before beginning of 1st (which
8759  would make it a ring buffer).
8760  */
8761  {
8762  const size_t suballocations1stCount = suballocations1st.size();
8763  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8764  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8765  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8766  return VMA_MAX(
8767  firstSuballoc.offset,
8768  size - (lastSuballoc.offset + lastSuballoc.size));
8769  }
8770  break;
8771 
8772  case SECOND_VECTOR_RING_BUFFER:
8773  /*
8774  Available space is only between end of 2nd and beginning of 1st.
8775  */
8776  {
8777  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8778  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8779  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8780  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8781  }
8782  break;
8783 
8784  case SECOND_VECTOR_DOUBLE_STACK:
8785  /*
8786  Available space is only between end of 1st and top of 2nd.
8787  */
8788  {
8789  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8790  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8791  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8792  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8793  }
8794  break;
8795 
8796  default:
8797  VMA_ASSERT(0);
8798  return 0;
8799  }
8800 }
8801 
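// This function, AddPoolStats() and PrintDetailedMap() below all iterate the
// block in the same three phases, in address order: first the ring-buffer part
// of the 2nd vector (if any), then the 1st vector, then the 2nd vector used as
// an upper stack (if any), skipping entries whose allocation is null and
// accounting the gaps in between as unused ranges.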
8802 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8803 {
8804  const VkDeviceSize size = GetSize();
8805  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8806  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8807  const size_t suballoc1stCount = suballocations1st.size();
8808  const size_t suballoc2ndCount = suballocations2nd.size();
8809 
8810  outInfo.blockCount = 1;
8811  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8812  outInfo.unusedRangeCount = 0;
8813  outInfo.usedBytes = 0;
8814  outInfo.allocationSizeMin = UINT64_MAX;
8815  outInfo.allocationSizeMax = 0;
8816  outInfo.unusedRangeSizeMin = UINT64_MAX;
8817  outInfo.unusedRangeSizeMax = 0;
8818 
8819  VkDeviceSize lastOffset = 0;
8820 
8821  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8822  {
8823  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8824  size_t nextAlloc2ndIndex = 0;
8825  while(lastOffset < freeSpace2ndTo1stEnd)
8826  {
8827  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8828  while(nextAlloc2ndIndex < suballoc2ndCount &&
8829  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8830  {
8831  ++nextAlloc2ndIndex;
8832  }
8833 
8834  // Found non-null allocation.
8835  if(nextAlloc2ndIndex < suballoc2ndCount)
8836  {
8837  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8838 
8839  // 1. Process free space before this allocation.
8840  if(lastOffset < suballoc.offset)
8841  {
8842  // There is free space from lastOffset to suballoc.offset.
8843  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8844  ++outInfo.unusedRangeCount;
8845  outInfo.unusedBytes += unusedRangeSize;
8846  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8847  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8848  }
8849 
8850  // 2. Process this allocation.
8851  // There is allocation with suballoc.offset, suballoc.size.
8852  outInfo.usedBytes += suballoc.size;
8853  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8854  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8855 
8856  // 3. Prepare for next iteration.
8857  lastOffset = suballoc.offset + suballoc.size;
8858  ++nextAlloc2ndIndex;
8859  }
8860  // We are at the end.
8861  else
8862  {
8863  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8864  if(lastOffset < freeSpace2ndTo1stEnd)
8865  {
8866  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8867  ++outInfo.unusedRangeCount;
8868  outInfo.unusedBytes += unusedRangeSize;
8869  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8870  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8871  }
8872 
8873  // End of loop.
8874  lastOffset = freeSpace2ndTo1stEnd;
8875  }
8876  }
8877  }
8878 
8879  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8880  const VkDeviceSize freeSpace1stTo2ndEnd =
8881  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8882  while(lastOffset < freeSpace1stTo2ndEnd)
8883  {
8884  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8885  while(nextAlloc1stIndex < suballoc1stCount &&
8886  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8887  {
8888  ++nextAlloc1stIndex;
8889  }
8890 
8891  // Found non-null allocation.
8892  if(nextAlloc1stIndex < suballoc1stCount)
8893  {
8894  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8895 
8896  // 1. Process free space before this allocation.
8897  if(lastOffset < suballoc.offset)
8898  {
8899  // There is free space from lastOffset to suballoc.offset.
8900  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8901  ++outInfo.unusedRangeCount;
8902  outInfo.unusedBytes += unusedRangeSize;
8903  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8904  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8905  }
8906 
8907  // 2. Process this allocation.
8908  // There is allocation with suballoc.offset, suballoc.size.
8909  outInfo.usedBytes += suballoc.size;
8910  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8911  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8912 
8913  // 3. Prepare for next iteration.
8914  lastOffset = suballoc.offset + suballoc.size;
8915  ++nextAlloc1stIndex;
8916  }
8917  // We are at the end.
8918  else
8919  {
8920  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8921  if(lastOffset < freeSpace1stTo2ndEnd)
8922  {
8923  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8924  ++outInfo.unusedRangeCount;
8925  outInfo.unusedBytes += unusedRangeSize;
8926  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8927  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8928  }
8929 
8930  // End of loop.
8931  lastOffset = freeSpace1stTo2ndEnd;
8932  }
8933  }
8934 
8935  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8936  {
8937  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8938  while(lastOffset < size)
8939  {
8940  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8941  while(nextAlloc2ndIndex != SIZE_MAX &&
8942  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8943  {
8944  --nextAlloc2ndIndex;
8945  }
8946 
8947  // Found non-null allocation.
8948  if(nextAlloc2ndIndex != SIZE_MAX)
8949  {
8950  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8951 
8952  // 1. Process free space before this allocation.
8953  if(lastOffset < suballoc.offset)
8954  {
8955  // There is free space from lastOffset to suballoc.offset.
8956  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8957  ++outInfo.unusedRangeCount;
8958  outInfo.unusedBytes += unusedRangeSize;
8959  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8960  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8961  }
8962 
8963  // 2. Process this allocation.
8964  // There is allocation with suballoc.offset, suballoc.size.
8965  outInfo.usedBytes += suballoc.size;
8966  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8967  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8968 
8969  // 3. Prepare for next iteration.
8970  lastOffset = suballoc.offset + suballoc.size;
8971  --nextAlloc2ndIndex;
8972  }
8973  // We are at the end.
8974  else
8975  {
8976  // There is free space from lastOffset to size.
8977  if(lastOffset < size)
8978  {
8979  const VkDeviceSize unusedRangeSize = size - lastOffset;
8980  ++outInfo.unusedRangeCount;
8981  outInfo.unusedBytes += unusedRangeSize;
8982  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8983  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8984  }
8985 
8986  // End of loop.
8987  lastOffset = size;
8988  }
8989  }
8990  }
8991 
8992  outInfo.unusedBytes = size - outInfo.usedBytes;
8993 }
8994 
8995 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8996 {
8997  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8998  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8999  const VkDeviceSize size = GetSize();
9000  const size_t suballoc1stCount = suballocations1st.size();
9001  const size_t suballoc2ndCount = suballocations2nd.size();
9002 
9003  inoutStats.size += size;
9004 
9005  VkDeviceSize lastOffset = 0;
9006 
9007  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9008  {
9009  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9010  size_t nextAlloc2ndIndex = 0; // Index into suballocations2nd; m_1stNullItemsBeginCount applies only to suballocations1st.
9011  while(lastOffset < freeSpace2ndTo1stEnd)
9012  {
9013  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9014  while(nextAlloc2ndIndex < suballoc2ndCount &&
9015  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9016  {
9017  ++nextAlloc2ndIndex;
9018  }
9019 
9020  // Found non-null allocation.
9021  if(nextAlloc2ndIndex < suballoc2ndCount)
9022  {
9023  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9024 
9025  // 1. Process free space before this allocation.
9026  if(lastOffset < suballoc.offset)
9027  {
9028  // There is free space from lastOffset to suballoc.offset.
9029  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9030  inoutStats.unusedSize += unusedRangeSize;
9031  ++inoutStats.unusedRangeCount;
9032  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9033  }
9034 
9035  // 2. Process this allocation.
9036  // There is allocation with suballoc.offset, suballoc.size.
9037  ++inoutStats.allocationCount;
9038 
9039  // 3. Prepare for next iteration.
9040  lastOffset = suballoc.offset + suballoc.size;
9041  ++nextAlloc2ndIndex;
9042  }
9043  // We are at the end.
9044  else
9045  {
9046  if(lastOffset < freeSpace2ndTo1stEnd)
9047  {
9048  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9049  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9050  inoutStats.unusedSize += unusedRangeSize;
9051  ++inoutStats.unusedRangeCount;
9052  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9053  }
9054 
9055  // End of loop.
9056  lastOffset = freeSpace2ndTo1stEnd;
9057  }
9058  }
9059  }
9060 
9061  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9062  const VkDeviceSize freeSpace1stTo2ndEnd =
9063  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9064  while(lastOffset < freeSpace1stTo2ndEnd)
9065  {
9066  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9067  while(nextAlloc1stIndex < suballoc1stCount &&
9068  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9069  {
9070  ++nextAlloc1stIndex;
9071  }
9072 
9073  // Found non-null allocation.
9074  if(nextAlloc1stIndex < suballoc1stCount)
9075  {
9076  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9077 
9078  // 1. Process free space before this allocation.
9079  if(lastOffset < suballoc.offset)
9080  {
9081  // There is free space from lastOffset to suballoc.offset.
9082  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9083  inoutStats.unusedSize += unusedRangeSize;
9084  ++inoutStats.unusedRangeCount;
9085  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9086  }
9087 
9088  // 2. Process this allocation.
9089  // There is allocation with suballoc.offset, suballoc.size.
9090  ++inoutStats.allocationCount;
9091 
9092  // 3. Prepare for next iteration.
9093  lastOffset = suballoc.offset + suballoc.size;
9094  ++nextAlloc1stIndex;
9095  }
9096  // We are at the end.
9097  else
9098  {
9099  if(lastOffset < freeSpace1stTo2ndEnd)
9100  {
9101  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9102  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9103  inoutStats.unusedSize += unusedRangeSize;
9104  ++inoutStats.unusedRangeCount;
9105  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9106  }
9107 
9108  // End of loop.
9109  lastOffset = freeSpace1stTo2ndEnd;
9110  }
9111  }
9112 
9113  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9114  {
9115  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9116  while(lastOffset < size)
9117  {
9118  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9119  while(nextAlloc2ndIndex != SIZE_MAX &&
9120  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9121  {
9122  --nextAlloc2ndIndex;
9123  }
9124 
9125  // Found non-null allocation.
9126  if(nextAlloc2ndIndex != SIZE_MAX)
9127  {
9128  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9129 
9130  // 1. Process free space before this allocation.
9131  if(lastOffset < suballoc.offset)
9132  {
9133  // There is free space from lastOffset to suballoc.offset.
9134  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9135  inoutStats.unusedSize += unusedRangeSize;
9136  ++inoutStats.unusedRangeCount;
9137  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9138  }
9139 
9140  // 2. Process this allocation.
9141  // There is allocation with suballoc.offset, suballoc.size.
9142  ++inoutStats.allocationCount;
9143 
9144  // 3. Prepare for next iteration.
9145  lastOffset = suballoc.offset + suballoc.size;
9146  --nextAlloc2ndIndex;
9147  }
9148  // We are at the end.
9149  else
9150  {
9151  if(lastOffset < size)
9152  {
9153  // There is free space from lastOffset to size.
9154  const VkDeviceSize unusedRangeSize = size - lastOffset;
9155  inoutStats.unusedSize += unusedRangeSize;
9156  ++inoutStats.unusedRangeCount;
9157  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9158  }
9159 
9160  // End of loop.
9161  lastOffset = size;
9162  }
9163  }
9164  }
9165 }
9166 
9167 #if VMA_STATS_STRING_ENABLED
9168 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9169 {
9170  const VkDeviceSize size = GetSize();
9171  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9172  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9173  const size_t suballoc1stCount = suballocations1st.size();
9174  const size_t suballoc2ndCount = suballocations2nd.size();
9175 
9176  // FIRST PASS
9177 
9178  size_t unusedRangeCount = 0;
9179  VkDeviceSize usedBytes = 0;
9180 
9181  VkDeviceSize lastOffset = 0;
9182 
9183  size_t alloc2ndCount = 0;
9184  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9185  {
9186  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9187  size_t nextAlloc2ndIndex = 0;
9188  while(lastOffset < freeSpace2ndTo1stEnd)
9189  {
9190  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9191  while(nextAlloc2ndIndex < suballoc2ndCount &&
9192  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9193  {
9194  ++nextAlloc2ndIndex;
9195  }
9196 
9197  // Found non-null allocation.
9198  if(nextAlloc2ndIndex < suballoc2ndCount)
9199  {
9200  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9201 
9202  // 1. Process free space before this allocation.
9203  if(lastOffset < suballoc.offset)
9204  {
9205  // There is free space from lastOffset to suballoc.offset.
9206  ++unusedRangeCount;
9207  }
9208 
9209  // 2. Process this allocation.
9210  // There is allocation with suballoc.offset, suballoc.size.
9211  ++alloc2ndCount;
9212  usedBytes += suballoc.size;
9213 
9214  // 3. Prepare for next iteration.
9215  lastOffset = suballoc.offset + suballoc.size;
9216  ++nextAlloc2ndIndex;
9217  }
9218  // We are at the end.
9219  else
9220  {
9221  if(lastOffset < freeSpace2ndTo1stEnd)
9222  {
9223  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9224  ++unusedRangeCount;
9225  }
9226 
9227  // End of loop.
9228  lastOffset = freeSpace2ndTo1stEnd;
9229  }
9230  }
9231  }
9232 
9233  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9234  size_t alloc1stCount = 0;
9235  const VkDeviceSize freeSpace1stTo2ndEnd =
9236  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9237  while(lastOffset < freeSpace1stTo2ndEnd)
9238  {
9239  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9240  while(nextAlloc1stIndex < suballoc1stCount &&
9241  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9242  {
9243  ++nextAlloc1stIndex;
9244  }
9245 
9246  // Found non-null allocation.
9247  if(nextAlloc1stIndex < suballoc1stCount)
9248  {
9249  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9250 
9251  // 1. Process free space before this allocation.
9252  if(lastOffset < suballoc.offset)
9253  {
9254  // There is free space from lastOffset to suballoc.offset.
9255  ++unusedRangeCount;
9256  }
9257 
9258  // 2. Process this allocation.
9259  // There is allocation with suballoc.offset, suballoc.size.
9260  ++alloc1stCount;
9261  usedBytes += suballoc.size;
9262 
9263  // 3. Prepare for next iteration.
9264  lastOffset = suballoc.offset + suballoc.size;
9265  ++nextAlloc1stIndex;
9266  }
9267  // We are at the end.
9268  else
9269  {
9270  if(lastOffset < freeSpace1stTo2ndEnd)
9271  {
9272  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9273  ++unusedRangeCount;
9274  }
9275 
9276  // End of loop.
9277  lastOffset = freeSpace1stTo2ndEnd;
9278  }
9279  }
9280 
9281  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9282  {
9283  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9284  while(lastOffset < size)
9285  {
9286  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9287  while(nextAlloc2ndIndex != SIZE_MAX &&
9288  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9289  {
9290  --nextAlloc2ndIndex;
9291  }
9292 
9293  // Found non-null allocation.
9294  if(nextAlloc2ndIndex != SIZE_MAX)
9295  {
9296  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9297 
9298  // 1. Process free space before this allocation.
9299  if(lastOffset < suballoc.offset)
9300  {
9301  // There is free space from lastOffset to suballoc.offset.
9302  ++unusedRangeCount;
9303  }
9304 
9305  // 2. Process this allocation.
9306  // There is allocation with suballoc.offset, suballoc.size.
9307  ++alloc2ndCount;
9308  usedBytes += suballoc.size;
9309 
9310  // 3. Prepare for next iteration.
9311  lastOffset = suballoc.offset + suballoc.size;
9312  --nextAlloc2ndIndex;
9313  }
9314  // We are at the end.
9315  else
9316  {
9317  if(lastOffset < size)
9318  {
9319  // There is free space from lastOffset to size.
9320  ++unusedRangeCount;
9321  }
9322 
9323  // End of loop.
9324  lastOffset = size;
9325  }
9326  }
9327  }
9328 
9329  const VkDeviceSize unusedBytes = size - usedBytes;
9330  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9331 
9332  // SECOND PASS
9333  lastOffset = 0;
9334 
9335  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9336  {
9337  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9338  size_t nextAlloc2ndIndex = 0;
9339  while(lastOffset < freeSpace2ndTo1stEnd)
9340  {
9341  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9342  while(nextAlloc2ndIndex < suballoc2ndCount &&
9343  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9344  {
9345  ++nextAlloc2ndIndex;
9346  }
9347 
9348  // Found non-null allocation.
9349  if(nextAlloc2ndIndex < suballoc2ndCount)
9350  {
9351  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9352 
9353  // 1. Process free space before this allocation.
9354  if(lastOffset < suballoc.offset)
9355  {
9356  // There is free space from lastOffset to suballoc.offset.
9357  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9358  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9359  }
9360 
9361  // 2. Process this allocation.
9362  // There is allocation with suballoc.offset, suballoc.size.
9363  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9364 
9365  // 3. Prepare for next iteration.
9366  lastOffset = suballoc.offset + suballoc.size;
9367  ++nextAlloc2ndIndex;
9368  }
9369  // We are at the end.
9370  else
9371  {
9372  if(lastOffset < freeSpace2ndTo1stEnd)
9373  {
9374  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9375  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9376  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9377  }
9378 
9379  // End of loop.
9380  lastOffset = freeSpace2ndTo1stEnd;
9381  }
9382  }
9383  }
9384 
9385  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9386  while(lastOffset < freeSpace1stTo2ndEnd)
9387  {
9388  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9389  while(nextAlloc1stIndex < suballoc1stCount &&
9390  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9391  {
9392  ++nextAlloc1stIndex;
9393  }
9394 
9395  // Found non-null allocation.
9396  if(nextAlloc1stIndex < suballoc1stCount)
9397  {
9398  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9399 
9400  // 1. Process free space before this allocation.
9401  if(lastOffset < suballoc.offset)
9402  {
9403  // There is free space from lastOffset to suballoc.offset.
9404  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9405  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9406  }
9407 
9408  // 2. Process this allocation.
9409  // There is allocation with suballoc.offset, suballoc.size.
9410  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9411 
9412  // 3. Prepare for next iteration.
9413  lastOffset = suballoc.offset + suballoc.size;
9414  ++nextAlloc1stIndex;
9415  }
9416  // We are at the end.
9417  else
9418  {
9419  if(lastOffset < freeSpace1stTo2ndEnd)
9420  {
9421  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9422  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9423  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9424  }
9425 
9426  // End of loop.
9427  lastOffset = freeSpace1stTo2ndEnd;
9428  }
9429  }
9430 
9431  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9432  {
9433  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9434  while(lastOffset < size)
9435  {
9436  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9437  while(nextAlloc2ndIndex != SIZE_MAX &&
9438  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9439  {
9440  --nextAlloc2ndIndex;
9441  }
9442 
9443  // Found non-null allocation.
9444  if(nextAlloc2ndIndex != SIZE_MAX)
9445  {
9446  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9447 
9448  // 1. Process free space before this allocation.
9449  if(lastOffset < suballoc.offset)
9450  {
9451  // There is free space from lastOffset to suballoc.offset.
9452  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9453  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9454  }
9455 
9456  // 2. Process this allocation.
9457  // There is allocation with suballoc.offset, suballoc.size.
9458  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9459 
9460  // 3. Prepare for next iteration.
9461  lastOffset = suballoc.offset + suballoc.size;
9462  --nextAlloc2ndIndex;
9463  }
9464  // We are at the end.
9465  else
9466  {
9467  if(lastOffset < size)
9468  {
9469  // There is free space from lastOffset to size.
9470  const VkDeviceSize unusedRangeSize = size - lastOffset;
9471  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9472  }
9473 
9474  // End of loop.
9475  lastOffset = size;
9476  }
9477  }
9478  }
9479 
9480  PrintDetailedMap_End(json);
9481 }
9482 #endif // #if VMA_STATS_STRING_ENABLED
9483 
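// With upperAddress == true the request is placed at the highest possible
// offset, growing the 2nd vector as a stack from the end of the block (this is
// rejected while the 2nd vector is already used as a ring buffer). With
// upperAddress == false the request is placed after the end of the 1st vector
// or, depending on the current mode, in the space preceding it. A hypothetical
// call sequence from the owning block's side, with illustrative handle names,
// might look like this:
//
//   VmaAllocationRequest request = {};
//   if(pMetadata->CreateAllocationRequest(
//       currentFrameIndex, frameInUseCount, bufferImageGranularity,
//       allocSize, allocAlignment,
//       true, // upperAddress: allocate from the top of the block
//       VMA_SUBALLOCATION_TYPE_BUFFER,
//       false, // canMakeOtherLost
//       0, // strategy
//       &request))
//   {
//       pMetadata->Alloc(request, VMA_SUBALLOCATION_TYPE_BUFFER, allocSize, true, hAllocation);
//   }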
9484 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9485  uint32_t currentFrameIndex,
9486  uint32_t frameInUseCount,
9487  VkDeviceSize bufferImageGranularity,
9488  VkDeviceSize allocSize,
9489  VkDeviceSize allocAlignment,
9490  bool upperAddress,
9491  VmaSuballocationType allocType,
9492  bool canMakeOtherLost,
9493  uint32_t strategy,
9494  VmaAllocationRequest* pAllocationRequest)
9495 {
9496  VMA_ASSERT(allocSize > 0);
9497  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9498  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9499  VMA_HEAVY_ASSERT(Validate());
9500 
9501  const VkDeviceSize size = GetSize();
9502  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9503  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9504 
9505  if(upperAddress)
9506  {
9507  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9508  {
9509  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9510  return false;
9511  }
9512 
9513  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9514  if(allocSize > size)
9515  {
9516  return false;
9517  }
9518  VkDeviceSize resultBaseOffset = size - allocSize;
9519  if(!suballocations2nd.empty())
9520  {
9521  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9522  resultBaseOffset = lastSuballoc.offset - allocSize;
9523  if(allocSize > lastSuballoc.offset)
9524  {
9525  return false;
9526  }
9527  }
9528 
9529  // Start from offset equal to end of free space.
9530  VkDeviceSize resultOffset = resultBaseOffset;
9531 
9532  // Apply VMA_DEBUG_MARGIN at the end.
9533  if(VMA_DEBUG_MARGIN > 0)
9534  {
9535  if(resultOffset < VMA_DEBUG_MARGIN)
9536  {
9537  return false;
9538  }
9539  resultOffset -= VMA_DEBUG_MARGIN;
9540  }
9541 
9542  // Apply alignment.
9543  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9544 
9545  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9546  // Make bigger alignment if necessary.
9547  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9548  {
9549  bool bufferImageGranularityConflict = false;
9550  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9551  {
9552  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9553  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9554  {
9555  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9556  {
9557  bufferImageGranularityConflict = true;
9558  break;
9559  }
9560  }
9561  else
9562  // Already on previous page.
9563  break;
9564  }
9565  if(bufferImageGranularityConflict)
9566  {
9567  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9568  }
9569  }
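 // Note: bufferImageGranularity comes from VkPhysicalDeviceLimits. Linear and
 // optimally-tiled resources must not share a "page" of that granularity, which is
 // why a detected conflict forces the coarser alignment above.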
9570 
9571  // There is enough free space.
9572  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9573  suballocations1st.back().offset + suballocations1st.back().size :
9574  0;
9575  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9576  {
9577  // Check previous suballocations for BufferImageGranularity conflicts.
9578  // If conflict exists, allocation cannot be made here.
9579  if(bufferImageGranularity > 1)
9580  {
9581  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9582  {
9583  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9584  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9585  {
9586  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9587  {
9588  return false;
9589  }
9590  }
9591  else
9592  {
9593  // Already on next page.
9594  break;
9595  }
9596  }
9597  }
9598 
9599  // All tests passed: Success.
9600  pAllocationRequest->offset = resultOffset;
9601  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9602  pAllocationRequest->sumItemSize = 0;
9603  // pAllocationRequest->item unused.
9604  pAllocationRequest->itemsToMakeLostCount = 0;
9605  return true;
9606  }
9607  }
9608  else // !upperAddress
9609  {
9610  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9611  {
9612  // Try to allocate at the end of 1st vector.
9613 
9614  VkDeviceSize resultBaseOffset = 0;
9615  if(!suballocations1st.empty())
9616  {
9617  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9618  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9619  }
9620 
9621  // Start from offset equal to beginning of free space.
9622  VkDeviceSize resultOffset = resultBaseOffset;
9623 
9624  // Apply VMA_DEBUG_MARGIN at the beginning.
9625  if(VMA_DEBUG_MARGIN > 0)
9626  {
9627  resultOffset += VMA_DEBUG_MARGIN;
9628  }
9629 
9630  // Apply alignment.
9631  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
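 // Example with hypothetical values: resultOffset == 1000 and allocAlignment == 256
 // give VmaAlignUp(1000, 256) == 1024, i.e. the offset moves up to the next
 // multiple of the alignment.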
9632 
9633  // Check previous suballocations for BufferImageGranularity conflicts.
9634  // Make bigger alignment if necessary.
9635  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9636  {
9637  bool bufferImageGranularityConflict = false;
9638  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9639  {
9640  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9641  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9642  {
9643  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9644  {
9645  bufferImageGranularityConflict = true;
9646  break;
9647  }
9648  }
9649  else
9650  // Already on previous page.
9651  break;
9652  }
9653  if(bufferImageGranularityConflict)
9654  {
9655  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9656  }
9657  }
9658 
9659  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9660  suballocations2nd.back().offset : size;
9661 
9662  // There is enough free space at the end after alignment.
9663  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9664  {
9665  // Check next suballocations for BufferImageGranularity conflicts.
9666  // If conflict exists, allocation cannot be made here.
9667  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9668  {
9669  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9670  {
9671  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9672  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9673  {
9674  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9675  {
9676  return false;
9677  }
9678  }
9679  else
9680  {
9681  // Already on previous page.
9682  break;
9683  }
9684  }
9685  }
9686 
9687  // All tests passed: Success.
9688  pAllocationRequest->offset = resultOffset;
9689  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9690  pAllocationRequest->sumItemSize = 0;
9691  // pAllocationRequest->item unused.
9692  pAllocationRequest->itemsToMakeLostCount = 0;
9693  return true;
9694  }
9695  }
9696 
9697  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9698  // beginning of 1st vector as the end of free space.
9699  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9700  {
9701  VMA_ASSERT(!suballocations1st.empty());
9702 
9703  VkDeviceSize resultBaseOffset = 0;
9704  if(!suballocations2nd.empty())
9705  {
9706  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9707  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9708  }
9709 
9710  // Start from offset equal to beginning of free space.
9711  VkDeviceSize resultOffset = resultBaseOffset;
9712 
9713  // Apply VMA_DEBUG_MARGIN at the beginning.
9714  if(VMA_DEBUG_MARGIN > 0)
9715  {
9716  resultOffset += VMA_DEBUG_MARGIN;
9717  }
9718 
9719  // Apply alignment.
9720  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9721 
9722  // Check previous suballocations for BufferImageGranularity conflicts.
9723  // Make bigger alignment if necessary.
9724  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9725  {
9726  bool bufferImageGranularityConflict = false;
9727  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9728  {
9729  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9730  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9731  {
9732  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9733  {
9734  bufferImageGranularityConflict = true;
9735  break;
9736  }
9737  }
9738  else
9739  // Already on previous page.
9740  break;
9741  }
9742  if(bufferImageGranularityConflict)
9743  {
9744  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9745  }
9746  }
9747 
9748  pAllocationRequest->itemsToMakeLostCount = 0;
9749  pAllocationRequest->sumItemSize = 0;
9750  size_t index1st = m_1stNullItemsBeginCount;
9751 
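 // Note: "making an allocation lost" is the mechanism used below - an allocation
 // that can become lost and has not been used within the last frameInUseCount
 // frames (relative to currentFrameIndex) may be sacrificed to make room for the
 // new allocation.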
9752  if(canMakeOtherLost)
9753  {
9754  while(index1st < suballocations1st.size() &&
9755  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
9756  {
9757  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
9758  const VmaSuballocation& suballoc = suballocations1st[index1st];
9759  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9760  {
9761  // No problem.
9762  }
9763  else
9764  {
9765  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9766  if(suballoc.hAllocation->CanBecomeLost() &&
9767  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9768  {
9769  ++pAllocationRequest->itemsToMakeLostCount;
9770  pAllocationRequest->sumItemSize += suballoc.size;
9771  }
9772  else
9773  {
9774  return false;
9775  }
9776  }
9777  ++index1st;
9778  }
9779 
9780  // Check next suballocations for BufferImageGranularity conflicts.
9781  // If conflict exists, we must mark more allocations lost or fail.
9782  if(bufferImageGranularity > 1)
9783  {
9784  while(index1st < suballocations1st.size())
9785  {
9786  const VmaSuballocation& suballoc = suballocations1st[index1st];
9787  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9788  {
9789  if(suballoc.hAllocation != VK_NULL_HANDLE)
9790  {
9791  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9792  if(suballoc.hAllocation->CanBecomeLost() &&
9793  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9794  {
9795  ++pAllocationRequest->itemsToMakeLostCount;
9796  pAllocationRequest->sumItemSize += suballoc.size;
9797  }
9798  else
9799  {
9800  return false;
9801  }
9802  }
9803  }
9804  else
9805  {
9806  // Already on next page.
9807  break;
9808  }
9809  ++index1st;
9810  }
9811  }
9812  }
9813 
9814  // There is enough free space at the end after alignment.
9815  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9816  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9817  {
9818  // Check next suballocations for BufferImageGranularity conflicts.
9819  // If conflict exists, allocation cannot be made here.
9820  if(bufferImageGranularity > 1)
9821  {
9822  for(size_t nextSuballocIndex = index1st;
9823  nextSuballocIndex < suballocations1st.size();
9824  nextSuballocIndex++)
9825  {
9826  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9827  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9828  {
9829  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9830  {
9831  return false;
9832  }
9833  }
9834  else
9835  {
9836  // Already on next page.
9837  break;
9838  }
9839  }
9840  }
9841 
9842  // All tests passed: Success.
9843  pAllocationRequest->offset = resultOffset;
9844  pAllocationRequest->sumFreeSize =
9845  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9846  - resultBaseOffset
9847  - pAllocationRequest->sumItemSize;
9848  // pAllocationRequest->item unused.
9849  return true;
9850  }
9851  }
9852  }
9853 
9854  return false;
9855 }
9856 
9857 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9858  uint32_t currentFrameIndex,
9859  uint32_t frameInUseCount,
9860  VmaAllocationRequest* pAllocationRequest)
9861 {
9862  if(pAllocationRequest->itemsToMakeLostCount == 0)
9863  {
9864  return true;
9865  }
9866 
9867  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9868 
9869  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9870  size_t index1st = m_1stNullItemsBeginCount;
9871  size_t madeLostCount = 0;
9872  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9873  {
9874  VMA_ASSERT(index1st < suballocations1st.size());
9875  VmaSuballocation& suballoc = suballocations1st[index1st];
9876  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9877  {
9878  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9879  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
9880  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9881  {
9882  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9883  suballoc.hAllocation = VK_NULL_HANDLE;
9884  m_SumFreeSize += suballoc.size;
9885  ++m_1stNullItemsMiddleCount;
9886  ++madeLostCount;
9887  }
9888  else
9889  {
9890  return false;
9891  }
9892  }
9893  ++index1st;
9894  }
9895 
9896  CleanupAfterFree();
9897  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
9898 
9899  return true;
9900 }
9901 
9902 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9903 {
9904  uint32_t lostAllocationCount = 0;
9905 
9906  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9907  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9908  {
9909  VmaSuballocation& suballoc = suballocations1st[i];
9910  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9911  suballoc.hAllocation->CanBecomeLost() &&
9912  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9913  {
9914  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9915  suballoc.hAllocation = VK_NULL_HANDLE;
9916  ++m_1stNullItemsMiddleCount;
9917  m_SumFreeSize += suballoc.size;
9918  ++lostAllocationCount;
9919  }
9920  }
9921 
9922  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9923  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9924  {
9925  VmaSuballocation& suballoc = suballocations2nd[i];
9926  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9927  suballoc.hAllocation->CanBecomeLost() &&
9928  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9929  {
9930  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9931  suballoc.hAllocation = VK_NULL_HANDLE;
9932  ++m_2ndNullItemsCount;
9933  ++lostAllocationCount;
9934  }
9935  }
9936 
9937  if(lostAllocationCount)
9938  {
9939  CleanupAfterFree();
9940  }
9941 
9942  return lostAllocationCount;
9943 }
9944 
9945 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9946 {
9947  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9948  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9949  {
9950  const VmaSuballocation& suballoc = suballocations1st[i];
9951  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9952  {
9953  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9954  {
9955  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9956  return VK_ERROR_VALIDATION_FAILED_EXT;
9957  }
9958  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9959  {
9960  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9961  return VK_ERROR_VALIDATION_FAILED_EXT;
9962  }
9963  }
9964  }
9965 
9966  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9967  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9968  {
9969  const VmaSuballocation& suballoc = suballocations2nd[i];
9970  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9971  {
9972  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9973  {
9974  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9975  return VK_ERROR_VALIDATION_FAILED_EXT;
9976  }
9977  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9978  {
9979  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9980  return VK_ERROR_VALIDATION_FAILED_EXT;
9981  }
9982  }
9983  }
9984 
9985  return VK_SUCCESS;
9986 }
9987 
9988 void VmaBlockMetadata_Linear::Alloc(
9989  const VmaAllocationRequest& request,
9990  VmaSuballocationType type,
9991  VkDeviceSize allocSize,
9992  bool upperAddress,
9993  VmaAllocation hAllocation)
9994 {
9995  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9996 
9997  if(upperAddress)
9998  {
9999  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10000  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10001  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10002  suballocations2nd.push_back(newSuballoc);
10003  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10004  }
10005  else
10006  {
10007  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10008 
10009  // First allocation.
10010  if(suballocations1st.empty())
10011  {
10012  suballocations1st.push_back(newSuballoc);
10013  }
10014  else
10015  {
10016  // New allocation at the end of 1st vector.
10017  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
10018  {
10019  // Check if it fits before the end of the block.
10020  VMA_ASSERT(request.offset + allocSize <= GetSize());
10021  suballocations1st.push_back(newSuballoc);
10022  }
10023  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10024  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
10025  {
10026  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10027 
10028  switch(m_2ndVectorMode)
10029  {
10030  case SECOND_VECTOR_EMPTY:
10031  // First allocation from second part ring buffer.
10032  VMA_ASSERT(suballocations2nd.empty());
10033  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10034  break;
10035  case SECOND_VECTOR_RING_BUFFER:
10036  // 2-part ring buffer is already started.
10037  VMA_ASSERT(!suballocations2nd.empty());
10038  break;
10039  case SECOND_VECTOR_DOUBLE_STACK:
10040  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10041  break;
10042  default:
10043  VMA_ASSERT(0);
10044  }
10045 
10046  suballocations2nd.push_back(newSuballoc);
10047  }
10048  else
10049  {
10050  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10051  }
10052  }
10053  }
10054 
10055  m_SumFreeSize -= newSuballoc.size;
10056 }
10057 
10058 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10059 {
10060  FreeAtOffset(allocation->GetOffset());
10061 }
10062 
10063 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10064 {
10065  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10066  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10067 
10068  if(!suballocations1st.empty())
10069  {
10070  // First allocation: Mark it as next empty at the beginning.
10071  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10072  if(firstSuballoc.offset == offset)
10073  {
10074  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10075  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10076  m_SumFreeSize += firstSuballoc.size;
10077  ++m_1stNullItemsBeginCount;
10078  CleanupAfterFree();
10079  return;
10080  }
10081  }
10082 
10083  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10084  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10085  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10086  {
10087  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10088  if(lastSuballoc.offset == offset)
10089  {
10090  m_SumFreeSize += lastSuballoc.size;
10091  suballocations2nd.pop_back();
10092  CleanupAfterFree();
10093  return;
10094  }
10095  }
10096  // Last allocation in 1st vector.
10097  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10098  {
10099  VmaSuballocation& lastSuballoc = suballocations1st.back();
10100  if(lastSuballoc.offset == offset)
10101  {
10102  m_SumFreeSize += lastSuballoc.size;
10103  suballocations1st.pop_back();
10104  CleanupAfterFree();
10105  return;
10106  }
10107  }
10108 
10109  // Item from the middle of 1st vector.
10110  {
10111  VmaSuballocation refSuballoc;
10112  refSuballoc.offset = offset;
10113  // The rest of the members is intentionally left uninitialized for better performance.
10114  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10115  suballocations1st.begin() + m_1stNullItemsBeginCount,
10116  suballocations1st.end(),
10117  refSuballoc);
10118  if(it != suballocations1st.end())
10119  {
10120  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10121  it->hAllocation = VK_NULL_HANDLE;
10122  ++m_1stNullItemsMiddleCount;
10123  m_SumFreeSize += it->size;
10124  CleanupAfterFree();
10125  return;
10126  }
10127  }
10128 
10129  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10130  {
10131  // Item from the middle of 2nd vector.
10132  VmaSuballocation refSuballoc;
10133  refSuballoc.offset = offset;
10134  // The rest of the members is intentionally left uninitialized for better performance.
10135  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10136  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10137  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10138  if(it != suballocations2nd.end())
10139  {
10140  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10141  it->hAllocation = VK_NULL_HANDLE;
10142  ++m_2ndNullItemsCount;
10143  m_SumFreeSize += it->size;
10144  CleanupAfterFree();
10145  return;
10146  }
10147  }
10148 
10149  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10150 }
10151 
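// Heuristic: compacting the 1st vector is considered worthwhile only when it is
// large (more than 32 entries) and freed (null) entries outnumber live ones by at
// least 3:2, per the condition below.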
10152 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10153 {
10154  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10155  const size_t suballocCount = AccessSuballocations1st().size();
10156  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10157 }
10158 
10159 void VmaBlockMetadata_Linear::CleanupAfterFree()
10160 {
10161  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10162  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10163 
10164  if(IsEmpty())
10165  {
10166  suballocations1st.clear();
10167  suballocations2nd.clear();
10168  m_1stNullItemsBeginCount = 0;
10169  m_1stNullItemsMiddleCount = 0;
10170  m_2ndNullItemsCount = 0;
10171  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10172  }
10173  else
10174  {
10175  const size_t suballoc1stCount = suballocations1st.size();
10176  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10177  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10178 
10179  // Find more null items at the beginning of 1st vector.
10180  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10181  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10182  {
10183  ++m_1stNullItemsBeginCount;
10184  --m_1stNullItemsMiddleCount;
10185  }
10186 
10187  // Find more null items at the end of 1st vector.
10188  while(m_1stNullItemsMiddleCount > 0 &&
10189  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10190  {
10191  --m_1stNullItemsMiddleCount;
10192  suballocations1st.pop_back();
10193  }
10194 
10195  // Find more null items at the end of 2nd vector.
10196  while(m_2ndNullItemsCount > 0 &&
10197  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10198  {
10199  --m_2ndNullItemsCount;
10200  suballocations2nd.pop_back();
10201  }
10202 
10203  if(ShouldCompact1st())
10204  {
10205  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10206  size_t srcIndex = m_1stNullItemsBeginCount;
10207  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10208  {
10209  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10210  {
10211  ++srcIndex;
10212  }
10213  if(dstIndex != srcIndex)
10214  {
10215  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10216  }
10217  ++srcIndex;
10218  }
10219  suballocations1st.resize(nonNullItemCount);
10220  m_1stNullItemsBeginCount = 0;
10221  m_1stNullItemsMiddleCount = 0;
10222  }
10223 
10224  // 2nd vector became empty.
10225  if(suballocations2nd.empty())
10226  {
10227  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10228  }
10229 
10230  // 1st vector became empty.
10231  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10232  {
10233  suballocations1st.clear();
10234  m_1stNullItemsBeginCount = 0;
10235 
10236  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10237  {
10238  // Swap 1st with 2nd. Now 2nd is empty.
10239  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10240  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10241  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10242  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10243  {
10244  ++m_1stNullItemsBeginCount;
10245  --m_1stNullItemsMiddleCount;
10246  }
10247  m_2ndNullItemsCount = 0;
10248  m_1stVectorIndex ^= 1;
10249  }
10250  }
10251  }
10252 
10253  VMA_HEAVY_ASSERT(Validate());
10254 }
10255 
10256 
10257 ////////////////////////////////////////////////////////////////////////////////
10258 // class VmaBlockMetadata_Buddy
10259 
10260 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10261  VmaBlockMetadata(hAllocator),
10262  m_Root(VMA_NULL),
10263  m_AllocationCount(0),
10264  m_FreeCount(1),
10265  m_SumFreeSize(0)
10266 {
10267  memset(m_FreeList, 0, sizeof(m_FreeList));
10268 }
10269 
10270 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10271 {
10272  DeleteNode(m_Root);
10273 }
10274 
10275 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10276 {
10277  VmaBlockMetadata::Init(size);
10278 
10279  m_UsableSize = VmaPrevPow2(size);
10280  m_SumFreeSize = m_UsableSize;
10281 
10282  // Calculate m_LevelCount.
10283  m_LevelCount = 1;
10284  while(m_LevelCount < MAX_LEVELS &&
10285  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10286  {
10287  ++m_LevelCount;
10288  }
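 // Example with hypothetical values: for m_UsableSize == 256 the node size per
 // level is 256, 128, 64, ... - each level halves it - and m_LevelCount stops
 // increasing once a level's node size would fall below MIN_NODE_SIZE or
 // MAX_LEVELS is reached.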
10289 
10290  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10291  rootNode->offset = 0;
10292  rootNode->type = Node::TYPE_FREE;
10293  rootNode->parent = VMA_NULL;
10294  rootNode->buddy = VMA_NULL;
10295 
10296  m_Root = rootNode;
10297  AddToFreeListFront(0, rootNode);
10298 }
10299 
10300 bool VmaBlockMetadata_Buddy::Validate() const
10301 {
10302  // Validate tree.
10303  ValidationContext ctx;
10304  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10305  {
10306  VMA_VALIDATE(false && "ValidateNode failed.");
10307  }
10308  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10309  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10310 
10311  // Validate free node lists.
10312  for(uint32_t level = 0; level < m_LevelCount; ++level)
10313  {
10314  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10315  m_FreeList[level].front->free.prev == VMA_NULL);
10316 
10317  for(Node* node = m_FreeList[level].front;
10318  node != VMA_NULL;
10319  node = node->free.next)
10320  {
10321  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10322 
10323  if(node->free.next == VMA_NULL)
10324  {
10325  VMA_VALIDATE(m_FreeList[level].back == node);
10326  }
10327  else
10328  {
10329  VMA_VALIDATE(node->free.next->free.prev == node);
10330  }
10331  }
10332  }
10333 
10334  // Validate that free lists at higher levels are empty.
10335  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10336  {
10337  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10338  }
10339 
10340  return true;
10341 }
10342 
10343 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10344 {
10345  for(uint32_t level = 0; level < m_LevelCount; ++level)
10346  {
10347  if(m_FreeList[level].front != VMA_NULL)
10348  {
10349  return LevelToNodeSize(level);
10350  }
10351  }
10352  return 0;
10353 }
10354 
10355 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10356 {
10357  const VkDeviceSize unusableSize = GetUnusableSize();
10358 
10359  outInfo.blockCount = 1;
10360 
10361  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10362  outInfo.usedBytes = outInfo.unusedBytes = 0;
10363 
10364  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10365  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10366  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10367 
10368  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10369 
10370  if(unusableSize > 0)
10371  {
10372  ++outInfo.unusedRangeCount;
10373  outInfo.unusedBytes += unusableSize;
10374  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10375  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10376  }
10377 }
10378 
10379 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10380 {
10381  const VkDeviceSize unusableSize = GetUnusableSize();
10382 
10383  inoutStats.size += GetSize();
10384  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10385  inoutStats.allocationCount += m_AllocationCount;
10386  inoutStats.unusedRangeCount += m_FreeCount;
10387  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10388 
10389  if(unusableSize > 0)
10390  {
10391  ++inoutStats.unusedRangeCount;
10392  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10393  }
10394 }
10395 
10396 #if VMA_STATS_STRING_ENABLED
10397 
10398 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10399 {
10400  // TODO optimize
10401  VmaStatInfo stat;
10402  CalcAllocationStatInfo(stat);
10403 
10404  PrintDetailedMap_Begin(
10405  json,
10406  stat.unusedBytes,
10407  stat.allocationCount,
10408  stat.unusedRangeCount);
10409 
10410  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10411 
10412  const VkDeviceSize unusableSize = GetUnusableSize();
10413  if(unusableSize > 0)
10414  {
10415  PrintDetailedMap_UnusedRange(json,
10416  m_UsableSize, // offset
10417  unusableSize); // size
10418  }
10419 
10420  PrintDetailedMap_End(json);
10421 }
10422 
10423 #endif // #if VMA_STATS_STRING_ENABLED
10424 
10425 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10426  uint32_t currentFrameIndex,
10427  uint32_t frameInUseCount,
10428  VkDeviceSize bufferImageGranularity,
10429  VkDeviceSize allocSize,
10430  VkDeviceSize allocAlignment,
10431  bool upperAddress,
10432  VmaSuballocationType allocType,
10433  bool canMakeOtherLost,
10434  uint32_t strategy,
10435  VmaAllocationRequest* pAllocationRequest)
10436 {
10437  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10438 
10439  // Simple way to respect bufferImageGranularity. May be optimized some day.
10440  // Whenever it might be an OPTIMAL image...
10441  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10442  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10443  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10444  {
10445  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10446  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10447  }
10448 
10449  if(allocSize > m_UsableSize)
10450  {
10451  return false;
10452  }
10453 
10454  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10455  for(uint32_t level = targetLevel + 1; level--; )
10456  {
10457  for(Node* freeNode = m_FreeList[level].front;
10458  freeNode != VMA_NULL;
10459  freeNode = freeNode->free.next)
10460  {
10461  if(freeNode->offset % allocAlignment == 0)
10462  {
10463  pAllocationRequest->offset = freeNode->offset;
10464  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10465  pAllocationRequest->sumItemSize = 0;
10466  pAllocationRequest->itemsToMakeLostCount = 0;
10467  pAllocationRequest->customData = (void*)(uintptr_t)level;
10468  return true;
10469  }
10470  }
10471  }
10472 
10473  return false;
10474 }
10475 
10476 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10477  uint32_t currentFrameIndex,
10478  uint32_t frameInUseCount,
10479  VmaAllocationRequest* pAllocationRequest)
10480 {
10481  /*
10482  Lost allocations are not supported in buddy allocator at the moment.
10483  Support might be added in the future.
10484  */
10485  return pAllocationRequest->itemsToMakeLostCount == 0;
10486 }
10487 
10488 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10489 {
10490  /*
10491  Lost allocations are not supported in buddy allocator at the moment.
10492  Support might be added in the future.
10493  */
10494  return 0;
10495 }
10496 
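// Note: the request's customData carries the level at which a sufficiently large
// free node was found (see CreateAllocationRequest above). Alloc() below walks
// down from that level, splitting the node into buddy pairs until it reaches the
// level matching the allocation size.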
10497 void VmaBlockMetadata_Buddy::Alloc(
10498  const VmaAllocationRequest& request,
10499  VmaSuballocationType type,
10500  VkDeviceSize allocSize,
10501  bool upperAddress,
10502  VmaAllocation hAllocation)
10503 {
10504  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10505  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10506 
10507  Node* currNode = m_FreeList[currLevel].front;
10508  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10509  while(currNode->offset != request.offset)
10510  {
10511  currNode = currNode->free.next;
10512  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10513  }
10514 
10515  // Go down, splitting free nodes.
10516  while(currLevel < targetLevel)
10517  {
10518  // currNode is already first free node at currLevel.
10519  // Remove it from list of free nodes at this currLevel.
10520  RemoveFromFreeList(currLevel, currNode);
10521 
10522  const uint32_t childrenLevel = currLevel + 1;
10523 
10524  // Create two free sub-nodes.
10525  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10526  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10527 
10528  leftChild->offset = currNode->offset;
10529  leftChild->type = Node::TYPE_FREE;
10530  leftChild->parent = currNode;
10531  leftChild->buddy = rightChild;
10532 
10533  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10534  rightChild->type = Node::TYPE_FREE;
10535  rightChild->parent = currNode;
10536  rightChild->buddy = leftChild;
10537 
10538  // Convert current currNode to split type.
10539  currNode->type = Node::TYPE_SPLIT;
10540  currNode->split.leftChild = leftChild;
10541 
10542  // Add child nodes to free list. Order is important!
10543  AddToFreeListFront(childrenLevel, rightChild);
10544  AddToFreeListFront(childrenLevel, leftChild);
10545 
10546  ++m_FreeCount;
10547  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10548  ++currLevel;
10549  currNode = m_FreeList[currLevel].front;
10550 
10551  /*
10552  We can be sure that currNode, as left child of node previously split,
10553  also fulfills the alignment requirement.
10554  */
10555  }
10556 
10557  // Remove from free list.
10558  VMA_ASSERT(currLevel == targetLevel &&
10559  currNode != VMA_NULL &&
10560  currNode->type == Node::TYPE_FREE);
10561  RemoveFromFreeList(currLevel, currNode);
10562 
10563  // Convert to allocation node.
10564  currNode->type = Node::TYPE_ALLOCATION;
10565  currNode->allocation.alloc = hAllocation;
10566 
10567  ++m_AllocationCount;
10568  --m_FreeCount;
10569  m_SumFreeSize -= allocSize;
10570 }
10571 
10572 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10573 {
10574  if(node->type == Node::TYPE_SPLIT)
10575  {
10576  DeleteNode(node->split.leftChild->buddy);
10577  DeleteNode(node->split.leftChild);
10578  }
10579 
10580  vma_delete(GetAllocationCallbacks(), node);
10581 }
10582 
10583 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10584 {
10585  VMA_VALIDATE(level < m_LevelCount);
10586  VMA_VALIDATE(curr->parent == parent);
10587  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10588  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10589  switch(curr->type)
10590  {
10591  case Node::TYPE_FREE:
10592  // curr->free.prev, next are validated separately.
10593  ctx.calculatedSumFreeSize += levelNodeSize;
10594  ++ctx.calculatedFreeCount;
10595  break;
10596  case Node::TYPE_ALLOCATION:
10597  ++ctx.calculatedAllocationCount;
10598  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10599  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10600  break;
10601  case Node::TYPE_SPLIT:
10602  {
10603  const uint32_t childrenLevel = level + 1;
10604  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10605  const Node* const leftChild = curr->split.leftChild;
10606  VMA_VALIDATE(leftChild != VMA_NULL);
10607  VMA_VALIDATE(leftChild->offset == curr->offset);
10608  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10609  {
10610  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10611  }
10612  const Node* const rightChild = leftChild->buddy;
10613  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10614  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10615  {
10616  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10617  }
10618  }
10619  break;
10620  default:
10621  return false;
10622  }
10623 
10624  return true;
10625 }
10626 
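// Example with hypothetical values: with m_UsableSize == 256, an allocSize of 50
// maps to level 2 (node size 64) - the loop below descends while the next,
// smaller node size still fits the allocation.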
10627 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10628 {
10629  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10630  uint32_t level = 0;
10631  VkDeviceSize currLevelNodeSize = m_UsableSize;
10632  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10633  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10634  {
10635  ++level;
10636  currLevelNodeSize = nextLevelNodeSize;
10637  nextLevelNodeSize = currLevelNodeSize >> 1;
10638  }
10639  return level;
10640 }
10641 
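// Note: freeing walks the tree from the root, choosing the left or right child by
// comparing the offset against the midpoint of the current node, then merges the
// freed node with its buddy level by level for as long as both halves are free.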
10642 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10643 {
10644  // Find node and level.
10645  Node* node = m_Root;
10646  VkDeviceSize nodeOffset = 0;
10647  uint32_t level = 0;
10648  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10649  while(node->type == Node::TYPE_SPLIT)
10650  {
10651  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10652  if(offset < nodeOffset + nextLevelSize)
10653  {
10654  node = node->split.leftChild;
10655  }
10656  else
10657  {
10658  node = node->split.leftChild->buddy;
10659  nodeOffset += nextLevelSize;
10660  }
10661  ++level;
10662  levelNodeSize = nextLevelSize;
10663  }
10664 
10665  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10666  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10667 
10668  ++m_FreeCount;
10669  --m_AllocationCount;
10670  m_SumFreeSize += alloc->GetSize();
10671 
10672  node->type = Node::TYPE_FREE;
10673 
10674  // Join free nodes if possible.
10675  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10676  {
10677  RemoveFromFreeList(level, node->buddy);
10678  Node* const parent = node->parent;
10679 
10680  vma_delete(GetAllocationCallbacks(), node->buddy);
10681  vma_delete(GetAllocationCallbacks(), node);
10682  parent->type = Node::TYPE_FREE;
10683 
10684  node = parent;
10685  --level;
10686  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10687  --m_FreeCount;
10688  }
10689 
10690  AddToFreeListFront(level, node);
10691 }
10692 
10693 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10694 {
10695  switch(node->type)
10696  {
10697  case Node::TYPE_FREE:
10698  ++outInfo.unusedRangeCount;
10699  outInfo.unusedBytes += levelNodeSize;
10700  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10701  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10702  break;
10703  case Node::TYPE_ALLOCATION:
10704  {
10705  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10706  ++outInfo.allocationCount;
10707  outInfo.usedBytes += allocSize;
10708  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10709  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
10710 
10711  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10712  if(unusedRangeSize > 0)
10713  {
10714  ++outInfo.unusedRangeCount;
10715  outInfo.unusedBytes += unusedRangeSize;
10716  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10717  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10718  }
10719  }
10720  break;
10721  case Node::TYPE_SPLIT:
10722  {
10723  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10724  const Node* const leftChild = node->split.leftChild;
10725  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10726  const Node* const rightChild = leftChild->buddy;
10727  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10728  }
10729  break;
10730  default:
10731  VMA_ASSERT(0);
10732  }
10733 }
10734 
10735 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10736 {
10737  VMA_ASSERT(node->type == Node::TYPE_FREE);
10738 
10739  // List is empty.
10740  Node* const frontNode = m_FreeList[level].front;
10741  if(frontNode == VMA_NULL)
10742  {
10743  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10744  node->free.prev = node->free.next = VMA_NULL;
10745  m_FreeList[level].front = m_FreeList[level].back = node;
10746  }
10747  else
10748  {
10749  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10750  node->free.prev = VMA_NULL;
10751  node->free.next = frontNode;
10752  frontNode->free.prev = node;
10753  m_FreeList[level].front = node;
10754  }
10755 }
10756 
10757 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10758 {
10759  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10760 
10761  // It is at the front.
10762  if(node->free.prev == VMA_NULL)
10763  {
10764  VMA_ASSERT(m_FreeList[level].front == node);
10765  m_FreeList[level].front = node->free.next;
10766  }
10767  else
10768  {
10769  Node* const prevFreeNode = node->free.prev;
10770  VMA_ASSERT(prevFreeNode->free.next == node);
10771  prevFreeNode->free.next = node->free.next;
10772  }
10773 
10774  // It is at the back.
10775  if(node->free.next == VMA_NULL)
10776  {
10777  VMA_ASSERT(m_FreeList[level].back == node);
10778  m_FreeList[level].back = node->free.prev;
10779  }
10780  else
10781  {
10782  Node* const nextFreeNode = node->free.next;
10783  VMA_ASSERT(nextFreeNode->free.prev == node);
10784  nextFreeNode->free.prev = node->free.prev;
10785  }
10786 }
10787 
10788 #if VMA_STATS_STRING_ENABLED
10789 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10790 {
10791  switch(node->type)
10792  {
10793  case Node::TYPE_FREE:
10794  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10795  break;
10796  case Node::TYPE_ALLOCATION:
10797  {
10798  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10799  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10800  if(allocSize < levelNodeSize)
10801  {
10802  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10803  }
10804  }
10805  break;
10806  case Node::TYPE_SPLIT:
10807  {
10808  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10809  const Node* const leftChild = node->split.leftChild;
10810  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10811  const Node* const rightChild = leftChild->buddy;
10812  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10813  }
10814  break;
10815  default:
10816  VMA_ASSERT(0);
10817  }
10818 }
10819 #endif // #if VMA_STATS_STRING_ENABLED
10820 
10821 
10822 ////////////////////////////////////////////////////////////////////////////////
10823 // class VmaDeviceMemoryBlock
10824 
10825 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10826  m_pMetadata(VMA_NULL),
10827  m_MemoryTypeIndex(UINT32_MAX),
10828  m_Id(0),
10829  m_hMemory(VK_NULL_HANDLE),
10830  m_MapCount(0),
10831  m_pMappedData(VMA_NULL)
10832 {
10833 }
10834 
10835 void VmaDeviceMemoryBlock::Init(
10836  VmaAllocator hAllocator,
10837  uint32_t newMemoryTypeIndex,
10838  VkDeviceMemory newMemory,
10839  VkDeviceSize newSize,
10840  uint32_t id,
10841  uint32_t algorithm)
10842 {
10843  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10844 
10845  m_MemoryTypeIndex = newMemoryTypeIndex;
10846  m_Id = id;
10847  m_hMemory = newMemory;
10848 
10849  switch(algorithm)
10850  {
10851  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10852  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10853  break;
10854  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10855  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10856  break;
10857  default:
10858  VMA_ASSERT(0);
10859  // Fall-through.
10860  case 0:
10861  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10862  }
10863  m_pMetadata->Init(newSize);
10864 }
10865 
10866 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10867 {
10868  // This is the most important assert in the entire library.
10869  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10870  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10871 
10872  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10873  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10874  m_hMemory = VK_NULL_HANDLE;
10875 
10876  vma_delete(allocator, m_pMetadata);
10877  m_pMetadata = VMA_NULL;
10878 }
10879 
10880 bool VmaDeviceMemoryBlock::Validate() const
10881 {
10882  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10883  (m_pMetadata->GetSize() != 0));
10884 
10885  return m_pMetadata->Validate();
10886 }
10887 
10888 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10889 {
10890  void* pData = nullptr;
10891  VkResult res = Map(hAllocator, 1, &pData);
10892  if(res != VK_SUCCESS)
10893  {
10894  return res;
10895  }
10896 
10897  res = m_pMetadata->CheckCorruption(pData);
10898 
10899  Unmap(hAllocator, 1);
10900 
10901  return res;
10902 }
10903 
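// Note: mapping is reference-counted per block - vkMapMemory is called only when
// the count goes from 0 to nonzero, and vkUnmapMemory only when it returns to 0,
// all under m_Mutex (when the allocator uses a mutex) so concurrent Map/Unmap
// calls on the same block are safe.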
10904 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10905 {
10906  if(count == 0)
10907  {
10908  return VK_SUCCESS;
10909  }
10910 
10911  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10912  if(m_MapCount != 0)
10913  {
10914  m_MapCount += count;
10915  VMA_ASSERT(m_pMappedData != VMA_NULL);
10916  if(ppData != VMA_NULL)
10917  {
10918  *ppData = m_pMappedData;
10919  }
10920  return VK_SUCCESS;
10921  }
10922  else
10923  {
10924  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10925  hAllocator->m_hDevice,
10926  m_hMemory,
10927  0, // offset
10928  VK_WHOLE_SIZE,
10929  0, // flags
10930  &m_pMappedData);
10931  if(result == VK_SUCCESS)
10932  {
10933  if(ppData != VMA_NULL)
10934  {
10935  *ppData = m_pMappedData;
10936  }
10937  m_MapCount = count;
10938  }
10939  return result;
10940  }
10941 }
10942 
10943 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10944 {
10945  if(count == 0)
10946  {
10947  return;
10948  }
10949 
10950  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10951  if(m_MapCount >= count)
10952  {
10953  m_MapCount -= count;
10954  if(m_MapCount == 0)
10955  {
10956  m_pMappedData = VMA_NULL;
10957  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10958  }
10959  }
10960  else
10961  {
10962  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10963  }
10964 }
10965 
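// Note: with VMA_DEBUG_MARGIN > 0 and VMA_DEBUG_DETECT_CORRUPTION enabled, a known
// magic pattern is written into the margin just before and just after each
// allocation; CheckCorruption() / ValidateMagicValueAroundAllocation() later verify
// that the pattern is intact. The margin must be a multiple of 4 bytes (see the
// asserts below).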
10966 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10967 {
10968  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10969  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10970 
10971  void* pData;
10972  VkResult res = Map(hAllocator, 1, &pData);
10973  if(res != VK_SUCCESS)
10974  {
10975  return res;
10976  }
10977 
10978  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10979  VmaWriteMagicValue(pData, allocOffset + allocSize);
10980 
10981  Unmap(hAllocator, 1);
10982 
10983  return VK_SUCCESS;
10984 }
10985 
10986 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10987 {
10988  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10989  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10990 
10991  void* pData;
10992  VkResult res = Map(hAllocator, 1, &pData);
10993  if(res != VK_SUCCESS)
10994  {
10995  return res;
10996  }
10997 
10998  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10999  {
11000  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11001  }
11002  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11003  {
11004  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11005  }
11006 
11007  Unmap(hAllocator, 1);
11008 
11009  return VK_SUCCESS;
11010 }
11011 
11012 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11013  const VmaAllocator hAllocator,
11014  const VmaAllocation hAllocation,
11015  VkBuffer hBuffer)
11016 {
11017  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11018  hAllocation->GetBlock() == this);
11019  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11020  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11021  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11022  hAllocator->m_hDevice,
11023  hBuffer,
11024  m_hMemory,
11025  hAllocation->GetOffset());
11026 }
11027 
11028 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11029  const VmaAllocator hAllocator,
11030  const VmaAllocation hAllocation,
11031  VkImage hImage)
11032 {
11033  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11034  hAllocation->GetBlock() == this);
11035  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11036  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11037  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11038  hAllocator->m_hDevice,
11039  hImage,
11040  m_hMemory,
11041  hAllocation->GetOffset());
11042 }
11043 
11044 static void InitStatInfo(VmaStatInfo& outInfo)
11045 {
11046  memset(&outInfo, 0, sizeof(outInfo));
11047  outInfo.allocationSizeMin = UINT64_MAX;
11048  outInfo.unusedRangeSizeMin = UINT64_MAX;
11049 }
11050 
11051 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11052 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11053 {
11054  inoutInfo.blockCount += srcInfo.blockCount;
11055  inoutInfo.allocationCount += srcInfo.allocationCount;
11056  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11057  inoutInfo.usedBytes += srcInfo.usedBytes;
11058  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11059  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11060  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11061  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11062  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11063 }
11064 
11065 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11066 {
11067  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11068  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11069  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11070  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11071 }
11072 
11073 VmaPool_T::VmaPool_T(
11074  VmaAllocator hAllocator,
11075  const VmaPoolCreateInfo& createInfo,
11076  VkDeviceSize preferredBlockSize) :
11077  m_BlockVector(
11078  hAllocator,
11079  createInfo.memoryTypeIndex,
11080  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11081  createInfo.minBlockCount,
11082  createInfo.maxBlockCount,
11083  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11084  createInfo.frameInUseCount,
11085  true, // isCustomPool
11086  createInfo.blockSize != 0, // explicitBlockSize
11087  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11088  m_Id(0)
11089 {
11090 }
11091 
11092 VmaPool_T::~VmaPool_T()
11093 {
11094 }
11095 
11096 #if VMA_STATS_STRING_ENABLED
11097 
11098 #endif // #if VMA_STATS_STRING_ENABLED
11099 
11100 VmaBlockVector::VmaBlockVector(
11101  VmaAllocator hAllocator,
11102  uint32_t memoryTypeIndex,
11103  VkDeviceSize preferredBlockSize,
11104  size_t minBlockCount,
11105  size_t maxBlockCount,
11106  VkDeviceSize bufferImageGranularity,
11107  uint32_t frameInUseCount,
11108  bool isCustomPool,
11109  bool explicitBlockSize,
11110  uint32_t algorithm) :
11111  m_hAllocator(hAllocator),
11112  m_MemoryTypeIndex(memoryTypeIndex),
11113  m_PreferredBlockSize(preferredBlockSize),
11114  m_MinBlockCount(minBlockCount),
11115  m_MaxBlockCount(maxBlockCount),
11116  m_BufferImageGranularity(bufferImageGranularity),
11117  m_FrameInUseCount(frameInUseCount),
11118  m_IsCustomPool(isCustomPool),
11119  m_ExplicitBlockSize(explicitBlockSize),
11120  m_Algorithm(algorithm),
11121  m_HasEmptyBlock(false),
11122  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11123  m_NextBlockId(0)
11124 {
11125 }
11126 
11127 VmaBlockVector::~VmaBlockVector()
11128 {
11129  for(size_t i = m_Blocks.size(); i--; )
11130  {
11131  m_Blocks[i]->Destroy(m_hAllocator);
11132  vma_delete(m_hAllocator, m_Blocks[i]);
11133  }
11134 }
11135 
11136 VkResult VmaBlockVector::CreateMinBlocks()
11137 {
11138  for(size_t i = 0; i < m_MinBlockCount; ++i)
11139  {
11140  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11141  if(res != VK_SUCCESS)
11142  {
11143  return res;
11144  }
11145  }
11146  return VK_SUCCESS;
11147 }
11148 
11149 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11150 {
11151  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11152 
11153  const size_t blockCount = m_Blocks.size();
11154 
11155  pStats->size = 0;
11156  pStats->unusedSize = 0;
11157  pStats->allocationCount = 0;
11158  pStats->unusedRangeCount = 0;
11159  pStats->unusedRangeSizeMax = 0;
11160  pStats->blockCount = blockCount;
11161 
11162  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11163  {
11164  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11165  VMA_ASSERT(pBlock);
11166  VMA_HEAVY_ASSERT(pBlock->Validate());
11167  pBlock->m_pMetadata->AddPoolStats(*pStats);
11168  }
11169 }
11170 
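// Note: corruption detection requires a memory type with HOST_VISIBLE and
// HOST_COHERENT flags, because the magic values in the margins are written and
// validated by the CPU through a temporary mapping of the block.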
11171 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11172 {
11173  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11174  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11175  (VMA_DEBUG_MARGIN > 0) &&
11176  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11177 }
11178 
11179 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11180 
11181 VkResult VmaBlockVector::Allocate(
11182  VmaPool hCurrentPool,
11183  uint32_t currentFrameIndex,
11184  VkDeviceSize size,
11185  VkDeviceSize alignment,
11186  const VmaAllocationCreateInfo& createInfo,
11187  VmaSuballocationType suballocType,
11188  VmaAllocation* pAllocation)
11189 {
11190  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11191  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11192  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11193  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11194  const bool canCreateNewBlock =
11195  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11196  (m_Blocks.size() < m_MaxBlockCount);
11197  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11198 
11199  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11200  // Which in turn is available only when maxBlockCount = 1.
11201  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11202  {
11203  canMakeOtherLost = false;
11204  }
11205 
11206  // Upper address can only be used with linear allocator and within single memory block.
11207  if(isUpperAddress &&
11208  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11209  {
11210  return VK_ERROR_FEATURE_NOT_PRESENT;
11211  }
11212 
11213  // Validate strategy.
11214  switch(strategy)
11215  {
11216  case 0:
11217  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11218  break;
11219  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11220  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11221  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11222  break;
11223  default:
11224  return VK_ERROR_FEATURE_NOT_PRESENT;
11225  }
11226 
11227  // Early reject: requested allocation size is larger than maximum block size for this block vector.
11228  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11229  {
11230  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11231  }
11232 
11233  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11234 
11235  /*
11236  Under certain conditions, this whole section can be skipped for optimization, so
11237  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11238  e.g. for custom pools with linear algorithm.
11239  */
11240  if(!canMakeOtherLost || canCreateNewBlock)
11241  {
11242  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11243  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11244  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11245 
11246  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11247  {
11248  // Use only last block.
11249  if(!m_Blocks.empty())
11250  {
11251  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11252  VMA_ASSERT(pCurrBlock);
11253  VkResult res = AllocateFromBlock(
11254  pCurrBlock,
11255  hCurrentPool,
11256  currentFrameIndex,
11257  size,
11258  alignment,
11259  allocFlagsCopy,
11260  createInfo.pUserData,
11261  suballocType,
11262  strategy,
11263  pAllocation);
11264  if(res == VK_SUCCESS)
11265  {
11266  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11267  return VK_SUCCESS;
11268  }
11269  }
11270  }
11271  else
11272  {
11273  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11274  {
11275  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11276  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11277  {
11278  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11279  VMA_ASSERT(pCurrBlock);
11280  VkResult res = AllocateFromBlock(
11281  pCurrBlock,
11282  hCurrentPool,
11283  currentFrameIndex,
11284  size,
11285  alignment,
11286  allocFlagsCopy,
11287  createInfo.pUserData,
11288  suballocType,
11289  strategy,
11290  pAllocation);
11291  if(res == VK_SUCCESS)
11292  {
11293  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11294  return VK_SUCCESS;
11295  }
11296  }
11297  }
11298  else // WORST_FIT, FIRST_FIT
11299  {
11300  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11301  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11302  {
11303  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11304  VMA_ASSERT(pCurrBlock);
11305  VkResult res = AllocateFromBlock(
11306  pCurrBlock,
11307  hCurrentPool,
11308  currentFrameIndex,
11309  size,
11310  alignment,
11311  allocFlagsCopy,
11312  createInfo.pUserData,
11313  suballocType,
11314  strategy,
11315  pAllocation);
11316  if(res == VK_SUCCESS)
11317  {
11318  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11319  return VK_SUCCESS;
11320  }
11321  }
11322  }
11323  }
11324 
11325  // 2. Try to create new block.
11326  if(canCreateNewBlock)
11327  {
11328  // Calculate optimal size for new block.
11329  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11330  uint32_t newBlockSizeShift = 0;
11331  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11332 
11333  if(!m_ExplicitBlockSize)
11334  {
11335  // Allocate 1/8, 1/4, 1/2 as first blocks.
11336  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11337  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11338  {
11339  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11340  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11341  {
11342  newBlockSize = smallerNewBlockSize;
11343  ++newBlockSizeShift;
11344  }
11345  else
11346  {
11347  break;
11348  }
11349  }
11350  }
11351 
11352  size_t newBlockIndex = 0;
11353  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11354  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11355  if(!m_ExplicitBlockSize)
11356  {
11357  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11358  {
11359  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11360  if(smallerNewBlockSize >= size)
11361  {
11362  newBlockSize = smallerNewBlockSize;
11363  ++newBlockSizeShift;
11364  res = CreateBlock(newBlockSize, &newBlockIndex);
11365  }
11366  else
11367  {
11368  break;
11369  }
11370  }
11371  }
11372 
11373  if(res == VK_SUCCESS)
11374  {
11375  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11376  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11377 
11378  res = AllocateFromBlock(
11379  pBlock,
11380  hCurrentPool,
11381  currentFrameIndex,
11382  size,
11383  alignment,
11384  allocFlagsCopy,
11385  createInfo.pUserData,
11386  suballocType,
11387  strategy,
11388  pAllocation);
11389  if(res == VK_SUCCESS)
11390  {
11391  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11392  return VK_SUCCESS;
11393  }
11394  else
11395  {
11396  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11397  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11398  }
11399  }
11400  }
11401  }
11402 
11403  // 3. Try to allocate from existing blocks with making other allocations lost.
11404  if(canMakeOtherLost)
11405  {
11406  uint32_t tryIndex = 0;
11407  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11408  {
11409  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11410  VmaAllocationRequest bestRequest = {};
11411  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11412 
11413  // 1. Search existing allocations.
11414  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11415  {
11416  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11417  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11418  {
11419  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11420  VMA_ASSERT(pCurrBlock);
11421  VmaAllocationRequest currRequest = {};
11422  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11423  currentFrameIndex,
11424  m_FrameInUseCount,
11425  m_BufferImageGranularity,
11426  size,
11427  alignment,
11428  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11429  suballocType,
11430  canMakeOtherLost,
11431  strategy,
11432  &currRequest))
11433  {
11434  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11435  if(pBestRequestBlock == VMA_NULL ||
11436  currRequestCost < bestRequestCost)
11437  {
11438  pBestRequestBlock = pCurrBlock;
11439  bestRequest = currRequest;
11440  bestRequestCost = currRequestCost;
11441 
11442  if(bestRequestCost == 0)
11443  {
11444  break;
11445  }
11446  }
11447  }
11448  }
11449  }
11450  else // WORST_FIT, FIRST_FIT
11451  {
11452  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11453  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11454  {
11455  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11456  VMA_ASSERT(pCurrBlock);
11457  VmaAllocationRequest currRequest = {};
11458  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11459  currentFrameIndex,
11460  m_FrameInUseCount,
11461  m_BufferImageGranularity,
11462  size,
11463  alignment,
11464  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11465  suballocType,
11466  canMakeOtherLost,
11467  strategy,
11468  &currRequest))
11469  {
11470  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11471  if(pBestRequestBlock == VMA_NULL ||
11472  currRequestCost < bestRequestCost ||
11473  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11474  {
11475  pBestRequestBlock = pCurrBlock;
11476  bestRequest = currRequest;
11477  bestRequestCost = currRequestCost;
11478 
11479  if(bestRequestCost == 0 ||
11480  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11481  {
11482  break;
11483  }
11484  }
11485  }
11486  }
11487  }
11488 
11489  if(pBestRequestBlock != VMA_NULL)
11490  {
11491  if(mapped)
11492  {
11493  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11494  if(res != VK_SUCCESS)
11495  {
11496  return res;
11497  }
11498  }
11499 
11500  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11501  currentFrameIndex,
11502  m_FrameInUseCount,
11503  &bestRequest))
11504  {
11505  // We no longer have an empty block.
11506  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11507  {
11508  m_HasEmptyBlock = false;
11509  }
11510  // Allocate from this pBlock.
11511  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11512  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11513  (*pAllocation)->InitBlockAllocation(
11514  hCurrentPool,
11515  pBestRequestBlock,
11516  bestRequest.offset,
11517  alignment,
11518  size,
11519  suballocType,
11520  mapped,
11521  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11522  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11523  VMA_DEBUG_LOG(" Returned from existing block");
11524  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11525  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11526  {
11527  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11528  }
11529  if(IsCorruptionDetectionEnabled())
11530  {
11531  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11532  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11533  }
11534  return VK_SUCCESS;
11535  }
11536  // else: Some allocations must have been touched while we are here. Next try.
11537  }
11538  else
11539  {
11540  // Could not find place in any of the blocks - break outer loop.
11541  break;
11542  }
11543  }
11544  /* Maximum number of tries exceeded - a very unlikely event that happens when many other
11545  threads are simultaneously touching allocations, making it impossible to make them
11546  lost at the same time as we try to allocate. */
11547  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11548  {
11549  return VK_ERROR_TOO_MANY_OBJECTS;
11550  }
11551  }
11552 
11553  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11554 }
11555 
11556 void VmaBlockVector::Free(
11557  VmaAllocation hAllocation)
11558 {
11559  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11560 
11561  // Scope for lock.
11562  {
11563  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11564 
11565  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11566 
11567  if(IsCorruptionDetectionEnabled())
11568  {
11569  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11570  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11571  }
11572 
11573  if(hAllocation->IsPersistentMap())
11574  {
11575  pBlock->Unmap(m_hAllocator, 1);
11576  }
11577 
11578  pBlock->m_pMetadata->Free(hAllocation);
11579  VMA_HEAVY_ASSERT(pBlock->Validate());
11580 
11581  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11582 
11583  // pBlock became empty after this deallocation.
11584  if(pBlock->m_pMetadata->IsEmpty())
11585  {
11586  // We already have an empty block - we don't want two, so delete this one.
11587  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11588  {
11589  pBlockToDelete = pBlock;
11590  Remove(pBlock);
11591  }
11592  // We now have first empty block.
11593  else
11594  {
11595  m_HasEmptyBlock = true;
11596  }
11597  }
11598  // pBlock didn't become empty, but we have another empty block - find and free that one.
11599  // (This is optional, heuristics.)
11600  else if(m_HasEmptyBlock)
11601  {
11602  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11603  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11604  {
11605  pBlockToDelete = pLastBlock;
11606  m_Blocks.pop_back();
11607  m_HasEmptyBlock = false;
11608  }
11609  }
11610 
11611  IncrementallySortBlocks();
11612  }
11613 
11614  // Destruction of a free block. Deferred until this point, outside of the mutex
11615  // lock, for performance reasons.
11616  if(pBlockToDelete != VMA_NULL)
11617  {
11618  VMA_DEBUG_LOG(" Deleted empty allocation");
11619  pBlockToDelete->Destroy(m_hAllocator);
11620  vma_delete(m_hAllocator, pBlockToDelete);
11621  }
11622 }
11623 
11624 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11625 {
11626  VkDeviceSize result = 0;
11627  for(size_t i = m_Blocks.size(); i--; )
11628  {
11629  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11630  if(result >= m_PreferredBlockSize)
11631  {
11632  break;
11633  }
11634  }
11635  return result;
11636 }
11637 
11638 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11639 {
11640  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11641  {
11642  if(m_Blocks[blockIndex] == pBlock)
11643  {
11644  VmaVectorRemove(m_Blocks, blockIndex);
11645  return;
11646  }
11647  }
11648  VMA_ASSERT(0);
11649 }
11650 
11651 void VmaBlockVector::IncrementallySortBlocks()
11652 {
11653  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11654  {
11655  // Bubble sort only until first swap.
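 // One swap per call is enough: repeated calls keep m_Blocks approximately sorted by
 // ascending free space, so the forward (best-fit) search tries the fullest blocks first.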
11656  for(size_t i = 1; i < m_Blocks.size(); ++i)
11657  {
11658  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11659  {
11660  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11661  return;
11662  }
11663  }
11664  }
11665 }
11666 
11667 VkResult VmaBlockVector::AllocateFromBlock(
11668  VmaDeviceMemoryBlock* pBlock,
11669  VmaPool hCurrentPool,
11670  uint32_t currentFrameIndex,
11671  VkDeviceSize size,
11672  VkDeviceSize alignment,
11673  VmaAllocationCreateFlags allocFlags,
11674  void* pUserData,
11675  VmaSuballocationType suballocType,
11676  uint32_t strategy,
11677  VmaAllocation* pAllocation)
11678 {
11679  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
11680  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11681  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11682  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11683 
11684  VmaAllocationRequest currRequest = {};
11685  if(pBlock->m_pMetadata->CreateAllocationRequest(
11686  currentFrameIndex,
11687  m_FrameInUseCount,
11688  m_BufferImageGranularity,
11689  size,
11690  alignment,
11691  isUpperAddress,
11692  suballocType,
11693  false, // canMakeOtherLost
11694  strategy,
11695  &currRequest))
11696  {
11697  // Allocate from pCurrBlock.
11698  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
11699 
11700  if(mapped)
11701  {
11702  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
11703  if(res != VK_SUCCESS)
11704  {
11705  return res;
11706  }
11707  }
11708 
11709  // We no longer have an empty block.
11710  if(pBlock->m_pMetadata->IsEmpty())
11711  {
11712  m_HasEmptyBlock = false;
11713  }
11714 
11715  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11716  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
11717  (*pAllocation)->InitBlockAllocation(
11718  hCurrentPool,
11719  pBlock,
11720  currRequest.offset,
11721  alignment,
11722  size,
11723  suballocType,
11724  mapped,
11725  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11726  VMA_HEAVY_ASSERT(pBlock->Validate());
11727  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
11728  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11729  {
11730  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11731  }
11732  if(IsCorruptionDetectionEnabled())
11733  {
11734  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
11735  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11736  }
11737  return VK_SUCCESS;
11738  }
11739  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11740 }
11741 
11742 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
11743 {
11744  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11745  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
11746  allocInfo.allocationSize = blockSize;
11747  VkDeviceMemory mem = VK_NULL_HANDLE;
11748  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
11749  if(res < 0)
11750  {
11751  return res;
11752  }
11753 
11754  // New VkDeviceMemory successfully created.
11755 
11756  // Create new Allocation for it.
11757  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11758  pBlock->Init(
11759  m_hAllocator,
11760  m_MemoryTypeIndex,
11761  mem,
11762  allocInfo.allocationSize,
11763  m_NextBlockId++,
11764  m_Algorithm);
11765 
11766  m_Blocks.push_back(pBlock);
11767  if(pNewBlockIndex != VMA_NULL)
11768  {
11769  *pNewBlockIndex = m_Blocks.size() - 1;
11770  }
11771 
11772  return VK_SUCCESS;
11773 }
11774 
11775 void VmaBlockVector::ApplyDefragmentationMovesCpu(
11776  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11777  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
11778 {
11779  const size_t blockCount = m_Blocks.size();
11780  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
11781 
11782  enum BLOCK_FLAG
11783  {
11784  BLOCK_FLAG_USED = 0x00000001,
11785  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
11786  };
11787 
11788  struct BlockInfo
11789  {
11790  uint32_t flags;
11791  void* pMappedData;
11792  };
11793  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
11794  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
11795  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
11796 
11797  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11798  const size_t moveCount = moves.size();
11799  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11800  {
11801  const VmaDefragmentationMove& move = moves[moveIndex];
11802  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
11803  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
11804  }
11805 
11806  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
11807 
11808  // Go over all blocks. Get mapped pointer or map if necessary.
11809  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
11810  {
11811  BlockInfo& currBlockInfo = blockInfo[blockIndex];
11812  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11813  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
11814  {
11815  currBlockInfo.pMappedData = pBlock->GetMappedData();
11816  // It is not originally mapped - map it.
11817  if(currBlockInfo.pMappedData == VMA_NULL)
11818  {
11819  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
11820  if(pDefragCtx->res == VK_SUCCESS)
11821  {
11822  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
11823  }
11824  }
11825  }
11826  }
11827 
11828  // Go over all moves. Do actual data transfer.
11829  if(pDefragCtx->res == VK_SUCCESS)
11830  {
11831  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
11832  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
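 // For memory types without HOST_COHERENT, mapped ranges must be invalidated before the
 // CPU reads them and flushed after the CPU writes them, aligned to nonCoherentAtomSize.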
11833 
11834  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11835  {
11836  const VmaDefragmentationMove& move = moves[moveIndex];
11837 
11838  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
11839  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
11840 
11841  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
11842 
11843  // Invalidate source.
11844  if(isNonCoherent)
11845  {
11846  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
11847  memRange.memory = pSrcBlock->GetDeviceMemory();
11848  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
11849  memRange.size = VMA_MIN(
11850  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
11851  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
11852  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
11853  }
11854 
11855  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11856  memmove(
11857  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
11858  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
11859  static_cast<size_t>(move.size));
11860 
11861  if(IsCorruptionDetectionEnabled())
11862  {
11863  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
11864  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
11865  }
11866 
11867  // Flush destination.
11868  if(isNonCoherent)
11869  {
11870  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
11871  memRange.memory = pDstBlock->GetDeviceMemory();
11872  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
11873  memRange.size = VMA_MIN(
11874  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
11875  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
11876  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
11877  }
11878  }
11879  }
11880 
11881  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
11882  // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
11883  for(size_t blockIndex = blockCount; blockIndex--; )
11884  {
11885  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
11886  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
11887  {
11888  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11889  pBlock->Unmap(m_hAllocator, 1);
11890  }
11891  }
11892 }
11893 
11894 void VmaBlockVector::ApplyDefragmentationMovesGpu(
11895  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11896  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
11897  VkCommandBuffer commandBuffer)
11898 {
11899  const size_t blockCount = m_Blocks.size();
11900 
11901  pDefragCtx->blockContexts.resize(blockCount);
11902  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
11903 
11904  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11905  const size_t moveCount = moves.size();
11906  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11907  {
11908  const VmaDefragmentationMove& move = moves[moveIndex];
11909  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
11910  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
11911  }
11912 
11913  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
11914 
11915  // Go over all blocks. Create and bind buffer for whole block if necessary.
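 // Each temporary buffer is bound to an entire VkDeviceMemory block, so a single
 // vkCmdCopyBuffer below can move data between arbitrary offsets within and across blocks.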
11916  {
11917  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
11918  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
11919  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
11920 
11921  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
11922  {
11923  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
11924  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11925  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
11926  {
11927  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
11928  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
11929  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
11930  if(pDefragCtx->res == VK_SUCCESS)
11931  {
11932  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
11933  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
11934  }
11935  }
11936  }
11937  }
11938 
11939  // Go over all moves. Post data transfer commands to command buffer.
11940  if(pDefragCtx->res == VK_SUCCESS)
11941  {
11942  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
11943  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
11944 
11945  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11946  {
11947  const VmaDefragmentationMove& move = moves[moveIndex];
11948 
11949  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
11950  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
11951 
11952  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
11953 
11954  VkBufferCopy region = {
11955  move.srcOffset,
11956  move.dstOffset,
11957  move.size };
11958  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
11959  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
11960  }
11961  }
11962 
11963  // Buffers are kept in the defrag context for later destruction; mark the context as not ready until the copy commands have executed.
11964  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
11965  {
11966  pDefragCtx->res = VK_NOT_READY;
11967  }
11968 }
11969 
11970 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
11971 {
11972  m_HasEmptyBlock = false;
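 // Destroy every empty block above m_MinBlockCount; if empty blocks remain only because
 // of that minimum, remember the fact in m_HasEmptyBlock.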
11973  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11974  {
11975  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11976  if(pBlock->m_pMetadata->IsEmpty())
11977  {
11978  if(m_Blocks.size() > m_MinBlockCount)
11979  {
11980  if(pDefragmentationStats != VMA_NULL)
11981  {
11982  ++pDefragmentationStats->deviceMemoryBlocksFreed;
11983  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
11984  }
11985 
11986  VmaVectorRemove(m_Blocks, blockIndex);
11987  pBlock->Destroy(m_hAllocator);
11988  vma_delete(m_hAllocator, pBlock);
11989  }
11990  else
11991  {
11992  m_HasEmptyBlock = true;
11993  }
11994  }
11995  }
11996 }
11997 
11998 #if VMA_STATS_STRING_ENABLED
11999 
12000 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12001 {
12002  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12003 
12004  json.BeginObject();
12005 
12006  if(m_IsCustomPool)
12007  {
12008  json.WriteString("MemoryTypeIndex");
12009  json.WriteNumber(m_MemoryTypeIndex);
12010 
12011  json.WriteString("BlockSize");
12012  json.WriteNumber(m_PreferredBlockSize);
12013 
12014  json.WriteString("BlockCount");
12015  json.BeginObject(true);
12016  if(m_MinBlockCount > 0)
12017  {
12018  json.WriteString("Min");
12019  json.WriteNumber((uint64_t)m_MinBlockCount);
12020  }
12021  if(m_MaxBlockCount < SIZE_MAX)
12022  {
12023  json.WriteString("Max");
12024  json.WriteNumber((uint64_t)m_MaxBlockCount);
12025  }
12026  json.WriteString("Cur");
12027  json.WriteNumber((uint64_t)m_Blocks.size());
12028  json.EndObject();
12029 
12030  if(m_FrameInUseCount > 0)
12031  {
12032  json.WriteString("FrameInUseCount");
12033  json.WriteNumber(m_FrameInUseCount);
12034  }
12035 
12036  if(m_Algorithm != 0)
12037  {
12038  json.WriteString("Algorithm");
12039  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12040  }
12041  }
12042  else
12043  {
12044  json.WriteString("PreferredBlockSize");
12045  json.WriteNumber(m_PreferredBlockSize);
12046  }
12047 
12048  json.WriteString("Blocks");
12049  json.BeginObject();
12050  for(size_t i = 0; i < m_Blocks.size(); ++i)
12051  {
12052  json.BeginString();
12053  json.ContinueString(m_Blocks[i]->GetId());
12054  json.EndString();
12055 
12056  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12057  }
12058  json.EndObject();
12059 
12060  json.EndObject();
12061 }
12062 
12063 #endif // #if VMA_STATS_STRING_ENABLED
12064 
12065 void VmaBlockVector::Defragment(
12066  class VmaBlockVectorDefragmentationContext* pCtx,
12067  VmaDefragmentationStats* pStats,
12068  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12069  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12070  VkCommandBuffer commandBuffer)
12071 {
12072  pCtx->res = VK_SUCCESS;
12073 
12074  const VkMemoryPropertyFlags memPropFlags =
12075  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12076  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12077  (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12078  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0;
12079 
12080  // There are options to defragment this memory type.
12081  if(canDefragmentOnCpu || canDefragmentOnGpu)
12082  {
12083  bool defragmentOnGpu;
12084  // There is only one option to defragment this memory type.
12085  if(canDefragmentOnGpu != canDefragmentOnCpu)
12086  {
12087  defragmentOnGpu = canDefragmentOnGpu;
12088  }
12089  // Both options are available: Heuristics to choose the best one.
12090  else
12091  {
12092  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12093  m_hAllocator->IsIntegratedGpu();
12094  }
12095 
12096  bool overlappingMoveSupported = !defragmentOnGpu;
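 // CPU defragmentation uses memmove(), which tolerates overlapping source and destination
 // ranges; GPU defragmentation records vkCmdCopyBuffer, whose copy regions must not overlap.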
12097 
12098  if(m_hAllocator->m_UseMutex)
12099  {
12100  m_Mutex.LockWrite();
12101  pCtx->mutexLocked = true;
12102  }
12103 
12104  pCtx->Begin(overlappingMoveSupported);
12105 
12106  // Defragment.
12107 
12108  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12109  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12110  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12111  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12112  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12113 
12114  // Accumulate statistics.
12115  if(pStats != VMA_NULL)
12116  {
12117  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12118  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12119  pStats->bytesMoved += bytesMoved;
12120  pStats->allocationsMoved += allocationsMoved;
12121  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12122  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12123  if(defragmentOnGpu)
12124  {
12125  maxGpuBytesToMove -= bytesMoved;
12126  maxGpuAllocationsToMove -= allocationsMoved;
12127  }
12128  else
12129  {
12130  maxCpuBytesToMove -= bytesMoved;
12131  maxCpuAllocationsToMove -= allocationsMoved;
12132  }
12133  }
12134 
12135  if(pCtx->res >= VK_SUCCESS)
12136  {
12137  if(defragmentOnGpu)
12138  {
12139  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12140  }
12141  else
12142  {
12143  ApplyDefragmentationMovesCpu(pCtx, moves);
12144  }
12145  }
12146  }
12147 }
12148 
12149 void VmaBlockVector::DefragmentationEnd(
12150  class VmaBlockVectorDefragmentationContext* pCtx,
12151  VmaDefragmentationStats* pStats)
12152 {
12153  // Destroy buffers.
12154  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12155  {
12156  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12157  if(blockCtx.hBuffer)
12158  {
12159  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12160  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12161  }
12162  }
12163 
12164  if(pCtx->res >= VK_SUCCESS)
12165  {
12166  FreeEmptyBlocks(pStats);
12167  }
12168 
12169  if(pCtx->mutexLocked)
12170  {
12171  VMA_ASSERT(m_hAllocator->m_UseMutex);
12172  m_Mutex.UnlockWrite();
12173  }
12174 }
12175 
12176 size_t VmaBlockVector::CalcAllocationCount() const
12177 {
12178  size_t result = 0;
12179  for(size_t i = 0; i < m_Blocks.size(); ++i)
12180  {
12181  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12182  }
12183  return result;
12184 }
12185 
12186 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12187 {
12188  if(m_BufferImageGranularity == 1)
12189  {
12190  return false;
12191  }
12192  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12193  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12194  {
12195  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12196  VMA_ASSERT(m_Algorithm == 0);
12197  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12198  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12199  {
12200  return true;
12201  }
12202  }
12203  return false;
12204 }
12205 
12206 void VmaBlockVector::MakePoolAllocationsLost(
12207  uint32_t currentFrameIndex,
12208  size_t* pLostAllocationCount)
12209 {
12210  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12211  size_t lostAllocationCount = 0;
12212  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12213  {
12214  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12215  VMA_ASSERT(pBlock);
12216  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12217  }
12218  if(pLostAllocationCount != VMA_NULL)
12219  {
12220  *pLostAllocationCount = lostAllocationCount;
12221  }
12222 }
12223 
12224 VkResult VmaBlockVector::CheckCorruption()
12225 {
12226  if(!IsCorruptionDetectionEnabled())
12227  {
12228  return VK_ERROR_FEATURE_NOT_PRESENT;
12229  }
12230 
12231  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12232  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12233  {
12234  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12235  VMA_ASSERT(pBlock);
12236  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12237  if(res != VK_SUCCESS)
12238  {
12239  return res;
12240  }
12241  }
12242  return VK_SUCCESS;
12243 }
12244 
12245 void VmaBlockVector::AddStats(VmaStats* pStats)
12246 {
12247  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12248  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12249 
12250  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12251 
12252  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12253  {
12254  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12255  VMA_ASSERT(pBlock);
12256  VMA_HEAVY_ASSERT(pBlock->Validate());
12257  VmaStatInfo allocationStatInfo;
12258  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12259  VmaAddStatInfo(pStats->total, allocationStatInfo);
12260  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12261  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12262  }
12263 }
12264 
12265 ////////////////////////////////////////////////////////////////////////////////
12266 // VmaDefragmentationAlgorithm_Generic members definition
12267 
12268 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12269  VmaAllocator hAllocator,
12270  VmaBlockVector* pBlockVector,
12271  uint32_t currentFrameIndex,
12272  bool overlappingMoveSupported) :
12273  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12274  m_AllAllocations(false),
12275  m_AllocationCount(0),
12276  m_BytesMoved(0),
12277  m_AllocationsMoved(0),
12278  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12279 {
12280  // Create block info for each block.
12281  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12282  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12283  {
12284  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12285  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12286  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12287  m_Blocks.push_back(pBlockInfo);
12288  }
12289 
12290  // Sort them by m_pBlock pointer value.
12291  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12292 }
12293 
12294 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12295 {
12296  for(size_t i = m_Blocks.size(); i--; )
12297  {
12298  vma_delete(m_hAllocator, m_Blocks[i]);
12299  }
12300 }
12301 
12302 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12303 {
12304  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
12305  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12306  {
12307  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12308  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12309  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12310  {
12311  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12312  (*it)->m_Allocations.push_back(allocInfo);
12313  }
12314  else
12315  {
12316  VMA_ASSERT(0);
12317  }
12318 
12319  ++m_AllocationCount;
12320  }
12321 }
12322 
12323 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12324  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12325  VkDeviceSize maxBytesToMove,
12326  uint32_t maxAllocationsToMove)
12327 {
12328  if(m_Blocks.empty())
12329  {
12330  return VK_SUCCESS;
12331  }
12332 
12333  // This is a choice based on research.
12334  // Option 1:
12335  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12336  // Option 2:
12337  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12338  // Option 3:
12339  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12340 
12341  size_t srcBlockMinIndex = 0;
12342  // When FAST_ALGORITHM is used, move allocations only from the last of the blocks that contain non-movable allocations.
12343  /*
12344  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12345  {
12346  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12347  if(blocksWithNonMovableCount > 0)
12348  {
12349  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12350  }
12351  }
12352  */
12353 
12354  size_t srcBlockIndex = m_Blocks.size() - 1;
12355  size_t srcAllocIndex = SIZE_MAX;
12356  for(;;)
12357  {
12358  // 1. Find next allocation to move.
12359  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12360  // 1.2. Then start from last to first m_Allocations.
12361  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12362  {
12363  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12364  {
12365  // Finished: no more allocations to process.
12366  if(srcBlockIndex == srcBlockMinIndex)
12367  {
12368  return VK_SUCCESS;
12369  }
12370  else
12371  {
12372  --srcBlockIndex;
12373  srcAllocIndex = SIZE_MAX;
12374  }
12375  }
12376  else
12377  {
12378  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12379  }
12380  }
12381 
12382  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12383  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12384 
12385  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12386  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12387  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12388  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12389 
12390  // 2. Try to find new place for this allocation in preceding or current block.
12391  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12392  {
12393  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12394  VmaAllocationRequest dstAllocRequest;
12395  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12396  m_CurrentFrameIndex,
12397  m_pBlockVector->GetFrameInUseCount(),
12398  m_pBlockVector->GetBufferImageGranularity(),
12399  size,
12400  alignment,
12401  false, // upperAddress
12402  suballocType,
12403  false, // canMakeOtherLost
12404  strategy,
12405  &dstAllocRequest) &&
12406  MoveMakesSense(
12407  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12408  {
12409  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12410 
12411  // Reached limit on number of allocations or bytes to move.
12412  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12413  (m_BytesMoved + size > maxBytesToMove))
12414  {
12415  return VK_SUCCESS;
12416  }
12417 
12418  VmaDefragmentationMove move;
12419  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12420  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12421  move.srcOffset = srcOffset;
12422  move.dstOffset = dstAllocRequest.offset;
12423  move.size = size;
12424  moves.push_back(move);
12425 
12426  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12427  dstAllocRequest,
12428  suballocType,
12429  size,
12430  false, // upperAddress
12431  allocInfo.m_hAllocation);
12432  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12433 
12434  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12435 
12436  if(allocInfo.m_pChanged != VMA_NULL)
12437  {
12438  *allocInfo.m_pChanged = VK_TRUE;
12439  }
12440 
12441  ++m_AllocationsMoved;
12442  m_BytesMoved += size;
12443 
12444  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12445 
12446  break;
12447  }
12448  }
12449 
12450  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12451 
12452  if(srcAllocIndex > 0)
12453  {
12454  --srcAllocIndex;
12455  }
12456  else
12457  {
12458  if(srcBlockIndex > 0)
12459  {
12460  --srcBlockIndex;
12461  srcAllocIndex = SIZE_MAX;
12462  }
12463  else
12464  {
12465  return VK_SUCCESS;
12466  }
12467  }
12468  }
12469 }
12470 
12471 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12472 {
12473  size_t result = 0;
12474  for(size_t i = 0; i < m_Blocks.size(); ++i)
12475  {
12476  if(m_Blocks[i]->m_HasNonMovableAllocations)
12477  {
12478  ++result;
12479  }
12480  }
12481  return result;
12482 }
12483 
12484 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12485  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12486  VkDeviceSize maxBytesToMove,
12487  uint32_t maxAllocationsToMove)
12488 {
12489  if(!m_AllAllocations && m_AllocationCount == 0)
12490  {
12491  return VK_SUCCESS;
12492  }
12493 
12494  const size_t blockCount = m_Blocks.size();
12495  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12496  {
12497  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12498 
12499  if(m_AllAllocations)
12500  {
12501  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12502  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12503  it != pMetadata->m_Suballocations.end();
12504  ++it)
12505  {
12506  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12507  {
12508  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12509  pBlockInfo->m_Allocations.push_back(allocInfo);
12510  }
12511  }
12512  }
12513 
12514  pBlockInfo->CalcHasNonMovableAllocations();
12515 
12516  // This is a choice based on research.
12517  // Option 1:
12518  pBlockInfo->SortAllocationsByOffsetDescending();
12519  // Option 2:
12520  //pBlockInfo->SortAllocationsBySizeDescending();
12521  }
12522 
12523  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12524  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12525 
12526  // This is a choice based on research.
12527  const uint32_t roundCount = 2;
12528 
12529  // Execute defragmentation rounds (the main part).
12530  VkResult result = VK_SUCCESS;
12531  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12532  {
12533  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12534  }
12535 
12536  return result;
12537 }
12538 
12539 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12540  size_t dstBlockIndex, VkDeviceSize dstOffset,
12541  size_t srcBlockIndex, VkDeviceSize srcOffset)
12542 {
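 // A move "makes sense" only if it brings the allocation closer to the front of the pool:
 // to an earlier block, or to a lower offset within the same block.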
12543  if(dstBlockIndex < srcBlockIndex)
12544  {
12545  return true;
12546  }
12547  if(dstBlockIndex > srcBlockIndex)
12548  {
12549  return false;
12550  }
12551  if(dstOffset < srcOffset)
12552  {
12553  return true;
12554  }
12555  return false;
12556 }
12557 
12558 ////////////////////////////////////////////////////////////////////////////////
12559 // VmaDefragmentationAlgorithm_Fast
12560 
12561 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12562  VmaAllocator hAllocator,
12563  VmaBlockVector* pBlockVector,
12564  uint32_t currentFrameIndex,
12565  bool overlappingMoveSupported) :
12566  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12567  m_OverlappingMoveSupported(overlappingMoveSupported),
12568  m_AllocationCount(0),
12569  m_AllAllocations(false),
12570  m_BytesMoved(0),
12571  m_AllocationsMoved(0),
12572  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12573 {
12574  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12575 
12576 }
12577 
12578 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12579 {
12580 }
12581 
12582 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12583  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12584  VkDeviceSize maxBytesToMove,
12585  uint32_t maxAllocationsToMove)
12586 {
12587  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12588 
12589  const size_t blockCount = m_pBlockVector->GetBlockCount();
12590  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12591  {
12592  return VK_SUCCESS;
12593  }
12594 
12595  PreprocessMetadata();
12596 
12597  // Sort blocks in order from most "destination" to most "source".
12598 
12599  m_BlockInfos.resize(blockCount);
12600  for(size_t i = 0; i < blockCount; ++i)
12601  {
12602  m_BlockInfos[i].origBlockIndex = i;
12603  }
12604 
12605  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12606  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12607  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12608  });
12609 
12610  // THE MAIN ALGORITHM
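 // Walk allocations starting from the most "source" blocks and pack them towards the front
 // of the most "destination" blocks, advancing a write cursor (dstOffset) and recording any
 // gaps that had to be skipped in freeSpaceDb so later allocations can fill them.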
12611 
12612  FreeSpaceDatabase freeSpaceDb;
12613 
12614  size_t dstBlockInfoIndex = 0;
12615  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12616  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12617  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12618  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12619  VkDeviceSize dstOffset = 0;
12620 
12621  bool end = false;
12622  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12623  {
12624  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12625  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12626  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12627  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12628  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12629  {
12630  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12631  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12632  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12633  if(m_AllocationsMoved == maxAllocationsToMove ||
12634  m_BytesMoved + srcAllocSize > maxBytesToMove)
12635  {
12636  end = true;
12637  break;
12638  }
12639  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12640 
12641  // Try to place it in one of free spaces from the database.
12642  size_t freeSpaceInfoIndex;
12643  VkDeviceSize dstAllocOffset;
12644  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12645  freeSpaceInfoIndex, dstAllocOffset))
12646  {
12647  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12648  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12649  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12650  VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
12651 
12652  // Same block
12653  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12654  {
12655  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12656 
12657  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12658 
12659  VmaSuballocation suballoc = *srcSuballocIt;
12660  suballoc.offset = dstAllocOffset;
12661  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12662  m_BytesMoved += srcAllocSize;
12663  ++m_AllocationsMoved;
12664 
12665  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12666  ++nextSuballocIt;
12667  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12668  srcSuballocIt = nextSuballocIt;
12669 
12670  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12671 
12672  VmaDefragmentationMove move = {
12673  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12674  srcAllocOffset, dstAllocOffset,
12675  srcAllocSize };
12676  moves.push_back(move);
12677  }
12678  // Different block
12679  else
12680  {
12681  // MOVE OPTION 2: Move the allocation to a different block.
12682 
12683  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
12684 
12685  VmaSuballocation suballoc = *srcSuballocIt;
12686  suballoc.offset = dstAllocOffset;
12687  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
12688  m_BytesMoved += srcAllocSize;
12689  ++m_AllocationsMoved;
12690 
12691  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12692  ++nextSuballocIt;
12693  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12694  srcSuballocIt = nextSuballocIt;
12695 
12696  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12697 
12698  VmaDefragmentationMove move = {
12699  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12700  srcAllocOffset, dstAllocOffset,
12701  srcAllocSize };
12702  moves.push_back(move);
12703  }
12704  }
12705  else
12706  {
12707  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
12708 
12709  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
12710  while(dstBlockInfoIndex < srcBlockInfoIndex &&
12711  dstAllocOffset + srcAllocSize > dstBlockSize)
12712  {
12713  // But before that, register remaining free space at the end of dst block.
12714  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
12715 
12716  ++dstBlockInfoIndex;
12717  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12718  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12719  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12720  dstBlockSize = pDstMetadata->GetSize();
12721  dstOffset = 0;
12722  dstAllocOffset = 0;
12723  }
12724 
12725  // Same block
12726  if(dstBlockInfoIndex == srcBlockInfoIndex)
12727  {
12728  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12729 
12730  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
12731 
12732  bool skipOver = overlap;
12733  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12734  {
12735  // If the destination and source places overlap, skip the move if it would
12736  // shift the allocation by less than 1/64 of its size.
12737  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
12738  }
12739 
12740  if(skipOver)
12741  {
12742  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
12743 
12744  dstOffset = srcAllocOffset + srcAllocSize;
12745  ++srcSuballocIt;
12746  }
12747  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12748  else
12749  {
12750  srcSuballocIt->offset = dstAllocOffset;
12751  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12752  dstOffset = dstAllocOffset + srcAllocSize;
12753  m_BytesMoved += srcAllocSize;
12754  ++m_AllocationsMoved;
12755  ++srcSuballocIt;
12756  VmaDefragmentationMove move = {
12757  srcOrigBlockIndex, dstOrigBlockIndex,
12758  srcAllocOffset, dstAllocOffset,
12759  srcAllocSize };
12760  moves.push_back(move);
12761  }
12762  }
12763  // Different block
12764  else
12765  {
12766  // MOVE OPTION 2: Move the allocation to a different block.
12767 
12768  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12769  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12770 
12771  VmaSuballocation suballoc = *srcSuballocIt;
12772  suballoc.offset = dstAllocOffset;
12773  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12774  dstOffset = dstAllocOffset + srcAllocSize;
12775  m_BytesMoved += srcAllocSize;
12776  ++m_AllocationsMoved;
12777 
12778  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12779  ++nextSuballocIt;
12780  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12781  srcSuballocIt = nextSuballocIt;
12782 
12783  pDstMetadata->m_Suballocations.push_back(suballoc);
12784 
12785  VmaDefragmentationMove move = {
12786  srcOrigBlockIndex, dstOrigBlockIndex,
12787  srcAllocOffset, dstAllocOffset,
12788  srcAllocSize };
12789  moves.push_back(move);
12790  }
12791  }
12792  }
12793  }
12794 
12795  m_BlockInfos.clear();
12796 
12797  PostprocessMetadata();
12798 
12799  return VK_SUCCESS;
12800 }
12801 
12802 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
12803 {
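 // Temporarily remove all FREE suballocations and reset the free-list statistics;
 // the compaction pass only tracks used suballocations, and PostprocessMetadata()
 // rebuilds consistent free ranges afterwards.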
12804  const size_t blockCount = m_pBlockVector->GetBlockCount();
12805  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12806  {
12807  VmaBlockMetadata_Generic* const pMetadata =
12808  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12809  pMetadata->m_FreeCount = 0;
12810  pMetadata->m_SumFreeSize = pMetadata->GetSize();
12811  pMetadata->m_FreeSuballocationsBySize.clear();
12812  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
12813  it != pMetadata->m_Suballocations.end(); )
12814  {
12815  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
12816  {
12817  VmaSuballocationList::iterator nextIt = it;
12818  ++nextIt;
12819  pMetadata->m_Suballocations.erase(it);
12820  it = nextIt;
12821  }
12822  else
12823  {
12824  ++it;
12825  }
12826  }
12827  }
12828 }
12829 
12830 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
12831 {
12832  const size_t blockCount = m_pBlockVector->GetBlockCount();
12833  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12834  {
12835  VmaBlockMetadata_Generic* const pMetadata =
12836  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12837  const VkDeviceSize blockSize = pMetadata->GetSize();
12838 
12839  // No allocations in this block - entire area is free.
12840  if(pMetadata->m_Suballocations.empty())
12841  {
12842  pMetadata->m_FreeCount = 1;
12843  //pMetadata->m_SumFreeSize is already set to blockSize.
12844  VmaSuballocation suballoc = {
12845  0, // offset
12846  blockSize, // size
12847  VMA_NULL, // hAllocation
12848  VMA_SUBALLOCATION_TYPE_FREE };
12849  pMetadata->m_Suballocations.push_back(suballoc);
12850  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
12851  }
12852  // There are some allocations in this block.
12853  else
12854  {
12855  VkDeviceSize offset = 0;
12856  VmaSuballocationList::iterator it;
12857  for(it = pMetadata->m_Suballocations.begin();
12858  it != pMetadata->m_Suballocations.end();
12859  ++it)
12860  {
12861  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
12862  VMA_ASSERT(it->offset >= offset);
12863 
12864  // Need to insert preceding free space.
12865  if(it->offset > offset)
12866  {
12867  ++pMetadata->m_FreeCount;
12868  const VkDeviceSize freeSize = it->offset - offset;
12869  VmaSuballocation suballoc = {
12870  offset, // offset
12871  freeSize, // size
12872  VMA_NULL, // hAllocation
12873  VMA_SUBALLOCATION_TYPE_FREE };
12874  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
12875  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
12876  {
12877  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
12878  }
12879  }
12880 
12881  pMetadata->m_SumFreeSize -= it->size;
12882  offset = it->offset + it->size;
12883  }
12884 
12885  // Need to insert trailing free space.
12886  if(offset < blockSize)
12887  {
12888  ++pMetadata->m_FreeCount;
12889  const VkDeviceSize freeSize = blockSize - offset;
12890  VmaSuballocation suballoc = {
12891  offset, // offset
12892  freeSize, // size
12893  VMA_NULL, // hAllocation
12894  VMA_SUBALLOCATION_TYPE_FREE };
12895  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
12896  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
12896  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
12898  {
12899  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
12900  }
12901  }
12902 
12903  VMA_SORT(
12904  pMetadata->m_FreeSuballocationsBySize.begin(),
12905  pMetadata->m_FreeSuballocationsBySize.end(),
12906  VmaSuballocationItemSizeLess());
12907  }
12908 
12909  VMA_HEAVY_ASSERT(pMetadata->Validate());
12910  }
12911 }
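// Illustrative walkthrough of PostprocessMetadata() above (not part of the library):
// assume a block of size 100 whose suballocations after the moves are
// {offset 0, size 30} and {offset 40, size 20}. The loop inserts a free suballocation
// {offset 30, size 10} before the second allocation and a trailing free suballocation
// {offset 60, size 40}, so m_FreeCount == 2 and m_SumFreeSize == 100 - 30 - 20 == 50;
// the free items large enough to register are then sorted by size in
// m_FreeSuballocationsBySize.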
12912 
12913 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
12914 {
12915  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
12916  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
12917  while(it != pMetadata->m_Suballocations.end() &&
12918  it->offset < suballoc.offset)
12919  {
12920  ++it;
12921  }
12924  pMetadata->m_Suballocations.insert(it, suballoc);
12925 }
12926 
12927 ////////////////////////////////////////////////////////////////////////////////
12928 // VmaBlockVectorDefragmentationContext
12929 
12930 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
12931  VmaAllocator hAllocator,
12932  VmaPool hCustomPool,
12933  VmaBlockVector* pBlockVector,
12934  uint32_t currFrameIndex,
12935  uint32_t algorithmFlags) :
12936  res(VK_SUCCESS),
12937  mutexLocked(false),
12938  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
12939  m_hAllocator(hAllocator),
12940  m_hCustomPool(hCustomPool),
12941  m_pBlockVector(pBlockVector),
12942  m_CurrFrameIndex(currFrameIndex),
12943  m_AlgorithmFlags(algorithmFlags),
12944  m_pAlgorithm(VMA_NULL),
12945  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
12946  m_AllAllocations(false)
12947 {
12948 }
12949 
12950 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
12951 {
12952  vma_delete(m_hAllocator, m_pAlgorithm);
12953 }
12954 
12955 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12956 {
12957  AllocInfo info = { hAlloc, pChanged };
12958  m_Allocations.push_back(info);
12959 }
12960 
12961 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
12962 {
12963  const bool allAllocations = m_AllAllocations ||
12964  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
12965 
12966  /********************************
12967  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
12968  ********************************/
12969 
12970  /*
12971  Fast algorithm is supported only when certain criteria are met:
12972  - VMA_DEBUG_MARGIN is 0.
12973  - All allocations in this block vector are moveable.
12974  - There is no possibility of image/buffer granularity conflict.
12975  */
12976  if(VMA_DEBUG_MARGIN == 0 &&
12977  allAllocations &&
12978  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
12979  {
12980  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
12981  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
12982  }
12983  else
12984  {
12985  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
12986  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
12987  }
12988 
12989  if(allAllocations)
12990  {
12991  m_pAlgorithm->AddAll();
12992  }
12993  else
12994  {
12995  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
12996  {
12997  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
12998  }
12999  }
13000 }
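// A minimal usage sketch (not part of the library) of the public API that ends up
// in Begin() above. The wrapper name ExampleDefragmentCpu is hypothetical; a valid
// VmaAllocator and an array of moveable allocations are assumed. No command buffer
// is passed, so only CPU-side moves are performed and the GPU limits are ignored.
static VkResult ExampleDefragmentCpu(VmaAllocator allocator, VmaAllocation* pAllocs, uint32_t allocCount)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = allocCount;
    info.pAllocations = pAllocs;
    info.pAllocationsChanged = VMA_NULL; // Optional per-allocation "was moved" flags.
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;
    // info.commandBuffer stays VK_NULL_HANDLE - see Defragment() below, which then
    // forces maxGpuBytesToMove and maxGpuAllocationsToMove to 0.

    VmaDefragmentationContext ctx = VMA_NULL;
    const VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
    if(ctx != VMA_NULL)
    {
        vmaDefragmentationEnd(allocator, ctx);
    }
    return res;
}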
13001 
13002 ////////////////////////////////////////////////////////////////////////////////
13003 // VmaDefragmentationContext
13004 
13005 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13006  VmaAllocator hAllocator,
13007  uint32_t currFrameIndex,
13008  uint32_t flags,
13009  VmaDefragmentationStats* pStats) :
13010  m_hAllocator(hAllocator),
13011  m_CurrFrameIndex(currFrameIndex),
13012  m_Flags(flags),
13013  m_pStats(pStats),
13014  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13015 {
13016  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13017 }
13018 
13019 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13020 {
13021  for(size_t i = m_CustomPoolContexts.size(); i--; )
13022  {
13023  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13024  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13025  vma_delete(m_hAllocator, pBlockVectorCtx);
13026  }
13027  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13028  {
13029  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13030  if(pBlockVectorCtx)
13031  {
13032  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13033  vma_delete(m_hAllocator, pBlockVectorCtx);
13034  }
13035  }
13036 }
13037 
13038 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13039 {
13040  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13041  {
13042  VmaPool pool = pPools[poolIndex];
13043  VMA_ASSERT(pool);
13044  // Pools with algorithm other than default are not defragmented.
13045  if(pool->m_BlockVector.GetAlgorithm() == 0)
13046  {
13047  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13048 
13049  for(size_t i = m_CustomPoolContexts.size(); i--; )
13050  {
13051  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13052  {
13053  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13054  break;
13055  }
13056  }
13057 
13058  if(!pBlockVectorDefragCtx)
13059  {
13060  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13061  m_hAllocator,
13062  pool,
13063  &pool->m_BlockVector,
13064  m_CurrFrameIndex,
13065  m_Flags);
13066  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13067  }
13068 
13069  pBlockVectorDefragCtx->AddAll();
13070  }
13071  }
13072 }
13073 
13074 void VmaDefragmentationContext_T::AddAllocations(
13075  uint32_t allocationCount,
13076  VmaAllocation* pAllocations,
13077  VkBool32* pAllocationsChanged)
13078 {
13079  // Dispatch pAllocations among defragmentators. Create them when necessary.
13080  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13081  {
13082  const VmaAllocation hAlloc = pAllocations[allocIndex];
13083  VMA_ASSERT(hAlloc);
13084  // DedicatedAlloc cannot be defragmented.
13085  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13086  // Lost allocation cannot be defragmented.
13087  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13088  {
13089  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13090 
13091  const VmaPool hAllocPool = hAlloc->GetPool();
13092  // This allocation belongs to custom pool.
13093  if(hAllocPool != VK_NULL_HANDLE)
13094  {
13095  // Pools with algorithm other than default are not defragmented.
13096  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13097  {
13098  for(size_t i = m_CustomPoolContexts.size(); i--; )
13099  {
13100  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13101  {
13102  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13103  break;
13104  }
13105  }
13106  if(!pBlockVectorDefragCtx)
13107  {
13108  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13109  m_hAllocator,
13110  hAllocPool,
13111  &hAllocPool->m_BlockVector,
13112  m_CurrFrameIndex,
13113  m_Flags);
13114  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13115  }
13116  }
13117  }
13118  // This allocation belongs to default pool.
13119  else
13120  {
13121  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13122  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13123  if(!pBlockVectorDefragCtx)
13124  {
13125  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13126  m_hAllocator,
13127  VMA_NULL, // hCustomPool
13128  m_hAllocator->m_pBlockVectors[memTypeIndex],
13129  m_CurrFrameIndex,
13130  m_Flags);
13131  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13132  }
13133  }
13134 
13135  if(pBlockVectorDefragCtx)
13136  {
13137  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13138  &pAllocationsChanged[allocIndex] : VMA_NULL;
13139  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13140  }
13141  }
13142  }
13143 }
13144 
13145 VkResult VmaDefragmentationContext_T::Defragment(
13146  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13147  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13148  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13149 {
13150  if(pStats)
13151  {
13152  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13153  }
13154 
13155  if(commandBuffer == VK_NULL_HANDLE)
13156  {
13157  maxGpuBytesToMove = 0;
13158  maxGpuAllocationsToMove = 0;
13159  }
13160 
13161  VkResult res = VK_SUCCESS;
13162 
13163  // Process default pools.
13164  for(uint32_t memTypeIndex = 0;
13165  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13166  ++memTypeIndex)
13167  {
13168  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13169  if(pBlockVectorCtx)
13170  {
13171  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13172  pBlockVectorCtx->GetBlockVector()->Defragment(
13173  pBlockVectorCtx,
13174  pStats,
13175  maxCpuBytesToMove, maxCpuAllocationsToMove,
13176  maxGpuBytesToMove, maxGpuAllocationsToMove,
13177  commandBuffer);
13178  if(pBlockVectorCtx->res != VK_SUCCESS)
13179  {
13180  res = pBlockVectorCtx->res;
13181  }
13182  }
13183  }
13184 
13185  // Process custom pools.
13186  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13187  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13188  ++customCtxIndex)
13189  {
13190  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13191  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13192  pBlockVectorCtx->GetBlockVector()->Defragment(
13193  pBlockVectorCtx,
13194  pStats,
13195  maxCpuBytesToMove, maxCpuAllocationsToMove,
13196  maxGpuBytesToMove, maxGpuAllocationsToMove,
13197  commandBuffer);
13198  if(pBlockVectorCtx->res != VK_SUCCESS)
13199  {
13200  res = pBlockVectorCtx->res;
13201  }
13202  }
13203 
13204  return res;
13205 }
13206 
13207 ////////////////////////////////////////////////////////////////////////////////
13208 // VmaRecorder
13209 
13210 #if VMA_RECORDING_ENABLED
13211 
13212 VmaRecorder::VmaRecorder() :
13213  m_UseMutex(true),
13214  m_Flags(0),
13215  m_File(VMA_NULL),
13216  m_Freq(INT64_MAX),
13217  m_StartCounter(INT64_MAX)
13218 {
13219 }
13220 
13221 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13222 {
13223  m_UseMutex = useMutex;
13224  m_Flags = settings.flags;
13225 
13226  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13227  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13228 
13229  // Open file for writing.
13230  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13231  if(err != 0)
13232  {
13233  return VK_ERROR_INITIALIZATION_FAILED;
13234  }
13235 
13236  // Write header.
13237  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13238  fprintf(m_File, "%s\n", "1,4");
13239 
13240  return VK_SUCCESS;
13241 }
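// For orientation, the beginning of a recording file written by Init() above and
// the Record*() functions below looks like this (CSV; the thread id and timestamp
// values are illustrative):
//
//   Vulkan Memory Allocator,Calls recording
//   1,4
//   Config,Begin
//   ...   (physical-device and macro values, see WriteConfiguration() below)
//   Config,End
//   8216,0.002,0,vmaCreateAllocator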
13242 
13243 VmaRecorder::~VmaRecorder()
13244 {
13245  if(m_File != VMA_NULL)
13246  {
13247  fclose(m_File);
13248  }
13249 }
13250 
13251 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13252 {
13253  CallParams callParams;
13254  GetBasicParams(callParams);
13255 
13256  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13257  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13258  Flush();
13259 }
13260 
13261 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13262 {
13263  CallParams callParams;
13264  GetBasicParams(callParams);
13265 
13266  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13267  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13268  Flush();
13269 }
13270 
13271 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13272 {
13273  CallParams callParams;
13274  GetBasicParams(callParams);
13275 
13276  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13277  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13278  createInfo.memoryTypeIndex,
13279  createInfo.flags,
13280  createInfo.blockSize,
13281  (uint64_t)createInfo.minBlockCount,
13282  (uint64_t)createInfo.maxBlockCount,
13283  createInfo.frameInUseCount,
13284  pool);
13285  Flush();
13286 }
13287 
13288 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13289 {
13290  CallParams callParams;
13291  GetBasicParams(callParams);
13292 
13293  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13294  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13295  pool);
13296  Flush();
13297 }
13298 
13299 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13300  const VkMemoryRequirements& vkMemReq,
13301  const VmaAllocationCreateInfo& createInfo,
13302  VmaAllocation allocation)
13303 {
13304  CallParams callParams;
13305  GetBasicParams(callParams);
13306 
13307  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13308  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13309  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13310  vkMemReq.size,
13311  vkMemReq.alignment,
13312  vkMemReq.memoryTypeBits,
13313  createInfo.flags,
13314  createInfo.usage,
13315  createInfo.requiredFlags,
13316  createInfo.preferredFlags,
13317  createInfo.memoryTypeBits,
13318  createInfo.pool,
13319  allocation,
13320  userDataStr.GetString());
13321  Flush();
13322 }
13323 
13324 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13325  const VkMemoryRequirements& vkMemReq,
13326  bool requiresDedicatedAllocation,
13327  bool prefersDedicatedAllocation,
13328  const VmaAllocationCreateInfo& createInfo,
13329  VmaAllocation allocation)
13330 {
13331  CallParams callParams;
13332  GetBasicParams(callParams);
13333 
13334  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13335  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13336  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13337  vkMemReq.size,
13338  vkMemReq.alignment,
13339  vkMemReq.memoryTypeBits,
13340  requiresDedicatedAllocation ? 1 : 0,
13341  prefersDedicatedAllocation ? 1 : 0,
13342  createInfo.flags,
13343  createInfo.usage,
13344  createInfo.requiredFlags,
13345  createInfo.preferredFlags,
13346  createInfo.memoryTypeBits,
13347  createInfo.pool,
13348  allocation,
13349  userDataStr.GetString());
13350  Flush();
13351 }
13352 
13353 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13354  const VkMemoryRequirements& vkMemReq,
13355  bool requiresDedicatedAllocation,
13356  bool prefersDedicatedAllocation,
13357  const VmaAllocationCreateInfo& createInfo,
13358  VmaAllocation allocation)
13359 {
13360  CallParams callParams;
13361  GetBasicParams(callParams);
13362 
13363  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13364  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13365  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13366  vkMemReq.size,
13367  vkMemReq.alignment,
13368  vkMemReq.memoryTypeBits,
13369  requiresDedicatedAllocation ? 1 : 0,
13370  prefersDedicatedAllocation ? 1 : 0,
13371  createInfo.flags,
13372  createInfo.usage,
13373  createInfo.requiredFlags,
13374  createInfo.preferredFlags,
13375  createInfo.memoryTypeBits,
13376  createInfo.pool,
13377  allocation,
13378  userDataStr.GetString());
13379  Flush();
13380 }
13381 
13382 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13383  VmaAllocation allocation)
13384 {
13385  CallParams callParams;
13386  GetBasicParams(callParams);
13387 
13388  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13389  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13390  allocation);
13391  Flush();
13392 }
13393 
13394 void VmaRecorder::RecordResizeAllocation(
13395  uint32_t frameIndex,
13396  VmaAllocation allocation,
13397  VkDeviceSize newSize)
13398 {
13399  CallParams callParams;
13400  GetBasicParams(callParams);
13401 
13402  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13403  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13404  allocation, newSize);
13405  Flush();
13406 }
13407 
13408 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13409  VmaAllocation allocation,
13410  const void* pUserData)
13411 {
13412  CallParams callParams;
13413  GetBasicParams(callParams);
13414 
13415  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13416  UserDataString userDataStr(
13417  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13418  pUserData);
13419  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13420  allocation,
13421  userDataStr.GetString());
13422  Flush();
13423 }
13424 
13425 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13426  VmaAllocation allocation)
13427 {
13428  CallParams callParams;
13429  GetBasicParams(callParams);
13430 
13431  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13432  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13433  allocation);
13434  Flush();
13435 }
13436 
13437 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13438  VmaAllocation allocation)
13439 {
13440  CallParams callParams;
13441  GetBasicParams(callParams);
13442 
13443  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13444  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13445  allocation);
13446  Flush();
13447 }
13448 
13449 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13450  VmaAllocation allocation)
13451 {
13452  CallParams callParams;
13453  GetBasicParams(callParams);
13454 
13455  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13456  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13457  allocation);
13458  Flush();
13459 }
13460 
13461 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13462  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13463 {
13464  CallParams callParams;
13465  GetBasicParams(callParams);
13466 
13467  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13468  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13469  allocation,
13470  offset,
13471  size);
13472  Flush();
13473 }
13474 
13475 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13476  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13477 {
13478  CallParams callParams;
13479  GetBasicParams(callParams);
13480 
13481  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13482  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13483  allocation,
13484  offset,
13485  size);
13486  Flush();
13487 }
13488 
13489 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13490  const VkBufferCreateInfo& bufCreateInfo,
13491  const VmaAllocationCreateInfo& allocCreateInfo,
13492  VmaAllocation allocation)
13493 {
13494  CallParams callParams;
13495  GetBasicParams(callParams);
13496 
13497  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13498  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13499  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13500  bufCreateInfo.flags,
13501  bufCreateInfo.size,
13502  bufCreateInfo.usage,
13503  bufCreateInfo.sharingMode,
13504  allocCreateInfo.flags,
13505  allocCreateInfo.usage,
13506  allocCreateInfo.requiredFlags,
13507  allocCreateInfo.preferredFlags,
13508  allocCreateInfo.memoryTypeBits,
13509  allocCreateInfo.pool,
13510  allocation,
13511  userDataStr.GetString());
13512  Flush();
13513 }
13514 
13515 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13516  const VkImageCreateInfo& imageCreateInfo,
13517  const VmaAllocationCreateInfo& allocCreateInfo,
13518  VmaAllocation allocation)
13519 {
13520  CallParams callParams;
13521  GetBasicParams(callParams);
13522 
13523  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13524  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13525  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13526  imageCreateInfo.flags,
13527  imageCreateInfo.imageType,
13528  imageCreateInfo.format,
13529  imageCreateInfo.extent.width,
13530  imageCreateInfo.extent.height,
13531  imageCreateInfo.extent.depth,
13532  imageCreateInfo.mipLevels,
13533  imageCreateInfo.arrayLayers,
13534  imageCreateInfo.samples,
13535  imageCreateInfo.tiling,
13536  imageCreateInfo.usage,
13537  imageCreateInfo.sharingMode,
13538  imageCreateInfo.initialLayout,
13539  allocCreateInfo.flags,
13540  allocCreateInfo.usage,
13541  allocCreateInfo.requiredFlags,
13542  allocCreateInfo.preferredFlags,
13543  allocCreateInfo.memoryTypeBits,
13544  allocCreateInfo.pool,
13545  allocation,
13546  userDataStr.GetString());
13547  Flush();
13548 }
13549 
13550 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13551  VmaAllocation allocation)
13552 {
13553  CallParams callParams;
13554  GetBasicParams(callParams);
13555 
13556  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13557  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13558  allocation);
13559  Flush();
13560 }
13561 
13562 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13563  VmaAllocation allocation)
13564 {
13565  CallParams callParams;
13566  GetBasicParams(callParams);
13567 
13568  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13569  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13570  allocation);
13571  Flush();
13572 }
13573 
13574 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13575  VmaAllocation allocation)
13576 {
13577  CallParams callParams;
13578  GetBasicParams(callParams);
13579 
13580  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13581  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13582  allocation);
13583  Flush();
13584 }
13585 
13586 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13587  VmaAllocation allocation)
13588 {
13589  CallParams callParams;
13590  GetBasicParams(callParams);
13591 
13592  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13593  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13594  allocation);
13595  Flush();
13596 }
13597 
13598 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13599  VmaPool pool)
13600 {
13601  CallParams callParams;
13602  GetBasicParams(callParams);
13603 
13604  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13605  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13606  pool);
13607  Flush();
13608 }
13609 
13610 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
13611 {
13612  if(pUserData != VMA_NULL)
13613  {
13614  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
13615  {
13616  m_Str = (const char*)pUserData;
13617  }
13618  else
13619  {
13620  sprintf_s(m_PtrStr, "%p", pUserData);
13621  m_Str = m_PtrStr;
13622  }
13623  }
13624  else
13625  {
13626  m_Str = "";
13627  }
13628 }
13629 
13630 void VmaRecorder::WriteConfiguration(
13631  const VkPhysicalDeviceProperties& devProps,
13632  const VkPhysicalDeviceMemoryProperties& memProps,
13633  bool dedicatedAllocationExtensionEnabled)
13634 {
13635  fprintf(m_File, "Config,Begin\n");
13636 
13637  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
13638  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
13639  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
13640  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
13641  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
13642  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
13643 
13644  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
13645  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
13646  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
13647 
13648  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
13649  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
13650  {
13651  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
13652  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
13653  }
13654  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
13655  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
13656  {
13657  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
13658  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
13659  }
13660 
13661  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
13662 
13663  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
13664  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
13665  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
13666  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
13667  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
13668  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
13669  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
13670  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
13671  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13672 
13673  fprintf(m_File, "Config,End\n");
13674 }
13675 
13676 void VmaRecorder::GetBasicParams(CallParams& outParams)
13677 {
13678  outParams.threadId = GetCurrentThreadId();
13679 
13680  LARGE_INTEGER counter;
13681  QueryPerformanceCounter(&counter);
13682  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
13683 }
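// For example, with a QueryPerformanceCounter frequency of 10,000,000 (a common
// value, i.e. 100 ns ticks) and counter.QuadPart - m_StartCounter == 1,234,567,
// outParams.time == 0.1234567 s, which the "%.3f" format used by the Record*()
// functions above prints as 0.123.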
13684 
13685 void VmaRecorder::Flush()
13686 {
13687  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
13688  {
13689  fflush(m_File);
13690  }
13691 }
13692 
13693 #endif // #if VMA_RECORDING_ENABLED
13694 
13695 ////////////////////////////////////////////////////////////////////////////////
13696 // VmaAllocator_T
13697 
13698 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13699  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13700  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13701  m_hDevice(pCreateInfo->device),
13702  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
13703  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
13704  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
13705  m_PreferredLargeHeapBlockSize(0),
13706  m_PhysicalDevice(pCreateInfo->physicalDevice),
13707  m_CurrentFrameIndex(0),
13708  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
13709  m_NextPoolId(0)
13710 #if VMA_RECORDING_ENABLED
13711  ,m_pRecorder(VMA_NULL)
13712 #endif
13713 {
13714  if(VMA_DEBUG_DETECT_CORRUPTION)
13715  {
13716  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
13717  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
13718  }
13719 
13720  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
13721 
13722 #if !(VMA_DEDICATED_ALLOCATION)
13723  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
13724  {
13725  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
13726  }
13727 #endif
13728 
13729  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
13730  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
13731  memset(&m_MemProps, 0, sizeof(m_MemProps));
13732 
13733  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
13734  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
13735 
13736  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
13737  {
13738  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
13739  }
13740 
13741  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
13742  {
13743  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
13744  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
13745  }
13746 
13747  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
13748 
13749  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
13750  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
13751 
13752  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
13753  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
13754  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
13755  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
13756 
13757  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
13758  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13759 
13760  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
13761  {
13762  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
13763  {
13764  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
13765  if(limit != VK_WHOLE_SIZE)
13766  {
13767  m_HeapSizeLimit[heapIndex] = limit;
13768  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
13769  {
13770  m_MemProps.memoryHeaps[heapIndex].size = limit;
13771  }
13772  }
13773  }
13774  }
13775 
13776  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13777  {
13778  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
13779 
13780  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
13781  this,
13782  memTypeIndex,
13783  preferredBlockSize,
13784  0,
13785  SIZE_MAX,
13786  GetBufferImageGranularity(),
13787  pCreateInfo->frameInUseCount,
13788  false, // isCustomPool
13789  false, // explicitBlockSize
13790  false); // linearAlgorithm
13791  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
13792  // because minBlockCount is 0.
13793  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
13794 
13795  }
13796 }
13797 
13798 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
13799 {
13800  VkResult res = VK_SUCCESS;
13801 
13802  if(pCreateInfo->pRecordSettings != VMA_NULL &&
13803  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
13804  {
13805 #if VMA_RECORDING_ENABLED
13806  m_pRecorder = vma_new(this, VmaRecorder)();
13807  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
13808  if(res != VK_SUCCESS)
13809  {
13810  return res;
13811  }
13812  m_pRecorder->WriteConfiguration(
13813  m_PhysicalDeviceProperties,
13814  m_MemProps,
13815  m_UseKhrDedicatedAllocation);
13816  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
13817 #else
13818  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
13819  return VK_ERROR_FEATURE_NOT_PRESENT;
13820 #endif
13821  }
13822 
13823  return res;
13824 }
13825 
13826 VmaAllocator_T::~VmaAllocator_T()
13827 {
13828 #if VMA_RECORDING_ENABLED
13829  if(m_pRecorder != VMA_NULL)
13830  {
13831  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
13832  vma_delete(this, m_pRecorder);
13833  }
13834 #endif
13835 
13836  VMA_ASSERT(m_Pools.empty());
13837 
13838  for(size_t i = GetMemoryTypeCount(); i--; )
13839  {
13840  vma_delete(this, m_pDedicatedAllocations[i]);
13841  vma_delete(this, m_pBlockVectors[i]);
13842  }
13843 }
13844 
13845 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
13846 {
13847 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
13848  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
13849  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
13850  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
13851  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
13852  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
13853  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
13854  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
13855  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
13856  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
13857  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
13858  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
13859  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
13860  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
13861  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
13862  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
13863  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
13864  m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
13865 #if VMA_DEDICATED_ALLOCATION
13866  if(m_UseKhrDedicatedAllocation)
13867  {
13868  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
13869  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
13870  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
13871  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
13872  }
13873 #endif // #if VMA_DEDICATED_ALLOCATION
13874 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
13875 
13876 #define VMA_COPY_IF_NOT_NULL(funcName) \
13877  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
13878 
13879  if(pVulkanFunctions != VMA_NULL)
13880  {
13881  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
13882  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
13883  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
13884  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
13885  VMA_COPY_IF_NOT_NULL(vkMapMemory);
13886  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
13887  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
13888  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
13889  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
13890  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
13891  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
13892  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
13893  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
13894  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
13895  VMA_COPY_IF_NOT_NULL(vkCreateImage);
13896  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
13897  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
13898 #if VMA_DEDICATED_ALLOCATION
13899  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
13900  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
13901 #endif
13902  }
13903 
13904 #undef VMA_COPY_IF_NOT_NULL
13905 
13906  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
13907  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
13908  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
13909  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
13910  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
13911  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
13912  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
13913  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
13914  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
13915  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
13916  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
13917  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
13918  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
13919  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
13920  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
13921  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
13922  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
13923  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
13924  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
13925 #if VMA_DEDICATED_ALLOCATION
13926  if(m_UseKhrDedicatedAllocation)
13927  {
13928  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
13929  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
13930  }
13931 #endif
13932 }
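// A minimal sketch (not part of the library) of supplying the pointers validated
// above when VMA_STATIC_VULKAN_FUNCTIONS is not 1. It assumes the application links
// the Vulkan loader statically so the vk* prototypes resolve; the KHR pointers are
// omitted because VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT is not used.
static VkResult ExampleCreateAllocatorWithFunctions(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VmaVulkanFunctions funcs = {};
    funcs.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    funcs.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    funcs.vkAllocateMemory = &vkAllocateMemory;
    funcs.vkFreeMemory = &vkFreeMemory;
    funcs.vkMapMemory = &vkMapMemory;
    funcs.vkUnmapMemory = &vkUnmapMemory;
    funcs.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    funcs.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    funcs.vkBindBufferMemory = &vkBindBufferMemory;
    funcs.vkBindImageMemory = &vkBindImageMemory;
    funcs.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    funcs.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    funcs.vkCreateBuffer = &vkCreateBuffer;
    funcs.vkDestroyBuffer = &vkDestroyBuffer;
    funcs.vkCreateImage = &vkCreateImage;
    funcs.vkDestroyImage = &vkDestroyImage;
    funcs.vkCmdCopyBuffer = &vkCmdCopyBuffer;

    VmaAllocatorCreateInfo createInfo = {};
    createInfo.physicalDevice = physicalDevice;
    createInfo.device = device;
    createInfo.pVulkanFunctions = &funcs;
    return vmaCreateAllocator(&createInfo, pAllocator);
}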
13933 
13934 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
13935 {
13936  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
13937  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
13938  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
13939  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
13940 }
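// Example with the library defaults (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB,
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB, both overridable): a 256 MiB heap is
// "small", so its preferred block size is 256 MiB / 8 = 32 MiB, while an 8 GiB
// device-local heap uses the full 256 MiB preferred block size.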
13941 
13942 VkResult VmaAllocator_T::AllocateMemoryOfType(
13943  VkDeviceSize size,
13944  VkDeviceSize alignment,
13945  bool dedicatedAllocation,
13946  VkBuffer dedicatedBuffer,
13947  VkImage dedicatedImage,
13948  const VmaAllocationCreateInfo& createInfo,
13949  uint32_t memTypeIndex,
13950  VmaSuballocationType suballocType,
13951  VmaAllocation* pAllocation)
13952 {
13953  VMA_ASSERT(pAllocation != VMA_NULL);
13954  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
13955 
13956  VmaAllocationCreateInfo finalCreateInfo = createInfo;
13957 
13958  // If memory type is not HOST_VISIBLE, disable MAPPED.
13959  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
13960  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13961  {
13962  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
13963  }
13964 
13965  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
13966  VMA_ASSERT(blockVector);
13967 
13968  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
13969  bool preferDedicatedMemory =
13970  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
13971  dedicatedAllocation ||
13972  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
13973  size > preferredBlockSize / 2;
13974 
13975  if(preferDedicatedMemory &&
13976  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
13977  finalCreateInfo.pool == VK_NULL_HANDLE)
13978  {
13979  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
13980  }
13981 
13982  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
13983  {
13984  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
13985  {
13986  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13987  }
13988  else
13989  {
13990  return AllocateDedicatedMemory(
13991  size,
13992  suballocType,
13993  memTypeIndex,
13994  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
13995  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
13996  finalCreateInfo.pUserData,
13997  dedicatedBuffer,
13998  dedicatedImage,
13999  pAllocation);
14000  }
14001  }
14002  else
14003  {
14004  VkResult res = blockVector->Allocate(
14005  VK_NULL_HANDLE, // hCurrentPool
14006  m_CurrentFrameIndex.load(),
14007  size,
14008  alignment,
14009  finalCreateInfo,
14010  suballocType,
14011  pAllocation);
14012  if(res == VK_SUCCESS)
14013  {
14014  return res;
14015  }
14016 
14017  // Try dedicated memory.
14018  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14019  {
14020  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14021  }
14022  else
14023  {
14024  res = AllocateDedicatedMemory(
14025  size,
14026  suballocType,
14027  memTypeIndex,
14028  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14029  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14030  finalCreateInfo.pUserData,
14031  dedicatedBuffer,
14032  dedicatedImage,
14033  pAllocation);
14034  if(res == VK_SUCCESS)
14035  {
14036  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
14037  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14038  return VK_SUCCESS;
14039  }
14040  else
14041  {
14042  // Everything failed: Return error code.
14043  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14044  return res;
14045  }
14046  }
14047  }
14048 }
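// A small sketch (hypothetical helper, not part of the library) of a call that
// exercises the logic above: a persistently mapped staging buffer. The chosen
// memory type is HOST_VISIBLE, so VMA_ALLOCATION_CREATE_MAPPED_BIT is kept; a size
// above half the preferred block size would flip the preferDedicatedMemory
// heuristic and take the dedicated path instead of the block vector.
static VkResult ExampleCreateStagingBuffer(VmaAllocator allocator, VkDeviceSize size,
    VkBuffer* pBuffer, VmaAllocation* pAllocation, VmaAllocationInfo* pAllocationInfo)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = size;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    return vmaCreateBuffer(allocator, &bufInfo, &allocInfo, pBuffer, pAllocation, pAllocationInfo);
}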
14049 
14050 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14051  VkDeviceSize size,
14052  VmaSuballocationType suballocType,
14053  uint32_t memTypeIndex,
14054  bool map,
14055  bool isUserDataString,
14056  void* pUserData,
14057  VkBuffer dedicatedBuffer,
14058  VkImage dedicatedImage,
14059  VmaAllocation* pAllocation)
14060 {
14061  VMA_ASSERT(pAllocation);
14062 
14063  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14064  allocInfo.memoryTypeIndex = memTypeIndex;
14065  allocInfo.allocationSize = size;
14066 
14067 #if VMA_DEDICATED_ALLOCATION
14068  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14069  if(m_UseKhrDedicatedAllocation)
14070  {
14071  if(dedicatedBuffer != VK_NULL_HANDLE)
14072  {
14073  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14074  dedicatedAllocInfo.buffer = dedicatedBuffer;
14075  allocInfo.pNext = &dedicatedAllocInfo;
14076  }
14077  else if(dedicatedImage != VK_NULL_HANDLE)
14078  {
14079  dedicatedAllocInfo.image = dedicatedImage;
14080  allocInfo.pNext = &dedicatedAllocInfo;
14081  }
14082  }
14083 #endif // #if VMA_DEDICATED_ALLOCATION
14084 
14085  // Allocate VkDeviceMemory.
14086  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14087  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14088  if(res < 0)
14089  {
14090  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14091  return res;
14092  }
14093 
14094  void* pMappedData = VMA_NULL;
14095  if(map)
14096  {
14097  res = (*m_VulkanFunctions.vkMapMemory)(
14098  m_hDevice,
14099  hMemory,
14100  0,
14101  VK_WHOLE_SIZE,
14102  0,
14103  &pMappedData);
14104  if(res < 0)
14105  {
14106  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14107  FreeVulkanMemory(memTypeIndex, size, hMemory);
14108  return res;
14109  }
14110  }
14111 
14112  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
14113  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14114  (*pAllocation)->SetUserData(this, pUserData);
14115  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14116  {
14117  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14118  }
14119 
14120  // Register it in m_pDedicatedAllocations.
14121  {
14122  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14123  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14124  VMA_ASSERT(pDedicatedAllocations);
14125  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
14126  }
14127 
14128  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
14129 
14130  return VK_SUCCESS;
14131 }
14132 
14133 void VmaAllocator_T::GetBufferMemoryRequirements(
14134  VkBuffer hBuffer,
14135  VkMemoryRequirements& memReq,
14136  bool& requiresDedicatedAllocation,
14137  bool& prefersDedicatedAllocation) const
14138 {
14139 #if VMA_DEDICATED_ALLOCATION
14140  if(m_UseKhrDedicatedAllocation)
14141  {
14142  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14143  memReqInfo.buffer = hBuffer;
14144 
14145  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14146 
14147  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14148  memReq2.pNext = &memDedicatedReq;
14149 
14150  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14151 
14152  memReq = memReq2.memoryRequirements;
14153  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14154  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14155  }
14156  else
14157 #endif // #if VMA_DEDICATED_ALLOCATION
14158  {
14159  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14160  requiresDedicatedAllocation = false;
14161  prefersDedicatedAllocation = false;
14162  }
14163 }
14164 
14165 void VmaAllocator_T::GetImageMemoryRequirements(
14166  VkImage hImage,
14167  VkMemoryRequirements& memReq,
14168  bool& requiresDedicatedAllocation,
14169  bool& prefersDedicatedAllocation) const
14170 {
14171 #if VMA_DEDICATED_ALLOCATION
14172  if(m_UseKhrDedicatedAllocation)
14173  {
14174  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14175  memReqInfo.image = hImage;
14176 
14177  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14178 
14179  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14180  memReq2.pNext = &memDedicatedReq;
14181 
14182  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14183 
14184  memReq = memReq2.memoryRequirements;
14185  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14186  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14187  }
14188  else
14189 #endif // #if VMA_DEDICATED_ALLOCATION
14190  {
14191  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14192  requiresDedicatedAllocation = false;
14193  prefersDedicatedAllocation = false;
14194  }
14195 }
14196 
14197 VkResult VmaAllocator_T::AllocateMemory(
14198  const VkMemoryRequirements& vkMemReq,
14199  bool requiresDedicatedAllocation,
14200  bool prefersDedicatedAllocation,
14201  VkBuffer dedicatedBuffer,
14202  VkImage dedicatedImage,
14203  const VmaAllocationCreateInfo& createInfo,
14204  VmaSuballocationType suballocType,
14205  VmaAllocation* pAllocation)
14206 {
14207  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14208 
14209  if(vkMemReq.size == 0)
14210  {
14211  return VK_ERROR_VALIDATION_FAILED_EXT;
14212  }
14213  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14214  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14215  {
14216  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14217  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14218  }
14219  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14220  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14221  {
14222  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14223  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14224  }
14225  if(requiresDedicatedAllocation)
14226  {
14227  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14228  {
14229  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14230  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14231  }
14232  if(createInfo.pool != VK_NULL_HANDLE)
14233  {
14234  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14235  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14236  }
14237  }
14238  if((createInfo.pool != VK_NULL_HANDLE) &&
14239  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14240  {
14241  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14242  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14243  }
14244 
14245  if(createInfo.pool != VK_NULL_HANDLE)
14246  {
14247  const VkDeviceSize alignmentForPool = VMA_MAX(
14248  vkMemReq.alignment,
14249  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14250  return createInfo.pool->m_BlockVector.Allocate(
14251  createInfo.pool,
14252  m_CurrentFrameIndex.load(),
14253  vkMemReq.size,
14254  alignmentForPool,
14255  createInfo,
14256  suballocType,
14257  pAllocation);
14258  }
14259  else
14260  {
14261  // Bit mask of Vulkan memory types acceptable for this allocation.
14262  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14263  uint32_t memTypeIndex = UINT32_MAX;
14264  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14265  if(res == VK_SUCCESS)
14266  {
14267  VkDeviceSize alignmentForMemType = VMA_MAX(
14268  vkMemReq.alignment,
14269  GetMemoryTypeMinAlignment(memTypeIndex));
14270 
14271  res = AllocateMemoryOfType(
14272  vkMemReq.size,
14273  alignmentForMemType,
14274  requiresDedicatedAllocation || prefersDedicatedAllocation,
14275  dedicatedBuffer,
14276  dedicatedImage,
14277  createInfo,
14278  memTypeIndex,
14279  suballocType,
14280  pAllocation);
14281  // Succeeded on first try.
14282  if(res == VK_SUCCESS)
14283  {
14284  return res;
14285  }
14286  // Allocation from this memory type failed. Try other compatible memory types.
14287  else
14288  {
14289  for(;;)
14290  {
14291  // Remove old memTypeIndex from list of possibilities.
14292  memoryTypeBits &= ~(1u << memTypeIndex);
14293  // Find alternative memTypeIndex.
14294  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14295  if(res == VK_SUCCESS)
14296  {
14297  alignmentForMemType = VMA_MAX(
14298  vkMemReq.alignment,
14299  GetMemoryTypeMinAlignment(memTypeIndex));
14300 
14301  res = AllocateMemoryOfType(
14302  vkMemReq.size,
14303  alignmentForMemType,
14304  requiresDedicatedAllocation || prefersDedicatedAllocation,
14305  dedicatedBuffer,
14306  dedicatedImage,
14307  createInfo,
14308  memTypeIndex,
14309  suballocType,
14310  pAllocation);
14311  // Allocation from this alternative memory type succeeded.
14312  if(res == VK_SUCCESS)
14313  {
14314  return res;
14315  }
14316  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14317  }
14318  // No other matching memory type index could be found.
14319  else
14320  {
14321  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14322  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14323  }
14324  }
14325  }
14326  }
14327  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14328  else
14329  return res;
14330  }
14331 }
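// The same lookup used in the fallback loop above is exposed publicly; a sketch
// (hypothetical helper) of querying up front which memory type an upload buffer
// would land in. memoryTypeBits would normally come from vkGetBufferMemoryRequirements.
static uint32_t ExampleFindUploadMemType(VmaAllocator allocator, uint32_t memoryTypeBits)
{
    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex = UINT32_MAX;
    if(vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &createInfo, &memTypeIndex) != VK_SUCCESS)
    {
        return UINT32_MAX; // No compatible memory type for these requirements.
    }
    return memTypeIndex;
}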
14332 
14333 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
14334 {
14335  VMA_ASSERT(allocation);
14336 
14337  if(TouchAllocation(allocation))
14338  {
14339  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14340  {
14341  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14342  }
14343 
14344  switch(allocation->GetType())
14345  {
14346  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14347  {
14348  VmaBlockVector* pBlockVector = VMA_NULL;
14349  VmaPool hPool = allocation->GetPool();
14350  if(hPool != VK_NULL_HANDLE)
14351  {
14352  pBlockVector = &hPool->m_BlockVector;
14353  }
14354  else
14355  {
14356  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14357  pBlockVector = m_pBlockVectors[memTypeIndex];
14358  }
14359  pBlockVector->Free(allocation);
14360  }
14361  break;
14362  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14363  FreeDedicatedMemory(allocation);
14364  break;
14365  default:
14366  VMA_ASSERT(0);
14367  }
14368  }
14369 
14370  allocation->SetUserData(this, VMA_NULL);
14371  vma_delete(this, allocation);
14372 }
14373 
14374 VkResult VmaAllocator_T::ResizeAllocation(
14375  const VmaAllocation alloc,
14376  VkDeviceSize newSize)
14377 {
14378  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14379  {
14380  return VK_ERROR_VALIDATION_FAILED_EXT;
14381  }
14382  if(newSize == alloc->GetSize())
14383  {
14384  return VK_SUCCESS;
14385  }
14386 
14387  switch(alloc->GetType())
14388  {
14389  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14390  return VK_ERROR_FEATURE_NOT_PRESENT;
14391  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14392  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14393  {
14394  alloc->ChangeSize(newSize);
14395  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14396  return VK_SUCCESS;
14397  }
14398  else
14399  {
14400  return VK_ERROR_OUT_OF_POOL_MEMORY;
14401  }
14402  default:
14403  VMA_ASSERT(0);
14404  return VK_ERROR_VALIDATION_FAILED_EXT;
14405  }
14406 }
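// Sketch (hypothetical helper) of the public call that reaches ResizeAllocation()
// above. Per the switch above it can only succeed for block allocations whose
// metadata can resize the suballocation in place (e.g. enough free space right
// after it when growing).
static VkResult ExampleTryResize(VmaAllocator allocator, VmaAllocation alloc, VkDeviceSize newSize)
{
    const VkResult res = vmaResizeAllocation(allocator, alloc, newSize);
    if(res != VK_SUCCESS)
    {
        // VK_ERROR_OUT_OF_POOL_MEMORY / VK_ERROR_FEATURE_NOT_PRESENT: the caller
        // has to allocate a new allocation and copy the data instead.
    }
    return res;
}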
14407 
14408 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14409 {
14410  // Initialize.
14411  InitStatInfo(pStats->total);
14412  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14413  InitStatInfo(pStats->memoryType[i]);
14414  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14415  InitStatInfo(pStats->memoryHeap[i]);
14416 
14417  // Process default pools.
14418  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14419  {
14420  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14421  VMA_ASSERT(pBlockVector);
14422  pBlockVector->AddStats(pStats);
14423  }
14424 
14425  // Process custom pools.
14426  {
14427  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14428  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14429  {
14430  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14431  }
14432  }
14433 
14434  // Process dedicated allocations.
14435  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14436  {
14437  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14438  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14439  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14440  VMA_ASSERT(pDedicatedAllocVector);
14441  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14442  {
14443  VmaStatInfo allocationStatInfo;
14444  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14445  VmaAddStatInfo(pStats->total, allocationStatInfo);
14446  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14447  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14448  }
14449  }
14450 
14451  // Postprocess.
14452  VmaPostprocessCalcStatInfo(pStats->total);
14453  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14454  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14455  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14456  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14457 }
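// Usage sketch (hypothetical helper) for the statistics gathered above, assuming a
// valid allocator. stats.total aggregates the three passes performed by
// CalculateStats(): default block vectors, custom pools and dedicated allocations.
static VkDeviceSize ExampleGetTotalUsedBytes(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    return stats.total.usedBytes;
}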
14458 
14459 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
14460 
14461 VkResult VmaAllocator_T::DefragmentationBegin(
14462  const VmaDefragmentationInfo2& info,
14463  VmaDefragmentationStats* pStats,
14464  VmaDefragmentationContext* pContext)
14465 {
14466  if(info.pAllocationsChanged != VMA_NULL)
14467  {
14468  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
14469  }
14470 
14471  *pContext = vma_new(this, VmaDefragmentationContext_T)(
14472  this, m_CurrentFrameIndex.load(), info.flags, pStats);
14473 
14474  (*pContext)->AddPools(info.poolCount, info.pPools);
14475  (*pContext)->AddAllocations(
14476  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
14477 
14478  VkResult res = (*pContext)->Defragment(
14479  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
14480  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
14481  info.commandBuffer, pStats);
14482 
14483  if(res != VK_NOT_READY)
14484  {
14485  vma_delete(this, *pContext);
14486  *pContext = VMA_NULL;
14487  }
14488 
14489  return res;
14490 }
14491 
14492 VkResult VmaAllocator_T::DefragmentationEnd(
14493  VmaDefragmentationContext context)
14494 {
14495  vma_delete(this, context);
14496  return VK_SUCCESS;
14497 }
14498 
14499 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
14500 {
14501  if(hAllocation->CanBecomeLost())
14502  {
14503  /*
14504  Warning: This is a carefully designed algorithm.
14505  Do not modify unless you really know what you're doing :)
14506  */
14507  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14508  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14509  for(;;)
14510  {
14511  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14512  {
14513  pAllocationInfo->memoryType = UINT32_MAX;
14514  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
14515  pAllocationInfo->offset = 0;
14516  pAllocationInfo->size = hAllocation->GetSize();
14517  pAllocationInfo->pMappedData = VMA_NULL;
14518  pAllocationInfo->pUserData = hAllocation->GetUserData();
14519  return;
14520  }
14521  else if(localLastUseFrameIndex == localCurrFrameIndex)
14522  {
14523  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14524  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14525  pAllocationInfo->offset = hAllocation->GetOffset();
14526  pAllocationInfo->size = hAllocation->GetSize();
14527  pAllocationInfo->pMappedData = VMA_NULL;
14528  pAllocationInfo->pUserData = hAllocation->GetUserData();
14529  return;
14530  }
14531  else // Last use time earlier than current time.
14532  {
14533  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14534  {
14535  localLastUseFrameIndex = localCurrFrameIndex;
14536  }
14537  }
14538  }
14539  }
14540  else
14541  {
14542 #if VMA_STATS_STRING_ENABLED
14543  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14544  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14545  for(;;)
14546  {
14547  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14548  if(localLastUseFrameIndex == localCurrFrameIndex)
14549  {
14550  break;
14551  }
14552  else // Last use time earlier than current time.
14553  {
14554  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14555  {
14556  localLastUseFrameIndex = localCurrFrameIndex;
14557  }
14558  }
14559  }
14560 #endif
14561 
14562  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14563  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14564  pAllocationInfo->offset = hAllocation->GetOffset();
14565  pAllocationInfo->size = hAllocation->GetSize();
14566  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
14567  pAllocationInfo->pUserData = hAllocation->GetUserData();
14568  }
14569 }
14570 
14571 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
14572 {
14573  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
14574  if(hAllocation->CanBecomeLost())
14575  {
14576  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14577  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14578  for(;;)
14579  {
14580  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14581  {
14582  return false;
14583  }
14584  else if(localLastUseFrameIndex == localCurrFrameIndex)
14585  {
14586  return true;
14587  }
14588  else // Last use time earlier than current time.
14589  {
14590  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14591  {
14592  localLastUseFrameIndex = localCurrFrameIndex;
14593  }
14594  }
14595  }
14596  }
14597  else
14598  {
14599 #if VMA_STATS_STRING_ENABLED
14600  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14601  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14602  for(;;)
14603  {
14604  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14605  if(localLastUseFrameIndex == localCurrFrameIndex)
14606  {
14607  break;
14608  }
14609  else // Last use time earlier than current time.
14610  {
14611  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14612  {
14613  localLastUseFrameIndex = localCurrFrameIndex;
14614  }
14615  }
14616  }
14617 #endif
14618 
14619  return true;
14620  }
14621 }
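Both GetAllocationInfo and TouchAllocation above rely on the same lock-free pattern: the allocation's last-use frame index is an atomic value that is either observed as VMA_FRAME_INDEX_LOST or advanced to the current frame with a compare-exchange, retrying if another thread won the race. A minimal standalone sketch of that pattern (the names below are illustrative, not part of the library):

#include <atomic>
#include <cstdint>

static const uint32_t FRAME_INDEX_LOST = UINT32_MAX; // stand-in for VMA_FRAME_INDEX_LOST

// Returns false if the allocation is already lost, true after recording a use
// in the current frame. Mirrors the loop in TouchAllocation above.
bool TouchLastUseFrame(std::atomic<uint32_t>& lastUseFrame, uint32_t currFrame)
{
    uint32_t observed = lastUseFrame.load();
    for(;;)
    {
        if(observed == FRAME_INDEX_LOST)
        {
            return false; // lost in the meantime
        }
        if(observed == currFrame)
        {
            return true;  // already touched this frame
        }
        // On failure, compare_exchange_weak reloads 'observed' and the loop retries.
        if(lastUseFrame.compare_exchange_weak(observed, currFrame))
        {
            return true;
        }
    }
}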
14622 
14623 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
14624 {
14625  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
14626 
14627  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
14628 
14629  if(newCreateInfo.maxBlockCount == 0)
14630  {
14631  newCreateInfo.maxBlockCount = SIZE_MAX;
14632  }
14633  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
14634  {
14635  return VK_ERROR_INITIALIZATION_FAILED;
14636  }
14637 
14638  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
14639 
14640  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
14641 
14642  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
14643  if(res != VK_SUCCESS)
14644  {
14645  vma_delete(this, *pPool);
14646  *pPool = VMA_NULL;
14647  return res;
14648  }
14649 
14650  // Add to m_Pools.
14651  {
14652  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
14653  (*pPool)->SetId(m_NextPoolId++);
14654  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
14655  }
14656 
14657  return VK_SUCCESS;
14658 }
14659 
14660 void VmaAllocator_T::DestroyPool(VmaPool pool)
14661 {
14662  // Remove from m_Pools.
14663  {
14664  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
14665  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
14666  VMA_ASSERT(success && "Pool not found in Allocator.");
14667  }
14668 
14669  vma_delete(this, pool);
14670 }
14671 
14672 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
14673 {
14674  pool->m_BlockVector.GetPoolStats(pPoolStats);
14675 }
14676 
14677 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
14678 {
14679  m_CurrentFrameIndex.store(frameIndex);
14680 }
14681 
14682 void VmaAllocator_T::MakePoolAllocationsLost(
14683  VmaPool hPool,
14684  size_t* pLostAllocationCount)
14685 {
14686  hPool->m_BlockVector.MakePoolAllocationsLost(
14687  m_CurrentFrameIndex.load(),
14688  pLostAllocationCount);
14689 }
14690 
14691 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
14692 {
14693  return hPool->m_BlockVector.CheckCorruption();
14694 }
14695 
14696 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
14697 {
14698  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
14699 
14700  // Process default pools.
14701  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14702  {
14703  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
14704  {
14705  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14706  VMA_ASSERT(pBlockVector);
14707  VkResult localRes = pBlockVector->CheckCorruption();
14708  switch(localRes)
14709  {
14710  case VK_ERROR_FEATURE_NOT_PRESENT:
14711  break;
14712  case VK_SUCCESS:
14713  finalRes = VK_SUCCESS;
14714  break;
14715  default:
14716  return localRes;
14717  }
14718  }
14719  }
14720 
14721  // Process custom pools.
14722  {
14723  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14724  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14725  {
14726  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
14727  {
14728  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
14729  switch(localRes)
14730  {
14731  case VK_ERROR_FEATURE_NOT_PRESENT:
14732  break;
14733  case VK_SUCCESS:
14734  finalRes = VK_SUCCESS;
14735  break;
14736  default:
14737  return localRes;
14738  }
14739  }
14740  }
14741  }
14742 
14743  return finalRes;
14744 }
14745 
14746 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
14747 {
14748  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
14749  (*pAllocation)->InitLost();
14750 }
14751 
14752 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
14753 {
14754  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
14755 
14756  VkResult res;
14757  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
14758  {
14759  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
14760  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
14761  {
14762  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
14763  if(res == VK_SUCCESS)
14764  {
14765  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
14766  }
14767  }
14768  else
14769  {
14770  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
14771  }
14772  }
14773  else
14774  {
14775  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
14776  }
14777 
14778  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
14779  {
14780  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
14781  }
14782 
14783  return res;
14784 }
14785 
14786 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
14787 {
14788  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
14789  {
14790  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
14791  }
14792 
14793  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
14794 
14795  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
14796  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
14797  {
14798  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
14799  m_HeapSizeLimit[heapIndex] += size;
14800  }
14801 }
14802 
14803 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
14804 {
14805  if(hAllocation->CanBecomeLost())
14806  {
14807  return VK_ERROR_MEMORY_MAP_FAILED;
14808  }
14809 
14810  switch(hAllocation->GetType())
14811  {
14812  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14813  {
14814  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
14815  char *pBytes = VMA_NULL;
14816  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
14817  if(res == VK_SUCCESS)
14818  {
14819  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
14820  hAllocation->BlockAllocMap();
14821  }
14822  return res;
14823  }
14824  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14825  return hAllocation->DedicatedAllocMap(this, ppData);
14826  default:
14827  VMA_ASSERT(0);
14828  return VK_ERROR_MEMORY_MAP_FAILED;
14829  }
14830 }
14831 
14832 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
14833 {
14834  switch(hAllocation->GetType())
14835  {
14836  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14837  {
14838  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
14839  hAllocation->BlockAllocUnmap();
14840  pBlock->Unmap(this, 1);
14841  }
14842  break;
14843  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14844  hAllocation->DedicatedAllocUnmap(this);
14845  break;
14846  default:
14847  VMA_ASSERT(0);
14848  }
14849 }
14850 
14851 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
14852 {
14853  VkResult res = VK_SUCCESS;
14854  switch(hAllocation->GetType())
14855  {
14856  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14857  res = GetVulkanFunctions().vkBindBufferMemory(
14858  m_hDevice,
14859  hBuffer,
14860  hAllocation->GetMemory(),
14861  0); //memoryOffset
14862  break;
14863  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14864  {
14865  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
14866  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
14867  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
14868  break;
14869  }
14870  default:
14871  VMA_ASSERT(0);
14872  }
14873  return res;
14874 }
14875 
14876 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
14877 {
14878  VkResult res = VK_SUCCESS;
14879  switch(hAllocation->GetType())
14880  {
14881  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14882  res = GetVulkanFunctions().vkBindImageMemory(
14883  m_hDevice,
14884  hImage,
14885  hAllocation->GetMemory(),
14886  0); //memoryOffset
14887  break;
14888  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14889  {
14890  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
14891  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
14892  res = pBlock->BindImageMemory(this, hAllocation, hImage);
14893  break;
14894  }
14895  default:
14896  VMA_ASSERT(0);
14897  }
14898  return res;
14899 }
14900 
14901 void VmaAllocator_T::FlushOrInvalidateAllocation(
14902  VmaAllocation hAllocation,
14903  VkDeviceSize offset, VkDeviceSize size,
14904  VMA_CACHE_OPERATION op)
14905 {
14906  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
14907  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
14908  {
14909  const VkDeviceSize allocationSize = hAllocation->GetSize();
14910  VMA_ASSERT(offset <= allocationSize);
14911 
14912  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
14913 
14914  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
14915  memRange.memory = hAllocation->GetMemory();
14916 
14917  switch(hAllocation->GetType())
14918  {
14919  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14920  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
14921  if(size == VK_WHOLE_SIZE)
14922  {
14923  memRange.size = allocationSize - memRange.offset;
14924  }
14925  else
14926  {
14927  VMA_ASSERT(offset + size <= allocationSize);
14928  memRange.size = VMA_MIN(
14929  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
14930  allocationSize - memRange.offset);
14931  }
14932  break;
14933 
14934  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14935  {
14936  // 1. Still within this allocation.
14937  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
14938  if(size == VK_WHOLE_SIZE)
14939  {
14940  size = allocationSize - offset;
14941  }
14942  else
14943  {
14944  VMA_ASSERT(offset + size <= allocationSize);
14945  }
14946  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
14947 
14948  // 2. Adjust to whole block.
14949  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
14950  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
14951  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
14952  memRange.offset += allocationOffset;
14953  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
14954 
14955  break;
14956  }
14957 
14958  default:
14959  VMA_ASSERT(0);
14960  }
14961 
14962  switch(op)
14963  {
14964  case VMA_CACHE_FLUSH:
14965  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
14966  break;
14967  case VMA_CACHE_INVALIDATE:
14968  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
14969  break;
14970  default:
14971  VMA_ASSERT(0);
14972  }
14973  }
14974  // else: Just ignore this call.
14975 }
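The flushed/invalidated range is the requested range rounded outward to nonCoherentAtomSize, as non-coherent memory requires. A small worked example of that arithmetic (the helper names are hypothetical; the math matches how VmaAlignDown/VmaAlignUp are used above):

#include <cassert>
#include <cstdint>

// Power-of-two alignment helpers equivalent to VmaAlignDown/VmaAlignUp.
static uint64_t AlignDown(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static uint64_t AlignUp(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }

int main()
{
    const uint64_t nonCoherentAtomSize = 64;  // from VkPhysicalDeviceLimits
    const uint64_t allocationSize = 256;
    const uint64_t offset = 70, size = 100;   // range the caller asked to flush

    const uint64_t rangeOffset = AlignDown(offset, nonCoherentAtomSize);                    // 64
    const uint64_t rangeSize   = AlignUp(size + (offset - rangeOffset), nonCoherentAtomSize); // 128

    assert(rangeOffset == 64 && rangeSize == 128);
    // The dedicated-allocation branch additionally clamps the size to
    // allocationSize - rangeOffset (here min(128, 192) == 128).
    assert(rangeOffset + rangeSize <= allocationSize);
    return 0;
}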
14976 
14977 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
14978 {
14979  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
14980 
14981  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14982  {
14983  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14984  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14985  VMA_ASSERT(pDedicatedAllocations);
14986  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
14987  VMA_ASSERT(success);
14988  }
14989 
14990  VkDeviceMemory hMemory = allocation->GetMemory();
14991 
14992  /*
14993  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14994  before vkFreeMemory.
14995 
14996  if(allocation->GetMappedData() != VMA_NULL)
14997  {
14998  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14999  }
15000  */
15001 
15002  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15003 
15004  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15005 }
15006 
15007 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15008 {
15009  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15010  !hAllocation->CanBecomeLost() &&
15011  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15012  {
15013  void* pData = VMA_NULL;
15014  VkResult res = Map(hAllocation, &pData);
15015  if(res == VK_SUCCESS)
15016  {
15017  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15018  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15019  Unmap(hAllocation);
15020  }
15021  else
15022  {
15023  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15024  }
15025  }
15026 }
15027 
15028 #if VMA_STATS_STRING_ENABLED
15029 
15030 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15031 {
15032  bool dedicatedAllocationsStarted = false;
15033  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15034  {
15035  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15036  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15037  VMA_ASSERT(pDedicatedAllocVector);
15038  if(pDedicatedAllocVector->empty() == false)
15039  {
15040  if(dedicatedAllocationsStarted == false)
15041  {
15042  dedicatedAllocationsStarted = true;
15043  json.WriteString("DedicatedAllocations");
15044  json.BeginObject();
15045  }
15046 
15047  json.BeginString("Type ");
15048  json.ContinueString(memTypeIndex);
15049  json.EndString();
15050 
15051  json.BeginArray();
15052 
15053  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15054  {
15055  json.BeginObject(true);
15056  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15057  hAlloc->PrintParameters(json);
15058  json.EndObject();
15059  }
15060 
15061  json.EndArray();
15062  }
15063  }
15064  if(dedicatedAllocationsStarted)
15065  {
15066  json.EndObject();
15067  }
15068 
15069  {
15070  bool allocationsStarted = false;
15071  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15072  {
15073  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15074  {
15075  if(allocationsStarted == false)
15076  {
15077  allocationsStarted = true;
15078  json.WriteString("DefaultPools");
15079  json.BeginObject();
15080  }
15081 
15082  json.BeginString("Type ");
15083  json.ContinueString(memTypeIndex);
15084  json.EndString();
15085 
15086  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15087  }
15088  }
15089  if(allocationsStarted)
15090  {
15091  json.EndObject();
15092  }
15093  }
15094 
15095  // Custom pools
15096  {
15097  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15098  const size_t poolCount = m_Pools.size();
15099  if(poolCount > 0)
15100  {
15101  json.WriteString("Pools");
15102  json.BeginObject();
15103  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15104  {
15105  json.BeginString();
15106  json.ContinueString(m_Pools[poolIndex]->GetId());
15107  json.EndString();
15108 
15109  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15110  }
15111  json.EndObject();
15112  }
15113  }
15114 }
15115 
15116 #endif // #if VMA_STATS_STRING_ENABLED
15117 
15118 ////////////////////////////////////////////////////////////////////////////////
15119 // Public interface
15120 
15121 VkResult vmaCreateAllocator(
15122  const VmaAllocatorCreateInfo* pCreateInfo,
15123  VmaAllocator* pAllocator)
15124 {
15125  VMA_ASSERT(pCreateInfo && pAllocator);
15126  VMA_DEBUG_LOG("vmaCreateAllocator");
15127  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15128  return (*pAllocator)->Init(pCreateInfo);
15129 }
15130 
15131 void vmaDestroyAllocator(
15132  VmaAllocator allocator)
15133 {
15134  if(allocator != VK_NULL_HANDLE)
15135  {
15136  VMA_DEBUG_LOG("vmaDestroyAllocator");
15137  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15138  vma_delete(&allocationCallbacks, allocator);
15139  }
15140 }
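A minimal sketch of the intended call sequence, assuming physicalDevice and device are valid handles the application created beforehand:

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice; // assumed to exist
allocatorInfo.device = device;                 // assumed to exist

VmaAllocator allocator = VK_NULL_HANDLE;
if(vmaCreateAllocator(&allocatorInfo, &allocator) == VK_SUCCESS)
{
    // ... allocate buffers/images through the allocator ...
    vmaDestroyAllocator(allocator); // ignores VK_NULL_HANDLE, as shown above
}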
15141 
15142 void vmaGetPhysicalDeviceProperties(
15143  VmaAllocator allocator,
15144  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15145 {
15146  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15147  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15148 }
15149 
15150 void vmaGetMemoryProperties(
15151  VmaAllocator allocator,
15152  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15153 {
15154  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15155  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15156 }
15157 
15158 void vmaGetMemoryTypeProperties(
15159  VmaAllocator allocator,
15160  uint32_t memoryTypeIndex,
15161  VkMemoryPropertyFlags* pFlags)
15162 {
15163  VMA_ASSERT(allocator && pFlags);
15164  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15165  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15166 }
15167 
15168 void vmaSetCurrentFrameIndex(
15169  VmaAllocator allocator,
15170  uint32_t frameIndex)
15171 {
15172  VMA_ASSERT(allocator);
15173  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15174 
15175  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15176 
15177  allocator->SetCurrentFrameIndex(frameIndex);
15178 }
15179 
15180 void vmaCalculateStats(
15181  VmaAllocator allocator,
15182  VmaStats* pStats)
15183 {
15184  VMA_ASSERT(allocator && pStats);
15185  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15186  allocator->CalculateStats(pStats);
15187 }
15188 
15189 #if VMA_STATS_STRING_ENABLED
15190 
15191 void vmaBuildStatsString(
15192  VmaAllocator allocator,
15193  char** ppStatsString,
15194  VkBool32 detailedMap)
15195 {
15196  VMA_ASSERT(allocator && ppStatsString);
15197  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15198 
15199  VmaStringBuilder sb(allocator);
15200  {
15201  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15202  json.BeginObject();
15203 
15204  VmaStats stats;
15205  allocator->CalculateStats(&stats);
15206 
15207  json.WriteString("Total");
15208  VmaPrintStatInfo(json, stats.total);
15209 
15210  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15211  {
15212  json.BeginString("Heap ");
15213  json.ContinueString(heapIndex);
15214  json.EndString();
15215  json.BeginObject();
15216 
15217  json.WriteString("Size");
15218  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15219 
15220  json.WriteString("Flags");
15221  json.BeginArray(true);
15222  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15223  {
15224  json.WriteString("DEVICE_LOCAL");
15225  }
15226  json.EndArray();
15227 
15228  if(stats.memoryHeap[heapIndex].blockCount > 0)
15229  {
15230  json.WriteString("Stats");
15231  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15232  }
15233 
15234  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15235  {
15236  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15237  {
15238  json.BeginString("Type ");
15239  json.ContinueString(typeIndex);
15240  json.EndString();
15241 
15242  json.BeginObject();
15243 
15244  json.WriteString("Flags");
15245  json.BeginArray(true);
15246  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15247  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15248  {
15249  json.WriteString("DEVICE_LOCAL");
15250  }
15251  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15252  {
15253  json.WriteString("HOST_VISIBLE");
15254  }
15255  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15256  {
15257  json.WriteString("HOST_COHERENT");
15258  }
15259  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15260  {
15261  json.WriteString("HOST_CACHED");
15262  }
15263  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15264  {
15265  json.WriteString("LAZILY_ALLOCATED");
15266  }
15267  json.EndArray();
15268 
15269  if(stats.memoryType[typeIndex].blockCount > 0)
15270  {
15271  json.WriteString("Stats");
15272  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15273  }
15274 
15275  json.EndObject();
15276  }
15277  }
15278 
15279  json.EndObject();
15280  }
15281  if(detailedMap == VK_TRUE)
15282  {
15283  allocator->PrintDetailedMap(json);
15284  }
15285 
15286  json.EndObject();
15287  }
15288 
15289  const size_t len = sb.GetLength();
15290  char* const pChars = vma_new_array(allocator, char, len + 1);
15291  if(len > 0)
15292  {
15293  memcpy(pChars, sb.GetData(), len);
15294  }
15295  pChars[len] = '\0';
15296  *ppStatsString = pChars;
15297 }
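Typical use of the pair above, assuming a valid allocator; the returned JSON string must be released with vmaFreeStatsString rather than free():

#if VMA_STATS_STRING_ENABLED
char* statsJson = nullptr;
vmaBuildStatsString(allocator, &statsJson, VK_TRUE /*detailedMap*/);
printf("%s\n", statsJson); // totals, per-heap and per-type stats, detailed map; needs <cstdio>
vmaFreeStatsString(allocator, statsJson);
#endif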
15298 
15299 void vmaFreeStatsString(
15300  VmaAllocator allocator,
15301  char* pStatsString)
15302 {
15303  if(pStatsString != VMA_NULL)
15304  {
15305  VMA_ASSERT(allocator);
15306  size_t len = strlen(pStatsString);
15307  vma_delete_array(allocator, pStatsString, len + 1);
15308  }
15309 }
15310 
15311 #endif // #if VMA_STATS_STRING_ENABLED
15312 
15313 /*
15314 This function is not protected by any mutex because it just reads immutable data.
15315 */
15316 VkResult vmaFindMemoryTypeIndex(
15317  VmaAllocator allocator,
15318  uint32_t memoryTypeBits,
15319  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15320  uint32_t* pMemoryTypeIndex)
15321 {
15322  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15323  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15324  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15325 
15326  if(pAllocationCreateInfo->memoryTypeBits != 0)
15327  {
15328  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15329  }
15330 
15331  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15332  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15333 
15334  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15335  if(mapped)
15336  {
15337  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15338  }
15339 
15340  // Convert usage to requiredFlags and preferredFlags.
15341  switch(pAllocationCreateInfo->usage)
15342  {
15343  case VMA_MEMORY_USAGE_UNKNOWN:
15344  break;
15345  case VMA_MEMORY_USAGE_GPU_ONLY:
15346  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15347  {
15348  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15349  }
15350  break;
15351  case VMA_MEMORY_USAGE_CPU_ONLY:
15352  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15353  break;
15354  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15355  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15356  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15357  {
15358  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15359  }
15360  break;
15361  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15362  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15363  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15364  break;
15365  default:
15366  break;
15367  }
15368 
15369  *pMemoryTypeIndex = UINT32_MAX;
15370  uint32_t minCost = UINT32_MAX;
15371  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15372  memTypeIndex < allocator->GetMemoryTypeCount();
15373  ++memTypeIndex, memTypeBit <<= 1)
15374  {
15375  // This memory type is acceptable according to memoryTypeBits bitmask.
15376  if((memTypeBit & memoryTypeBits) != 0)
15377  {
15378  const VkMemoryPropertyFlags currFlags =
15379  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15380  // This memory type contains requiredFlags.
15381  if((requiredFlags & ~currFlags) == 0)
15382  {
15383  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15384  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15385  // Remember memory type with lowest cost.
15386  if(currCost < minCost)
15387  {
15388  *pMemoryTypeIndex = memTypeIndex;
15389  if(currCost == 0)
15390  {
15391  return VK_SUCCESS;
15392  }
15393  minCost = currCost;
15394  }
15395  }
15396  }
15397  }
15398  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15399 }
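For example, choosing a memory type for a CPU-side staging buffer; the memReq variable below is assumed to come from vkGetBufferMemoryRequirements:

VmaAllocationCreateInfo stagingAllocInfo = {};
stagingAllocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // adds HOST_VISIBLE | HOST_COHERENT to requiredFlags, per the switch above

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(
    allocator,
    memReq.memoryTypeBits, // assumed: from vkGetBufferMemoryRequirements
    &stagingAllocInfo,
    &memTypeIndex);
// VK_SUCCESS: memTypeIndex is the lowest-cost acceptable type;
// VK_ERROR_FEATURE_NOT_PRESENT: no type satisfies requiredFlags.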
15400 
15401 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15402  VmaAllocator allocator,
15403  const VkBufferCreateInfo* pBufferCreateInfo,
15404  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15405  uint32_t* pMemoryTypeIndex)
15406 {
15407  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15408  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15409  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15410  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15411 
15412  const VkDevice hDev = allocator->m_hDevice;
15413  VkBuffer hBuffer = VK_NULL_HANDLE;
15414  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15415  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15416  if(res == VK_SUCCESS)
15417  {
15418  VkMemoryRequirements memReq = {};
15419  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15420  hDev, hBuffer, &memReq);
15421 
15422  res = vmaFindMemoryTypeIndex(
15423  allocator,
15424  memReq.memoryTypeBits,
15425  pAllocationCreateInfo,
15426  pMemoryTypeIndex);
15427 
15428  allocator->GetVulkanFunctions().vkDestroyBuffer(
15429  hDev, hBuffer, allocator->GetAllocationCallbacks());
15430  }
15431  return res;
15432 }
15433 
15434 VkResult vmaFindMemoryTypeIndexForImageInfo(
15435  VmaAllocator allocator,
15436  const VkImageCreateInfo* pImageCreateInfo,
15437  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15438  uint32_t* pMemoryTypeIndex)
15439 {
15440  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15441  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15442  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15443  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15444 
15445  const VkDevice hDev = allocator->m_hDevice;
15446  VkImage hImage = VK_NULL_HANDLE;
15447  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15448  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15449  if(res == VK_SUCCESS)
15450  {
15451  VkMemoryRequirements memReq = {};
15452  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15453  hDev, hImage, &memReq);
15454 
15455  res = vmaFindMemoryTypeIndex(
15456  allocator,
15457  memReq.memoryTypeBits,
15458  pAllocationCreateInfo,
15459  pMemoryTypeIndex);
15460 
15461  allocator->GetVulkanFunctions().vkDestroyImage(
15462  hDev, hImage, allocator->GetAllocationCallbacks());
15463  }
15464  return res;
15465 }
15466 
15467 VkResult vmaCreatePool(
15468  VmaAllocator allocator,
15469  const VmaPoolCreateInfo* pCreateInfo,
15470  VmaPool* pPool)
15471 {
15472  VMA_ASSERT(allocator && pCreateInfo && pPool);
15473 
15474  VMA_DEBUG_LOG("vmaCreatePool");
15475 
15476  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15477 
15478  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15479 
15480 #if VMA_RECORDING_ENABLED
15481  if(allocator->GetRecorder() != VMA_NULL)
15482  {
15483  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15484  }
15485 #endif
15486 
15487  return res;
15488 }
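A sketch of creating and destroying a custom pool; memTypeIndex is assumed to come from vmaFindMemoryTypeIndex or vmaFindMemoryTypeIndexForBufferInfo, and the block sizes are arbitrary:

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;   // assumed, see note above
poolCreateInfo.blockSize = 64ull * 1024 * 1024;  // 0 would mean the preferred block size computed above
poolCreateInfo.minBlockCount = 1;
poolCreateInfo.maxBlockCount = 8;                // 0 is treated as SIZE_MAX (unlimited), as handled above

VmaPool pool = VK_NULL_HANDLE;
if(vmaCreatePool(allocator, &poolCreateInfo, &pool) == VK_SUCCESS)
{
    // Allocations are routed to the pool via VmaAllocationCreateInfo::pool.
    vmaDestroyPool(allocator, pool);
}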
15489 
15490 void vmaDestroyPool(
15491  VmaAllocator allocator,
15492  VmaPool pool)
15493 {
15494  VMA_ASSERT(allocator);
15495 
15496  if(pool == VK_NULL_HANDLE)
15497  {
15498  return;
15499  }
15500 
15501  VMA_DEBUG_LOG("vmaDestroyPool");
15502 
15503  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15504 
15505 #if VMA_RECORDING_ENABLED
15506  if(allocator->GetRecorder() != VMA_NULL)
15507  {
15508  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15509  }
15510 #endif
15511 
15512  allocator->DestroyPool(pool);
15513 }
15514 
15515 void vmaGetPoolStats(
15516  VmaAllocator allocator,
15517  VmaPool pool,
15518  VmaPoolStats* pPoolStats)
15519 {
15520  VMA_ASSERT(allocator && pool && pPoolStats);
15521 
15522  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15523 
15524  allocator->GetPoolStats(pool, pPoolStats);
15525 }
15526 
15527 void vmaMakePoolAllocationsLost(
15528  VmaAllocator allocator,
15529  VmaPool pool,
15530  size_t* pLostAllocationCount)
15531 {
15532  VMA_ASSERT(allocator && pool);
15533 
15534  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15535 
15536 #if VMA_RECORDING_ENABLED
15537  if(allocator->GetRecorder() != VMA_NULL)
15538  {
15539  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15540  }
15541 #endif
15542 
15543  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15544 }
15545 
15546 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15547 {
15548  VMA_ASSERT(allocator && pool);
15549 
15550  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15551 
15552  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15553 
15554  return allocator->CheckPoolCorruption(pool);
15555 }
15556 
15557 VkResult vmaAllocateMemory(
15558  VmaAllocator allocator,
15559  const VkMemoryRequirements* pVkMemoryRequirements,
15560  const VmaAllocationCreateInfo* pCreateInfo,
15561  VmaAllocation* pAllocation,
15562  VmaAllocationInfo* pAllocationInfo)
15563 {
15564  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15565 
15566  VMA_DEBUG_LOG("vmaAllocateMemory");
15567 
15568  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15569 
15570  VkResult result = allocator->AllocateMemory(
15571  *pVkMemoryRequirements,
15572  false, // requiresDedicatedAllocation
15573  false, // prefersDedicatedAllocation
15574  VK_NULL_HANDLE, // dedicatedBuffer
15575  VK_NULL_HANDLE, // dedicatedImage
15576  *pCreateInfo,
15577  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15578  pAllocation);
15579 
15580 #if VMA_RECORDING_ENABLED
15581  if(allocator->GetRecorder() != VMA_NULL)
15582  {
15583  allocator->GetRecorder()->RecordAllocateMemory(
15584  allocator->GetCurrentFrameIndex(),
15585  *pVkMemoryRequirements,
15586  *pCreateInfo,
15587  *pAllocation);
15588  }
15589 #endif
15590 
15591  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15592  {
15593  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15594  }
15595 
15596  return result;
15597 }
15598 
15599 VkResult vmaAllocateMemoryForBuffer(
15600  VmaAllocator allocator,
15601  VkBuffer buffer,
15602  const VmaAllocationCreateInfo* pCreateInfo,
15603  VmaAllocation* pAllocation,
15604  VmaAllocationInfo* pAllocationInfo)
15605 {
15606  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
15607 
15608  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
15609 
15610  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15611 
15612  VkMemoryRequirements vkMemReq = {};
15613  bool requiresDedicatedAllocation = false;
15614  bool prefersDedicatedAllocation = false;
15615  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
15616  requiresDedicatedAllocation,
15617  prefersDedicatedAllocation);
15618 
15619  VkResult result = allocator->AllocateMemory(
15620  vkMemReq,
15621  requiresDedicatedAllocation,
15622  prefersDedicatedAllocation,
15623  buffer, // dedicatedBuffer
15624  VK_NULL_HANDLE, // dedicatedImage
15625  *pCreateInfo,
15626  VMA_SUBALLOCATION_TYPE_BUFFER,
15627  pAllocation);
15628 
15629 #if VMA_RECORDING_ENABLED
15630  if(allocator->GetRecorder() != VMA_NULL)
15631  {
15632  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
15633  allocator->GetCurrentFrameIndex(),
15634  vkMemReq,
15635  requiresDedicatedAllocation,
15636  prefersDedicatedAllocation,
15637  *pCreateInfo,
15638  *pAllocation);
15639  }
15640 #endif
15641 
15642  if(pAllocationInfo && result == VK_SUCCESS)
15643  {
15644  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15645  }
15646 
15647  return result;
15648 }
15649 
15650 VkResult vmaAllocateMemoryForImage(
15651  VmaAllocator allocator,
15652  VkImage image,
15653  const VmaAllocationCreateInfo* pCreateInfo,
15654  VmaAllocation* pAllocation,
15655  VmaAllocationInfo* pAllocationInfo)
15656 {
15657  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
15658 
15659  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
15660 
15661  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15662 
15663  VkMemoryRequirements vkMemReq = {};
15664  bool requiresDedicatedAllocation = false;
15665  bool prefersDedicatedAllocation = false;
15666  allocator->GetImageMemoryRequirements(image, vkMemReq,
15667  requiresDedicatedAllocation, prefersDedicatedAllocation);
15668 
15669  VkResult result = allocator->AllocateMemory(
15670  vkMemReq,
15671  requiresDedicatedAllocation,
15672  prefersDedicatedAllocation,
15673  VK_NULL_HANDLE, // dedicatedBuffer
15674  image, // dedicatedImage
15675  *pCreateInfo,
15676  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
15677  pAllocation);
15678 
15679 #if VMA_RECORDING_ENABLED
15680  if(allocator->GetRecorder() != VMA_NULL)
15681  {
15682  allocator->GetRecorder()->RecordAllocateMemoryForImage(
15683  allocator->GetCurrentFrameIndex(),
15684  vkMemReq,
15685  requiresDedicatedAllocation,
15686  prefersDedicatedAllocation,
15687  *pCreateInfo,
15688  *pAllocation);
15689  }
15690 #endif
15691 
15692  if(pAllocationInfo && result == VK_SUCCESS)
15693  {
15694  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15695  }
15696 
15697  return result;
15698 }
15699 
15700 void vmaFreeMemory(
15701  VmaAllocator allocator,
15702  VmaAllocation allocation)
15703 {
15704  VMA_ASSERT(allocator);
15705 
15706  if(allocation == VK_NULL_HANDLE)
15707  {
15708  return;
15709  }
15710 
15711  VMA_DEBUG_LOG("vmaFreeMemory");
15712 
15713  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15714 
15715 #if VMA_RECORDING_ENABLED
15716  if(allocator->GetRecorder() != VMA_NULL)
15717  {
15718  allocator->GetRecorder()->RecordFreeMemory(
15719  allocator->GetCurrentFrameIndex(),
15720  allocation);
15721  }
15722 #endif
15723 
15724  allocator->FreeMemory(allocation);
15725 }
15726 
15727 VkResult vmaResizeAllocation(
15728  VmaAllocator allocator,
15729  VmaAllocation allocation,
15730  VkDeviceSize newSize)
15731 {
15732  VMA_ASSERT(allocator && allocation);
15733 
15734  VMA_DEBUG_LOG("vmaResizeAllocation");
15735 
15736  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15737 
15738 #if VMA_RECORDING_ENABLED
15739  if(allocator->GetRecorder() != VMA_NULL)
15740  {
15741  allocator->GetRecorder()->RecordResizeAllocation(
15742  allocator->GetCurrentFrameIndex(),
15743  allocation,
15744  newSize);
15745  }
15746 #endif
15747 
15748  return allocator->ResizeAllocation(allocation, newSize);
15749 }
15750 
15751 void vmaGetAllocationInfo(
15752  VmaAllocator allocator,
15753  VmaAllocation allocation,
15754  VmaAllocationInfo* pAllocationInfo)
15755 {
15756  VMA_ASSERT(allocator && allocation && pAllocationInfo);
15757 
15758  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15759 
15760 #if VMA_RECORDING_ENABLED
15761  if(allocator->GetRecorder() != VMA_NULL)
15762  {
15763  allocator->GetRecorder()->RecordGetAllocationInfo(
15764  allocator->GetCurrentFrameIndex(),
15765  allocation);
15766  }
15767 #endif
15768 
15769  allocator->GetAllocationInfo(allocation, pAllocationInfo);
15770 }
15771 
15772 VkBool32 vmaTouchAllocation(
15773  VmaAllocator allocator,
15774  VmaAllocation allocation)
15775 {
15776  VMA_ASSERT(allocator && allocation);
15777 
15778  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15779 
15780 #if VMA_RECORDING_ENABLED
15781  if(allocator->GetRecorder() != VMA_NULL)
15782  {
15783  allocator->GetRecorder()->RecordTouchAllocation(
15784  allocator->GetCurrentFrameIndex(),
15785  allocation);
15786  }
15787 #endif
15788 
15789  return allocator->TouchAllocation(allocation);
15790 }
15791 
15792 void vmaSetAllocationUserData(
15793  VmaAllocator allocator,
15794  VmaAllocation allocation,
15795  void* pUserData)
15796 {
15797  VMA_ASSERT(allocator && allocation);
15798 
15799  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15800 
15801  allocation->SetUserData(allocator, pUserData);
15802 
15803 #if VMA_RECORDING_ENABLED
15804  if(allocator->GetRecorder() != VMA_NULL)
15805  {
15806  allocator->GetRecorder()->RecordSetAllocationUserData(
15807  allocator->GetCurrentFrameIndex(),
15808  allocation,
15809  pUserData);
15810  }
15811 #endif
15812 }
15813 
15814 void vmaCreateLostAllocation(
15815  VmaAllocator allocator,
15816  VmaAllocation* pAllocation)
15817 {
15818  VMA_ASSERT(allocator && pAllocation);
15819 
15820  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
15821 
15822  allocator->CreateLostAllocation(pAllocation);
15823 
15824 #if VMA_RECORDING_ENABLED
15825  if(allocator->GetRecorder() != VMA_NULL)
15826  {
15827  allocator->GetRecorder()->RecordCreateLostAllocation(
15828  allocator->GetCurrentFrameIndex(),
15829  *pAllocation);
15830  }
15831 #endif
15832 }
15833 
15834 VkResult vmaMapMemory(
15835  VmaAllocator allocator,
15836  VmaAllocation allocation,
15837  void** ppData)
15838 {
15839  VMA_ASSERT(allocator && allocation && ppData);
15840 
15841  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15842 
15843  VkResult res = allocator->Map(allocation, ppData);
15844 
15845 #if VMA_RECORDING_ENABLED
15846  if(allocator->GetRecorder() != VMA_NULL)
15847  {
15848  allocator->GetRecorder()->RecordMapMemory(
15849  allocator->GetCurrentFrameIndex(),
15850  allocation);
15851  }
15852 #endif
15853 
15854  return res;
15855 }
15856 
15857 void vmaUnmapMemory(
15858  VmaAllocator allocator,
15859  VmaAllocation allocation)
15860 {
15861  VMA_ASSERT(allocator && allocation);
15862 
15863  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15864 
15865 #if VMA_RECORDING_ENABLED
15866  if(allocator->GetRecorder() != VMA_NULL)
15867  {
15868  allocator->GetRecorder()->RecordUnmapMemory(
15869  allocator->GetCurrentFrameIndex(),
15870  allocation);
15871  }
15872 #endif
15873 
15874  allocator->Unmap(allocation);
15875 }
15876 
15877 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15878 {
15879  VMA_ASSERT(allocator && allocation);
15880 
15881  VMA_DEBUG_LOG("vmaFlushAllocation");
15882 
15883  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15884 
15885  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
15886 
15887 #if VMA_RECORDING_ENABLED
15888  if(allocator->GetRecorder() != VMA_NULL)
15889  {
15890  allocator->GetRecorder()->RecordFlushAllocation(
15891  allocator->GetCurrentFrameIndex(),
15892  allocation, offset, size);
15893  }
15894 #endif
15895 }
15896 
15897 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15898 {
15899  VMA_ASSERT(allocator && allocation);
15900 
15901  VMA_DEBUG_LOG("vmaInvalidateAllocation");
15902 
15903  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15904 
15905  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
15906 
15907 #if VMA_RECORDING_ENABLED
15908  if(allocator->GetRecorder() != VMA_NULL)
15909  {
15910  allocator->GetRecorder()->RecordInvalidateAllocation(
15911  allocator->GetCurrentFrameIndex(),
15912  allocation, offset, size);
15913  }
15914 #endif
15915 }
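Putting the calls together for a host-visible allocation (allocator, allocation, srcData and srcSize are assumed to exist; the flush only does real work for non-coherent memory types, as FlushOrInvalidateAllocation above shows):

void* mapped = nullptr;
if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, srcData, (size_t)srcSize);              // needs <cstring>
    vmaFlushAllocation(allocator, allocation, 0, srcSize); // range rounded to nonCoherentAtomSize internally
    vmaUnmapMemory(allocator, allocation);
}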
15916 
15917 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
15918 {
15919  VMA_ASSERT(allocator);
15920 
15921  VMA_DEBUG_LOG("vmaCheckCorruption");
15922 
15923  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15924 
15925  return allocator->CheckCorruption(memoryTypeBits);
15926 }
15927 
15928 VkResult vmaDefragment(
15929  VmaAllocator allocator,
15930  VmaAllocation* pAllocations,
15931  size_t allocationCount,
15932  VkBool32* pAllocationsChanged,
15933  const VmaDefragmentationInfo *pDefragmentationInfo,
15934  VmaDefragmentationStats* pDefragmentationStats)
15935 {
15936  // Deprecated interface, reimplemented using new one.
15937 
15938  VmaDefragmentationInfo2 info2 = {};
15939  info2.allocationCount = (uint32_t)allocationCount;
15940  info2.pAllocations = pAllocations;
15941  info2.pAllocationsChanged = pAllocationsChanged;
15942  if(pDefragmentationInfo != VMA_NULL)
15943  {
15944  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
15945  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
15946  }
15947  else
15948  {
15949  info2.maxCpuAllocationsToMove = UINT32_MAX;
15950  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
15951  }
15952  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
15953 
15954  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
15955  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
15956  if(res == VK_NOT_READY)
15957  {
15958  res = vmaDefragmentationEnd(allocator, ctx);
15959  }
15960  return res;
15961 }
15962 
15963 VkResult vmaDefragmentationBegin(
15964  VmaAllocator allocator,
15965  const VmaDefragmentationInfo2* pInfo,
15966  VmaDefragmentationStats* pStats,
15967  VmaDefragmentationContext *pContext)
15968 {
15969  VMA_ASSERT(allocator && pInfo && pContext);
15970  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
15971  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
15972 
15973  VMA_DEBUG_LOG("vmaDefragmentationBegin");
15974 
15975  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15976 
15977  return allocator->DefragmentationBegin(*pInfo, pStats, pContext);
15978 }
15979 
15980 VkResult vmaDefragmentationEnd(
15981  VmaAllocator allocator,
15982  VmaDefragmentationContext context)
15983 {
15984  VMA_ASSERT(allocator);
15985 
15986  VMA_DEBUG_LOG("vmaDefragmentationEnd");
15987 
15988  if(context != VK_NULL_HANDLE)
15989  {
15990  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15991 
15992  return allocator->DefragmentationEnd(context);
15993  }
15994  else
15995  {
15996  return VK_SUCCESS;
15997  }
15998 }
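A CPU-only sketch of the non-deprecated flow that vmaDefragment wraps above; allocations, allocCount and allocsChanged are assumed to be arrays owned by the caller, and commandBuffer is intentionally left VK_NULL_HANDLE:

VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocCount;
defragInfo.pAllocations = allocations;          // VmaAllocation[allocCount], assumed
defragInfo.pAllocationsChanged = allocsChanged; // optional VkBool32[allocCount], assumed
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationStats defragStats = {};
VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &defragStats, &defragCtx);
if(res == VK_NOT_READY)
{
    // GPU work would have to complete first when a command buffer is used;
    // here we simply end the defragmentation, as the deprecated wrapper does.
    res = vmaDefragmentationEnd(allocator, defragCtx);
}
// Any buffer/image bound to an allocation flagged in allocsChanged must be
// recreated and re-bound by the caller.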
15999 
16000 VkResult vmaBindBufferMemory(
16001  VmaAllocator allocator,
16002  VmaAllocation allocation,
16003  VkBuffer buffer)
16004 {
16005  VMA_ASSERT(allocator && allocation && buffer);
16006 
16007  VMA_DEBUG_LOG("vmaBindBufferMemory");
16008 
16009  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16010 
16011  return allocator->BindBufferMemory(allocation, buffer);
16012 }
16013 
16014 VkResult vmaBindImageMemory(
16015  VmaAllocator allocator,
16016  VmaAllocation allocation,
16017  VkImage image)
16018 {
16019  VMA_ASSERT(allocator && allocation && image);
16020 
16021  VMA_DEBUG_LOG("vmaBindImageMemory");
16022 
16023  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16024 
16025  return allocator->BindImageMemory(allocation, image);
16026 }
16027 
16028 VkResult vmaCreateBuffer(
16029  VmaAllocator allocator,
16030  const VkBufferCreateInfo* pBufferCreateInfo,
16031  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16032  VkBuffer* pBuffer,
16033  VmaAllocation* pAllocation,
16034  VmaAllocationInfo* pAllocationInfo)
16035 {
16036  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16037 
16038  if(pBufferCreateInfo->size == 0)
16039  {
16040  return VK_ERROR_VALIDATION_FAILED_EXT;
16041  }
16042 
16043  VMA_DEBUG_LOG("vmaCreateBuffer");
16044 
16045  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16046 
16047  *pBuffer = VK_NULL_HANDLE;
16048  *pAllocation = VK_NULL_HANDLE;
16049 
16050  // 1. Create VkBuffer.
16051  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16052  allocator->m_hDevice,
16053  pBufferCreateInfo,
16054  allocator->GetAllocationCallbacks(),
16055  pBuffer);
16056  if(res >= 0)
16057  {
16058  // 2. vkGetBufferMemoryRequirements.
16059  VkMemoryRequirements vkMemReq = {};
16060  bool requiresDedicatedAllocation = false;
16061  bool prefersDedicatedAllocation = false;
16062  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16063  requiresDedicatedAllocation, prefersDedicatedAllocation);
16064 
16065  // Make sure alignment requirements for specific buffer usages reported
16066  // in Physical Device Properties are included in alignment reported by memory requirements.
16067  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16068  {
16069  VMA_ASSERT(vkMemReq.alignment %
16070  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16071  }
16072  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16073  {
16074  VMA_ASSERT(vkMemReq.alignment %
16075  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16076  }
16077  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16078  {
16079  VMA_ASSERT(vkMemReq.alignment %
16080  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16081  }
16082 
16083  // 3. Allocate memory using allocator.
16084  res = allocator->AllocateMemory(
16085  vkMemReq,
16086  requiresDedicatedAllocation,
16087  prefersDedicatedAllocation,
16088  *pBuffer, // dedicatedBuffer
16089  VK_NULL_HANDLE, // dedicatedImage
16090  *pAllocationCreateInfo,
16091  VMA_SUBALLOCATION_TYPE_BUFFER,
16092  pAllocation);
16093 
16094 #if VMA_RECORDING_ENABLED
16095  if(allocator->GetRecorder() != VMA_NULL)
16096  {
16097  allocator->GetRecorder()->RecordCreateBuffer(
16098  allocator->GetCurrentFrameIndex(),
16099  *pBufferCreateInfo,
16100  *pAllocationCreateInfo,
16101  *pAllocation);
16102  }
16103 #endif
16104 
16105  if(res >= 0)
16106  {
16107  // 4. Bind buffer with memory.
16108  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16109  if(res >= 0)
16110  {
16111  // All steps succeeded.
16112  #if VMA_STATS_STRING_ENABLED
16113  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16114  #endif
16115  if(pAllocationInfo != VMA_NULL)
16116  {
16117  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16118  }
16119 
16120  return VK_SUCCESS;
16121  }
16122  allocator->FreeMemory(*pAllocation);
16123  *pAllocation = VK_NULL_HANDLE;
16124  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16125  *pBuffer = VK_NULL_HANDLE;
16126  return res;
16127  }
16128  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16129  *pBuffer = VK_NULL_HANDLE;
16130  return res;
16131  }
16132  return res;
16133 }
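A typical call, assuming a valid allocator; the VkBuffer, its memory allocation, and the binding are produced in one step and released together with vmaDestroyBuffer:

VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
    &buffer, &allocation, nullptr /*pAllocationInfo, optional*/);
if(res == VK_SUCCESS)
{
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
}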
16134 
16135 void vmaDestroyBuffer(
16136  VmaAllocator allocator,
16137  VkBuffer buffer,
16138  VmaAllocation allocation)
16139 {
16140  VMA_ASSERT(allocator);
16141 
16142  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16143  {
16144  return;
16145  }
16146 
16147  VMA_DEBUG_LOG("vmaDestroyBuffer");
16148 
16149  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16150 
16151 #if VMA_RECORDING_ENABLED
16152  if(allocator->GetRecorder() != VMA_NULL)
16153  {
16154  allocator->GetRecorder()->RecordDestroyBuffer(
16155  allocator->GetCurrentFrameIndex(),
16156  allocation);
16157  }
16158 #endif
16159 
16160  if(buffer != VK_NULL_HANDLE)
16161  {
16162  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16163  }
16164 
16165  if(allocation != VK_NULL_HANDLE)
16166  {
16167  allocator->FreeMemory(allocation);
16168  }
16169 }
16170 
16171 VkResult vmaCreateImage(
16172  VmaAllocator allocator,
16173  const VkImageCreateInfo* pImageCreateInfo,
16174  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16175  VkImage* pImage,
16176  VmaAllocation* pAllocation,
16177  VmaAllocationInfo* pAllocationInfo)
16178 {
16179  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16180 
16181  if(pImageCreateInfo->extent.width == 0 ||
16182  pImageCreateInfo->extent.height == 0 ||
16183  pImageCreateInfo->extent.depth == 0 ||
16184  pImageCreateInfo->mipLevels == 0 ||
16185  pImageCreateInfo->arrayLayers == 0)
16186  {
16187  return VK_ERROR_VALIDATION_FAILED_EXT;
16188  }
16189 
16190  VMA_DEBUG_LOG("vmaCreateImage");
16191 
16192  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16193 
16194  *pImage = VK_NULL_HANDLE;
16195  *pAllocation = VK_NULL_HANDLE;
16196 
16197  // 1. Create VkImage.
16198  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16199  allocator->m_hDevice,
16200  pImageCreateInfo,
16201  allocator->GetAllocationCallbacks(),
16202  pImage);
16203  if(res >= 0)
16204  {
16205  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16206  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16207  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16208 
16209  // 2. Allocate memory using allocator.
16210  VkMemoryRequirements vkMemReq = {};
16211  bool requiresDedicatedAllocation = false;
16212  bool prefersDedicatedAllocation = false;
16213  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16214  requiresDedicatedAllocation, prefersDedicatedAllocation);
16215 
16216  res = allocator->AllocateMemory(
16217  vkMemReq,
16218  requiresDedicatedAllocation,
16219  prefersDedicatedAllocation,
16220  VK_NULL_HANDLE, // dedicatedBuffer
16221  *pImage, // dedicatedImage
16222  *pAllocationCreateInfo,
16223  suballocType,
16224  pAllocation);
16225 
16226 #if VMA_RECORDING_ENABLED
16227  if(allocator->GetRecorder() != VMA_NULL)
16228  {
16229  allocator->GetRecorder()->RecordCreateImage(
16230  allocator->GetCurrentFrameIndex(),
16231  *pImageCreateInfo,
16232  *pAllocationCreateInfo,
16233  *pAllocation);
16234  }
16235 #endif
16236 
16237  if(res >= 0)
16238  {
16239  // 3. Bind image with memory.
16240  res = allocator->BindImageMemory(*pAllocation, *pImage);
16241  if(res >= 0)
16242  {
16243  // All steps succeeded.
16244  #if VMA_STATS_STRING_ENABLED
16245  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16246  #endif
16247  if(pAllocationInfo != VMA_NULL)
16248  {
16249  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16250  }
16251 
16252  return VK_SUCCESS;
16253  }
16254  allocator->FreeMemory(*pAllocation);
16255  *pAllocation = VK_NULL_HANDLE;
16256  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16257  *pImage = VK_NULL_HANDLE;
16258  return res;
16259  }
16260  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16261  *pImage = VK_NULL_HANDLE;
16262  return res;
16263  }
16264  return res;
16265 }
16266 
16267 void vmaDestroyImage(
16268  VmaAllocator allocator,
16269  VkImage image,
16270  VmaAllocation allocation)
16271 {
16272  VMA_ASSERT(allocator);
16273 
16274  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16275  {
16276  return;
16277  }
16278 
16279  VMA_DEBUG_LOG("vmaDestroyImage");
16280 
16281  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16282 
16283 #if VMA_RECORDING_ENABLED
16284  if(allocator->GetRecorder() != VMA_NULL)
16285  {
16286  allocator->GetRecorder()->RecordDestroyImage(
16287  allocator->GetCurrentFrameIndex(),
16288  allocation);
16289  }
16290 #endif
16291 
16292  if(image != VK_NULL_HANDLE)
16293  {
16294  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16295  }
16296  if(allocation != VK_NULL_HANDLE)
16297  {
16298  allocator->FreeMemory(allocation);
16299  }
16300 }
16301 
16302 #endif // #ifdef VMA_IMPLEMENTATION
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1618 /*
1619 Define this macro to 0/1 to disable/enable support for recording functionality,
1620 available through VmaAllocatorCreateInfo::pRecordSettings.
1621 */
1622 #ifndef VMA_RECORDING_ENABLED
1623  #ifdef _WIN32
1624  #define VMA_RECORDING_ENABLED 1
1625  #else
1626  #define VMA_RECORDING_ENABLED 0
1627  #endif
1628 #endif
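// Usage sketch for the recording feature mentioned above: pass a VmaRecordSettings
// to VmaAllocatorCreateInfo::pRecordSettings when creating the allocator. Only
// meaningful when VMA_RECORDING_ENABLED is 1. Handles and the output file path
// are illustrative.
/*
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // flush after every call so the file survives a crash
recordSettings.pFilePath = "vma_recording.csv";

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice; // VkPhysicalDevice created earlier
allocatorInfo.device = device;                 // VkDevice created earlier
allocatorInfo.pRecordSettings = &recordSettings;

VmaAllocator allocator = VK_NULL_HANDLE;
vmaCreateAllocator(&allocatorInfo, &allocator);
*/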
1629 
1630 #ifndef NOMINMAX
1631  #define NOMINMAX // For windows.h
1632 #endif
1633 
1634 #ifndef VULKAN_H_
1635  #include <vulkan/vulkan.h>
1636 #endif
1637 
1638 #if VMA_RECORDING_ENABLED
1639  #include <windows.h>
1640 #endif
1641 
1642 #if !defined(VMA_DEDICATED_ALLOCATION)
1643  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1644  #define VMA_DEDICATED_ALLOCATION 1
1645  #else
1646  #define VMA_DEDICATED_ALLOCATION 0
1647  #endif
1648 #endif
1649 
1659 VK_DEFINE_HANDLE(VmaAllocator)
1660 
1661 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1663  VmaAllocator allocator,
1664  uint32_t memoryType,
1665  VkDeviceMemory memory,
1666  VkDeviceSize size);
1668 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1669  VmaAllocator allocator,
1670  uint32_t memoryType,
1671  VkDeviceMemory memory,
1672  VkDeviceSize size);
1673 
1687 
1717 
1720 typedef VkFlags VmaAllocatorCreateFlags;
1721 
1726 typedef struct VmaVulkanFunctions {
1727  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1728  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1729  PFN_vkAllocateMemory vkAllocateMemory;
1730  PFN_vkFreeMemory vkFreeMemory;
1731  PFN_vkMapMemory vkMapMemory;
1732  PFN_vkUnmapMemory vkUnmapMemory;
1733  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1734  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1735  PFN_vkBindBufferMemory vkBindBufferMemory;
1736  PFN_vkBindImageMemory vkBindImageMemory;
1737  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1738  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1739  PFN_vkCreateBuffer vkCreateBuffer;
1740  PFN_vkDestroyBuffer vkDestroyBuffer;
1741  PFN_vkCreateImage vkCreateImage;
1742  PFN_vkDestroyImage vkDestroyImage;
1743  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1744 #if VMA_DEDICATED_ALLOCATION
1745  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1746  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1747 #endif
1749 
1751 typedef enum VmaRecordFlagBits {
1758 
1761 typedef VkFlags VmaRecordFlags;
1762 
1764 typedef struct VmaRecordSettings
1765 {
1775  const char* pFilePath;
1777 
1780 {
1784 
1785  VkPhysicalDevice physicalDevice;
1787 
1788  VkDevice device;
1790 
1793 
1794  const VkAllocationCallbacks* pAllocationCallbacks;
1796 
1836  const VkDeviceSize* pHeapSizeLimit;
1857 
1859 VkResult vmaCreateAllocator(
1860  const VmaAllocatorCreateInfo* pCreateInfo,
1861  VmaAllocator* pAllocator);
1862 
1864 void vmaDestroyAllocator(
1865  VmaAllocator allocator);
1866 
1872  VmaAllocator allocator,
1873  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1874 
1880  VmaAllocator allocator,
1881  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1882 
1890  VmaAllocator allocator,
1891  uint32_t memoryTypeIndex,
1892  VkMemoryPropertyFlags* pFlags);
1893 
1903  VmaAllocator allocator,
1904  uint32_t frameIndex);
1905 
1908 typedef struct VmaStatInfo
1909 {
1911  uint32_t blockCount;
1917  VkDeviceSize usedBytes;
1919  VkDeviceSize unusedBytes;
1922 } VmaStatInfo;
1923 
1925 typedef struct VmaStats
1926 {
1927  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1928  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1930 } VmaStats;
1931 
1933 void vmaCalculateStats(
1934  VmaAllocator allocator,
1935  VmaStats* pStats);
1936 
1937 #define VMA_STATS_STRING_ENABLED 1
1938 
1939 #if VMA_STATS_STRING_ENABLED
1940 
1942 
1944 void vmaBuildStatsString(
1945  VmaAllocator allocator,
1946  char** ppStatsString,
1947  VkBool32 detailedMap);
1948 
1949 void vmaFreeStatsString(
1950  VmaAllocator allocator,
1951  char* pStatsString);
1952 
1953 #endif // #if VMA_STATS_STRING_ENABLED
1954 
1963 VK_DEFINE_HANDLE(VmaPool)
1964 
1965 typedef enum VmaMemoryUsage
1966 {
2015 } VmaMemoryUsage;
2016 
2031 
2086 
2102 
2112 
2119 
2123 
2125 {
2138  VkMemoryPropertyFlags requiredFlags;
2143  VkMemoryPropertyFlags preferredFlags;
2151  uint32_t memoryTypeBits;
2164  void* pUserData;
2166 
2183 VkResult vmaFindMemoryTypeIndex(
2184  VmaAllocator allocator,
2185  uint32_t memoryTypeBits,
2186  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2187  uint32_t* pMemoryTypeIndex);
2188 
2202  VmaAllocator allocator,
2203  const VkBufferCreateInfo* pBufferCreateInfo,
2204  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2205  uint32_t* pMemoryTypeIndex);
2206 
2220  VmaAllocator allocator,
2221  const VkImageCreateInfo* pImageCreateInfo,
2222  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2223  uint32_t* pMemoryTypeIndex);
2224 
2245 
2262 
2273 
2279 
2282 typedef VkFlags VmaPoolCreateFlags;
2283 
2286 typedef struct VmaPoolCreateInfo {
2301  VkDeviceSize blockSize;
2330 
2333 typedef struct VmaPoolStats {
2336  VkDeviceSize size;
2339  VkDeviceSize unusedSize;
2352  VkDeviceSize unusedRangeSizeMax;
2355  size_t blockCount;
2356 } VmaPoolStats;
2357 
2364 VkResult vmaCreatePool(
2365  VmaAllocator allocator,
2366  const VmaPoolCreateInfo* pCreateInfo,
2367  VmaPool* pPool);
2368 
2371 void vmaDestroyPool(
2372  VmaAllocator allocator,
2373  VmaPool pool);
2374 
2381 void vmaGetPoolStats(
2382  VmaAllocator allocator,
2383  VmaPool pool,
2384  VmaPoolStats* pPoolStats);
2385 
2393  VmaAllocator allocator,
2394  VmaPool pool,
2395  size_t* pLostAllocationCount);
2396 
2411 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2412 
2437 VK_DEFINE_HANDLE(VmaAllocation)
2438 
2439 
2441 typedef struct VmaAllocationInfo {
2446  uint32_t memoryType;
2455  VkDeviceMemory deviceMemory;
2460  VkDeviceSize offset;
2465  VkDeviceSize size;
2479  void* pUserData;
2481 
2492 VkResult vmaAllocateMemory(
2493  VmaAllocator allocator,
2494  const VkMemoryRequirements* pVkMemoryRequirements,
2495  const VmaAllocationCreateInfo* pCreateInfo,
2496  VmaAllocation* pAllocation,
2497  VmaAllocationInfo* pAllocationInfo);
2498 
2506  VmaAllocator allocator,
2507  VkBuffer buffer,
2508  const VmaAllocationCreateInfo* pCreateInfo,
2509  VmaAllocation* pAllocation,
2510  VmaAllocationInfo* pAllocationInfo);
2511 
2513 VkResult vmaAllocateMemoryForImage(
2514  VmaAllocator allocator,
2515  VkImage image,
2516  const VmaAllocationCreateInfo* pCreateInfo,
2517  VmaAllocation* pAllocation,
2518  VmaAllocationInfo* pAllocationInfo);
2519 
2521 void vmaFreeMemory(
2522  VmaAllocator allocator,
2523  VmaAllocation allocation);
2524 
2545 VkResult vmaResizeAllocation(
2546  VmaAllocator allocator,
2547  VmaAllocation allocation,
2548  VkDeviceSize newSize);
2549 
2567  VmaAllocator allocator,
2568  VmaAllocation allocation,
2569  VmaAllocationInfo* pAllocationInfo);
2570 
2585 VkBool32 vmaTouchAllocation(
2586  VmaAllocator allocator,
2587  VmaAllocation allocation);
2588 
2603  VmaAllocator allocator,
2604  VmaAllocation allocation,
2605  void* pUserData);
2606 
2618  VmaAllocator allocator,
2619  VmaAllocation* pAllocation);
2620 
2655 VkResult vmaMapMemory(
2656  VmaAllocator allocator,
2657  VmaAllocation allocation,
2658  void** ppData);
2659 
2664 void vmaUnmapMemory(
2665  VmaAllocator allocator,
2666  VmaAllocation allocation);
2667 
2680 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2681 
2694 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2695 
2712 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2713 
2720 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2721 
2722 typedef enum VmaDefragmentationFlagBits {
2726 typedef VkFlags VmaDefragmentationFlags;
2727 
2732 typedef struct VmaDefragmentationInfo2 {
2756  uint32_t poolCount;
2777  VkDeviceSize maxCpuBytesToMove;
2787  VkDeviceSize maxGpuBytesToMove;
2801  VkCommandBuffer commandBuffer;
2803 
2808 typedef struct VmaDefragmentationInfo {
2813  VkDeviceSize maxBytesToMove;
2820 
2822 typedef struct VmaDefragmentationStats {
2824  VkDeviceSize bytesMoved;
2826  VkDeviceSize bytesFreed;
2832 
2859 VkResult vmaDefragmentationBegin(
2860  VmaAllocator allocator,
2861  const VmaDefragmentationInfo2* pInfo,
2862  VmaDefragmentationStats* pStats,
2863  VmaDefragmentationContext *pContext);
2864 
2870 VkResult vmaDefragmentationEnd(
2871  VmaAllocator allocator,
2872  VmaDefragmentationContext context);
2873 
2914 VkResult vmaDefragment(
2915  VmaAllocator allocator,
2916  VmaAllocation* pAllocations,
2917  size_t allocationCount,
2918  VkBool32* pAllocationsChanged,
2919  const VmaDefragmentationInfo *pDefragmentationInfo,
2920  VmaDefragmentationStats* pDefragmentationStats);
2921 
2934 VkResult vmaBindBufferMemory(
2935  VmaAllocator allocator,
2936  VmaAllocation allocation,
2937  VkBuffer buffer);
2938 
2951 VkResult vmaBindImageMemory(
2952  VmaAllocator allocator,
2953  VmaAllocation allocation,
2954  VkImage image);
2955 
2982 VkResult vmaCreateBuffer(
2983  VmaAllocator allocator,
2984  const VkBufferCreateInfo* pBufferCreateInfo,
2985  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2986  VkBuffer* pBuffer,
2987  VmaAllocation* pAllocation,
2988  VmaAllocationInfo* pAllocationInfo);
2989 
3001 void vmaDestroyBuffer(
3002  VmaAllocator allocator,
3003  VkBuffer buffer,
3004  VmaAllocation allocation);
3005 
3007 VkResult vmaCreateImage(
3008  VmaAllocator allocator,
3009  const VkImageCreateInfo* pImageCreateInfo,
3010  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3011  VkImage* pImage,
3012  VmaAllocation* pAllocation,
3013  VmaAllocationInfo* pAllocationInfo);
3014 
3026 void vmaDestroyImage(
3027  VmaAllocator allocator,
3028  VkImage image,
3029  VmaAllocation allocation);
3030 
3031 #ifdef __cplusplus
3032 }
3033 #endif
3034 
3035 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3036 
3037 // For Visual Studio IntelliSense.
3038 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3039 #define VMA_IMPLEMENTATION
3040 #endif
3041 
3042 #ifdef VMA_IMPLEMENTATION
3043 #undef VMA_IMPLEMENTATION
3044 
3045 #include <cstdint>
3046 #include <cstdlib>
3047 #include <cstring>
3048 
3049 /*******************************************************************************
3050 CONFIGURATION SECTION
3051 
3052 Define some of these macros before each #include of this header or change them
3053 here if you need other than default behavior depending on your environment.
3054 */
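// For example, a project would typically do this in exactly one translation unit
// (file name and macro values below are illustrative):
/*
// In one .cpp file:
#define VMA_IMPLEMENTATION
#define VMA_STATIC_VULKAN_FUNCTIONS 0  // e.g. provide Vulkan function pointers manually
#define VMA_DEBUG_MARGIN 16            // e.g. put a 16-byte margin around every allocation
#include "vk_mem_alloc.h"

// Every other file that uses the library includes the header without
// VMA_IMPLEMENTATION and gets only the interface.
*/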
3055 
3056 /*
3057 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3058 internally, like:
3059 
3060  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3061 
3062 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3063 VmaAllocatorCreateInfo::pVulkanFunctions.
3064 */
3065 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3066 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3067 #endif
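// Sketch of the alternative mentioned above: with VMA_STATIC_VULKAN_FUNCTIONS
// defined to 0, fill VmaVulkanFunctions yourself and pass it through
// VmaAllocatorCreateInfo::pVulkanFunctions. Only a few members are shown here;
// all of them should be set. The instance/device handles are illustrative.
/*
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties =
    (PFN_vkGetPhysicalDeviceProperties)vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties");
vulkanFunctions.vkAllocateMemory =
    (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
vulkanFunctions.vkFreeMemory =
    (PFN_vkFreeMemory)vkGetDeviceProcAddr(device, "vkFreeMemory");
// ... and so on for the remaining members (vkMapMemory, vkBindImageMemory, ...).

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice; // created earlier
allocatorInfo.device = device;                 // created earlier
allocatorInfo.pVulkanFunctions = &vulkanFunctions;

VmaAllocator allocator = VK_NULL_HANDLE;
vmaCreateAllocator(&allocatorInfo, &allocator);
*/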
3068 
3069 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3070 //#define VMA_USE_STL_CONTAINERS 1
3071 
3072 /* Set this macro to 1 to make the library including and using STL containers:
3073 std::pair, std::vector, std::list, std::unordered_map.
3074 
3075 Set it to 0 or undefined to make the library using its own implementation of
3076 the containers.
3077 */
3078 #if VMA_USE_STL_CONTAINERS
3079  #define VMA_USE_STL_VECTOR 1
3080  #define VMA_USE_STL_UNORDERED_MAP 1
3081  #define VMA_USE_STL_LIST 1
3082 #endif
3083 
3084 #ifndef VMA_USE_STL_SHARED_MUTEX
3085  // Minimum Visual Studio 2015 Update 2
3086  #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918
3087  #define VMA_USE_STL_SHARED_MUTEX 1
3088  #endif
3089 #endif
3090 
3091 #if VMA_USE_STL_VECTOR
3092  #include <vector>
3093 #endif
3094 
3095 #if VMA_USE_STL_UNORDERED_MAP
3096  #include <unordered_map>
3097 #endif
3098 
3099 #if VMA_USE_STL_LIST
3100  #include <list>
3101 #endif
3102 
3103 /*
3104 The following headers are used in this CONFIGURATION section only, so feel free to
3105 remove them if not needed.
3106 */
3107 #include <cassert> // for assert
3108 #include <algorithm> // for min, max
3109 #include <mutex>
3110 #include <atomic> // for std::atomic
3111 
3112 #ifndef VMA_NULL
3113  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3114  #define VMA_NULL nullptr
3115 #endif
3116 
3117 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3118 #include <cstdlib>
3119 void *aligned_alloc(size_t alignment, size_t size)
3120 {
3121  // alignment must be >= sizeof(void*)
3122  if(alignment < sizeof(void*))
3123  {
3124  alignment = sizeof(void*);
3125  }
3126 
3127  return memalign(alignment, size);
3128 }
3129 #elif defined(__APPLE__) || defined(__ANDROID__)
3130 #include <cstdlib>
3131 void *aligned_alloc(size_t alignment, size_t size)
3132 {
3133  // alignment must be >= sizeof(void*)
3134  if(alignment < sizeof(void*))
3135  {
3136  alignment = sizeof(void*);
3137  }
3138 
3139  void *pointer;
3140  if(posix_memalign(&pointer, alignment, size) == 0)
3141  return pointer;
3142  return VMA_NULL;
3143 }
3144 #endif
3145 
3146 // If your compiler is not compatible with C++11 and the definition of the
3147 // aligned_alloc() function is missing, uncommenting the following line may help:
3148 
3149 //#include <malloc.h>
3150 
3151 // Normal assert to check for programmer's errors, especially in Debug configuration.
3152 #ifndef VMA_ASSERT
3153  #ifdef _DEBUG
3154  #define VMA_ASSERT(expr) assert(expr)
3155  #else
3156  #define VMA_ASSERT(expr)
3157  #endif
3158 #endif
3159 
3160 // Assert that will be called very often, like inside data structures e.g. operator[].
3161 // Making it non-empty can make program slow.
3162 #ifndef VMA_HEAVY_ASSERT
3163  #ifdef _DEBUG
3164  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3165  #else
3166  #define VMA_HEAVY_ASSERT(expr)
3167  #endif
3168 #endif
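// Both assert macros above are override points. Defining them before including
// this header routes the library's internal checks to a custom handler - a
// minimal sketch (MyAssertHandler is a hypothetical function):
/*
#define VMA_ASSERT(expr) do { if(!(expr)) MyAssertHandler(#expr, __FILE__, __LINE__); } while(false)
// VMA_HEAVY_ASSERT guards very frequent checks (e.g. inside operator[]);
// enable it only when the extra validation is worth the slowdown:
#define VMA_HEAVY_ASSERT(expr) VMA_ASSERT(expr)
*/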
3169 
3170 #ifndef VMA_ALIGN_OF
3171  #define VMA_ALIGN_OF(type) (__alignof(type))
3172 #endif
3173 
3174 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3175  #if defined(_WIN32)
3176  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3177  #else
3178  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3179  #endif
3180 #endif
3181 
3182 #ifndef VMA_SYSTEM_FREE
3183  #if defined(_WIN32)
3184  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3185  #else
3186  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3187  #endif
3188 #endif
3189 
3190 #ifndef VMA_MIN
3191  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3192 #endif
3193 
3194 #ifndef VMA_MAX
3195  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3196 #endif
3197 
3198 #ifndef VMA_SWAP
3199  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3200 #endif
3201 
3202 #ifndef VMA_SORT
3203  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3204 #endif
3205 
3206 #ifndef VMA_DEBUG_LOG
3207  #define VMA_DEBUG_LOG(format, ...)
3208  /*
3209  #define VMA_DEBUG_LOG(format, ...) do { \
3210  printf(format, __VA_ARGS__); \
3211  printf("\n"); \
3212  } while(false)
3213  */
3214 #endif
3215 
3216 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3217 #if VMA_STATS_STRING_ENABLED
3218  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3219  {
3220  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3221  }
3222  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3223  {
3224  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3225  }
3226  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3227  {
3228  snprintf(outStr, strLen, "%p", ptr);
3229  }
3230 #endif
3231 
3232 #ifndef VMA_MUTEX
3233  class VmaMutex
3234  {
3235  public:
3236  void Lock() { m_Mutex.lock(); }
3237  void Unlock() { m_Mutex.unlock(); }
3238  private:
3239  std::mutex m_Mutex;
3240  };
3241  #define VMA_MUTEX VmaMutex
3242 #endif
3243 
3244 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3245 #ifndef VMA_RW_MUTEX
3246  #if VMA_USE_STL_SHARED_MUTEX
3247  // Use std::shared_mutex from C++17.
3248  #include <shared_mutex>
3249  class VmaRWMutex
3250  {
3251  public:
3252  void LockRead() { m_Mutex.lock_shared(); }
3253  void UnlockRead() { m_Mutex.unlock_shared(); }
3254  void LockWrite() { m_Mutex.lock(); }
3255  void UnlockWrite() { m_Mutex.unlock(); }
3256  private:
3257  std::shared_mutex m_Mutex;
3258  };
3259  #define VMA_RW_MUTEX VmaRWMutex
3260  #elif defined(_WIN32)
3261  // Use SRWLOCK from WinAPI.
3262  class VmaRWMutex
3263  {
3264  public:
3265  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3266  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3267  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3268  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3269  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3270  private:
3271  SRWLOCK m_Lock;
3272  };
3273  #define VMA_RW_MUTEX VmaRWMutex
3274  #else
3275  // Less efficient fallback: Use normal mutex.
3276  class VmaRWMutex
3277  {
3278  public:
3279  void LockRead() { m_Mutex.Lock(); }
3280  void UnlockRead() { m_Mutex.Unlock(); }
3281  void LockWrite() { m_Mutex.Lock(); }
3282  void UnlockWrite() { m_Mutex.Unlock(); }
3283  private:
3284  VMA_MUTEX m_Mutex;
3285  };
3286  #define VMA_RW_MUTEX VmaRWMutex
3287  #endif // #if VMA_USE_STL_SHARED_MUTEX
3288 #endif // #ifndef VMA_RW_MUTEX
3289 
3290 /*
3291 If providing your own implementation, you need to implement a subset of std::atomic:
3292 
3293 - Constructor(uint32_t desired)
3294 - uint32_t load() const
3295 - void store(uint32_t desired)
3296 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3297 */
3298 #ifndef VMA_ATOMIC_UINT32
3299  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3300 #endif
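// A replacement for VMA_ATOMIC_UINT32 would be defined before including this
// header and must provide exactly the subset listed above. A minimal sketch
// built on a plain mutex (MyAtomicUint32 is hypothetical; the std::atomic
// default above is normally preferable):
/*
class MyAtomicUint32
{
public:
    MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
    uint32_t load() const { std::lock_guard<std::mutex> lock(m_Mutex); return m_Value; }
    void store(uint32_t desired) { std::lock_guard<std::mutex> lock(m_Mutex); m_Value = desired; }
    bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    {
        std::lock_guard<std::mutex> lock(m_Mutex);
        if(m_Value == expected) { m_Value = desired; return true; }
        expected = m_Value; // on failure, report the observed value, like std::atomic
        return false;
    }
private:
    mutable std::mutex m_Mutex;
    uint32_t m_Value;
};
#define VMA_ATOMIC_UINT32 MyAtomicUint32
*/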
3301 
3302 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3303 
3307  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3308 #endif
3309 
3310 #ifndef VMA_DEBUG_ALIGNMENT
3311 
3315  #define VMA_DEBUG_ALIGNMENT (1)
3316 #endif
3317 
3318 #ifndef VMA_DEBUG_MARGIN
3319 
3323  #define VMA_DEBUG_MARGIN (0)
3324 #endif
3325 
3326 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3327 
3331  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3332 #endif
3333 
3334 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3335 
3340  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3341 #endif
3342 
3343 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3344 
3348  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3349 #endif
3350 
3351 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3352 
3356  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3357 #endif
3358 
3359 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3360  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3362 #endif
3363 
3364 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3365  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3367 #endif
3368 
3369 #ifndef VMA_CLASS_NO_COPY
3370  #define VMA_CLASS_NO_COPY(className) \
3371  private: \
3372  className(const className&) = delete; \
3373  className& operator=(const className&) = delete;
3374 #endif
3375 
3376 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3377 
3378 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3379 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3380 
3381 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3382 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3383 
3384 /*******************************************************************************
3385 END OF CONFIGURATION
3386 */
3387 
3388 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3389 
3390 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3391  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3392 
3393 // Returns number of bits set to 1 in (v).
3394 static inline uint32_t VmaCountBitsSet(uint32_t v)
3395 {
3396  uint32_t c = v - ((v >> 1) & 0x55555555);
3397  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3398  c = ((c >> 4) + c) & 0x0F0F0F0F;
3399  c = ((c >> 8) + c) & 0x00FF00FF;
3400  c = ((c >> 16) + c) & 0x0000FFFF;
3401  return c;
3402 }
3403 
3404 // Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3405 // Use types like uint32_t, uint64_t as T.
3406 template <typename T>
3407 static inline T VmaAlignUp(T val, T align)
3408 {
3409  return (val + align - 1) / align * align;
3410 }
3411 // Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3412 // Use types like uint32_t, uint64_t as T.
3413 template <typename T>
3414 static inline T VmaAlignDown(T val, T align)
3415 {
3416  return val / align * align;
3417 }
3418 
3419 // Division with mathematical rounding to nearest number.
3420 template <typename T>
3421 static inline T VmaRoundDiv(T x, T y)
3422 {
3423  return (x + (y / (T)2)) / y;
3424 }
3425 
3426 /*
3427 Returns true if given number is a power of two.
3428 T must be an unsigned integer, or a signed integer that is always nonnegative.
3429 For 0 returns true.
3430 */
3431 template <typename T>
3432 inline bool VmaIsPow2(T x)
3433 {
3434  return (x & (x-1)) == 0;
3435 }
3436 
3437 // Returns smallest power of 2 greater or equal to v.
3438 static inline uint32_t VmaNextPow2(uint32_t v)
3439 {
3440  v--;
3441  v |= v >> 1;
3442  v |= v >> 2;
3443  v |= v >> 4;
3444  v |= v >> 8;
3445  v |= v >> 16;
3446  v++;
3447  return v;
3448 }
3449 static inline uint64_t VmaNextPow2(uint64_t v)
3450 {
3451  v--;
3452  v |= v >> 1;
3453  v |= v >> 2;
3454  v |= v >> 4;
3455  v |= v >> 8;
3456  v |= v >> 16;
3457  v |= v >> 32;
3458  v++;
3459  return v;
3460 }
3461 
3462 // Returns largest power of 2 less or equal to v.
3463 static inline uint32_t VmaPrevPow2(uint32_t v)
3464 {
3465  v |= v >> 1;
3466  v |= v >> 2;
3467  v |= v >> 4;
3468  v |= v >> 8;
3469  v |= v >> 16;
3470  v = v ^ (v >> 1);
3471  return v;
3472 }
3473 static inline uint64_t VmaPrevPow2(uint64_t v)
3474 {
3475  v |= v >> 1;
3476  v |= v >> 2;
3477  v |= v >> 4;
3478  v |= v >> 8;
3479  v |= v >> 16;
3480  v |= v >> 32;
3481  v = v ^ (v >> 1);
3482  return v;
3483 }
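// For example: VmaNextPow2(100) returns 128 and VmaNextPow2(64) returns 64,
// while VmaPrevPow2(100) returns 64.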
3484 
3485 static inline bool VmaStrIsEmpty(const char* pStr)
3486 {
3487  return pStr == VMA_NULL || *pStr == '\0';
3488 }
3489 
3490 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3491 {
3492  switch(algorithm)
3493  {
 3494  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
 3495  return "Linear";
 3496  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
 3497  return "Buddy";
3498  case 0:
3499  return "Default";
3500  default:
3501  VMA_ASSERT(0);
3502  return "";
3503  }
3504 }
3505 
3506 #ifndef VMA_SORT
3507 
3508 template<typename Iterator, typename Compare>
3509 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3510 {
3511  Iterator centerValue = end; --centerValue;
3512  Iterator insertIndex = beg;
3513  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3514  {
3515  if(cmp(*memTypeIndex, *centerValue))
3516  {
3517  if(insertIndex != memTypeIndex)
3518  {
3519  VMA_SWAP(*memTypeIndex, *insertIndex);
3520  }
3521  ++insertIndex;
3522  }
3523  }
3524  if(insertIndex != centerValue)
3525  {
3526  VMA_SWAP(*insertIndex, *centerValue);
3527  }
3528  return insertIndex;
3529 }
3530 
3531 template<typename Iterator, typename Compare>
3532 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3533 {
3534  if(beg < end)
3535  {
3536  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3537  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3538  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3539  }
3540 }
3541 
3542 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3543 
3544 #endif // #ifndef VMA_SORT
3545 
3546 /*
3547 Returns true if two memory blocks occupy overlapping pages.
3548 ResourceA must be at a lower memory offset than ResourceB.
3549 
3550 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3551 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3552 */
3553 static inline bool VmaBlocksOnSamePage(
3554  VkDeviceSize resourceAOffset,
3555  VkDeviceSize resourceASize,
3556  VkDeviceSize resourceBOffset,
3557  VkDeviceSize pageSize)
3558 {
3559  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3560  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3561  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3562  VkDeviceSize resourceBStart = resourceBOffset;
3563  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3564  return resourceAEndPage == resourceBStartPage;
3565 }
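// Worked example: with pageSize (bufferImageGranularity) = 1024, a resource at
// offset 0 and size 512 ends at byte 511, which lies on page 0. A second resource
// starting at offset 1024 begins on page 1, so the function returns false; if it
// instead started at offset 600 it would share page 0 and the function would
// return true, meaning the two must respect bufferImageGranularity.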
3566 
3567 enum VmaSuballocationType
3568 {
3569  VMA_SUBALLOCATION_TYPE_FREE = 0,
3570  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3571  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3572  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3573  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3574  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3575  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3576 };
3577 
3578 /*
3579 Returns true if given suballocation types could conflict and must respect
3580 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3581 or linear image and the other is an optimal image. If the type is unknown, behave
3582 conservatively.
3583 */
3584 static inline bool VmaIsBufferImageGranularityConflict(
3585  VmaSuballocationType suballocType1,
3586  VmaSuballocationType suballocType2)
3587 {
3588  if(suballocType1 > suballocType2)
3589  {
3590  VMA_SWAP(suballocType1, suballocType2);
3591  }
3592 
3593  switch(suballocType1)
3594  {
3595  case VMA_SUBALLOCATION_TYPE_FREE:
3596  return false;
3597  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3598  return true;
3599  case VMA_SUBALLOCATION_TYPE_BUFFER:
3600  return
3601  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3602  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3603  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3604  return
3605  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3606  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3607  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3608  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3609  return
3610  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3611  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3612  return false;
3613  default:
3614  VMA_ASSERT(0);
3615  return true;
3616  }
3617 }
3618 
3619 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3620 {
3621  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3622  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3623  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3624  {
3625  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3626  }
3627 }
3628 
3629 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3630 {
3631  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3632  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3633  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3634  {
3635  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3636  {
3637  return false;
3638  }
3639  }
3640  return true;
3641 }
3642 
3643 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3644 struct VmaMutexLock
3645 {
3646  VMA_CLASS_NO_COPY(VmaMutexLock)
3647 public:
3648  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3649  m_pMutex(useMutex ? &mutex : VMA_NULL)
3650  { if(m_pMutex) { m_pMutex->Lock(); } }
3651  ~VmaMutexLock()
3652  { if(m_pMutex) { m_pMutex->Unlock(); } }
3653 private:
3654  VMA_MUTEX* m_pMutex;
3655 };
3656 
3657 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3658 struct VmaMutexLockRead
3659 {
3660  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3661 public:
3662  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3663  m_pMutex(useMutex ? &mutex : VMA_NULL)
3664  { if(m_pMutex) { m_pMutex->LockRead(); } }
3665  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3666 private:
3667  VMA_RW_MUTEX* m_pMutex;
3668 };
3669 
3670 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3671 struct VmaMutexLockWrite
3672 {
3673  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3674 public:
3675  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3676  m_pMutex(useMutex ? &mutex : VMA_NULL)
3677  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3678  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3679 private:
3680  VMA_RW_MUTEX* m_pMutex;
3681 };
3682 
3683 #if VMA_DEBUG_GLOBAL_MUTEX
3684  static VMA_MUTEX gDebugGlobalMutex;
3685  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3686 #else
3687  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3688 #endif
3689 
3690 // Minimum size of a free suballocation to register it in the free suballocation collection.
3691 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3692 
3693 /*
3694 Performs binary search and returns iterator to first element that is greater or
3695 equal to (key), according to comparison (cmp).
3696 
3697 Cmp should return true if first argument is less than second argument.
3698 
3699 Returned value is the found element, if present in the collection, or the place
3700 where a new element with value (key) should be inserted.
3701 */
3702 template <typename CmpLess, typename IterT, typename KeyT>
3703 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3704 {
3705  size_t down = 0, up = (end - beg);
3706  while(down < up)
3707  {
3708  const size_t mid = (down + up) / 2;
3709  if(cmp(*(beg+mid), key))
3710  {
3711  down = mid + 1;
3712  }
3713  else
3714  {
3715  up = mid;
3716  }
3717  }
3718  return beg + down;
3719 }
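// Usage sketch for the search above (names are illustrative): find where a key
// belongs in a sorted array.
/*
struct UintLess { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };

const uint32_t sorted[] = { 1, 3, 5, 8, 13 };
const uint32_t* it = VmaBinaryFindFirstNotLess(sorted, sorted + 5, 7u, UintLess());
// it now points at 8: the first element not less than 7, which is also the
// position where 7 would be inserted to keep the array sorted.
*/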
3720 
3721 /*
3722 Returns true if all pointers in the array are non-null and unique.
3723 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3724 T must be pointer type, e.g. VmaAllocation, VmaPool.
3725 */
3726 template<typename T>
3727 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3728 {
3729  for(uint32_t i = 0; i < count; ++i)
3730  {
3731  const T iPtr = arr[i];
3732  if(iPtr == VMA_NULL)
3733  {
3734  return false;
3735  }
3736  for(uint32_t j = i + 1; j < count; ++j)
3737  {
3738  if(iPtr == arr[j])
3739  {
3740  return false;
3741  }
3742  }
3743  }
3744  return true;
3745 }
3746 
3748 // Memory allocation
3749 
3750 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3751 {
3752  if((pAllocationCallbacks != VMA_NULL) &&
3753  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3754  {
3755  return (*pAllocationCallbacks->pfnAllocation)(
3756  pAllocationCallbacks->pUserData,
3757  size,
3758  alignment,
3759  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3760  }
3761  else
3762  {
3763  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3764  }
3765 }
3766 
3767 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3768 {
3769  if((pAllocationCallbacks != VMA_NULL) &&
3770  (pAllocationCallbacks->pfnFree != VMA_NULL))
3771  {
3772  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3773  }
3774  else
3775  {
3776  VMA_SYSTEM_FREE(ptr);
3777  }
3778 }
3779 
3780 template<typename T>
3781 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3782 {
3783  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3784 }
3785 
3786 template<typename T>
3787 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3788 {
3789  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3790 }
3791 
3792 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3793 
3794 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3795 
3796 template<typename T>
3797 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3798 {
3799  ptr->~T();
3800  VmaFree(pAllocationCallbacks, ptr);
3801 }
3802 
3803 template<typename T>
3804 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3805 {
3806  if(ptr != VMA_NULL)
3807  {
3808  for(size_t i = count; i--; )
3809  {
3810  ptr[i].~T();
3811  }
3812  VmaFree(pAllocationCallbacks, ptr);
3813  }
3814 }
3815 
3816 // STL-compatible allocator.
3817 template<typename T>
3818 class VmaStlAllocator
3819 {
3820 public:
3821  const VkAllocationCallbacks* const m_pCallbacks;
3822  typedef T value_type;
3823 
3824  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3825  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3826 
3827  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3828  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3829 
3830  template<typename U>
3831  bool operator==(const VmaStlAllocator<U>& rhs) const
3832  {
3833  return m_pCallbacks == rhs.m_pCallbacks;
3834  }
3835  template<typename U>
3836  bool operator!=(const VmaStlAllocator<U>& rhs) const
3837  {
3838  return m_pCallbacks != rhs.m_pCallbacks;
3839  }
3840 
3841  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3842 };
3843 
3844 #if VMA_USE_STL_VECTOR
3845 
3846 #define VmaVector std::vector
3847 
3848 template<typename T, typename allocatorT>
3849 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3850 {
3851  vec.insert(vec.begin() + index, item);
3852 }
3853 
3854 template<typename T, typename allocatorT>
3855 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3856 {
3857  vec.erase(vec.begin() + index);
3858 }
3859 
3860 #else // #if VMA_USE_STL_VECTOR
3861 
3862 /* Class with interface compatible with subset of std::vector.
3863 T must be POD because constructors and destructors are not called and memcpy is
3864 used for these objects. */
3865 template<typename T, typename AllocatorT>
3866 class VmaVector
3867 {
3868 public:
3869  typedef T value_type;
3870 
3871  VmaVector(const AllocatorT& allocator) :
3872  m_Allocator(allocator),
3873  m_pArray(VMA_NULL),
3874  m_Count(0),
3875  m_Capacity(0)
3876  {
3877  }
3878 
3879  VmaVector(size_t count, const AllocatorT& allocator) :
3880  m_Allocator(allocator),
3881  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3882  m_Count(count),
3883  m_Capacity(count)
3884  {
3885  }
3886 
3887  VmaVector(const VmaVector<T, AllocatorT>& src) :
3888  m_Allocator(src.m_Allocator),
3889  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3890  m_Count(src.m_Count),
3891  m_Capacity(src.m_Count)
3892  {
3893  if(m_Count != 0)
3894  {
3895  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3896  }
3897  }
3898 
3899  ~VmaVector()
3900  {
3901  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3902  }
3903 
3904  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3905  {
3906  if(&rhs != this)
3907  {
3908  resize(rhs.m_Count);
3909  if(m_Count != 0)
3910  {
3911  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3912  }
3913  }
3914  return *this;
3915  }
3916 
3917  bool empty() const { return m_Count == 0; }
3918  size_t size() const { return m_Count; }
3919  T* data() { return m_pArray; }
3920  const T* data() const { return m_pArray; }
3921 
3922  T& operator[](size_t index)
3923  {
3924  VMA_HEAVY_ASSERT(index < m_Count);
3925  return m_pArray[index];
3926  }
3927  const T& operator[](size_t index) const
3928  {
3929  VMA_HEAVY_ASSERT(index < m_Count);
3930  return m_pArray[index];
3931  }
3932 
3933  T& front()
3934  {
3935  VMA_HEAVY_ASSERT(m_Count > 0);
3936  return m_pArray[0];
3937  }
3938  const T& front() const
3939  {
3940  VMA_HEAVY_ASSERT(m_Count > 0);
3941  return m_pArray[0];
3942  }
3943  T& back()
3944  {
3945  VMA_HEAVY_ASSERT(m_Count > 0);
3946  return m_pArray[m_Count - 1];
3947  }
3948  const T& back() const
3949  {
3950  VMA_HEAVY_ASSERT(m_Count > 0);
3951  return m_pArray[m_Count - 1];
3952  }
3953 
3954  void reserve(size_t newCapacity, bool freeMemory = false)
3955  {
3956  newCapacity = VMA_MAX(newCapacity, m_Count);
3957 
3958  if((newCapacity < m_Capacity) && !freeMemory)
3959  {
3960  newCapacity = m_Capacity;
3961  }
3962 
3963  if(newCapacity != m_Capacity)
3964  {
3965  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3966  if(m_Count != 0)
3967  {
3968  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3969  }
3970  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3971  m_Capacity = newCapacity;
3972  m_pArray = newArray;
3973  }
3974  }
3975 
3976  void resize(size_t newCount, bool freeMemory = false)
3977  {
3978  size_t newCapacity = m_Capacity;
3979  if(newCount > m_Capacity)
3980  {
3981  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3982  }
3983  else if(freeMemory)
3984  {
3985  newCapacity = newCount;
3986  }
3987 
3988  if(newCapacity != m_Capacity)
3989  {
3990  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3991  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3992  if(elementsToCopy != 0)
3993  {
3994  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3995  }
3996  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3997  m_Capacity = newCapacity;
3998  m_pArray = newArray;
3999  }
4000 
4001  m_Count = newCount;
4002  }
4003 
4004  void clear(bool freeMemory = false)
4005  {
4006  resize(0, freeMemory);
4007  }
4008 
4009  void insert(size_t index, const T& src)
4010  {
4011  VMA_HEAVY_ASSERT(index <= m_Count);
4012  const size_t oldCount = size();
4013  resize(oldCount + 1);
4014  if(index < oldCount)
4015  {
4016  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4017  }
4018  m_pArray[index] = src;
4019  }
4020 
4021  void remove(size_t index)
4022  {
4023  VMA_HEAVY_ASSERT(index < m_Count);
4024  const size_t oldCount = size();
4025  if(index < oldCount - 1)
4026  {
4027  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4028  }
4029  resize(oldCount - 1);
4030  }
4031 
4032  void push_back(const T& src)
4033  {
4034  const size_t newIndex = size();
4035  resize(newIndex + 1);
4036  m_pArray[newIndex] = src;
4037  }
4038 
4039  void pop_back()
4040  {
4041  VMA_HEAVY_ASSERT(m_Count > 0);
4042  resize(size() - 1);
4043  }
4044 
4045  void push_front(const T& src)
4046  {
4047  insert(0, src);
4048  }
4049 
4050  void pop_front()
4051  {
4052  VMA_HEAVY_ASSERT(m_Count > 0);
4053  remove(0);
4054  }
4055 
4056  typedef T* iterator;
4057 
4058  iterator begin() { return m_pArray; }
4059  iterator end() { return m_pArray + m_Count; }
4060 
4061 private:
4062  AllocatorT m_Allocator;
4063  T* m_pArray;
4064  size_t m_Count;
4065  size_t m_Capacity;
4066 };
4067 
4068 template<typename T, typename allocatorT>
4069 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4070 {
4071  vec.insert(index, item);
4072 }
4073 
4074 template<typename T, typename allocatorT>
4075 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4076 {
4077  vec.remove(index);
4078 }
4079 
4080 #endif // #if VMA_USE_STL_VECTOR
4081 
4082 template<typename CmpLess, typename VectorT>
4083 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4084 {
4085  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4086  vector.data(),
4087  vector.data() + vector.size(),
4088  value,
4089  CmpLess()) - vector.data();
4090  VmaVectorInsert(vector, indexToInsert, value);
4091  return indexToInsert;
4092 }
4093 
4094 template<typename CmpLess, typename VectorT>
4095 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4096 {
4097  CmpLess comparator;
4098  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4099  vector.begin(),
4100  vector.end(),
4101  value,
4102  comparator);
4103  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4104  {
4105  size_t indexToRemove = it - vector.begin();
4106  VmaVectorRemove(vector, indexToRemove);
4107  return true;
4108  }
4109  return false;
4110 }
4111 
4112 template<typename CmpLess, typename IterT, typename KeyT>
4113 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4114 {
4115  CmpLess comparator;
4116  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4117  beg, end, value, comparator);
4118  if(it == end ||
4119  (!comparator(*it, value) && !comparator(value, *it)))
4120  {
4121  return it;
4122  }
4123  return end;
4124 }
4125 
4127 // class VmaPoolAllocator
4128 
4129 /*
4130 Allocator for objects of type T using a list of arrays (pools) to speed up
4131 allocation. The number of elements that can be allocated is not bounded, because
4132 the allocator can create multiple blocks.
4133 */
4134 template<typename T>
4135 class VmaPoolAllocator
4136 {
4137  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4138 public:
4139  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
4140  ~VmaPoolAllocator();
4141  void Clear();
4142  T* Alloc();
4143  void Free(T* ptr);
4144 
4145 private:
4146  union Item
4147  {
4148  uint32_t NextFreeIndex;
4149  T Value;
4150  };
4151 
4152  struct ItemBlock
4153  {
4154  Item* pItems;
4155  uint32_t FirstFreeIndex;
4156  };
4157 
4158  const VkAllocationCallbacks* m_pAllocationCallbacks;
4159  size_t m_ItemsPerBlock;
4160  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4161 
4162  ItemBlock& CreateNewBlock();
4163 };
4164 
4165 template<typename T>
4166 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
4167  m_pAllocationCallbacks(pAllocationCallbacks),
4168  m_ItemsPerBlock(itemsPerBlock),
4169  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4170 {
4171  VMA_ASSERT(itemsPerBlock > 0);
4172 }
4173 
4174 template<typename T>
4175 VmaPoolAllocator<T>::~VmaPoolAllocator()
4176 {
4177  Clear();
4178 }
4179 
4180 template<typename T>
4181 void VmaPoolAllocator<T>::Clear()
4182 {
4183  for(size_t i = m_ItemBlocks.size(); i--; )
4184  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
4185  m_ItemBlocks.clear();
4186 }
4187 
4188 template<typename T>
4189 T* VmaPoolAllocator<T>::Alloc()
4190 {
4191  for(size_t i = m_ItemBlocks.size(); i--; )
4192  {
4193  ItemBlock& block = m_ItemBlocks[i];
4194  // This block has some free items: Use first one.
4195  if(block.FirstFreeIndex != UINT32_MAX)
4196  {
4197  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4198  block.FirstFreeIndex = pItem->NextFreeIndex;
4199  return &pItem->Value;
4200  }
4201  }
4202 
 4203  // No block has a free item: Create a new one and use it.
4204  ItemBlock& newBlock = CreateNewBlock();
4205  Item* const pItem = &newBlock.pItems[0];
4206  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4207  return &pItem->Value;
4208 }
4209 
4210 template<typename T>
4211 void VmaPoolAllocator<T>::Free(T* ptr)
4212 {
4213  // Search all memory blocks to find ptr.
4214  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
4215  {
4216  ItemBlock& block = m_ItemBlocks[i];
4217 
4218  // Casting to union.
4219  Item* pItemPtr;
4220  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4221 
4222  // Check if pItemPtr is in address range of this block.
4223  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
4224  {
4225  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4226  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4227  block.FirstFreeIndex = index;
4228  return;
4229  }
4230  }
4231  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4232 }
4233 
4234 template<typename T>
4235 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4236 {
4237  ItemBlock newBlock = {
4238  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
4239 
4240  m_ItemBlocks.push_back(newBlock);
4241 
 4242  // Set up a singly-linked list of all free items in this block.
4243  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
4244  newBlock.pItems[i].NextFreeIndex = i + 1;
4245  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
4246  return m_ItemBlocks.back();
4247 }
4248 
4250 // class VmaRawList, VmaList
4251 
4252 #if VMA_USE_STL_LIST
4253 
4254 #define VmaList std::list
4255 
4256 #else // #if VMA_USE_STL_LIST
4257 
4258 template<typename T>
4259 struct VmaListItem
4260 {
4261  VmaListItem* pPrev;
4262  VmaListItem* pNext;
4263  T Value;
4264 };
4265 
4266 // Doubly linked list.
4267 template<typename T>
4268 class VmaRawList
4269 {
4270  VMA_CLASS_NO_COPY(VmaRawList)
4271 public:
4272  typedef VmaListItem<T> ItemType;
4273 
4274  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4275  ~VmaRawList();
4276  void Clear();
4277 
4278  size_t GetCount() const { return m_Count; }
4279  bool IsEmpty() const { return m_Count == 0; }
4280 
4281  ItemType* Front() { return m_pFront; }
4282  const ItemType* Front() const { return m_pFront; }
4283  ItemType* Back() { return m_pBack; }
4284  const ItemType* Back() const { return m_pBack; }
4285 
4286  ItemType* PushBack();
4287  ItemType* PushFront();
4288  ItemType* PushBack(const T& value);
4289  ItemType* PushFront(const T& value);
4290  void PopBack();
4291  void PopFront();
4292 
4293  // Item can be null - it means PushBack.
4294  ItemType* InsertBefore(ItemType* pItem);
4295  // Item can be null - it means PushFront.
4296  ItemType* InsertAfter(ItemType* pItem);
4297 
4298  ItemType* InsertBefore(ItemType* pItem, const T& value);
4299  ItemType* InsertAfter(ItemType* pItem, const T& value);
4300 
4301  void Remove(ItemType* pItem);
4302 
4303 private:
4304  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4305  VmaPoolAllocator<ItemType> m_ItemAllocator;
4306  ItemType* m_pFront;
4307  ItemType* m_pBack;
4308  size_t m_Count;
4309 };
4310 
4311 template<typename T>
4312 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4313  m_pAllocationCallbacks(pAllocationCallbacks),
4314  m_ItemAllocator(pAllocationCallbacks, 128),
4315  m_pFront(VMA_NULL),
4316  m_pBack(VMA_NULL),
4317  m_Count(0)
4318 {
4319 }
4320 
4321 template<typename T>
4322 VmaRawList<T>::~VmaRawList()
4323 {
 4324  // Intentionally not calling Clear, because that would spend unnecessary
 4325  // computation returning all items to m_ItemAllocator as free.
4326 }
4327 
4328 template<typename T>
4329 void VmaRawList<T>::Clear()
4330 {
4331  if(IsEmpty() == false)
4332  {
4333  ItemType* pItem = m_pBack;
4334  while(pItem != VMA_NULL)
4335  {
4336  ItemType* const pPrevItem = pItem->pPrev;
4337  m_ItemAllocator.Free(pItem);
4338  pItem = pPrevItem;
4339  }
4340  m_pFront = VMA_NULL;
4341  m_pBack = VMA_NULL;
4342  m_Count = 0;
4343  }
4344 }
4345 
4346 template<typename T>
4347 VmaListItem<T>* VmaRawList<T>::PushBack()
4348 {
4349  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4350  pNewItem->pNext = VMA_NULL;
4351  if(IsEmpty())
4352  {
4353  pNewItem->pPrev = VMA_NULL;
4354  m_pFront = pNewItem;
4355  m_pBack = pNewItem;
4356  m_Count = 1;
4357  }
4358  else
4359  {
4360  pNewItem->pPrev = m_pBack;
4361  m_pBack->pNext = pNewItem;
4362  m_pBack = pNewItem;
4363  ++m_Count;
4364  }
4365  return pNewItem;
4366 }
4367 
4368 template<typename T>
4369 VmaListItem<T>* VmaRawList<T>::PushFront()
4370 {
4371  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4372  pNewItem->pPrev = VMA_NULL;
4373  if(IsEmpty())
4374  {
4375  pNewItem->pNext = VMA_NULL;
4376  m_pFront = pNewItem;
4377  m_pBack = pNewItem;
4378  m_Count = 1;
4379  }
4380  else
4381  {
4382  pNewItem->pNext = m_pFront;
4383  m_pFront->pPrev = pNewItem;
4384  m_pFront = pNewItem;
4385  ++m_Count;
4386  }
4387  return pNewItem;
4388 }
4389 
4390 template<typename T>
4391 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4392 {
4393  ItemType* const pNewItem = PushBack();
4394  pNewItem->Value = value;
4395  return pNewItem;
4396 }
4397 
4398 template<typename T>
4399 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4400 {
4401  ItemType* const pNewItem = PushFront();
4402  pNewItem->Value = value;
4403  return pNewItem;
4404 }
4405 
4406 template<typename T>
4407 void VmaRawList<T>::PopBack()
4408 {
4409  VMA_HEAVY_ASSERT(m_Count > 0);
4410  ItemType* const pBackItem = m_pBack;
4411  ItemType* const pPrevItem = pBackItem->pPrev;
4412  if(pPrevItem != VMA_NULL)
4413  {
4414  pPrevItem->pNext = VMA_NULL;
4415  }
4416  m_pBack = pPrevItem;
4417  m_ItemAllocator.Free(pBackItem);
4418  --m_Count;
4419 }
4420 
4421 template<typename T>
4422 void VmaRawList<T>::PopFront()
4423 {
4424  VMA_HEAVY_ASSERT(m_Count > 0);
4425  ItemType* const pFrontItem = m_pFront;
4426  ItemType* const pNextItem = pFrontItem->pNext;
4427  if(pNextItem != VMA_NULL)
4428  {
4429  pNextItem->pPrev = VMA_NULL;
4430  }
4431  m_pFront = pNextItem;
4432  m_ItemAllocator.Free(pFrontItem);
4433  --m_Count;
4434 }
4435 
4436 template<typename T>
4437 void VmaRawList<T>::Remove(ItemType* pItem)
4438 {
4439  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4440  VMA_HEAVY_ASSERT(m_Count > 0);
4441 
4442  if(pItem->pPrev != VMA_NULL)
4443  {
4444  pItem->pPrev->pNext = pItem->pNext;
4445  }
4446  else
4447  {
4448  VMA_HEAVY_ASSERT(m_pFront == pItem);
4449  m_pFront = pItem->pNext;
4450  }
4451 
4452  if(pItem->pNext != VMA_NULL)
4453  {
4454  pItem->pNext->pPrev = pItem->pPrev;
4455  }
4456  else
4457  {
4458  VMA_HEAVY_ASSERT(m_pBack == pItem);
4459  m_pBack = pItem->pPrev;
4460  }
4461 
4462  m_ItemAllocator.Free(pItem);
4463  --m_Count;
4464 }
4465 
4466 template<typename T>
4467 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4468 {
4469  if(pItem != VMA_NULL)
4470  {
4471  ItemType* const prevItem = pItem->pPrev;
4472  ItemType* const newItem = m_ItemAllocator.Alloc();
4473  newItem->pPrev = prevItem;
4474  newItem->pNext = pItem;
4475  pItem->pPrev = newItem;
4476  if(prevItem != VMA_NULL)
4477  {
4478  prevItem->pNext = newItem;
4479  }
4480  else
4481  {
4482  VMA_HEAVY_ASSERT(m_pFront == pItem);
4483  m_pFront = newItem;
4484  }
4485  ++m_Count;
4486  return newItem;
4487  }
4488  else
4489  return PushBack();
4490 }
4491 
4492 template<typename T>
4493 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4494 {
4495  if(pItem != VMA_NULL)
4496  {
4497  ItemType* const nextItem = pItem->pNext;
4498  ItemType* const newItem = m_ItemAllocator.Alloc();
4499  newItem->pNext = nextItem;
4500  newItem->pPrev = pItem;
4501  pItem->pNext = newItem;
4502  if(nextItem != VMA_NULL)
4503  {
4504  nextItem->pPrev = newItem;
4505  }
4506  else
4507  {
4508  VMA_HEAVY_ASSERT(m_pBack == pItem);
4509  m_pBack = newItem;
4510  }
4511  ++m_Count;
4512  return newItem;
4513  }
4514  else
4515  return PushFront();
4516 }
4517 
4518 template<typename T>
4519 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4520 {
4521  ItemType* const newItem = InsertBefore(pItem);
4522  newItem->Value = value;
4523  return newItem;
4524 }
4525 
4526 template<typename T>
4527 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4528 {
4529  ItemType* const newItem = InsertAfter(pItem);
4530  newItem->Value = value;
4531  return newItem;
4532 }
4533 
4534 template<typename T, typename AllocatorT>
4535 class VmaList
4536 {
4537  VMA_CLASS_NO_COPY(VmaList)
4538 public:
4539  class iterator
4540  {
4541  public:
4542  iterator() :
4543  m_pList(VMA_NULL),
4544  m_pItem(VMA_NULL)
4545  {
4546  }
4547 
4548  T& operator*() const
4549  {
4550  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4551  return m_pItem->Value;
4552  }
4553  T* operator->() const
4554  {
4555  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4556  return &m_pItem->Value;
4557  }
4558 
4559  iterator& operator++()
4560  {
4561  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4562  m_pItem = m_pItem->pNext;
4563  return *this;
4564  }
4565  iterator& operator--()
4566  {
4567  if(m_pItem != VMA_NULL)
4568  {
4569  m_pItem = m_pItem->pPrev;
4570  }
4571  else
4572  {
4573  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4574  m_pItem = m_pList->Back();
4575  }
4576  return *this;
4577  }
4578 
4579  iterator operator++(int)
4580  {
4581  iterator result = *this;
4582  ++*this;
4583  return result;
4584  }
4585  iterator operator--(int)
4586  {
4587  iterator result = *this;
4588  --*this;
4589  return result;
4590  }
4591 
4592  bool operator==(const iterator& rhs) const
4593  {
4594  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4595  return m_pItem == rhs.m_pItem;
4596  }
4597  bool operator!=(const iterator& rhs) const
4598  {
4599  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4600  return m_pItem != rhs.m_pItem;
4601  }
4602 
4603  private:
4604  VmaRawList<T>* m_pList;
4605  VmaListItem<T>* m_pItem;
4606 
4607  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4608  m_pList(pList),
4609  m_pItem(pItem)
4610  {
4611  }
4612 
4613  friend class VmaList<T, AllocatorT>;
4614  };
4615 
4616  class const_iterator
4617  {
4618  public:
4619  const_iterator() :
4620  m_pList(VMA_NULL),
4621  m_pItem(VMA_NULL)
4622  {
4623  }
4624 
4625  const_iterator(const iterator& src) :
4626  m_pList(src.m_pList),
4627  m_pItem(src.m_pItem)
4628  {
4629  }
4630 
4631  const T& operator*() const
4632  {
4633  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4634  return m_pItem->Value;
4635  }
4636  const T* operator->() const
4637  {
4638  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4639  return &m_pItem->Value;
4640  }
4641 
4642  const_iterator& operator++()
4643  {
4644  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4645  m_pItem = m_pItem->pNext;
4646  return *this;
4647  }
4648  const_iterator& operator--()
4649  {
4650  if(m_pItem != VMA_NULL)
4651  {
4652  m_pItem = m_pItem->pPrev;
4653  }
4654  else
4655  {
4656  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4657  m_pItem = m_pList->Back();
4658  }
4659  return *this;
4660  }
4661 
4662  const_iterator operator++(int)
4663  {
4664  const_iterator result = *this;
4665  ++*this;
4666  return result;
4667  }
4668  const_iterator operator--(int)
4669  {
4670  const_iterator result = *this;
4671  --*this;
4672  return result;
4673  }
4674 
4675  bool operator==(const const_iterator& rhs) const
4676  {
4677  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4678  return m_pItem == rhs.m_pItem;
4679  }
4680  bool operator!=(const const_iterator& rhs) const
4681  {
4682  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4683  return m_pItem != rhs.m_pItem;
4684  }
4685 
4686  private:
4687  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4688  m_pList(pList),
4689  m_pItem(pItem)
4690  {
4691  }
4692 
4693  const VmaRawList<T>* m_pList;
4694  const VmaListItem<T>* m_pItem;
4695 
4696  friend class VmaList<T, AllocatorT>;
4697  };
4698 
4699  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4700 
4701  bool empty() const { return m_RawList.IsEmpty(); }
4702  size_t size() const { return m_RawList.GetCount(); }
4703 
4704  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4705  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4706 
4707  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4708  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4709 
4710  void clear() { m_RawList.Clear(); }
4711  void push_back(const T& value) { m_RawList.PushBack(value); }
4712  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4713  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4714 
4715 private:
4716  VmaRawList<T> m_RawList;
4717 };
4718 
4719 #endif // #if VMA_USE_STL_LIST
4720 
4722 // class VmaMap
4723 
4724 // Unused in this version.
4725 #if 0
4726 
4727 #if VMA_USE_STL_UNORDERED_MAP
4728 
4729 #define VmaPair std::pair
4730 
4731 #define VMA_MAP_TYPE(KeyT, ValueT) \
4732  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4733 
4734 #else // #if VMA_USE_STL_UNORDERED_MAP
4735 
4736 template<typename T1, typename T2>
4737 struct VmaPair
4738 {
4739  T1 first;
4740  T2 second;
4741 
4742  VmaPair() : first(), second() { }
4743  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4744 };
4745 
4746 /* Class compatible with a subset of the interface of std::unordered_map.
4747 KeyT, ValueT must be POD because they will be stored in VmaVector.
4748 */
4749 template<typename KeyT, typename ValueT>
4750 class VmaMap
4751 {
4752 public:
4753  typedef VmaPair<KeyT, ValueT> PairType;
4754  typedef PairType* iterator;
4755 
4756  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4757 
4758  iterator begin() { return m_Vector.begin(); }
4759  iterator end() { return m_Vector.end(); }
4760 
4761  void insert(const PairType& pair);
4762  iterator find(const KeyT& key);
4763  void erase(iterator it);
4764 
4765 private:
4766  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4767 };
4768 
4769 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4770 
4771 template<typename FirstT, typename SecondT>
4772 struct VmaPairFirstLess
4773 {
4774  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4775  {
4776  return lhs.first < rhs.first;
4777  }
4778  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4779  {
4780  return lhs.first < rhsFirst;
4781  }
4782 };
4783 
4784 template<typename KeyT, typename ValueT>
4785 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4786 {
4787  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4788  m_Vector.data(),
4789  m_Vector.data() + m_Vector.size(),
4790  pair,
4791  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4792  VmaVectorInsert(m_Vector, indexToInsert, pair);
4793 }
4794 
4795 template<typename KeyT, typename ValueT>
4796 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4797 {
4798  PairType* it = VmaBinaryFindFirstNotLess(
4799  m_Vector.data(),
4800  m_Vector.data() + m_Vector.size(),
4801  key,
4802  VmaPairFirstLess<KeyT, ValueT>());
4803  if((it != m_Vector.end()) && (it->first == key))
4804  {
4805  return it;
4806  }
4807  else
4808  {
4809  return m_Vector.end();
4810  }
4811 }
4812 
4813 template<typename KeyT, typename ValueT>
4814 void VmaMap<KeyT, ValueT>::erase(iterator it)
4815 {
4816  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4817 }
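/* Illustrative usage, not part of the library (VmaMap is unused in this version,
   see the #if 0 above). The key/value types and the callbacks pointer are
   placeholders chosen only for the example:

       VmaStlAllocator< VmaPair<uint32_t, float> > alloc(pAllocationCallbacks);
       VmaMap<uint32_t, float> map(alloc);
       map.insert(VmaPair<uint32_t, float>(42, 1.0f));      // Keeps m_Vector sorted by key.
       VmaMap<uint32_t, float>::iterator it = map.find(42); // Binary search over the vector.
       if(it != map.end())
       {
           map.erase(it);                                   // Removes the pair, keeping order.
       }
*/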
4818 
4819 #endif // #if VMA_USE_STL_UNORDERED_MAP
4820 
4821 #endif // #if 0
4822 
4824 
4825 class VmaDeviceMemoryBlock;
4826 
4827 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4828 
4829 struct VmaAllocation_T
4830 {
4831  VMA_CLASS_NO_COPY(VmaAllocation_T)
4832 private:
4833  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4834 
4835  enum FLAGS
4836  {
4837  FLAG_USER_DATA_STRING = 0x01,
4838  };
4839 
4840 public:
4841  enum ALLOCATION_TYPE
4842  {
4843  ALLOCATION_TYPE_NONE,
4844  ALLOCATION_TYPE_BLOCK,
4845  ALLOCATION_TYPE_DEDICATED,
4846  };
4847 
4848  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4849  m_Alignment(1),
4850  m_Size(0),
4851  m_pUserData(VMA_NULL),
4852  m_LastUseFrameIndex(currentFrameIndex),
4853  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4854  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4855  m_MapCount(0),
4856  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4857  {
4858 #if VMA_STATS_STRING_ENABLED
4859  m_CreationFrameIndex = currentFrameIndex;
4860  m_BufferImageUsage = 0;
4861 #endif
4862  }
4863 
4864  ~VmaAllocation_T()
4865  {
4866  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4867 
4868  // Check if owned string was freed.
4869  VMA_ASSERT(m_pUserData == VMA_NULL);
4870  }
4871 
4872  void InitBlockAllocation(
4873  VmaPool hPool,
4874  VmaDeviceMemoryBlock* block,
4875  VkDeviceSize offset,
4876  VkDeviceSize alignment,
4877  VkDeviceSize size,
4878  VmaSuballocationType suballocationType,
4879  bool mapped,
4880  bool canBecomeLost)
4881  {
4882  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4883  VMA_ASSERT(block != VMA_NULL);
4884  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4885  m_Alignment = alignment;
4886  m_Size = size;
4887  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4888  m_SuballocationType = (uint8_t)suballocationType;
4889  m_BlockAllocation.m_hPool = hPool;
4890  m_BlockAllocation.m_Block = block;
4891  m_BlockAllocation.m_Offset = offset;
4892  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4893  }
4894 
4895  void InitLost()
4896  {
4897  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4898  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4899  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4900  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4901  m_BlockAllocation.m_Block = VMA_NULL;
4902  m_BlockAllocation.m_Offset = 0;
4903  m_BlockAllocation.m_CanBecomeLost = true;
4904  }
4905 
4906  void ChangeBlockAllocation(
4907  VmaAllocator hAllocator,
4908  VmaDeviceMemoryBlock* block,
4909  VkDeviceSize offset);
4910 
4911  void ChangeSize(VkDeviceSize newSize);
4912  void ChangeOffset(VkDeviceSize newOffset);
4913 
4914  // A non-null pMappedData means the allocation was created with the MAPPED flag.
4915  void InitDedicatedAllocation(
4916  uint32_t memoryTypeIndex,
4917  VkDeviceMemory hMemory,
4918  VmaSuballocationType suballocationType,
4919  void* pMappedData,
4920  VkDeviceSize size)
4921  {
4922  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4923  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4924  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4925  m_Alignment = 0;
4926  m_Size = size;
4927  m_SuballocationType = (uint8_t)suballocationType;
4928  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4929  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4930  m_DedicatedAllocation.m_hMemory = hMemory;
4931  m_DedicatedAllocation.m_pMappedData = pMappedData;
4932  }
4933 
4934  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4935  VkDeviceSize GetAlignment() const { return m_Alignment; }
4936  VkDeviceSize GetSize() const { return m_Size; }
4937  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4938  void* GetUserData() const { return m_pUserData; }
4939  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4940  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4941 
4942  VmaDeviceMemoryBlock* GetBlock() const
4943  {
4944  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4945  return m_BlockAllocation.m_Block;
4946  }
4947  VkDeviceSize GetOffset() const;
4948  VkDeviceMemory GetMemory() const;
4949  uint32_t GetMemoryTypeIndex() const;
4950  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4951  void* GetMappedData() const;
4952  bool CanBecomeLost() const;
4953  VmaPool GetPool() const;
4954 
4955  uint32_t GetLastUseFrameIndex() const
4956  {
4957  return m_LastUseFrameIndex.load();
4958  }
4959  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4960  {
4961  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4962  }
4963  /*
4964  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4965  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4966  - Else, returns false.
4967 
4968  If hAllocation is already lost, assert - you should not call it then.
4969  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
4970  */
4971  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
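    // Worked example (illustrative): with frameInUseCount = 2, an allocation last used
    // in frame 10 satisfies 10 + 2 < 13 and can be made lost in frame 13 or later,
    // but not in frame 12, where 10 + 2 < 12 is false.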
4972 
4973  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4974  {
4975  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4976  outInfo.blockCount = 1;
4977  outInfo.allocationCount = 1;
4978  outInfo.unusedRangeCount = 0;
4979  outInfo.usedBytes = m_Size;
4980  outInfo.unusedBytes = 0;
4981  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4982  outInfo.unusedRangeSizeMin = UINT64_MAX;
4983  outInfo.unusedRangeSizeMax = 0;
4984  }
4985 
4986  void BlockAllocMap();
4987  void BlockAllocUnmap();
4988  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4989  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4990 
4991 #if VMA_STATS_STRING_ENABLED
4992  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4993  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4994 
4995  void InitBufferImageUsage(uint32_t bufferImageUsage)
4996  {
4997  VMA_ASSERT(m_BufferImageUsage == 0);
4998  m_BufferImageUsage = bufferImageUsage;
4999  }
5000 
5001  void PrintParameters(class VmaJsonWriter& json) const;
5002 #endif
5003 
5004 private:
5005  VkDeviceSize m_Alignment;
5006  VkDeviceSize m_Size;
5007  void* m_pUserData;
5008  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5009  uint8_t m_Type; // ALLOCATION_TYPE
5010  uint8_t m_SuballocationType; // VmaSuballocationType
5011  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5012  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5013  uint8_t m_MapCount;
5014  uint8_t m_Flags; // enum FLAGS
5015 
5016  // Allocation out of VmaDeviceMemoryBlock.
5017  struct BlockAllocation
5018  {
5019  VmaPool m_hPool; // Null if belongs to general memory.
5020  VmaDeviceMemoryBlock* m_Block;
5021  VkDeviceSize m_Offset;
5022  bool m_CanBecomeLost;
5023  };
5024 
5025  // Allocation for an object that has its own private VkDeviceMemory.
5026  struct DedicatedAllocation
5027  {
5028  uint32_t m_MemoryTypeIndex;
5029  VkDeviceMemory m_hMemory;
5030  void* m_pMappedData; // Not null means memory is mapped.
5031  };
5032 
5033  union
5034  {
5035  // Allocation out of VmaDeviceMemoryBlock.
5036  BlockAllocation m_BlockAllocation;
5037  // Allocation for an object that has its own private VkDeviceMemory.
5038  DedicatedAllocation m_DedicatedAllocation;
5039  };
5040 
5041 #if VMA_STATS_STRING_ENABLED
5042  uint32_t m_CreationFrameIndex;
5043  uint32_t m_BufferImageUsage; // 0 if unknown.
5044 #endif
5045 
5046  void FreeUserDataString(VmaAllocator hAllocator);
5047 };
5048 
5049 /*
5050 Represents a region of a VmaDeviceMemoryBlock that is either assigned and returned
5051 as an allocated memory block, or free.
5052 */
5053 struct VmaSuballocation
5054 {
5055  VkDeviceSize offset;
5056  VkDeviceSize size;
5057  VmaAllocation hAllocation;
5058  VmaSuballocationType type;
5059 };
5060 
5061 // Comparator for offsets.
5062 struct VmaSuballocationOffsetLess
5063 {
5064  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5065  {
5066  return lhs.offset < rhs.offset;
5067  }
5068 };
5069 struct VmaSuballocationOffsetGreater
5070 {
5071  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5072  {
5073  return lhs.offset > rhs.offset;
5074  }
5075 };
5076 
5077 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5078 
5079 // Cost of one additional allocation becoming lost, expressed as an equivalent number of bytes.
5080 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5081 
5082 /*
5083 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5084 
5085 If canMakeOtherLost was false:
5086 - item points to a FREE suballocation.
5087 - itemsToMakeLostCount is 0.
5088 
5089 If canMakeOtherLost was true:
5090 - item points to first of sequence of suballocations, which are either FREE,
5091  or point to VmaAllocations that can become lost.
5092 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5093  the requested allocation to succeed.
5094 */
5095 struct VmaAllocationRequest
5096 {
5097  VkDeviceSize offset;
5098  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5099  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5100  VmaSuballocationList::iterator item;
5101  size_t itemsToMakeLostCount;
5102  void* customData;
5103 
5104  VkDeviceSize CalcCost() const
5105  {
5106  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5107  }
5108 };
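// Worked example (illustrative): a request that overlaps two lost-able allocations with a
// combined size of 100 KiB has CalcCost() = 102400 + 2 * VMA_LOST_ALLOCATION_COST =
// 102400 + 2097152 = 2199552, so it is more expensive than an alternative request that
// fits entirely into free space (sumItemSize = 0, itemsToMakeLostCount = 0, cost 0).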
5109 
5110 /*
5111 Data structure used for bookkeeping of allocations and unused ranges of memory
5112 in a single VkDeviceMemory block.
5113 */
5114 class VmaBlockMetadata
5115 {
5116 public:
5117  VmaBlockMetadata(VmaAllocator hAllocator);
5118  virtual ~VmaBlockMetadata() { }
5119  virtual void Init(VkDeviceSize size) { m_Size = size; }
5120 
5121  // Validates all data structures inside this object. If not valid, returns false.
5122  virtual bool Validate() const = 0;
5123  VkDeviceSize GetSize() const { return m_Size; }
5124  virtual size_t GetAllocationCount() const = 0;
5125  virtual VkDeviceSize GetSumFreeSize() const = 0;
5126  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5127  // Returns true if this block is empty - contains only a single free suballocation.
5128  virtual bool IsEmpty() const = 0;
5129 
5130  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5131  // Shouldn't modify blockCount.
5132  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5133 
5134 #if VMA_STATS_STRING_ENABLED
5135  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5136 #endif
5137 
5138  // Tries to find a place for suballocation with given parameters inside this block.
5139  // If succeeded, fills pAllocationRequest and returns true.
5140  // If failed, returns false.
5141  virtual bool CreateAllocationRequest(
5142  uint32_t currentFrameIndex,
5143  uint32_t frameInUseCount,
5144  VkDeviceSize bufferImageGranularity,
5145  VkDeviceSize allocSize,
5146  VkDeviceSize allocAlignment,
5147  bool upperAddress,
5148  VmaSuballocationType allocType,
5149  bool canMakeOtherLost,
5150  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5151  uint32_t strategy,
5152  VmaAllocationRequest* pAllocationRequest) = 0;
5153 
5154  virtual bool MakeRequestedAllocationsLost(
5155  uint32_t currentFrameIndex,
5156  uint32_t frameInUseCount,
5157  VmaAllocationRequest* pAllocationRequest) = 0;
5158 
5159  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5160 
5161  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5162 
5163  // Makes actual allocation based on request. Request must already be checked and valid.
5164  virtual void Alloc(
5165  const VmaAllocationRequest& request,
5166  VmaSuballocationType type,
5167  VkDeviceSize allocSize,
5168  bool upperAddress,
5169  VmaAllocation hAllocation) = 0;
5170 
5171  // Frees suballocation assigned to given memory region.
5172  virtual void Free(const VmaAllocation allocation) = 0;
5173  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5174 
5175  // Tries to resize (grow or shrink) space for given allocation, in place.
5176  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5177 
5178 protected:
5179  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5180 
5181 #if VMA_STATS_STRING_ENABLED
5182  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5183  VkDeviceSize unusedBytes,
5184  size_t allocationCount,
5185  size_t unusedRangeCount) const;
5186  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5187  VkDeviceSize offset,
5188  VmaAllocation hAllocation) const;
5189  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5190  VkDeviceSize offset,
5191  VkDeviceSize size) const;
5192  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5193 #endif
5194 
5195 private:
5196  VkDeviceSize m_Size;
5197  const VkAllocationCallbacks* m_pAllocationCallbacks;
5198 };
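// Illustrative sketch, not part of the library: the two-step contract that every
// VmaBlockMetadata implementation above is expected to honor. The helper name is
// hypothetical; `strategy` must be one of the VMA_ALLOCATION_CREATE_STRATEGY_* bits,
// as required by CreateAllocationRequest(). With canMakeOtherLost == true,
// MakeRequestedAllocationsLost() would additionally be called between the two steps.
static inline bool VmaSketch_AllocFromMetadata(
    VmaBlockMetadata& metadata,
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocation hAllocation)
{
    VmaAllocationRequest request = {};
    // Step 1: ask the metadata to find a place for the new suballocation.
    if(!metadata.CreateAllocationRequest(
        currentFrameIndex,
        frameInUseCount,
        bufferImageGranularity,
        allocSize,
        allocAlignment,
        false, // upperAddress
        allocType,
        false, // canMakeOtherLost
        strategy,
        &request))
    {
        return false; // Not enough suitable free space in this block.
    }
    // Step 2: commit the request. It must not be modified between the two calls.
    metadata.Alloc(request, allocType, allocSize, false /*upperAddress*/, hAllocation);
    return true;
}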
5199 
5200 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5201  VMA_ASSERT(0 && "Validation failed: " #cond); \
5202  return false; \
5203  } } while(false)
5204 
5205 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5206 {
5207  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5208 public:
5209  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5210  virtual ~VmaBlockMetadata_Generic();
5211  virtual void Init(VkDeviceSize size);
5212 
5213  virtual bool Validate() const;
5214  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5215  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5216  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5217  virtual bool IsEmpty() const;
5218 
5219  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5220  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5221 
5222 #if VMA_STATS_STRING_ENABLED
5223  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5224 #endif
5225 
5226  virtual bool CreateAllocationRequest(
5227  uint32_t currentFrameIndex,
5228  uint32_t frameInUseCount,
5229  VkDeviceSize bufferImageGranularity,
5230  VkDeviceSize allocSize,
5231  VkDeviceSize allocAlignment,
5232  bool upperAddress,
5233  VmaSuballocationType allocType,
5234  bool canMakeOtherLost,
5235  uint32_t strategy,
5236  VmaAllocationRequest* pAllocationRequest);
5237 
5238  virtual bool MakeRequestedAllocationsLost(
5239  uint32_t currentFrameIndex,
5240  uint32_t frameInUseCount,
5241  VmaAllocationRequest* pAllocationRequest);
5242 
5243  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5244 
5245  virtual VkResult CheckCorruption(const void* pBlockData);
5246 
5247  virtual void Alloc(
5248  const VmaAllocationRequest& request,
5249  VmaSuballocationType type,
5250  VkDeviceSize allocSize,
5251  bool upperAddress,
5252  VmaAllocation hAllocation);
5253 
5254  virtual void Free(const VmaAllocation allocation);
5255  virtual void FreeAtOffset(VkDeviceSize offset);
5256 
5257  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5258 
5260  // For defragmentation
5261 
5262  bool IsBufferImageGranularityConflictPossible(
5263  VkDeviceSize bufferImageGranularity,
5264  VmaSuballocationType& inOutPrevSuballocType) const;
5265 
5266 private:
5267  friend class VmaDefragmentationAlgorithm_Generic;
5268  friend class VmaDefragmentationAlgorithm_Fast;
5269 
5270  uint32_t m_FreeCount;
5271  VkDeviceSize m_SumFreeSize;
5272  VmaSuballocationList m_Suballocations;
5273  // Suballocations that are free and have size greater than certain threshold.
5274  // Sorted by size, ascending.
5275  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5276 
5277  bool ValidateFreeSuballocationList() const;
5278 
5279  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
5280  // If yes, fills pOffset and returns true. If no, returns false.
5281  bool CheckAllocation(
5282  uint32_t currentFrameIndex,
5283  uint32_t frameInUseCount,
5284  VkDeviceSize bufferImageGranularity,
5285  VkDeviceSize allocSize,
5286  VkDeviceSize allocAlignment,
5287  VmaSuballocationType allocType,
5288  VmaSuballocationList::const_iterator suballocItem,
5289  bool canMakeOtherLost,
5290  VkDeviceSize* pOffset,
5291  size_t* itemsToMakeLostCount,
5292  VkDeviceSize* pSumFreeSize,
5293  VkDeviceSize* pSumItemSize) const;
5294  // Given a free suballocation, merges it with the following one, which must also be free.
5295  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5296  // Releases the given suballocation, making it free.
5297  // Merges it with adjacent free suballocations if applicable.
5298  // Returns an iterator to the new free suballocation at this place.
5299  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5300  // Given a free suballocation, inserts it into the sorted list
5301  // m_FreeSuballocationsBySize if it is large enough to qualify.
5302  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5303  // Given a free suballocation, removes it from the sorted list
5304  // m_FreeSuballocationsBySize if it was registered there.
5305  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5306 };
5307 
5308 /*
5309 Allocations and their references in internal data structure look like this:
5310 
5311 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5312 
5313  0 +-------+
5314  | |
5315  | |
5316  | |
5317  +-------+
5318  | Alloc | 1st[m_1stNullItemsBeginCount]
5319  +-------+
5320  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5321  +-------+
5322  | ... |
5323  +-------+
5324  | Alloc | 1st[1st.size() - 1]
5325  +-------+
5326  | |
5327  | |
5328  | |
5329 GetSize() +-------+
5330 
5331 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5332 
5333  0 +-------+
5334  | Alloc | 2nd[0]
5335  +-------+
5336  | Alloc | 2nd[1]
5337  +-------+
5338  | ... |
5339  +-------+
5340  | Alloc | 2nd[2nd.size() - 1]
5341  +-------+
5342  | |
5343  | |
5344  | |
5345  +-------+
5346  | Alloc | 1st[m_1stNullItemsBeginCount]
5347  +-------+
5348  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5349  +-------+
5350  | ... |
5351  +-------+
5352  | Alloc | 1st[1st.size() - 1]
5353  +-------+
5354  | |
5355 GetSize() +-------+
5356 
5357 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5358 
5359  0 +-------+
5360  | |
5361  | |
5362  | |
5363  +-------+
5364  | Alloc | 1st[m_1stNullItemsBeginCount]
5365  +-------+
5366  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5367  +-------+
5368  | ... |
5369  +-------+
5370  | Alloc | 1st[1st.size() - 1]
5371  +-------+
5372  | |
5373  | |
5374  | |
5375  +-------+
5376  | Alloc | 2nd[2nd.size() - 1]
5377  +-------+
5378  | ... |
5379  +-------+
5380  | Alloc | 2nd[1]
5381  +-------+
5382  | Alloc | 2nd[0]
5383 GetSize() +-------+
5384 
5385 */
5386 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5387 {
5388  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5389 public:
5390  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5391  virtual ~VmaBlockMetadata_Linear();
5392  virtual void Init(VkDeviceSize size);
5393 
5394  virtual bool Validate() const;
5395  virtual size_t GetAllocationCount() const;
5396  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5397  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5398  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5399 
5400  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5401  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5402 
5403 #if VMA_STATS_STRING_ENABLED
5404  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5405 #endif
5406 
5407  virtual bool CreateAllocationRequest(
5408  uint32_t currentFrameIndex,
5409  uint32_t frameInUseCount,
5410  VkDeviceSize bufferImageGranularity,
5411  VkDeviceSize allocSize,
5412  VkDeviceSize allocAlignment,
5413  bool upperAddress,
5414  VmaSuballocationType allocType,
5415  bool canMakeOtherLost,
5416  uint32_t strategy,
5417  VmaAllocationRequest* pAllocationRequest);
5418 
5419  virtual bool MakeRequestedAllocationsLost(
5420  uint32_t currentFrameIndex,
5421  uint32_t frameInUseCount,
5422  VmaAllocationRequest* pAllocationRequest);
5423 
5424  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5425 
5426  virtual VkResult CheckCorruption(const void* pBlockData);
5427 
5428  virtual void Alloc(
5429  const VmaAllocationRequest& request,
5430  VmaSuballocationType type,
5431  VkDeviceSize allocSize,
5432  bool upperAddress,
5433  VmaAllocation hAllocation);
5434 
5435  virtual void Free(const VmaAllocation allocation);
5436  virtual void FreeAtOffset(VkDeviceSize offset);
5437 
5438 private:
5439  /*
5440  There are two suballocation vectors, used in a ping-pong fashion.
5441  The one with index m_1stVectorIndex is called 1st.
5442  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5443  2nd can be non-empty only when 1st is not empty.
5444  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5445  */
5446  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5447 
5448  enum SECOND_VECTOR_MODE
5449  {
5450  SECOND_VECTOR_EMPTY,
5451  /*
5452  Suballocations in the 2nd vector are created later than the ones in the 1st,
5453  but they all have smaller offsets.
5454  */
5455  SECOND_VECTOR_RING_BUFFER,
5456  /*
5457  Suballocations in the 2nd vector form the upper side of a double stack.
5458  They all have offsets higher than those in the 1st vector.
5459  The top of this stack corresponds to smaller offsets but higher indices in this vector.
5460  */
5461  SECOND_VECTOR_DOUBLE_STACK,
5462  };
5463 
5464  VkDeviceSize m_SumFreeSize;
5465  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5466  uint32_t m_1stVectorIndex;
5467  SECOND_VECTOR_MODE m_2ndVectorMode;
5468 
5469  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5470  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5471  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5472  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5473 
5474  // Number of items in 1st vector with hAllocation = null at the beginning.
5475  size_t m_1stNullItemsBeginCount;
5476  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5477  size_t m_1stNullItemsMiddleCount;
5478  // Number of items in 2nd vector with hAllocation = null.
5479  size_t m_2ndNullItemsCount;
5480 
5481  bool ShouldCompact1st() const;
5482  void CleanupAfterFree();
5483 };
5484 
5485 /*
5486 - GetSize() is the original size of allocated memory block.
5487 - m_UsableSize is this size aligned down to a power of two.
5488  All allocations and calculations happen relative to m_UsableSize.
5489 - GetUnusableSize() is the difference between them.
5490  It is reported as a separate, unused range, not available for allocations.
5491 
5492 Node at level 0 has size = m_UsableSize.
5493 Each subsequent level contains nodes half the size of those on the previous level.
5494 m_LevelCount is the maximum number of levels to use in the current object.
5495 */
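/* Worked example (illustrative, numbers chosen arbitrarily): for a block with
GetSize() = 1000, m_UsableSize = 512 (the largest power of two not exceeding 1000)
and GetUnusableSize() = 488. With LevelToNodeSize(level) = m_UsableSize >> level,
level 0 -> 512, level 1 -> 256, level 2 -> 128, level 3 -> 64, and so on down to
MIN_NODE_SIZE. A 100-byte allocation is served from a level-2 node (128 bytes is the
smallest node size that still fits it), wasting 28 bytes to internal fragmentation. */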
5496 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5497 {
5498  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5499 public:
5500  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5501  virtual ~VmaBlockMetadata_Buddy();
5502  virtual void Init(VkDeviceSize size);
5503 
5504  virtual bool Validate() const;
5505  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5506  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5507  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5508  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5509 
5510  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5511  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5512 
5513 #if VMA_STATS_STRING_ENABLED
5514  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5515 #endif
5516 
5517  virtual bool CreateAllocationRequest(
5518  uint32_t currentFrameIndex,
5519  uint32_t frameInUseCount,
5520  VkDeviceSize bufferImageGranularity,
5521  VkDeviceSize allocSize,
5522  VkDeviceSize allocAlignment,
5523  bool upperAddress,
5524  VmaSuballocationType allocType,
5525  bool canMakeOtherLost,
5526  uint32_t strategy,
5527  VmaAllocationRequest* pAllocationRequest);
5528 
5529  virtual bool MakeRequestedAllocationsLost(
5530  uint32_t currentFrameIndex,
5531  uint32_t frameInUseCount,
5532  VmaAllocationRequest* pAllocationRequest);
5533 
5534  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5535 
5536  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5537 
5538  virtual void Alloc(
5539  const VmaAllocationRequest& request,
5540  VmaSuballocationType type,
5541  VkDeviceSize allocSize,
5542  bool upperAddress,
5543  VmaAllocation hAllocation);
5544 
5545  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5546  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5547 
5548 private:
5549  static const VkDeviceSize MIN_NODE_SIZE = 32;
5550  static const size_t MAX_LEVELS = 30;
5551 
5552  struct ValidationContext
5553  {
5554  size_t calculatedAllocationCount;
5555  size_t calculatedFreeCount;
5556  VkDeviceSize calculatedSumFreeSize;
5557 
5558  ValidationContext() :
5559  calculatedAllocationCount(0),
5560  calculatedFreeCount(0),
5561  calculatedSumFreeSize(0) { }
5562  };
5563 
5564  struct Node
5565  {
5566  VkDeviceSize offset;
5567  enum TYPE
5568  {
5569  TYPE_FREE,
5570  TYPE_ALLOCATION,
5571  TYPE_SPLIT,
5572  TYPE_COUNT
5573  } type;
5574  Node* parent;
5575  Node* buddy;
5576 
5577  union
5578  {
5579  struct
5580  {
5581  Node* prev;
5582  Node* next;
5583  } free;
5584  struct
5585  {
5586  VmaAllocation alloc;
5587  } allocation;
5588  struct
5589  {
5590  Node* leftChild;
5591  } split;
5592  };
5593  };
5594 
5595  // Size of the memory block aligned down to a power of two.
5596  VkDeviceSize m_UsableSize;
5597  uint32_t m_LevelCount;
5598 
5599  Node* m_Root;
5600  struct {
5601  Node* front;
5602  Node* back;
5603  } m_FreeList[MAX_LEVELS];
5604  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5605  size_t m_AllocationCount;
5606  // Number of nodes in the tree with type == TYPE_FREE.
5607  size_t m_FreeCount;
5608  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5609  VkDeviceSize m_SumFreeSize;
5610 
5611  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5612  void DeleteNode(Node* node);
5613  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5614  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5615  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5616  // Alloc passed just for validation. Can be null.
5617  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5618  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5619  // Adds node to the front of FreeList at given level.
5620  // node->type must be FREE.
5621  // node->free.prev, next can be undefined.
5622  void AddToFreeListFront(uint32_t level, Node* node);
5623  // Removes node from FreeList at given level.
5624  // node->type must be FREE.
5625  // node->free.prev, next stay untouched.
5626  void RemoveFromFreeList(uint32_t level, Node* node);
5627 
5628 #if VMA_STATS_STRING_ENABLED
5629  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5630 #endif
5631 };
5632 
5633 /*
5634 Represents a single block of device memory (`VkDeviceMemory`) with all the
5635 data about its regions (aka suballocations, #VmaAllocation), both assigned and free.
5636 
5637 Thread-safety: This class must be externally synchronized.
5638 */
5639 class VmaDeviceMemoryBlock
5640 {
5641  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5642 public:
5643  VmaBlockMetadata* m_pMetadata;
5644 
5645  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5646 
5647  ~VmaDeviceMemoryBlock()
5648  {
5649  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5650  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5651  }
5652 
5653  // Always call after construction.
5654  void Init(
5655  VmaAllocator hAllocator,
5656  uint32_t newMemoryTypeIndex,
5657  VkDeviceMemory newMemory,
5658  VkDeviceSize newSize,
5659  uint32_t id,
5660  uint32_t algorithm);
5661  // Always call before destruction.
5662  void Destroy(VmaAllocator allocator);
5663 
5664  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5665  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5666  uint32_t GetId() const { return m_Id; }
5667  void* GetMappedData() const { return m_pMappedData; }
5668 
5669  // Validates all data structures inside this object. If not valid, returns false.
5670  bool Validate() const;
5671 
5672  VkResult CheckCorruption(VmaAllocator hAllocator);
5673 
5674  // ppData can be null.
5675  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5676  void Unmap(VmaAllocator hAllocator, uint32_t count);
5677 
5678  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5679  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5680 
5681  VkResult BindBufferMemory(
5682  const VmaAllocator hAllocator,
5683  const VmaAllocation hAllocation,
5684  VkBuffer hBuffer);
5685  VkResult BindImageMemory(
5686  const VmaAllocator hAllocator,
5687  const VmaAllocation hAllocation,
5688  VkImage hImage);
5689 
5690 private:
5691  uint32_t m_MemoryTypeIndex;
5692  uint32_t m_Id;
5693  VkDeviceMemory m_hMemory;
5694 
5695  /*
5696  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5697  Also protects m_MapCount, m_pMappedData.
5698  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5699  */
5700  VMA_MUTEX m_Mutex;
5701  uint32_t m_MapCount;
5702  void* m_pMappedData;
5703 };
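// Illustrative sketch, not part of the library: Map()/Unmap() are reference-counted
// per block, so the `count` arguments must balance. The helper name is hypothetical;
// external synchronization of the block (see the class comment above) and a host-visible
// memory type are the caller's responsibility.
static inline VkResult VmaSketch_ReadBlockByte(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* pBlock,
    VkDeviceSize offset,
    uint8_t* pOutByte)
{
    void* pData = VMA_NULL;
    VkResult res = pBlock->Map(hAllocator, 1, &pData); // Acquire 1 mapping reference.
    if(res != VK_SUCCESS)
    {
        return res;
    }
    *pOutByte = *((const uint8_t*)pData + offset);
    pBlock->Unmap(hAllocator, 1); // Release the same number of references.
    return VK_SUCCESS;
}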
5704 
5705 struct VmaPointerLess
5706 {
5707  bool operator()(const void* lhs, const void* rhs) const
5708  {
5709  return lhs < rhs;
5710  }
5711 };
5712 
5713 struct VmaDefragmentationMove
5714 {
5715  size_t srcBlockIndex;
5716  size_t dstBlockIndex;
5717  VkDeviceSize srcOffset;
5718  VkDeviceSize dstOffset;
5719  VkDeviceSize size;
5720 };
5721 
5722 class VmaDefragmentationAlgorithm;
5723 
5724 /*
5725 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5726 Vulkan memory type.
5727 
5728 Synchronized internally with a mutex.
5729 */
5730 struct VmaBlockVector
5731 {
5732  VMA_CLASS_NO_COPY(VmaBlockVector)
5733 public:
5734  VmaBlockVector(
5735  VmaAllocator hAllocator,
5736  uint32_t memoryTypeIndex,
5737  VkDeviceSize preferredBlockSize,
5738  size_t minBlockCount,
5739  size_t maxBlockCount,
5740  VkDeviceSize bufferImageGranularity,
5741  uint32_t frameInUseCount,
5742  bool isCustomPool,
5743  bool explicitBlockSize,
5744  uint32_t algorithm);
5745  ~VmaBlockVector();
5746 
5747  VkResult CreateMinBlocks();
5748 
5749  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5750  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5751  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5752  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5753  uint32_t GetAlgorithm() const { return m_Algorithm; }
5754 
5755  void GetPoolStats(VmaPoolStats* pStats);
5756 
5757  bool IsEmpty() const { return m_Blocks.empty(); }
5758  bool IsCorruptionDetectionEnabled() const;
5759 
5760  VkResult Allocate(
5761  VmaPool hCurrentPool,
5762  uint32_t currentFrameIndex,
5763  VkDeviceSize size,
5764  VkDeviceSize alignment,
5765  const VmaAllocationCreateInfo& createInfo,
5766  VmaSuballocationType suballocType,
5767  VmaAllocation* pAllocation);
5768 
5769  void Free(
5770  VmaAllocation hAllocation);
5771 
5772  // Adds statistics of this BlockVector to pStats.
5773  void AddStats(VmaStats* pStats);
5774 
5775 #if VMA_STATS_STRING_ENABLED
5776  void PrintDetailedMap(class VmaJsonWriter& json);
5777 #endif
5778 
5779  void MakePoolAllocationsLost(
5780  uint32_t currentFrameIndex,
5781  size_t* pLostAllocationCount);
5782  VkResult CheckCorruption();
5783 
5784  // Saves results in pCtx->res.
5785  void Defragment(
5786  class VmaBlockVectorDefragmentationContext* pCtx,
5787  VmaDefragmentationStats* pStats,
5788  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5789  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5790  VkCommandBuffer commandBuffer);
5791  void DefragmentationEnd(
5792  class VmaBlockVectorDefragmentationContext* pCtx,
5793  VmaDefragmentationStats* pStats);
5794 
5796  // To be used only while the m_Mutex is locked. Used during defragmentation.
5797 
5798  size_t GetBlockCount() const { return m_Blocks.size(); }
5799  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5800  size_t CalcAllocationCount() const;
5801  bool IsBufferImageGranularityConflictPossible() const;
5802 
5803 private:
5804  friend class VmaDefragmentationAlgorithm_Generic;
5805 
5806  const VmaAllocator m_hAllocator;
5807  const uint32_t m_MemoryTypeIndex;
5808  const VkDeviceSize m_PreferredBlockSize;
5809  const size_t m_MinBlockCount;
5810  const size_t m_MaxBlockCount;
5811  const VkDeviceSize m_BufferImageGranularity;
5812  const uint32_t m_FrameInUseCount;
5813  const bool m_IsCustomPool;
5814  const bool m_ExplicitBlockSize;
5815  const uint32_t m_Algorithm;
5816  /* There can be at most one block that is completely empty - a hysteresis
5817  to avoid the pessimistic case of alternately creating and destroying
5818  a VkDeviceMemory. */
5819  bool m_HasEmptyBlock;
5820  VMA_RW_MUTEX m_Mutex;
5821  // Incrementally sorted by sumFreeSize, ascending.
5822  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5823  uint32_t m_NextBlockId;
5824 
5825  VkDeviceSize CalcMaxBlockSize() const;
5826 
5827  // Finds and removes given block from vector.
5828  void Remove(VmaDeviceMemoryBlock* pBlock);
5829 
5830  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5831  // after this call.
5832  void IncrementallySortBlocks();
5833 
5834  // To be used only without CAN_MAKE_OTHER_LOST flag.
5835  VkResult AllocateFromBlock(
5836  VmaDeviceMemoryBlock* pBlock,
5837  VmaPool hCurrentPool,
5838  uint32_t currentFrameIndex,
5839  VkDeviceSize size,
5840  VkDeviceSize alignment,
5841  VmaAllocationCreateFlags allocFlags,
5842  void* pUserData,
5843  VmaSuballocationType suballocType,
5844  uint32_t strategy,
5845  VmaAllocation* pAllocation);
5846 
5847  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5848 
5849  // Saves result to pCtx->res.
5850  void ApplyDefragmentationMovesCpu(
5851  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5852  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5853  // Saves result to pCtx->res.
5854  void ApplyDefragmentationMovesGpu(
5855  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5856  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5857  VkCommandBuffer commandBuffer);
5858 
5859  /*
5860  Used during defragmentation. pDefragmentationStats is optional: it is an in/out
5861  parameter, updated with new data.
5862  */
5863  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5864 };
5865 
5866 struct VmaPool_T
5867 {
5868  VMA_CLASS_NO_COPY(VmaPool_T)
5869 public:
5870  VmaBlockVector m_BlockVector;
5871 
5872  VmaPool_T(
5873  VmaAllocator hAllocator,
5874  const VmaPoolCreateInfo& createInfo,
5875  VkDeviceSize preferredBlockSize);
5876  ~VmaPool_T();
5877 
5878  uint32_t GetId() const { return m_Id; }
5879  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5880 
5881 #if VMA_STATS_STRING_ENABLED
5882  //void PrintDetailedMap(class VmaStringBuilder& sb);
5883 #endif
5884 
5885 private:
5886  uint32_t m_Id;
5887 };
5888 
5889 /*
5890 Performs defragmentation:
5891 
5892 - Updates `pBlockVector->m_pMetadata`.
5893 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
5894 - Does not move actual data, only returns requested moves as `moves`.
5895 */
5896 class VmaDefragmentationAlgorithm
5897 {
5898  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
5899 public:
5900  VmaDefragmentationAlgorithm(
5901  VmaAllocator hAllocator,
5902  VmaBlockVector* pBlockVector,
5903  uint32_t currentFrameIndex) :
5904  m_hAllocator(hAllocator),
5905  m_pBlockVector(pBlockVector),
5906  m_CurrentFrameIndex(currentFrameIndex)
5907  {
5908  }
5909  virtual ~VmaDefragmentationAlgorithm()
5910  {
5911  }
5912 
5913  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
5914  virtual void AddAll() = 0;
5915 
5916  virtual VkResult Defragment(
5917  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5918  VkDeviceSize maxBytesToMove,
5919  uint32_t maxAllocationsToMove) = 0;
5920 
5921  virtual VkDeviceSize GetBytesMoved() const = 0;
5922  virtual uint32_t GetAllocationsMoved() const = 0;
5923 
5924 protected:
5925  VmaAllocator const m_hAllocator;
5926  VmaBlockVector* const m_pBlockVector;
5927  const uint32_t m_CurrentFrameIndex;
5928 
5929  struct AllocationInfo
5930  {
5931  VmaAllocation m_hAllocation;
5932  VkBool32* m_pChanged;
5933 
5934  AllocationInfo() :
5935  m_hAllocation(VK_NULL_HANDLE),
5936  m_pChanged(VMA_NULL)
5937  {
5938  }
5939  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
5940  m_hAllocation(hAlloc),
5941  m_pChanged(pChanged)
5942  {
5943  }
5944  };
5945 };
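// Illustrative sketch, not part of the library: how the moves returned by Defragment()
// could be applied on the CPU, assuming every involved block is already mapped and that
// <cstring> is available (the library itself uses memcpy/memset). The real logic lives
// in VmaBlockVector::ApplyDefragmentationMovesCpu(), declared above, which also takes
// care of mapping the blocks.
static inline void VmaSketch_ApplyMovesCpu(
    VmaBlockVector& blockVector,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    for(size_t i = 0; i < moves.size(); ++i)
    {
        const VmaDefragmentationMove& move = moves[i];
        char* const pSrc = (char*)blockVector.GetBlock(move.srcBlockIndex)->GetMappedData();
        char* const pDst = (char*)blockVector.GetBlock(move.dstBlockIndex)->GetMappedData();
        VMA_ASSERT(pSrc != VMA_NULL && pDst != VMA_NULL);
        // memmove, because source and destination may overlap within the same block.
        memmove(pDst + move.dstOffset, pSrc + move.srcOffset, (size_t)move.size);
    }
}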
5946 
5947 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
5948 {
5949  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
5950 public:
5951  VmaDefragmentationAlgorithm_Generic(
5952  VmaAllocator hAllocator,
5953  VmaBlockVector* pBlockVector,
5954  uint32_t currentFrameIndex,
5955  bool overlappingMoveSupported);
5956  virtual ~VmaDefragmentationAlgorithm_Generic();
5957 
5958  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5959  virtual void AddAll() { m_AllAllocations = true; }
5960 
5961  virtual VkResult Defragment(
5962  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5963  VkDeviceSize maxBytesToMove,
5964  uint32_t maxAllocationsToMove);
5965 
5966  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5967  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5968 
5969 private:
5970  uint32_t m_AllocationCount;
5971  bool m_AllAllocations;
5972 
5973  VkDeviceSize m_BytesMoved;
5974  uint32_t m_AllocationsMoved;
5975 
5976  struct AllocationInfoSizeGreater
5977  {
5978  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5979  {
5980  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5981  }
5982  };
5983 
5984  struct AllocationInfoOffsetGreater
5985  {
5986  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5987  {
5988  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
5989  }
5990  };
5991 
5992  struct BlockInfo
5993  {
5994  size_t m_OriginalBlockIndex;
5995  VmaDeviceMemoryBlock* m_pBlock;
5996  bool m_HasNonMovableAllocations;
5997  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5998 
5999  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6000  m_OriginalBlockIndex(SIZE_MAX),
6001  m_pBlock(VMA_NULL),
6002  m_HasNonMovableAllocations(true),
6003  m_Allocations(pAllocationCallbacks)
6004  {
6005  }
6006 
6007  void CalcHasNonMovableAllocations()
6008  {
6009  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6010  const size_t defragmentAllocCount = m_Allocations.size();
6011  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6012  }
6013 
6014  void SortAllocationsBySizeDescending()
6015  {
6016  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6017  }
6018 
6019  void SortAllocationsByOffsetDescending()
6020  {
6021  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6022  }
6023  };
6024 
6025  struct BlockPointerLess
6026  {
6027  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6028  {
6029  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6030  }
6031  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6032  {
6033  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6034  }
6035  };
6036 
6037  // 1. Blocks with some non-movable allocations go first.
6038  // 2. Blocks with smaller sumFreeSize go first.
6039  struct BlockInfoCompareMoveDestination
6040  {
6041  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6042  {
6043  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6044  {
6045  return true;
6046  }
6047  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6048  {
6049  return false;
6050  }
6051  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6052  {
6053  return true;
6054  }
6055  return false;
6056  }
6057  };
6058 
6059  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6060  BlockInfoVector m_Blocks;
6061 
6062  VkResult DefragmentRound(
6063  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6064  VkDeviceSize maxBytesToMove,
6065  uint32_t maxAllocationsToMove);
6066 
6067  size_t CalcBlocksWithNonMovableCount() const;
6068 
6069  static bool MoveMakesSense(
6070  size_t dstBlockIndex, VkDeviceSize dstOffset,
6071  size_t srcBlockIndex, VkDeviceSize srcOffset);
6072 };
6073 
6074 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6075 {
6076  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6077 public:
6078  VmaDefragmentationAlgorithm_Fast(
6079  VmaAllocator hAllocator,
6080  VmaBlockVector* pBlockVector,
6081  uint32_t currentFrameIndex,
6082  bool overlappingMoveSupported);
6083  virtual ~VmaDefragmentationAlgorithm_Fast();
6084 
6085  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6086  virtual void AddAll() { m_AllAllocations = true; }
6087 
6088  virtual VkResult Defragment(
6089  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6090  VkDeviceSize maxBytesToMove,
6091  uint32_t maxAllocationsToMove);
6092 
6093  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6094  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6095 
6096 private:
6097  struct BlockInfo
6098  {
6099  size_t origBlockIndex;
6100  };
6101 
6102  class FreeSpaceDatabase
6103  {
6104  public:
6105  FreeSpaceDatabase()
6106  {
6107  FreeSpace s = {};
6108  s.blockInfoIndex = SIZE_MAX;
6109  for(size_t i = 0; i < MAX_COUNT; ++i)
6110  {
6111  m_FreeSpaces[i] = s;
6112  }
6113  }
6114 
6115  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6116  {
6117  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6118  {
6119  return;
6120  }
6121 
6122  // Find first invalid or the smallest structure.
6123  size_t bestIndex = SIZE_MAX;
6124  for(size_t i = 0; i < MAX_COUNT; ++i)
6125  {
6126  // Empty structure.
6127  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6128  {
6129  bestIndex = i;
6130  break;
6131  }
6132  if(m_FreeSpaces[i].size < size &&
6133  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6134  {
6135  bestIndex = i;
6136  }
6137  }
6138 
6139  if(bestIndex != SIZE_MAX)
6140  {
6141  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6142  m_FreeSpaces[bestIndex].offset = offset;
6143  m_FreeSpaces[bestIndex].size = size;
6144  }
6145  }
6146 
6147  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6148  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6149  {
6150  size_t bestIndex = SIZE_MAX;
6151  VkDeviceSize bestFreeSpaceAfter = 0;
6152  for(size_t i = 0; i < MAX_COUNT; ++i)
6153  {
6154  // Structure is valid.
6155  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6156  {
6157  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6158  // Allocation fits into this structure.
6159  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6160  {
6161  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6162  (dstOffset + size);
6163  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6164  {
6165  bestIndex = i;
6166  bestFreeSpaceAfter = freeSpaceAfter;
6167  }
6168  }
6169  }
6170  }
6171 
6172  if(bestIndex != SIZE_MAX)
6173  {
6174  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6175  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6176 
6177  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6178  {
6179  // Leave this structure for remaining empty space.
6180  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6181  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6182  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6183  }
6184  else
6185  {
6186  // This structure becomes invalid.
6187  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6188  }
6189 
6190  return true;
6191  }
6192 
6193  return false;
6194  }
6195 
6196  private:
6197  static const size_t MAX_COUNT = 4;
6198 
6199  struct FreeSpace
6200  {
6201  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6202  VkDeviceSize offset;
6203  VkDeviceSize size;
6204  } m_FreeSpaces[MAX_COUNT];
6205  };
6206 
6207  const bool m_OverlappingMoveSupported;
6208 
6209  uint32_t m_AllocationCount;
6210  bool m_AllAllocations;
6211 
6212  VkDeviceSize m_BytesMoved;
6213  uint32_t m_AllocationsMoved;
6214 
6215  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6216 
6217  void PreprocessMetadata();
6218  void PostprocessMetadata();
6219  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6220 };
6221 
6222 struct VmaBlockDefragmentationContext
6223 {
6224 private:
6225  VMA_CLASS_NO_COPY(VmaBlockDefragmentationContext)
6226 public:
6227  enum BLOCK_FLAG
6228  {
6229  BLOCK_FLAG_USED = 0x00000001,
6230  };
6231  uint32_t flags;
6232  VkBuffer hBuffer;
6233 
6234  VmaBlockDefragmentationContext() :
6235  flags(0),
6236  hBuffer(VK_NULL_HANDLE)
6237  {
6238  }
6239 };
6240 
6241 class VmaBlockVectorDefragmentationContext
6242 {
6243  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6244 public:
6245  VkResult res;
6246  bool mutexLocked;
6247  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6248 
6249  VmaBlockVectorDefragmentationContext(
6250  VmaAllocator hAllocator,
6251  VmaPool hCustomPool, // Optional.
6252  VmaBlockVector* pBlockVector,
6253  uint32_t currFrameIndex,
6254  uint32_t flags);
6255  ~VmaBlockVectorDefragmentationContext();
6256 
6257  VmaPool GetCustomPool() const { return m_hCustomPool; }
6258  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6259  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6260 
6261  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6262  void AddAll() { m_AllAllocations = true; }
6263 
6264  void Begin(bool overlappingMoveSupported);
6265 
6266 private:
6267  const VmaAllocator m_hAllocator;
6268  // Null if not from custom pool.
6269  const VmaPool m_hCustomPool;
6270  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6271  VmaBlockVector* const m_pBlockVector;
6272  const uint32_t m_CurrFrameIndex;
6273  const uint32_t m_AlgorithmFlags;
6274  // Owner of this object.
6275  VmaDefragmentationAlgorithm* m_pAlgorithm;
6276 
6277  struct AllocInfo
6278  {
6279  VmaAllocation hAlloc;
6280  VkBool32* pChanged;
6281  };
6282  // Used between constructor and Begin.
6283  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6284  bool m_AllAllocations;
6285 };
6286 
6287 struct VmaDefragmentationContext_T
6288 {
6289 private:
6290  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6291 public:
6292  VmaDefragmentationContext_T(
6293  VmaAllocator hAllocator,
6294  uint32_t currFrameIndex,
6295  uint32_t flags,
6296  VmaDefragmentationStats* pStats);
6297  ~VmaDefragmentationContext_T();
6298 
6299  void AddPools(uint32_t poolCount, VmaPool* pPools);
6300  void AddAllocations(
6301  uint32_t allocationCount,
6302  VmaAllocation* pAllocations,
6303  VkBool32* pAllocationsChanged);
6304 
6305  /*
6306  Returns:
6307  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6308  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6309  - Negative value if an error occurred and the object can be destroyed immediately.
6310  */
6311  VkResult Defragment(
6312  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6313  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6314  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6315 
6316 private:
6317  const VmaAllocator m_hAllocator;
6318  const uint32_t m_CurrFrameIndex;
6319  const uint32_t m_Flags;
6320  VmaDefragmentationStats* const m_pStats;
6321  // Owner of these objects.
6322  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6323  // Owner of these objects.
6324  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6325 };
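// Illustrative sketch, not part of the library: how the return-value contract described
// above surfaces through the public API (vmaDefragmentationBegin/vmaDefragmentationEnd,
// declared in the interface section of this header). CPU-only defragmentation is assumed,
// so no command buffer is provided; the helper name and parameter choices are placeholders.
static inline VkResult VmaSketch_DefragmentCpu(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    uint32_t allocationCount)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = allocationCount;
    info.pAllocations = pAllocations;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
    // VK_NOT_READY would mean GPU work was recorded into info.commandBuffer; it must be
    // submitted and finished before vmaDefragmentationEnd() is called.
    if(res >= 0)
    {
        res = vmaDefragmentationEnd(allocator, ctx);
    }
    return res;
}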
6326 
6327 #if VMA_RECORDING_ENABLED
6328 
6329 class VmaRecorder
6330 {
6331 public:
6332  VmaRecorder();
6333  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6334  void WriteConfiguration(
6335  const VkPhysicalDeviceProperties& devProps,
6336  const VkPhysicalDeviceMemoryProperties& memProps,
6337  bool dedicatedAllocationExtensionEnabled);
6338  ~VmaRecorder();
6339 
6340  void RecordCreateAllocator(uint32_t frameIndex);
6341  void RecordDestroyAllocator(uint32_t frameIndex);
6342  void RecordCreatePool(uint32_t frameIndex,
6343  const VmaPoolCreateInfo& createInfo,
6344  VmaPool pool);
6345  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6346  void RecordAllocateMemory(uint32_t frameIndex,
6347  const VkMemoryRequirements& vkMemReq,
6348  const VmaAllocationCreateInfo& createInfo,
6349  VmaAllocation allocation);
6350  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6351  const VkMemoryRequirements& vkMemReq,
6352  bool requiresDedicatedAllocation,
6353  bool prefersDedicatedAllocation,
6354  const VmaAllocationCreateInfo& createInfo,
6355  VmaAllocation allocation);
6356  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6357  const VkMemoryRequirements& vkMemReq,
6358  bool requiresDedicatedAllocation,
6359  bool prefersDedicatedAllocation,
6360  const VmaAllocationCreateInfo& createInfo,
6361  VmaAllocation allocation);
6362  void RecordFreeMemory(uint32_t frameIndex,
6363  VmaAllocation allocation);
6364  void RecordResizeAllocation(
6365  uint32_t frameIndex,
6366  VmaAllocation allocation,
6367  VkDeviceSize newSize);
6368  void RecordSetAllocationUserData(uint32_t frameIndex,
6369  VmaAllocation allocation,
6370  const void* pUserData);
6371  void RecordCreateLostAllocation(uint32_t frameIndex,
6372  VmaAllocation allocation);
6373  void RecordMapMemory(uint32_t frameIndex,
6374  VmaAllocation allocation);
6375  void RecordUnmapMemory(uint32_t frameIndex,
6376  VmaAllocation allocation);
6377  void RecordFlushAllocation(uint32_t frameIndex,
6378  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6379  void RecordInvalidateAllocation(uint32_t frameIndex,
6380  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6381  void RecordCreateBuffer(uint32_t frameIndex,
6382  const VkBufferCreateInfo& bufCreateInfo,
6383  const VmaAllocationCreateInfo& allocCreateInfo,
6384  VmaAllocation allocation);
6385  void RecordCreateImage(uint32_t frameIndex,
6386  const VkImageCreateInfo& imageCreateInfo,
6387  const VmaAllocationCreateInfo& allocCreateInfo,
6388  VmaAllocation allocation);
6389  void RecordDestroyBuffer(uint32_t frameIndex,
6390  VmaAllocation allocation);
6391  void RecordDestroyImage(uint32_t frameIndex,
6392  VmaAllocation allocation);
6393  void RecordTouchAllocation(uint32_t frameIndex,
6394  VmaAllocation allocation);
6395  void RecordGetAllocationInfo(uint32_t frameIndex,
6396  VmaAllocation allocation);
6397  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6398  VmaPool pool);
6399  void RecordDefragmentationBegin(uint32_t frameIndex,
6400  const VmaDefragmentationInfo2& info,
6401  VmaDefragmentationContext ctx);
6402  void RecordDefragmentationEnd(uint32_t frameIndex,
6403  VmaDefragmentationContext ctx);
6404 
6405 private:
6406  struct CallParams
6407  {
6408  uint32_t threadId;
6409  double time;
6410  };
6411 
6412  class UserDataString
6413  {
6414  public:
6415  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6416  const char* GetString() const { return m_Str; }
6417 
6418  private:
6419  char m_PtrStr[17];
6420  const char* m_Str;
6421  };
6422 
6423  bool m_UseMutex;
6424  VmaRecordFlags m_Flags;
6425  FILE* m_File;
6426  VMA_MUTEX m_FileMutex;
6427  int64_t m_Freq;
6428  int64_t m_StartCounter;
6429 
6430  void GetBasicParams(CallParams& outParams);
6431 
6432  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6433  template<typename T>
6434  void PrintPointerList(uint64_t count, const T* pItems)
6435  {
6436  if(count)
6437  {
6438  fprintf(m_File, "%p", pItems[0]);
6439  for(uint64_t i = 1; i < count; ++i)
6440  {
6441  fprintf(m_File, " %p", pItems[i]);
6442  }
6443  }
6444  }
6445 
6446  void Flush();
6447 };
6448 
6449 #endif // #if VMA_RECORDING_ENABLED
6450 
6451 // Main allocator object.
6452 struct VmaAllocator_T
6453 {
6454  VMA_CLASS_NO_COPY(VmaAllocator_T)
6455 public:
6456  bool m_UseMutex;
6457  bool m_UseKhrDedicatedAllocation;
6458  VkDevice m_hDevice;
6459  bool m_AllocationCallbacksSpecified;
6460  VkAllocationCallbacks m_AllocationCallbacks;
6461  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6462 
6463  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
6464  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6465  VMA_MUTEX m_HeapSizeLimitMutex;
6466 
6467  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6468  VkPhysicalDeviceMemoryProperties m_MemProps;
6469 
6470  // Default pools.
6471  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6472 
6473  // Each vector is sorted by memory (handle value).
6474  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6475  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6476  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6477 
6478  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6479  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6480  ~VmaAllocator_T();
6481 
6482  const VkAllocationCallbacks* GetAllocationCallbacks() const
6483  {
6484  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6485  }
6486  const VmaVulkanFunctions& GetVulkanFunctions() const
6487  {
6488  return m_VulkanFunctions;
6489  }
6490 
6491  VkDeviceSize GetBufferImageGranularity() const
6492  {
6493  return VMA_MAX(
6494  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6495  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6496  }
6497 
6498  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6499  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6500 
6501  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6502  {
6503  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6504  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6505  }
6506  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6507  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6508  {
6509  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6510  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6511  }
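 // Example of the check above (editorial note): a type with
 // HOST_VISIBLE | HOST_CACHED masks down to HOST_VISIBLE alone and therefore
 // counts as non-coherent, while HOST_VISIBLE | HOST_COHERENT keeps both bits
 // and does not match - which is why GetMemoryTypeMinAlignment() below widens
 // the alignment to nonCoherentAtomSize only in the first case.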
6512  // Minimum alignment for all allocations in specific memory type.
6513  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6514  {
6515  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6516  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6517  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6518  }
6519 
6520  bool IsIntegratedGpu() const
6521  {
6522  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6523  }
6524 
6525 #if VMA_RECORDING_ENABLED
6526  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6527 #endif
6528 
6529  void GetBufferMemoryRequirements(
6530  VkBuffer hBuffer,
6531  VkMemoryRequirements& memReq,
6532  bool& requiresDedicatedAllocation,
6533  bool& prefersDedicatedAllocation) const;
6534  void GetImageMemoryRequirements(
6535  VkImage hImage,
6536  VkMemoryRequirements& memReq,
6537  bool& requiresDedicatedAllocation,
6538  bool& prefersDedicatedAllocation) const;
6539 
6540  // Main allocation function.
6541  VkResult AllocateMemory(
6542  const VkMemoryRequirements& vkMemReq,
6543  bool requiresDedicatedAllocation,
6544  bool prefersDedicatedAllocation,
6545  VkBuffer dedicatedBuffer,
6546  VkImage dedicatedImage,
6547  const VmaAllocationCreateInfo& createInfo,
6548  VmaSuballocationType suballocType,
6549  VmaAllocation* pAllocation);
6550 
6551  // Main deallocation function.
6552  void FreeMemory(const VmaAllocation allocation);
6553 
6554  VkResult ResizeAllocation(
6555  const VmaAllocation alloc,
6556  VkDeviceSize newSize);
6557 
6558  void CalculateStats(VmaStats* pStats);
6559 
6560 #if VMA_STATS_STRING_ENABLED
6561  void PrintDetailedMap(class VmaJsonWriter& json);
6562 #endif
6563 
6564  VkResult DefragmentationBegin(
6565  const VmaDefragmentationInfo2& info,
6566  VmaDefragmentationStats* pStats,
6567  VmaDefragmentationContext* pContext);
6568  VkResult DefragmentationEnd(
6569  VmaDefragmentationContext context);
6570 
6571  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6572  bool TouchAllocation(VmaAllocation hAllocation);
6573 
6574  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6575  void DestroyPool(VmaPool pool);
6576  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6577 
6578  void SetCurrentFrameIndex(uint32_t frameIndex);
6579  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6580 
6581  void MakePoolAllocationsLost(
6582  VmaPool hPool,
6583  size_t* pLostAllocationCount);
6584  VkResult CheckPoolCorruption(VmaPool hPool);
6585  VkResult CheckCorruption(uint32_t memoryTypeBits);
6586 
6587  void CreateLostAllocation(VmaAllocation* pAllocation);
6588 
6589  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6590  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6591 
6592  VkResult Map(VmaAllocation hAllocation, void** ppData);
6593  void Unmap(VmaAllocation hAllocation);
6594 
6595  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6596  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6597 
6598  void FlushOrInvalidateAllocation(
6599  VmaAllocation hAllocation,
6600  VkDeviceSize offset, VkDeviceSize size,
6601  VMA_CACHE_OPERATION op);
6602 
6603  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6604 
6605 private:
6606  VkDeviceSize m_PreferredLargeHeapBlockSize;
6607 
6608  VkPhysicalDevice m_PhysicalDevice;
6609  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6610 
6611  VMA_RW_MUTEX m_PoolsMutex;
6612  // Protected by m_PoolsMutex. Sorted by pointer value.
6613  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6614  uint32_t m_NextPoolId;
6615 
6616  VmaVulkanFunctions m_VulkanFunctions;
6617 
6618 #if VMA_RECORDING_ENABLED
6619  VmaRecorder* m_pRecorder;
6620 #endif
6621 
6622  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6623 
6624  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6625 
6626  VkResult AllocateMemoryOfType(
6627  VkDeviceSize size,
6628  VkDeviceSize alignment,
6629  bool dedicatedAllocation,
6630  VkBuffer dedicatedBuffer,
6631  VkImage dedicatedImage,
6632  const VmaAllocationCreateInfo& createInfo,
6633  uint32_t memTypeIndex,
6634  VmaSuballocationType suballocType,
6635  VmaAllocation* pAllocation);
6636 
6637  // Allocates and registers a new VkDeviceMemory specifically for a single allocation.
6638  VkResult AllocateDedicatedMemory(
6639  VkDeviceSize size,
6640  VmaSuballocationType suballocType,
6641  uint32_t memTypeIndex,
6642  bool map,
6643  bool isUserDataString,
6644  void* pUserData,
6645  VkBuffer dedicatedBuffer,
6646  VkImage dedicatedImage,
6647  VmaAllocation* pAllocation);
6648 
6649  // Frees the VkDeviceMemory of an allocation that was made as dedicated and unregisters it.
6650  void FreeDedicatedMemory(VmaAllocation allocation);
6651 };
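// Editorial sketch (not part of the original file): how the recording layer is
// expected to be driven from the allocator's public entry points when
// VMA_RECORDING_ENABLED is set, using only GetRecorder(), GetCurrentFrameIndex()
// and RecordCreatePool() declared above. The function name is hypothetical.
#if 0 // Illustration only.
static void VmaRecordCreatePoolSketch(VmaAllocator hAllocator, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
#if VMA_RECORDING_ENABLED
    if(hAllocator->GetRecorder() != VMA_NULL)
    {
        hAllocator->GetRecorder()->RecordCreatePool(
            hAllocator->GetCurrentFrameIndex(), createInfo, pool);
    }
#endif
}
#endif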
6652 
6654 // Memory allocation #2 after VmaAllocator_T definition
6655 
6656 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6657 {
6658  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6659 }
6660 
6661 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6662 {
6663  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6664 }
6665 
6666 template<typename T>
6667 static T* VmaAllocate(VmaAllocator hAllocator)
6668 {
6669  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6670 }
6671 
6672 template<typename T>
6673 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6674 {
6675  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6676 }
6677 
6678 template<typename T>
6679 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6680 {
6681  if(ptr != VMA_NULL)
6682  {
6683  ptr->~T();
6684  VmaFree(hAllocator, ptr);
6685  }
6686 }
6687 
6688 template<typename T>
6689 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6690 {
6691  if(ptr != VMA_NULL)
6692  {
6693  for(size_t i = count; i--; )
6694  ptr[i].~T();
6695  VmaFree(hAllocator, ptr);
6696  }
6697 }
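// Editorial sketch (not part of the original file): the helpers above are used
// in matched pairs. For example, SetUserData() further below duplicates a string
// with vma_new_array() and later releases it with vma_delete_array(). The
// function name here is hypothetical and for illustration only.
#if 0 // Illustration only.
static char* VmaDuplicateStringSketch(VmaAllocator hAllocator, const char* src)
{
    const size_t len = strlen(src);
    char* const copy = vma_new_array(hAllocator, char, len + 1); // Allocated via the allocator's callbacks.
    memcpy(copy, src, len + 1);
    return copy; // Release with vma_delete_array(hAllocator, copy, len + 1).
}
#endif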
6698 
6700 // VmaStringBuilder
6701 
6702 #if VMA_STATS_STRING_ENABLED
6703 
6704 class VmaStringBuilder
6705 {
6706 public:
6707  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6708  size_t GetLength() const { return m_Data.size(); }
6709  const char* GetData() const { return m_Data.data(); }
6710 
6711  void Add(char ch) { m_Data.push_back(ch); }
6712  void Add(const char* pStr);
6713  void AddNewLine() { Add('\n'); }
6714  void AddNumber(uint32_t num);
6715  void AddNumber(uint64_t num);
6716  void AddPointer(const void* ptr);
6717 
6718 private:
6719  VmaVector< char, VmaStlAllocator<char> > m_Data;
6720 };
6721 
6722 void VmaStringBuilder::Add(const char* pStr)
6723 {
6724  const size_t strLen = strlen(pStr);
6725  if(strLen > 0)
6726  {
6727  const size_t oldCount = m_Data.size();
6728  m_Data.resize(oldCount + strLen);
6729  memcpy(m_Data.data() + oldCount, pStr, strLen);
6730  }
6731 }
6732 
6733 void VmaStringBuilder::AddNumber(uint32_t num)
6734 {
6735  char buf[11];
6736  VmaUint32ToStr(buf, sizeof(buf), num);
6737  Add(buf);
6738 }
6739 
6740 void VmaStringBuilder::AddNumber(uint64_t num)
6741 {
6742  char buf[21];
6743  VmaUint64ToStr(buf, sizeof(buf), num);
6744  Add(buf);
6745 }
6746 
6747 void VmaStringBuilder::AddPointer(const void* ptr)
6748 {
6749  char buf[21];
6750  VmaPtrToStr(buf, sizeof(buf), ptr);
6751  Add(buf);
6752 }
6753 
6754 #endif // #if VMA_STATS_STRING_ENABLED
6755 
6757 // VmaJsonWriter
6758 
6759 #if VMA_STATS_STRING_ENABLED
6760 
6761 class VmaJsonWriter
6762 {
6763  VMA_CLASS_NO_COPY(VmaJsonWriter)
6764 public:
6765  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6766  ~VmaJsonWriter();
6767 
6768  void BeginObject(bool singleLine = false);
6769  void EndObject();
6770 
6771  void BeginArray(bool singleLine = false);
6772  void EndArray();
6773 
6774  void WriteString(const char* pStr);
6775  void BeginString(const char* pStr = VMA_NULL);
6776  void ContinueString(const char* pStr);
6777  void ContinueString(uint32_t n);
6778  void ContinueString(uint64_t n);
6779  void ContinueString_Pointer(const void* ptr);
6780  void EndString(const char* pStr = VMA_NULL);
6781 
6782  void WriteNumber(uint32_t n);
6783  void WriteNumber(uint64_t n);
6784  void WriteBool(bool b);
6785  void WriteNull();
6786 
6787 private:
6788  static const char* const INDENT;
6789 
6790  enum COLLECTION_TYPE
6791  {
6792  COLLECTION_TYPE_OBJECT,
6793  COLLECTION_TYPE_ARRAY,
6794  };
6795  struct StackItem
6796  {
6797  COLLECTION_TYPE type;
6798  uint32_t valueCount;
6799  bool singleLineMode;
6800  };
6801 
6802  VmaStringBuilder& m_SB;
6803  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6804  bool m_InsideString;
6805 
6806  void BeginValue(bool isString);
6807  void WriteIndent(bool oneLess = false);
6808 };
6809 
6810 const char* const VmaJsonWriter::INDENT = " ";
6811 
6812 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6813  m_SB(sb),
6814  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6815  m_InsideString(false)
6816 {
6817 }
6818 
6819 VmaJsonWriter::~VmaJsonWriter()
6820 {
6821  VMA_ASSERT(!m_InsideString);
6822  VMA_ASSERT(m_Stack.empty());
6823 }
6824 
6825 void VmaJsonWriter::BeginObject(bool singleLine)
6826 {
6827  VMA_ASSERT(!m_InsideString);
6828 
6829  BeginValue(false);
6830  m_SB.Add('{');
6831 
6832  StackItem item;
6833  item.type = COLLECTION_TYPE_OBJECT;
6834  item.valueCount = 0;
6835  item.singleLineMode = singleLine;
6836  m_Stack.push_back(item);
6837 }
6838 
6839 void VmaJsonWriter::EndObject()
6840 {
6841  VMA_ASSERT(!m_InsideString);
6842 
6843  WriteIndent(true);
6844  m_SB.Add('}');
6845 
6846  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6847  m_Stack.pop_back();
6848 }
6849 
6850 void VmaJsonWriter::BeginArray(bool singleLine)
6851 {
6852  VMA_ASSERT(!m_InsideString);
6853 
6854  BeginValue(false);
6855  m_SB.Add('[');
6856 
6857  StackItem item;
6858  item.type = COLLECTION_TYPE_ARRAY;
6859  item.valueCount = 0;
6860  item.singleLineMode = singleLine;
6861  m_Stack.push_back(item);
6862 }
6863 
6864 void VmaJsonWriter::EndArray()
6865 {
6866  VMA_ASSERT(!m_InsideString);
6867 
6868  WriteIndent(true);
6869  m_SB.Add(']');
6870 
6871  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6872  m_Stack.pop_back();
6873 }
6874 
6875 void VmaJsonWriter::WriteString(const char* pStr)
6876 {
6877  BeginString(pStr);
6878  EndString();
6879 }
6880 
6881 void VmaJsonWriter::BeginString(const char* pStr)
6882 {
6883  VMA_ASSERT(!m_InsideString);
6884 
6885  BeginValue(true);
6886  m_SB.Add('"');
6887  m_InsideString = true;
6888  if(pStr != VMA_NULL && pStr[0] != '\0')
6889  {
6890  ContinueString(pStr);
6891  }
6892 }
6893 
6894 void VmaJsonWriter::ContinueString(const char* pStr)
6895 {
6896  VMA_ASSERT(m_InsideString);
6897 
6898  const size_t strLen = strlen(pStr);
6899  for(size_t i = 0; i < strLen; ++i)
6900  {
6901  char ch = pStr[i];
6902  if(ch == '\\')
6903  {
6904  m_SB.Add("\\\\");
6905  }
6906  else if(ch == '"')
6907  {
6908  m_SB.Add("\\\"");
6909  }
6910  else if(ch >= 32)
6911  {
6912  m_SB.Add(ch);
6913  }
6914  else switch(ch)
6915  {
6916  case '\b':
6917  m_SB.Add("\\b");
6918  break;
6919  case '\f':
6920  m_SB.Add("\\f");
6921  break;
6922  case '\n':
6923  m_SB.Add("\\n");
6924  break;
6925  case '\r':
6926  m_SB.Add("\\r");
6927  break;
6928  case '\t':
6929  m_SB.Add("\\t");
6930  break;
6931  default:
6932  VMA_ASSERT(0 && "Character not currently supported.");
6933  break;
6934  }
6935  }
6936 }
6937 
6938 void VmaJsonWriter::ContinueString(uint32_t n)
6939 {
6940  VMA_ASSERT(m_InsideString);
6941  m_SB.AddNumber(n);
6942 }
6943 
6944 void VmaJsonWriter::ContinueString(uint64_t n)
6945 {
6946  VMA_ASSERT(m_InsideString);
6947  m_SB.AddNumber(n);
6948 }
6949 
6950 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6951 {
6952  VMA_ASSERT(m_InsideString);
6953  m_SB.AddPointer(ptr);
6954 }
6955 
6956 void VmaJsonWriter::EndString(const char* pStr)
6957 {
6958  VMA_ASSERT(m_InsideString);
6959  if(pStr != VMA_NULL && pStr[0] != '\0')
6960  {
6961  ContinueString(pStr);
6962  }
6963  m_SB.Add('"');
6964  m_InsideString = false;
6965 }
6966 
6967 void VmaJsonWriter::WriteNumber(uint32_t n)
6968 {
6969  VMA_ASSERT(!m_InsideString);
6970  BeginValue(false);
6971  m_SB.AddNumber(n);
6972 }
6973 
6974 void VmaJsonWriter::WriteNumber(uint64_t n)
6975 {
6976  VMA_ASSERT(!m_InsideString);
6977  BeginValue(false);
6978  m_SB.AddNumber(n);
6979 }
6980 
6981 void VmaJsonWriter::WriteBool(bool b)
6982 {
6983  VMA_ASSERT(!m_InsideString);
6984  BeginValue(false);
6985  m_SB.Add(b ? "true" : "false");
6986 }
6987 
6988 void VmaJsonWriter::WriteNull()
6989 {
6990  VMA_ASSERT(!m_InsideString);
6991  BeginValue(false);
6992  m_SB.Add("null");
6993 }
6994 
6995 void VmaJsonWriter::BeginValue(bool isString)
6996 {
6997  if(!m_Stack.empty())
6998  {
6999  StackItem& currItem = m_Stack.back();
7000  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7001  currItem.valueCount % 2 == 0)
7002  {
7003  VMA_ASSERT(isString);
7004  }
7005 
7006  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7007  currItem.valueCount % 2 != 0)
7008  {
7009  m_SB.Add(": ");
7010  }
7011  else if(currItem.valueCount > 0)
7012  {
7013  m_SB.Add(", ");
7014  WriteIndent();
7015  }
7016  else
7017  {
7018  WriteIndent();
7019  }
7020  ++currItem.valueCount;
7021  }
7022 }
7023 
7024 void VmaJsonWriter::WriteIndent(bool oneLess)
7025 {
7026  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7027  {
7028  m_SB.AddNewLine();
7029 
7030  size_t count = m_Stack.size();
7031  if(count > 0 && oneLess)
7032  {
7033  --count;
7034  }
7035  for(size_t i = 0; i < count; ++i)
7036  {
7037  m_SB.Add(INDENT);
7038  }
7039  }
7040 }
7041 
7042 #endif // #if VMA_STATS_STRING_ENABLED
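// Editorial usage sketch (assumption, not part of the original file): the two
// classes above cooperate to emit JSON, e.g. {"Name": "Pool", "Size": 42}.
// The function name is hypothetical.
#if 0 // Illustration only.
static void VmaJsonWriterSketch(VmaAllocator hAllocator)
{
    VmaStringBuilder sb(hAllocator);
    {
        VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        json.WriteString("Name");       // Inside an object, every even value must be a string key.
        json.WriteString("Pool");
        json.WriteString("Size");
        json.WriteNumber((uint64_t)42);
        json.EndObject();
    } // ~VmaJsonWriter() asserts that all objects/arrays have been closed.
    // sb.GetData()/GetLength() now hold the generated JSON text.
}
#endif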
7043 
7045 
7046 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7047 {
7048  if(IsUserDataString())
7049  {
7050  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7051 
7052  FreeUserDataString(hAllocator);
7053 
7054  if(pUserData != VMA_NULL)
7055  {
7056  const char* const newStrSrc = (char*)pUserData;
7057  const size_t newStrLen = strlen(newStrSrc);
7058  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7059  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7060  m_pUserData = newStrDst;
7061  }
7062  }
7063  else
7064  {
7065  m_pUserData = pUserData;
7066  }
7067 }
7068 
7069 void VmaAllocation_T::ChangeBlockAllocation(
7070  VmaAllocator hAllocator,
7071  VmaDeviceMemoryBlock* block,
7072  VkDeviceSize offset)
7073 {
7074  VMA_ASSERT(block != VMA_NULL);
7075  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7076 
7077  // Move mapping reference counter from old block to new block.
7078  if(block != m_BlockAllocation.m_Block)
7079  {
7080  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7081  if(IsPersistentMap())
7082  ++mapRefCount;
7083  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7084  block->Map(hAllocator, mapRefCount, VMA_NULL);
7085  }
7086 
7087  m_BlockAllocation.m_Block = block;
7088  m_BlockAllocation.m_Offset = offset;
7089 }
7090 
7091 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7092 {
7093  VMA_ASSERT(newSize > 0);
7094  m_Size = newSize;
7095 }
7096 
7097 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7098 {
7099  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7100  m_BlockAllocation.m_Offset = newOffset;
7101 }
7102 
7103 VkDeviceSize VmaAllocation_T::GetOffset() const
7104 {
7105  switch(m_Type)
7106  {
7107  case ALLOCATION_TYPE_BLOCK:
7108  return m_BlockAllocation.m_Offset;
7109  case ALLOCATION_TYPE_DEDICATED:
7110  return 0;
7111  default:
7112  VMA_ASSERT(0);
7113  return 0;
7114  }
7115 }
7116 
7117 VkDeviceMemory VmaAllocation_T::GetMemory() const
7118 {
7119  switch(m_Type)
7120  {
7121  case ALLOCATION_TYPE_BLOCK:
7122  return m_BlockAllocation.m_Block->GetDeviceMemory();
7123  case ALLOCATION_TYPE_DEDICATED:
7124  return m_DedicatedAllocation.m_hMemory;
7125  default:
7126  VMA_ASSERT(0);
7127  return VK_NULL_HANDLE;
7128  }
7129 }
7130 
7131 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7132 {
7133  switch(m_Type)
7134  {
7135  case ALLOCATION_TYPE_BLOCK:
7136  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7137  case ALLOCATION_TYPE_DEDICATED:
7138  return m_DedicatedAllocation.m_MemoryTypeIndex;
7139  default:
7140  VMA_ASSERT(0);
7141  return UINT32_MAX;
7142  }
7143 }
7144 
7145 void* VmaAllocation_T::GetMappedData() const
7146 {
7147  switch(m_Type)
7148  {
7149  case ALLOCATION_TYPE_BLOCK:
7150  if(m_MapCount != 0)
7151  {
7152  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7153  VMA_ASSERT(pBlockData != VMA_NULL);
7154  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7155  }
7156  else
7157  {
7158  return VMA_NULL;
7159  }
7160  break;
7161  case ALLOCATION_TYPE_DEDICATED:
7162  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7163  return m_DedicatedAllocation.m_pMappedData;
7164  default:
7165  VMA_ASSERT(0);
7166  return VMA_NULL;
7167  }
7168 }
7169 
7170 bool VmaAllocation_T::CanBecomeLost() const
7171 {
7172  switch(m_Type)
7173  {
7174  case ALLOCATION_TYPE_BLOCK:
7175  return m_BlockAllocation.m_CanBecomeLost;
7176  case ALLOCATION_TYPE_DEDICATED:
7177  return false;
7178  default:
7179  VMA_ASSERT(0);
7180  return false;
7181  }
7182 }
7183 
7184 VmaPool VmaAllocation_T::GetPool() const
7185 {
7186  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7187  return m_BlockAllocation.m_hPool;
7188 }
7189 
7190 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7191 {
7192  VMA_ASSERT(CanBecomeLost());
7193 
7194  /*
7195  Warning: This is a carefully designed algorithm.
7196  Do not modify unless you really know what you're doing :)
7197  */
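 // Editorial note: the loop below keeps examining the last-use frame index and,
 // once the allocation is provably unused for more than frameInUseCount frames,
 // attempts a compare-exchange to VMA_FRAME_INDEX_LOST; a failed exchange
 // presumably leaves localLastUseFrameIndex updated to the current value, so the
 // next iteration re-evaluates the conditions against fresh data.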
7198  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7199  for(;;)
7200  {
7201  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7202  {
7203  VMA_ASSERT(0);
7204  return false;
7205  }
7206  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7207  {
7208  return false;
7209  }
7210  else // Last use time earlier than current time.
7211  {
7212  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7213  {
7214  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7215  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7216  return true;
7217  }
7218  }
7219  }
7220 }
7221 
7222 #if VMA_STATS_STRING_ENABLED
7223 
7224 // Correspond to values of enum VmaSuballocationType.
7225 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7226  "FREE",
7227  "UNKNOWN",
7228  "BUFFER",
7229  "IMAGE_UNKNOWN",
7230  "IMAGE_LINEAR",
7231  "IMAGE_OPTIMAL",
7232 };
7233 
7234 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7235 {
7236  json.WriteString("Type");
7237  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7238 
7239  json.WriteString("Size");
7240  json.WriteNumber(m_Size);
7241 
7242  if(m_pUserData != VMA_NULL)
7243  {
7244  json.WriteString("UserData");
7245  if(IsUserDataString())
7246  {
7247  json.WriteString((const char*)m_pUserData);
7248  }
7249  else
7250  {
7251  json.BeginString();
7252  json.ContinueString_Pointer(m_pUserData);
7253  json.EndString();
7254  }
7255  }
7256 
7257  json.WriteString("CreationFrameIndex");
7258  json.WriteNumber(m_CreationFrameIndex);
7259 
7260  json.WriteString("LastUseFrameIndex");
7261  json.WriteNumber(GetLastUseFrameIndex());
7262 
7263  if(m_BufferImageUsage != 0)
7264  {
7265  json.WriteString("Usage");
7266  json.WriteNumber(m_BufferImageUsage);
7267  }
7268 }
7269 
7270 #endif
7271 
7272 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7273 {
7274  VMA_ASSERT(IsUserDataString());
7275  if(m_pUserData != VMA_NULL)
7276  {
7277  char* const oldStr = (char*)m_pUserData;
7278  const size_t oldStrLen = strlen(oldStr);
7279  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7280  m_pUserData = VMA_NULL;
7281  }
7282 }
7283 
7284 void VmaAllocation_T::BlockAllocMap()
7285 {
7286  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7287 
7288  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7289  {
7290  ++m_MapCount;
7291  }
7292  else
7293  {
7294  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7295  }
7296 }
7297 
7298 void VmaAllocation_T::BlockAllocUnmap()
7299 {
7300  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7301 
7302  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7303  {
7304  --m_MapCount;
7305  }
7306  else
7307  {
7308  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7309  }
7310 }
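// Editorial note (assumption based on the masking above): the low bits of
// m_MapCount hold the nested map reference count, capped at 0x7F, while the
// MAP_COUNT_FLAG_PERSISTENT_MAP bit marks persistently mapped allocations and is
// masked out before the counter is inspected or modified.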
7311 
7312 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7313 {
7314  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7315 
7316  if(m_MapCount != 0)
7317  {
7318  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7319  {
7320  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7321  *ppData = m_DedicatedAllocation.m_pMappedData;
7322  ++m_MapCount;
7323  return VK_SUCCESS;
7324  }
7325  else
7326  {
7327  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7328  return VK_ERROR_MEMORY_MAP_FAILED;
7329  }
7330  }
7331  else
7332  {
7333  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7334  hAllocator->m_hDevice,
7335  m_DedicatedAllocation.m_hMemory,
7336  0, // offset
7337  VK_WHOLE_SIZE,
7338  0, // flags
7339  ppData);
7340  if(result == VK_SUCCESS)
7341  {
7342  m_DedicatedAllocation.m_pMappedData = *ppData;
7343  m_MapCount = 1;
7344  }
7345  return result;
7346  }
7347 }
7348 
7349 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7350 {
7351  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7352 
7353  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7354  {
7355  --m_MapCount;
7356  if(m_MapCount == 0)
7357  {
7358  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7359  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7360  hAllocator->m_hDevice,
7361  m_DedicatedAllocation.m_hMemory);
7362  }
7363  }
7364  else
7365  {
7366  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7367  }
7368 }
7369 
7370 #if VMA_STATS_STRING_ENABLED
7371 
7372 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7373 {
7374  json.BeginObject();
7375 
7376  json.WriteString("Blocks");
7377  json.WriteNumber(stat.blockCount);
7378 
7379  json.WriteString("Allocations");
7380  json.WriteNumber(stat.allocationCount);
7381 
7382  json.WriteString("UnusedRanges");
7383  json.WriteNumber(stat.unusedRangeCount);
7384 
7385  json.WriteString("UsedBytes");
7386  json.WriteNumber(stat.usedBytes);
7387 
7388  json.WriteString("UnusedBytes");
7389  json.WriteNumber(stat.unusedBytes);
7390 
7391  if(stat.allocationCount > 1)
7392  {
7393  json.WriteString("AllocationSize");
7394  json.BeginObject(true);
7395  json.WriteString("Min");
7396  json.WriteNumber(stat.allocationSizeMin);
7397  json.WriteString("Avg");
7398  json.WriteNumber(stat.allocationSizeAvg);
7399  json.WriteString("Max");
7400  json.WriteNumber(stat.allocationSizeMax);
7401  json.EndObject();
7402  }
7403 
7404  if(stat.unusedRangeCount > 1)
7405  {
7406  json.WriteString("UnusedRangeSize");
7407  json.BeginObject(true);
7408  json.WriteString("Min");
7409  json.WriteNumber(stat.unusedRangeSizeMin);
7410  json.WriteString("Avg");
7411  json.WriteNumber(stat.unusedRangeSizeAvg);
7412  json.WriteString("Max");
7413  json.WriteNumber(stat.unusedRangeSizeMax);
7414  json.EndObject();
7415  }
7416 
7417  json.EndObject();
7418 }
7419 
7420 #endif // #if VMA_STATS_STRING_ENABLED
7421 
7422 struct VmaSuballocationItemSizeLess
7423 {
7424  bool operator()(
7425  const VmaSuballocationList::iterator lhs,
7426  const VmaSuballocationList::iterator rhs) const
7427  {
7428  return lhs->size < rhs->size;
7429  }
7430  bool operator()(
7431  const VmaSuballocationList::iterator lhs,
7432  VkDeviceSize rhsSize) const
7433  {
7434  return lhs->size < rhsSize;
7435  }
7436 };
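// Editorial note: this comparator allows m_FreeSuballocationsBySize to be
// binary-searched by size - see the VmaBinaryFindFirstNotLess() call in
// VmaBlockMetadata_Generic::CreateAllocationRequest() below. The VkDeviceSize
// overload serves as the heterogeneous "raw size" search key.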
7437 
7438 
7440 // class VmaBlockMetadata
7441 
7442 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7443  m_Size(0),
7444  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7445 {
7446 }
7447 
7448 #if VMA_STATS_STRING_ENABLED
7449 
7450 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7451  VkDeviceSize unusedBytes,
7452  size_t allocationCount,
7453  size_t unusedRangeCount) const
7454 {
7455  json.BeginObject();
7456 
7457  json.WriteString("TotalBytes");
7458  json.WriteNumber(GetSize());
7459 
7460  json.WriteString("UnusedBytes");
7461  json.WriteNumber(unusedBytes);
7462 
7463  json.WriteString("Allocations");
7464  json.WriteNumber((uint64_t)allocationCount);
7465 
7466  json.WriteString("UnusedRanges");
7467  json.WriteNumber((uint64_t)unusedRangeCount);
7468 
7469  json.WriteString("Suballocations");
7470  json.BeginArray();
7471 }
7472 
7473 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7474  VkDeviceSize offset,
7475  VmaAllocation hAllocation) const
7476 {
7477  json.BeginObject(true);
7478 
7479  json.WriteString("Offset");
7480  json.WriteNumber(offset);
7481 
7482  hAllocation->PrintParameters(json);
7483 
7484  json.EndObject();
7485 }
7486 
7487 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7488  VkDeviceSize offset,
7489  VkDeviceSize size) const
7490 {
7491  json.BeginObject(true);
7492 
7493  json.WriteString("Offset");
7494  json.WriteNumber(offset);
7495 
7496  json.WriteString("Type");
7497  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7498 
7499  json.WriteString("Size");
7500  json.WriteNumber(size);
7501 
7502  json.EndObject();
7503 }
7504 
7505 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7506 {
7507  json.EndArray();
7508  json.EndObject();
7509 }
7510 
7511 #endif // #if VMA_STATS_STRING_ENABLED
7512 
7514 // class VmaBlockMetadata_Generic
7515 
7516 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7517  VmaBlockMetadata(hAllocator),
7518  m_FreeCount(0),
7519  m_SumFreeSize(0),
7520  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7521  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7522 {
7523 }
7524 
7525 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7526 {
7527 }
7528 
7529 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7530 {
7531  VmaBlockMetadata::Init(size);
7532 
7533  m_FreeCount = 1;
7534  m_SumFreeSize = size;
7535 
7536  VmaSuballocation suballoc = {};
7537  suballoc.offset = 0;
7538  suballoc.size = size;
7539  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7540  suballoc.hAllocation = VK_NULL_HANDLE;
7541 
7542  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7543  m_Suballocations.push_back(suballoc);
7544  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7545  --suballocItem;
7546  m_FreeSuballocationsBySize.push_back(suballocItem);
7547 }
7548 
7549 bool VmaBlockMetadata_Generic::Validate() const
7550 {
7551  VMA_VALIDATE(!m_Suballocations.empty());
7552 
7553  // Expected offset of new suballocation as calculated from previous ones.
7554  VkDeviceSize calculatedOffset = 0;
7555  // Expected number of free suballocations as calculated from traversing their list.
7556  uint32_t calculatedFreeCount = 0;
7557  // Expected sum size of free suballocations as calculated from traversing their list.
7558  VkDeviceSize calculatedSumFreeSize = 0;
7559  // Expected number of free suballocations that should be registered in
7560  // m_FreeSuballocationsBySize calculated from traversing their list.
7561  size_t freeSuballocationsToRegister = 0;
7562  // True if previous visited suballocation was free.
7563  bool prevFree = false;
7564 
7565  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7566  suballocItem != m_Suballocations.cend();
7567  ++suballocItem)
7568  {
7569  const VmaSuballocation& subAlloc = *suballocItem;
7570 
7571  // Actual offset of this suballocation doesn't match expected one.
7572  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7573 
7574  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7575  // Two adjacent free suballocations are invalid. They should be merged.
7576  VMA_VALIDATE(!prevFree || !currFree);
7577 
7578  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7579 
7580  if(currFree)
7581  {
7582  calculatedSumFreeSize += subAlloc.size;
7583  ++calculatedFreeCount;
7584  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7585  {
7586  ++freeSuballocationsToRegister;
7587  }
7588 
7589  // Margin required between allocations - every free space must be at least that large.
7590  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7591  }
7592  else
7593  {
7594  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7595  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7596 
7597  // Margin required between allocations - previous allocation must be free.
7598  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7599  }
7600 
7601  calculatedOffset += subAlloc.size;
7602  prevFree = currFree;
7603  }
7604 
7605  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7606  // match expected one.
7607  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7608 
7609  VkDeviceSize lastSize = 0;
7610  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7611  {
7612  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7613 
7614  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7615  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7616  // They must be sorted by size ascending.
7617  VMA_VALIDATE(suballocItem->size >= lastSize);
7618 
7619  lastSize = suballocItem->size;
7620  }
7621 
7622  // Check if totals match calculated values.
7623  VMA_VALIDATE(ValidateFreeSuballocationList());
7624  VMA_VALIDATE(calculatedOffset == GetSize());
7625  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7626  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7627 
7628  return true;
7629 }
7630 
7631 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7632 {
7633  if(!m_FreeSuballocationsBySize.empty())
7634  {
7635  return m_FreeSuballocationsBySize.back()->size;
7636  }
7637  else
7638  {
7639  return 0;
7640  }
7641 }
7642 
7643 bool VmaBlockMetadata_Generic::IsEmpty() const
7644 {
7645  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7646 }
7647 
7648 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7649 {
7650  outInfo.blockCount = 1;
7651 
7652  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7653  outInfo.allocationCount = rangeCount - m_FreeCount;
7654  outInfo.unusedRangeCount = m_FreeCount;
7655 
7656  outInfo.unusedBytes = m_SumFreeSize;
7657  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7658 
7659  outInfo.allocationSizeMin = UINT64_MAX;
7660  outInfo.allocationSizeMax = 0;
7661  outInfo.unusedRangeSizeMin = UINT64_MAX;
7662  outInfo.unusedRangeSizeMax = 0;
7663 
7664  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7665  suballocItem != m_Suballocations.cend();
7666  ++suballocItem)
7667  {
7668  const VmaSuballocation& suballoc = *suballocItem;
7669  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7670  {
7671  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7672  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7673  }
7674  else
7675  {
7676  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7677  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7678  }
7679  }
7680 }
7681 
7682 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7683 {
7684  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7685 
7686  inoutStats.size += GetSize();
7687  inoutStats.unusedSize += m_SumFreeSize;
7688  inoutStats.allocationCount += rangeCount - m_FreeCount;
7689  inoutStats.unusedRangeCount += m_FreeCount;
7690  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7691 }
7692 
7693 #if VMA_STATS_STRING_ENABLED
7694 
7695 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7696 {
7697  PrintDetailedMap_Begin(json,
7698  m_SumFreeSize, // unusedBytes
7699  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7700  m_FreeCount); // unusedRangeCount
7701 
7702  size_t i = 0;
7703  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7704  suballocItem != m_Suballocations.cend();
7705  ++suballocItem, ++i)
7706  {
7707  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7708  {
7709  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7710  }
7711  else
7712  {
7713  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7714  }
7715  }
7716 
7717  PrintDetailedMap_End(json);
7718 }
7719 
7720 #endif // #if VMA_STATS_STRING_ENABLED
7721 
7722 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7723  uint32_t currentFrameIndex,
7724  uint32_t frameInUseCount,
7725  VkDeviceSize bufferImageGranularity,
7726  VkDeviceSize allocSize,
7727  VkDeviceSize allocAlignment,
7728  bool upperAddress,
7729  VmaSuballocationType allocType,
7730  bool canMakeOtherLost,
7731  uint32_t strategy,
7732  VmaAllocationRequest* pAllocationRequest)
7733 {
7734  VMA_ASSERT(allocSize > 0);
7735  VMA_ASSERT(!upperAddress);
7736  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7737  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7738  VMA_HEAVY_ASSERT(Validate());
7739 
7740  // There is not enough total free space in this block to fulfill the request: Early return.
7741  if(canMakeOtherLost == false &&
7742  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7743  {
7744  return false;
7745  }
7746 
7747  // New algorithm, efficiently searching freeSuballocationsBySize.
7748  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7749  if(freeSuballocCount > 0)
7750  {
7752  {
7753  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7754  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7755  m_FreeSuballocationsBySize.data(),
7756  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7757  allocSize + 2 * VMA_DEBUG_MARGIN,
7758  VmaSuballocationItemSizeLess());
7759  size_t index = it - m_FreeSuballocationsBySize.data();
7760  for(; index < freeSuballocCount; ++index)
7761  {
7762  if(CheckAllocation(
7763  currentFrameIndex,
7764  frameInUseCount,
7765  bufferImageGranularity,
7766  allocSize,
7767  allocAlignment,
7768  allocType,
7769  m_FreeSuballocationsBySize[index],
7770  false, // canMakeOtherLost
7771  &pAllocationRequest->offset,
7772  &pAllocationRequest->itemsToMakeLostCount,
7773  &pAllocationRequest->sumFreeSize,
7774  &pAllocationRequest->sumItemSize))
7775  {
7776  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7777  return true;
7778  }
7779  }
7780  }
7781  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7782  {
7783  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7784  it != m_Suballocations.end();
7785  ++it)
7786  {
7787  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7788  currentFrameIndex,
7789  frameInUseCount,
7790  bufferImageGranularity,
7791  allocSize,
7792  allocAlignment,
7793  allocType,
7794  it,
7795  false, // canMakeOtherLost
7796  &pAllocationRequest->offset,
7797  &pAllocationRequest->itemsToMakeLostCount,
7798  &pAllocationRequest->sumFreeSize,
7799  &pAllocationRequest->sumItemSize))
7800  {
7801  pAllocationRequest->item = it;
7802  return true;
7803  }
7804  }
7805  }
7806  else // WORST_FIT, FIRST_FIT
7807  {
7808  // Search starting from the biggest suballocations.
7809  for(size_t index = freeSuballocCount; index--; )
7810  {
7811  if(CheckAllocation(
7812  currentFrameIndex,
7813  frameInUseCount,
7814  bufferImageGranularity,
7815  allocSize,
7816  allocAlignment,
7817  allocType,
7818  m_FreeSuballocationsBySize[index],
7819  false, // canMakeOtherLost
7820  &pAllocationRequest->offset,
7821  &pAllocationRequest->itemsToMakeLostCount,
7822  &pAllocationRequest->sumFreeSize,
7823  &pAllocationRequest->sumItemSize))
7824  {
7825  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7826  return true;
7827  }
7828  }
7829  }
7830  }
7831 
7832  if(canMakeOtherLost)
7833  {
7834  // Brute-force algorithm. TODO: Come up with something better.
7835 
7836  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7837  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7838 
7839  VmaAllocationRequest tmpAllocRequest = {};
7840  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7841  suballocIt != m_Suballocations.end();
7842  ++suballocIt)
7843  {
7844  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7845  suballocIt->hAllocation->CanBecomeLost())
7846  {
7847  if(CheckAllocation(
7848  currentFrameIndex,
7849  frameInUseCount,
7850  bufferImageGranularity,
7851  allocSize,
7852  allocAlignment,
7853  allocType,
7854  suballocIt,
7855  canMakeOtherLost,
7856  &tmpAllocRequest.offset,
7857  &tmpAllocRequest.itemsToMakeLostCount,
7858  &tmpAllocRequest.sumFreeSize,
7859  &tmpAllocRequest.sumItemSize))
7860  {
7861  tmpAllocRequest.item = suballocIt;
7862 
7863  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7864  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7865  {
7866  *pAllocationRequest = tmpAllocRequest;
7867  }
7868  }
7869  }
7870  }
7871 
7872  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7873  {
7874  return true;
7875  }
7876  }
7877 
7878  return false;
7879 }
7880 
7881 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7882  uint32_t currentFrameIndex,
7883  uint32_t frameInUseCount,
7884  VmaAllocationRequest* pAllocationRequest)
7885 {
7886  while(pAllocationRequest->itemsToMakeLostCount > 0)
7887  {
7888  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7889  {
7890  ++pAllocationRequest->item;
7891  }
7892  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7893  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7894  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7895  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7896  {
7897  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7898  --pAllocationRequest->itemsToMakeLostCount;
7899  }
7900  else
7901  {
7902  return false;
7903  }
7904  }
7905 
7906  VMA_HEAVY_ASSERT(Validate());
7907  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7908  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7909 
7910  return true;
7911 }
7912 
7913 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7914 {
7915  uint32_t lostAllocationCount = 0;
7916  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7917  it != m_Suballocations.end();
7918  ++it)
7919  {
7920  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7921  it->hAllocation->CanBecomeLost() &&
7922  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7923  {
7924  it = FreeSuballocation(it);
7925  ++lostAllocationCount;
7926  }
7927  }
7928  return lostAllocationCount;
7929 }
7930 
7931 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7932 {
7933  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7934  it != m_Suballocations.end();
7935  ++it)
7936  {
7937  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7938  {
7939  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7940  {
7941  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7942  return VK_ERROR_VALIDATION_FAILED_EXT;
7943  }
7944  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7945  {
7946  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7947  return VK_ERROR_VALIDATION_FAILED_EXT;
7948  }
7949  }
7950  }
7951 
7952  return VK_SUCCESS;
7953 }
7954 
7955 void VmaBlockMetadata_Generic::Alloc(
7956  const VmaAllocationRequest& request,
7957  VmaSuballocationType type,
7958  VkDeviceSize allocSize,
7959  bool upperAddress,
7960  VmaAllocation hAllocation)
7961 {
7962  VMA_ASSERT(!upperAddress);
7963  VMA_ASSERT(request.item != m_Suballocations.end());
7964  VmaSuballocation& suballoc = *request.item;
7965  // Given suballocation is a free block.
7966  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7967  // Given offset is inside this suballocation.
7968  VMA_ASSERT(request.offset >= suballoc.offset);
7969  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7970  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7971  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7972 
7973  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7974  // it to become used.
7975  UnregisterFreeSuballocation(request.item);
7976 
7977  suballoc.offset = request.offset;
7978  suballoc.size = allocSize;
7979  suballoc.type = type;
7980  suballoc.hAllocation = hAllocation;
7981 
7982  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7983  if(paddingEnd)
7984  {
7985  VmaSuballocation paddingSuballoc = {};
7986  paddingSuballoc.offset = request.offset + allocSize;
7987  paddingSuballoc.size = paddingEnd;
7988  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7989  VmaSuballocationList::iterator next = request.item;
7990  ++next;
7991  const VmaSuballocationList::iterator paddingEndItem =
7992  m_Suballocations.insert(next, paddingSuballoc);
7993  RegisterFreeSuballocation(paddingEndItem);
7994  }
7995 
7996  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7997  if(paddingBegin)
7998  {
7999  VmaSuballocation paddingSuballoc = {};
8000  paddingSuballoc.offset = request.offset - paddingBegin;
8001  paddingSuballoc.size = paddingBegin;
8002  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8003  const VmaSuballocationList::iterator paddingBeginItem =
8004  m_Suballocations.insert(request.item, paddingSuballoc);
8005  RegisterFreeSuballocation(paddingBeginItem);
8006  }
8007 
8008  // Update totals.
8009  m_FreeCount = m_FreeCount - 1;
8010  if(paddingBegin > 0)
8011  {
8012  ++m_FreeCount;
8013  }
8014  if(paddingEnd > 0)
8015  {
8016  ++m_FreeCount;
8017  }
8018  m_SumFreeSize -= allocSize;
8019 }
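// Editorial illustration of the split performed by Alloc() above: a free
// suballocation [offset, offset + size) becomes
//
//   [ paddingBegin (FREE) ][ allocSize (used) ][ paddingEnd (FREE) ]
//   ^ old suballoc.offset  ^ request.offset
//
// where the FREE pieces are inserted (and m_FreeCount incremented) only when
// their size is greater than zero.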
8020 
8021 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8022 {
8023  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8024  suballocItem != m_Suballocations.end();
8025  ++suballocItem)
8026  {
8027  VmaSuballocation& suballoc = *suballocItem;
8028  if(suballoc.hAllocation == allocation)
8029  {
8030  FreeSuballocation(suballocItem);
8031  VMA_HEAVY_ASSERT(Validate());
8032  return;
8033  }
8034  }
8035  VMA_ASSERT(0 && "Not found!");
8036 }
8037 
8038 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8039 {
8040  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8041  suballocItem != m_Suballocations.end();
8042  ++suballocItem)
8043  {
8044  VmaSuballocation& suballoc = *suballocItem;
8045  if(suballoc.offset == offset)
8046  {
8047  FreeSuballocation(suballocItem);
8048  return;
8049  }
8050  }
8051  VMA_ASSERT(0 && "Not found!");
8052 }
8053 
8054 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8055 {
8056  typedef VmaSuballocationList::iterator iter_type;
8057  for(iter_type suballocItem = m_Suballocations.begin();
8058  suballocItem != m_Suballocations.end();
8059  ++suballocItem)
8060  {
8061  VmaSuballocation& suballoc = *suballocItem;
8062  if(suballoc.hAllocation == alloc)
8063  {
8064  iter_type nextItem = suballocItem;
8065  ++nextItem;
8066 
8067  // Should have been ensured at a higher level.
8068  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8069 
8070  // Shrinking.
8071  if(newSize < alloc->GetSize())
8072  {
8073  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8074 
8075  // There is next item.
8076  if(nextItem != m_Suballocations.end())
8077  {
8078  // Next item is free.
8079  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8080  {
8081  // Grow this next item backward.
8082  UnregisterFreeSuballocation(nextItem);
8083  nextItem->offset -= sizeDiff;
8084  nextItem->size += sizeDiff;
8085  RegisterFreeSuballocation(nextItem);
8086  }
8087  // Next item is not free.
8088  else
8089  {
8090  // Create free item after current one.
8091  VmaSuballocation newFreeSuballoc;
8092  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8093  newFreeSuballoc.offset = suballoc.offset + newSize;
8094  newFreeSuballoc.size = sizeDiff;
8095  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8096  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8097  RegisterFreeSuballocation(newFreeSuballocIt);
8098 
8099  ++m_FreeCount;
8100  }
8101  }
8102  // This is the last item.
8103  else
8104  {
8105  // Create free item at the end.
8106  VmaSuballocation newFreeSuballoc;
8107  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8108  newFreeSuballoc.offset = suballoc.offset + newSize;
8109  newFreeSuballoc.size = sizeDiff;
8110  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8111  m_Suballocations.push_back(newFreeSuballoc);
8112 
8113  iter_type newFreeSuballocIt = m_Suballocations.end();
8114  RegisterFreeSuballocation(--newFreeSuballocIt);
8115 
8116  ++m_FreeCount;
8117  }
8118 
8119  suballoc.size = newSize;
8120  m_SumFreeSize += sizeDiff;
8121  }
8122  // Growing.
8123  else
8124  {
8125  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8126 
8127  // There is next item.
8128  if(nextItem != m_Suballocations.end())
8129  {
8130  // Next item is free.
8131  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8132  {
8133  // There is not enough free space, including margin.
8134  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8135  {
8136  return false;
8137  }
8138 
8139  // There is more free space than required.
8140  if(nextItem->size > sizeDiff)
8141  {
8142  // Move and shrink this next item.
8143  UnregisterFreeSuballocation(nextItem);
8144  nextItem->offset += sizeDiff;
8145  nextItem->size -= sizeDiff;
8146  RegisterFreeSuballocation(nextItem);
8147  }
8148  // There is exactly the amount of free space required.
8149  else
8150  {
8151  // Remove this next free item.
8152  UnregisterFreeSuballocation(nextItem);
8153  m_Suballocations.erase(nextItem);
8154  --m_FreeCount;
8155  }
8156  }
8157  // Next item is not free - there is no space to grow.
8158  else
8159  {
8160  return false;
8161  }
8162  }
8163  // This is the last item - there is no space to grow.
8164  else
8165  {
8166  return false;
8167  }
8168 
8169  suballoc.size = newSize;
8170  m_SumFreeSize -= sizeDiff;
8171  }
8172 
8173  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
8174  return true;
8175  }
8176  }
8177  VMA_ASSERT(0 && "Not found!");
8178  return false;
8179 }
8180 
8181 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8182 {
8183  VkDeviceSize lastSize = 0;
8184  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8185  {
8186  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8187 
8188  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8189  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8190  VMA_VALIDATE(it->size >= lastSize);
8191  lastSize = it->size;
8192  }
8193  return true;
8194 }
8195 
8196 bool VmaBlockMetadata_Generic::CheckAllocation(
8197  uint32_t currentFrameIndex,
8198  uint32_t frameInUseCount,
8199  VkDeviceSize bufferImageGranularity,
8200  VkDeviceSize allocSize,
8201  VkDeviceSize allocAlignment,
8202  VmaSuballocationType allocType,
8203  VmaSuballocationList::const_iterator suballocItem,
8204  bool canMakeOtherLost,
8205  VkDeviceSize* pOffset,
8206  size_t* itemsToMakeLostCount,
8207  VkDeviceSize* pSumFreeSize,
8208  VkDeviceSize* pSumItemSize) const
8209 {
8210  VMA_ASSERT(allocSize > 0);
8211  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8212  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8213  VMA_ASSERT(pOffset != VMA_NULL);
8214 
8215  *itemsToMakeLostCount = 0;
8216  *pSumFreeSize = 0;
8217  *pSumItemSize = 0;
8218 
8219  if(canMakeOtherLost)
8220  {
8221  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8222  {
8223  *pSumFreeSize = suballocItem->size;
8224  }
8225  else
8226  {
8227  if(suballocItem->hAllocation->CanBecomeLost() &&
8228  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8229  {
8230  ++*itemsToMakeLostCount;
8231  *pSumItemSize = suballocItem->size;
8232  }
8233  else
8234  {
8235  return false;
8236  }
8237  }
8238 
8239  // Remaining size is too small for this request: Early return.
8240  if(GetSize() - suballocItem->offset < allocSize)
8241  {
8242  return false;
8243  }
8244 
8245  // Start from offset equal to beginning of this suballocation.
8246  *pOffset = suballocItem->offset;
8247 
8248  // Apply VMA_DEBUG_MARGIN at the beginning.
8249  if(VMA_DEBUG_MARGIN > 0)
8250  {
8251  *pOffset += VMA_DEBUG_MARGIN;
8252  }
8253 
8254  // Apply alignment.
8255  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8256 
8257  // Check previous suballocations for BufferImageGranularity conflicts.
8258  // Make bigger alignment if necessary.
8259  if(bufferImageGranularity > 1)
8260  {
8261  bool bufferImageGranularityConflict = false;
8262  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8263  while(prevSuballocItem != m_Suballocations.cbegin())
8264  {
8265  --prevSuballocItem;
8266  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8267  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8268  {
8269  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8270  {
8271  bufferImageGranularityConflict = true;
8272  break;
8273  }
8274  }
8275  else
8276  // Already on previous page.
8277  break;
8278  }
8279  if(bufferImageGranularityConflict)
8280  {
8281  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8282  }
8283  }
8284 
8285  // Now that we have final *pOffset, check if we are past suballocItem.
8286  // If yes, return false - this function should be called for another suballocItem as starting point.
8287  if(*pOffset >= suballocItem->offset + suballocItem->size)
8288  {
8289  return false;
8290  }
8291 
8292  // Calculate padding at the beginning based on current offset.
8293  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8294 
8295  // Calculate required margin at the end.
8296  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8297 
8298  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8299  // Another early return check.
8300  if(suballocItem->offset + totalSize > GetSize())
8301  {
8302  return false;
8303  }
8304 
8305  // Advance lastSuballocItem until desired size is reached.
8306  // Update itemsToMakeLostCount.
8307  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8308  if(totalSize > suballocItem->size)
8309  {
8310  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8311  while(remainingSize > 0)
8312  {
8313  ++lastSuballocItem;
8314  if(lastSuballocItem == m_Suballocations.cend())
8315  {
8316  return false;
8317  }
8318  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8319  {
8320  *pSumFreeSize += lastSuballocItem->size;
8321  }
8322  else
8323  {
8324  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8325  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8326  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8327  {
8328  ++*itemsToMakeLostCount;
8329  *pSumItemSize += lastSuballocItem->size;
8330  }
8331  else
8332  {
8333  return false;
8334  }
8335  }
8336  remainingSize = (lastSuballocItem->size < remainingSize) ?
8337  remainingSize - lastSuballocItem->size : 0;
8338  }
8339  }
8340 
8341  // Check next suballocations for BufferImageGranularity conflicts.
8342  // If conflict exists, we must mark more allocations lost or fail.
8343  if(bufferImageGranularity > 1)
8344  {
8345  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8346  ++nextSuballocItem;
8347  while(nextSuballocItem != m_Suballocations.cend())
8348  {
8349  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8350  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8351  {
8352  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8353  {
8354  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8355  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8356  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8357  {
8358  ++*itemsToMakeLostCount;
8359  }
8360  else
8361  {
8362  return false;
8363  }
8364  }
8365  }
8366  else
8367  {
8368  // Already on next page.
8369  break;
8370  }
8371  ++nextSuballocItem;
8372  }
8373  }
8374  }
8375  else
8376  {
8377  const VmaSuballocation& suballoc = *suballocItem;
8378  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8379 
8380  *pSumFreeSize = suballoc.size;
8381 
8382  // Size of this suballocation is too small for this request: Early return.
8383  if(suballoc.size < allocSize)
8384  {
8385  return false;
8386  }
8387 
8388  // Start from offset equal to beginning of this suballocation.
8389  *pOffset = suballoc.offset;
8390 
8391  // Apply VMA_DEBUG_MARGIN at the beginning.
8392  if(VMA_DEBUG_MARGIN > 0)
8393  {
8394  *pOffset += VMA_DEBUG_MARGIN;
8395  }
8396 
8397  // Apply alignment.
8398  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8399 
8400  // Check previous suballocations for BufferImageGranularity conflicts.
8401  // Make bigger alignment if necessary.
8402  if(bufferImageGranularity > 1)
8403  {
8404  bool bufferImageGranularityConflict = false;
8405  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8406  while(prevSuballocItem != m_Suballocations.cbegin())
8407  {
8408  --prevSuballocItem;
8409  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8410  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8411  {
8412  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8413  {
8414  bufferImageGranularityConflict = true;
8415  break;
8416  }
8417  }
8418  else
8419  // Already on previous page.
8420  break;
8421  }
8422  if(bufferImageGranularityConflict)
8423  {
8424  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8425  }
8426  }
8427 
8428  // Calculate padding at the beginning based on current offset.
8429  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8430 
8431  // Calculate required margin at the end.
8432  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8433 
8434  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8435  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8436  {
8437  return false;
8438  }
8439 
8440  // Check next suballocations for BufferImageGranularity conflicts.
8441  // If conflict exists, allocation cannot be made here.
8442  if(bufferImageGranularity > 1)
8443  {
8444  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8445  ++nextSuballocItem;
8446  while(nextSuballocItem != m_Suballocations.cend())
8447  {
8448  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8449  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8450  {
8451  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8452  {
8453  return false;
8454  }
8455  }
8456  else
8457  {
8458  // Already on next page.
8459  break;
8460  }
8461  ++nextSuballocItem;
8462  }
8463  }
8464  }
8465 
8466  // All tests passed: Success. pOffset is already filled.
8467  return true;
8468 }
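
// Worked example of the non-canMakeOtherLost path above (illustrative numbers,
// bufferImageGranularity == 1, VMA_DEBUG_MARGIN == 0): for allocSize = 192,
// allocAlignment = 64 and a FREE suballocation at offset = 100, size = 300:
//
//     *pOffset     = 100                          // start of the suballocation
//     *pOffset     = VmaAlignUp(100, 64) = 128
//     paddingBegin = 128 - 100 = 28
//     paddingBegin + allocSize + requiredEndMargin = 28 + 192 + 0 = 220 <= 300
//
// so the request fits and the function returns true with *pOffset == 128.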
8469 
8470 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8471 {
8472  VMA_ASSERT(item != m_Suballocations.end());
8473  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8474 
8475  VmaSuballocationList::iterator nextItem = item;
8476  ++nextItem;
8477  VMA_ASSERT(nextItem != m_Suballocations.end());
8478  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8479 
8480  item->size += nextItem->size;
8481  --m_FreeCount;
8482  m_Suballocations.erase(nextItem);
8483 }
8484 
8485 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8486 {
8487  // Change this suballocation to be marked as free.
8488  VmaSuballocation& suballoc = *suballocItem;
8489  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8490  suballoc.hAllocation = VK_NULL_HANDLE;
8491 
8492  // Update totals.
8493  ++m_FreeCount;
8494  m_SumFreeSize += suballoc.size;
8495 
8496  // Merge with previous and/or next suballocation if it's also free.
8497  bool mergeWithNext = false;
8498  bool mergeWithPrev = false;
8499 
8500  VmaSuballocationList::iterator nextItem = suballocItem;
8501  ++nextItem;
8502  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8503  {
8504  mergeWithNext = true;
8505  }
8506 
8507  VmaSuballocationList::iterator prevItem = suballocItem;
8508  if(suballocItem != m_Suballocations.begin())
8509  {
8510  --prevItem;
8511  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8512  {
8513  mergeWithPrev = true;
8514  }
8515  }
8516 
8517  if(mergeWithNext)
8518  {
8519  UnregisterFreeSuballocation(nextItem);
8520  MergeFreeWithNext(suballocItem);
8521  }
8522 
8523  if(mergeWithPrev)
8524  {
8525  UnregisterFreeSuballocation(prevItem);
8526  MergeFreeWithNext(prevItem);
8527  RegisterFreeSuballocation(prevItem);
8528  return prevItem;
8529  }
8530  else
8531  {
8532  RegisterFreeSuballocation(suballocItem);
8533  return suballocItem;
8534  }
8535 }
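
// The merging above keeps the suballocation list free of adjacent FREE nodes, e.g.:
//
//     before Free(B):  [A used][B used][C FREE]
//     after  Free(B):  [A used][   B+C FREE   ]
//
// Note that neighbors are unregistered from m_FreeSuballocationsBySize before the merge
// and only the final, merged node is registered, so the by-size vector never holds an
// iterator that was erased by MergeFreeWithNext().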
8536 
8537 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8538 {
8539  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8540  VMA_ASSERT(item->size > 0);
8541 
8542  // You may want to enable this validation at the beginning or at the end of
8543  // this function, depending on what you want to check.
8544  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8545 
8546  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8547  {
8548  if(m_FreeSuballocationsBySize.empty())
8549  {
8550  m_FreeSuballocationsBySize.push_back(item);
8551  }
8552  else
8553  {
8554  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8555  }
8556  }
8557 
8558  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8559 }
8560 
8561 
8562 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8563 {
8564  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8565  VMA_ASSERT(item->size > 0);
8566 
8567  // You may want to enable this validation at the beginning or at the end of
8568  // this function, depending on what you want to check.
8569  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8570 
8571  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8572  {
8573  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8574  m_FreeSuballocationsBySize.data(),
8575  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8576  item,
8577  VmaSuballocationItemSizeLess());
8578  for(size_t index = it - m_FreeSuballocationsBySize.data();
8579  index < m_FreeSuballocationsBySize.size();
8580  ++index)
8581  {
8582  if(m_FreeSuballocationsBySize[index] == item)
8583  {
8584  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8585  return;
8586  }
8587  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8588  }
8589  VMA_ASSERT(0 && "Not found.");
8590  }
8591 
8592  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8593 }
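
// m_FreeSuballocationsBySize is kept sorted by suballocation size, so the lookup above
// is a binary search to the first entry of equal size followed by a short linear scan
// over the equal-sized entries to find the exact iterator - conceptually (sketch of the
// loop above, not a separate helper in this file):
//
//     auto* it = VmaBinaryFindFirstNotLess(begin, end, item, VmaSuballocationItemSizeLess());
//     while(it != end && (*it)->size == item->size && *it != item) ++it;
//
// Only free ranges of at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes are
// registered in this vector; smaller holes are still merged and freed, just not indexed
// by size.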
8594 
8595 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8596  VkDeviceSize bufferImageGranularity,
8597  VmaSuballocationType& inOutPrevSuballocType) const
8598 {
8599  if(bufferImageGranularity == 1 || IsEmpty())
8600  {
8601  return false;
8602  }
8603 
8604  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8605  bool typeConflictFound = false;
8606  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8607  it != m_Suballocations.cend();
8608  ++it)
8609  {
8610  const VmaSuballocationType suballocType = it->type;
8611  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8612  {
8613  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8614  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8615  {
8616  typeConflictFound = true;
8617  }
8618  inOutPrevSuballocType = suballocType;
8619  }
8620  }
8621 
8622  return typeConflictFound || minAlignment >= bufferImageGranularity;
8623 }
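
// Worked example of the granularity rule (illustrative numbers): with
// bufferImageGranularity = 65536 (64 KiB), a linear resource (buffer) ending at offset
// 70000 and an optimally tiled image starting at offset 100000 both touch the 64 KiB
// page [65536, 131072), which is the situation VmaBlocksOnSamePage() together with
// VmaIsBufferImageGranularityConflict() flags elsewhere in this file. Here the block is
// only scanned once to record whether mixed linear/non-linear types occur at all and
// what the smallest allocation alignment is.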
8624 
8626 // class VmaBlockMetadata_Linear
8627 
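
// Rough layout of a block managed by this class, as implied by the allocation logic
// below (each line is the whole block, low offsets on the left):
//
//     SECOND_VECTOR_EMPTY:        | free |---- 1st vector ---->| free            |
//     SECOND_VECTOR_RING_BUFFER:  |-- 2nd -->   free   |---- 1st vector ---->    |
//     SECOND_VECTOR_DOUBLE_STACK: |---- 1st vector ---->   free   <--- 2nd ----| |
//
// New allocations normally append to the end of the 1st vector. If they instead wrap
// around to the low end of the block, the 2nd vector becomes the head of a ring buffer;
// if they are requested at the upper address, the 2nd vector becomes a stack growing
// downwards from the end of the block. The two modes are mutually exclusive. Freed
// items are only nulled out (hAllocation == VK_NULL_HANDLE) and compacted lazily, which
// is what m_1stNullItemsBeginCount, m_1stNullItemsMiddleCount and m_2ndNullItemsCount
// track.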
8628 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8629  VmaBlockMetadata(hAllocator),
8630  m_SumFreeSize(0),
8631  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8632  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8633  m_1stVectorIndex(0),
8634  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8635  m_1stNullItemsBeginCount(0),
8636  m_1stNullItemsMiddleCount(0),
8637  m_2ndNullItemsCount(0)
8638 {
8639 }
8640 
8641 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8642 {
8643 }
8644 
8645 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8646 {
8647  VmaBlockMetadata::Init(size);
8648  m_SumFreeSize = size;
8649 }
8650 
8651 bool VmaBlockMetadata_Linear::Validate() const
8652 {
8653  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8654  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8655 
8656  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8657  VMA_VALIDATE(!suballocations1st.empty() ||
8658  suballocations2nd.empty() ||
8659  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8660 
8661  if(!suballocations1st.empty())
8662  {
8663  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
8664  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8665  // A null item at the end should have been removed with pop_back().
8666  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8667  }
8668  if(!suballocations2nd.empty())
8669  {
8670  // A null item at the end should have been removed with pop_back().
8671  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8672  }
8673 
8674  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8675  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8676 
8677  VkDeviceSize sumUsedSize = 0;
8678  const size_t suballoc1stCount = suballocations1st.size();
8679  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8680 
8681  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8682  {
8683  const size_t suballoc2ndCount = suballocations2nd.size();
8684  size_t nullItem2ndCount = 0;
8685  for(size_t i = 0; i < suballoc2ndCount; ++i)
8686  {
8687  const VmaSuballocation& suballoc = suballocations2nd[i];
8688  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8689 
8690  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8691  VMA_VALIDATE(suballoc.offset >= offset);
8692 
8693  if(!currFree)
8694  {
8695  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8696  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8697  sumUsedSize += suballoc.size;
8698  }
8699  else
8700  {
8701  ++nullItem2ndCount;
8702  }
8703 
8704  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8705  }
8706 
8707  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8708  }
8709 
8710  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8711  {
8712  const VmaSuballocation& suballoc = suballocations1st[i];
8713  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8714  suballoc.hAllocation == VK_NULL_HANDLE);
8715  }
8716 
8717  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8718 
8719  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8720  {
8721  const VmaSuballocation& suballoc = suballocations1st[i];
8722  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8723 
8724  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8725  VMA_VALIDATE(suballoc.offset >= offset);
8726  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8727 
8728  if(!currFree)
8729  {
8730  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8731  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8732  sumUsedSize += suballoc.size;
8733  }
8734  else
8735  {
8736  ++nullItem1stCount;
8737  }
8738 
8739  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8740  }
8741  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8742 
8743  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8744  {
8745  const size_t suballoc2ndCount = suballocations2nd.size();
8746  size_t nullItem2ndCount = 0;
8747  for(size_t i = suballoc2ndCount; i--; )
8748  {
8749  const VmaSuballocation& suballoc = suballocations2nd[i];
8750  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8751 
8752  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8753  VMA_VALIDATE(suballoc.offset >= offset);
8754 
8755  if(!currFree)
8756  {
8757  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8758  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8759  sumUsedSize += suballoc.size;
8760  }
8761  else
8762  {
8763  ++nullItem2ndCount;
8764  }
8765 
8766  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8767  }
8768 
8769  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8770  }
8771 
8772  VMA_VALIDATE(offset <= GetSize());
8773  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8774 
8775  return true;
8776 }
8777 
8778 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8779 {
8780  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8781  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8782 }
8783 
8784 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8785 {
8786  const VkDeviceSize size = GetSize();
8787 
8788  /*
8789  We don't consider gaps inside allocation vectors with freed allocations because
8790  they are not suitable for reuse in linear allocator. We consider only space that
8791  is available for new allocations.
8792  */
8793  if(IsEmpty())
8794  {
8795  return size;
8796  }
8797 
8798  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8799 
8800  switch(m_2ndVectorMode)
8801  {
8802  case SECOND_VECTOR_EMPTY:
8803  /*
8804  Available space is after end of 1st, as well as before beginning of 1st (which
8805  would make it a ring buffer).
8806  */
8807  {
8808  const size_t suballocations1stCount = suballocations1st.size();
8809  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8810  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8811  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8812  return VMA_MAX(
8813  firstSuballoc.offset,
8814  size - (lastSuballoc.offset + lastSuballoc.size));
8815  }
8816  break;
8817 
8818  case SECOND_VECTOR_RING_BUFFER:
8819  /*
8820  Available space is only between end of 2nd and beginning of 1st.
8821  */
8822  {
8823  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8824  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8825  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8826  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8827  }
8828  break;
8829 
8830  case SECOND_VECTOR_DOUBLE_STACK:
8831  /*
8832  Available space is only between end of 1st and top of 2nd.
8833  */
8834  {
8835  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8836  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8837  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8838  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8839  }
8840  break;
8841 
8842  default:
8843  VMA_ASSERT(0);
8844  return 0;
8845  }
8846 }
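
// Worked example (illustrative numbers): for a 1000-byte block in SECOND_VECTOR_EMPTY
// mode whose 1st vector occupies [100, 300), the function returns
// VMA_MAX(100, 1000 - 300) = 700 - the larger of the hole before the 1st vector
// (usable only by wrapping into ring-buffer mode) and the space after it.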
8847 
8848 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8849 {
8850  const VkDeviceSize size = GetSize();
8851  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8852  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8853  const size_t suballoc1stCount = suballocations1st.size();
8854  const size_t suballoc2ndCount = suballocations2nd.size();
8855 
8856  outInfo.blockCount = 1;
8857  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8858  outInfo.unusedRangeCount = 0;
8859  outInfo.usedBytes = 0;
8860  outInfo.allocationSizeMin = UINT64_MAX;
8861  outInfo.allocationSizeMax = 0;
8862  outInfo.unusedRangeSizeMin = UINT64_MAX;
8863  outInfo.unusedRangeSizeMax = 0;
8864 
8865  VkDeviceSize lastOffset = 0;
8866 
8867  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8868  {
8869  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8870  size_t nextAlloc2ndIndex = 0;
8871  while(lastOffset < freeSpace2ndTo1stEnd)
8872  {
8873  // Find next non-null allocation or move nextAllocIndex to the end.
8874  while(nextAlloc2ndIndex < suballoc2ndCount &&
8875  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8876  {
8877  ++nextAlloc2ndIndex;
8878  }
8879 
8880  // Found non-null allocation.
8881  if(nextAlloc2ndIndex < suballoc2ndCount)
8882  {
8883  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8884 
8885  // 1. Process free space before this allocation.
8886  if(lastOffset < suballoc.offset)
8887  {
8888  // There is free space from lastOffset to suballoc.offset.
8889  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8890  ++outInfo.unusedRangeCount;
8891  outInfo.unusedBytes += unusedRangeSize;
8892  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8893  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8894  }
8895 
8896  // 2. Process this allocation.
8897  // There is allocation with suballoc.offset, suballoc.size.
8898  outInfo.usedBytes += suballoc.size;
8899  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8900  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8901 
8902  // 3. Prepare for next iteration.
8903  lastOffset = suballoc.offset + suballoc.size;
8904  ++nextAlloc2ndIndex;
8905  }
8906  // We are at the end.
8907  else
8908  {
8909  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8910  if(lastOffset < freeSpace2ndTo1stEnd)
8911  {
8912  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8913  ++outInfo.unusedRangeCount;
8914  outInfo.unusedBytes += unusedRangeSize;
8915  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8916  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8917  }
8918 
8919  // End of loop.
8920  lastOffset = freeSpace2ndTo1stEnd;
8921  }
8922  }
8923  }
8924 
8925  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8926  const VkDeviceSize freeSpace1stTo2ndEnd =
8927  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8928  while(lastOffset < freeSpace1stTo2ndEnd)
8929  {
8930  // Find next non-null allocation or move nextAllocIndex to the end.
8931  while(nextAlloc1stIndex < suballoc1stCount &&
8932  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8933  {
8934  ++nextAlloc1stIndex;
8935  }
8936 
8937  // Found non-null allocation.
8938  if(nextAlloc1stIndex < suballoc1stCount)
8939  {
8940  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8941 
8942  // 1. Process free space before this allocation.
8943  if(lastOffset < suballoc.offset)
8944  {
8945  // There is free space from lastOffset to suballoc.offset.
8946  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8947  ++outInfo.unusedRangeCount;
8948  outInfo.unusedBytes += unusedRangeSize;
8949  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8950  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8951  }
8952 
8953  // 2. Process this allocation.
8954  // There is allocation with suballoc.offset, suballoc.size.
8955  outInfo.usedBytes += suballoc.size;
8956  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8957  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8958 
8959  // 3. Prepare for next iteration.
8960  lastOffset = suballoc.offset + suballoc.size;
8961  ++nextAlloc1stIndex;
8962  }
8963  // We are at the end.
8964  else
8965  {
8966  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8967  if(lastOffset < freeSpace1stTo2ndEnd)
8968  {
8969  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8970  ++outInfo.unusedRangeCount;
8971  outInfo.unusedBytes += unusedRangeSize;
8972  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8973  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8974  }
8975 
8976  // End of loop.
8977  lastOffset = freeSpace1stTo2ndEnd;
8978  }
8979  }
8980 
8981  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8982  {
8983  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8984  while(lastOffset < size)
8985  {
8986  // Find next non-null allocation or move nextAllocIndex to the end.
8987  while(nextAlloc2ndIndex != SIZE_MAX &&
8988  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8989  {
8990  --nextAlloc2ndIndex;
8991  }
8992 
8993  // Found non-null allocation.
8994  if(nextAlloc2ndIndex != SIZE_MAX)
8995  {
8996  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8997 
8998  // 1. Process free space before this allocation.
8999  if(lastOffset < suballoc.offset)
9000  {
9001  // There is free space from lastOffset to suballoc.offset.
9002  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9003  ++outInfo.unusedRangeCount;
9004  outInfo.unusedBytes += unusedRangeSize;
9005  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9006  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9007  }
9008 
9009  // 2. Process this allocation.
9010  // There is allocation with suballoc.offset, suballoc.size.
9011  outInfo.usedBytes += suballoc.size;
9012  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9013  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9014 
9015  // 3. Prepare for next iteration.
9016  lastOffset = suballoc.offset + suballoc.size;
9017  --nextAlloc2ndIndex;
9018  }
9019  // We are at the end.
9020  else
9021  {
9022  // There is free space from lastOffset to size.
9023  if(lastOffset < size)
9024  {
9025  const VkDeviceSize unusedRangeSize = size - lastOffset;
9026  ++outInfo.unusedRangeCount;
9027  outInfo.unusedBytes += unusedRangeSize;
9028  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9029  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9030  }
9031 
9032  // End of loop.
9033  lastOffset = size;
9034  }
9035  }
9036  }
9037 
9038  outInfo.unusedBytes = size - outInfo.usedBytes;
9039 }
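
// The three loops above walk the occupied regions in order of increasing offset:
// the 2nd vector while it acts as the ring-buffer head (lowest offsets), then the
// 1st vector, then the 2nd vector while it acts as the upper stack - the last one
// iterated from back() towards index 0, because its items were pushed downwards from
// the end of the block, so back() holds the lowest offset. unusedBytes is recomputed
// from the block size at the end, so the per-range accumulation only feeds the range
// counts and the min/max statistics.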
9040 
9041 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9042 {
9043  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9044  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9045  const VkDeviceSize size = GetSize();
9046  const size_t suballoc1stCount = suballocations1st.size();
9047  const size_t suballoc2ndCount = suballocations2nd.size();
9048 
9049  inoutStats.size += size;
9050 
9051  VkDeviceSize lastOffset = 0;
9052 
9053  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9054  {
9055  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9056  size_t nextAlloc2ndIndex = 0;
9057  while(lastOffset < freeSpace2ndTo1stEnd)
9058  {
9059  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9060  while(nextAlloc2ndIndex < suballoc2ndCount &&
9061  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9062  {
9063  ++nextAlloc2ndIndex;
9064  }
9065 
9066  // Found non-null allocation.
9067  if(nextAlloc2ndIndex < suballoc2ndCount)
9068  {
9069  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9070 
9071  // 1. Process free space before this allocation.
9072  if(lastOffset < suballoc.offset)
9073  {
9074  // There is free space from lastOffset to suballoc.offset.
9075  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9076  inoutStats.unusedSize += unusedRangeSize;
9077  ++inoutStats.unusedRangeCount;
9078  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9079  }
9080 
9081  // 2. Process this allocation.
9082  // There is allocation with suballoc.offset, suballoc.size.
9083  ++inoutStats.allocationCount;
9084 
9085  // 3. Prepare for next iteration.
9086  lastOffset = suballoc.offset + suballoc.size;
9087  ++nextAlloc2ndIndex;
9088  }
9089  // We are at the end.
9090  else
9091  {
9092  if(lastOffset < freeSpace2ndTo1stEnd)
9093  {
9094  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9095  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9096  inoutStats.unusedSize += unusedRangeSize;
9097  ++inoutStats.unusedRangeCount;
9098  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9099  }
9100 
9101  // End of loop.
9102  lastOffset = freeSpace2ndTo1stEnd;
9103  }
9104  }
9105  }
9106 
9107  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9108  const VkDeviceSize freeSpace1stTo2ndEnd =
9109  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9110  while(lastOffset < freeSpace1stTo2ndEnd)
9111  {
9112  // Find next non-null allocation or move nextAllocIndex to the end.
9113  while(nextAlloc1stIndex < suballoc1stCount &&
9114  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9115  {
9116  ++nextAlloc1stIndex;
9117  }
9118 
9119  // Found non-null allocation.
9120  if(nextAlloc1stIndex < suballoc1stCount)
9121  {
9122  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9123 
9124  // 1. Process free space before this allocation.
9125  if(lastOffset < suballoc.offset)
9126  {
9127  // There is free space from lastOffset to suballoc.offset.
9128  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9129  inoutStats.unusedSize += unusedRangeSize;
9130  ++inoutStats.unusedRangeCount;
9131  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9132  }
9133 
9134  // 2. Process this allocation.
9135  // There is allocation with suballoc.offset, suballoc.size.
9136  ++inoutStats.allocationCount;
9137 
9138  // 3. Prepare for next iteration.
9139  lastOffset = suballoc.offset + suballoc.size;
9140  ++nextAlloc1stIndex;
9141  }
9142  // We are at the end.
9143  else
9144  {
9145  if(lastOffset < freeSpace1stTo2ndEnd)
9146  {
9147  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9148  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9149  inoutStats.unusedSize += unusedRangeSize;
9150  ++inoutStats.unusedRangeCount;
9151  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9152  }
9153 
9154  // End of loop.
9155  lastOffset = freeSpace1stTo2ndEnd;
9156  }
9157  }
9158 
9159  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9160  {
9161  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9162  while(lastOffset < size)
9163  {
9164  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9165  while(nextAlloc2ndIndex != SIZE_MAX &&
9166  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9167  {
9168  --nextAlloc2ndIndex;
9169  }
9170 
9171  // Found non-null allocation.
9172  if(nextAlloc2ndIndex != SIZE_MAX)
9173  {
9174  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9175 
9176  // 1. Process free space before this allocation.
9177  if(lastOffset < suballoc.offset)
9178  {
9179  // There is free space from lastOffset to suballoc.offset.
9180  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9181  inoutStats.unusedSize += unusedRangeSize;
9182  ++inoutStats.unusedRangeCount;
9183  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9184  }
9185 
9186  // 2. Process this allocation.
9187  // There is allocation with suballoc.offset, suballoc.size.
9188  ++inoutStats.allocationCount;
9189 
9190  // 3. Prepare for next iteration.
9191  lastOffset = suballoc.offset + suballoc.size;
9192  --nextAlloc2ndIndex;
9193  }
9194  // We are at the end.
9195  else
9196  {
9197  if(lastOffset < size)
9198  {
9199  // There is free space from lastOffset to size.
9200  const VkDeviceSize unusedRangeSize = size - lastOffset;
9201  inoutStats.unusedSize += unusedRangeSize;
9202  ++inoutStats.unusedRangeCount;
9203  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9204  }
9205 
9206  // End of loop.
9207  lastOffset = size;
9208  }
9209  }
9210  }
9211 }
9212 
9213 #if VMA_STATS_STRING_ENABLED
9214 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9215 {
9216  const VkDeviceSize size = GetSize();
9217  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9218  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9219  const size_t suballoc1stCount = suballocations1st.size();
9220  const size_t suballoc2ndCount = suballocations2nd.size();
9221 
9222  // FIRST PASS
9223 
9224  size_t unusedRangeCount = 0;
9225  VkDeviceSize usedBytes = 0;
9226 
9227  VkDeviceSize lastOffset = 0;
9228 
9229  size_t alloc2ndCount = 0;
9230  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9231  {
9232  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9233  size_t nextAlloc2ndIndex = 0;
9234  while(lastOffset < freeSpace2ndTo1stEnd)
9235  {
9236  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9237  while(nextAlloc2ndIndex < suballoc2ndCount &&
9238  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9239  {
9240  ++nextAlloc2ndIndex;
9241  }
9242 
9243  // Found non-null allocation.
9244  if(nextAlloc2ndIndex < suballoc2ndCount)
9245  {
9246  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9247 
9248  // 1. Process free space before this allocation.
9249  if(lastOffset < suballoc.offset)
9250  {
9251  // There is free space from lastOffset to suballoc.offset.
9252  ++unusedRangeCount;
9253  }
9254 
9255  // 2. Process this allocation.
9256  // There is allocation with suballoc.offset, suballoc.size.
9257  ++alloc2ndCount;
9258  usedBytes += suballoc.size;
9259 
9260  // 3. Prepare for next iteration.
9261  lastOffset = suballoc.offset + suballoc.size;
9262  ++nextAlloc2ndIndex;
9263  }
9264  // We are at the end.
9265  else
9266  {
9267  if(lastOffset < freeSpace2ndTo1stEnd)
9268  {
9269  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9270  ++unusedRangeCount;
9271  }
9272 
9273  // End of loop.
9274  lastOffset = freeSpace2ndTo1stEnd;
9275  }
9276  }
9277  }
9278 
9279  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9280  size_t alloc1stCount = 0;
9281  const VkDeviceSize freeSpace1stTo2ndEnd =
9282  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9283  while(lastOffset < freeSpace1stTo2ndEnd)
9284  {
9285  // Find next non-null allocation or move nextAllocIndex to the end.
9286  while(nextAlloc1stIndex < suballoc1stCount &&
9287  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9288  {
9289  ++nextAlloc1stIndex;
9290  }
9291 
9292  // Found non-null allocation.
9293  if(nextAlloc1stIndex < suballoc1stCount)
9294  {
9295  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9296 
9297  // 1. Process free space before this allocation.
9298  if(lastOffset < suballoc.offset)
9299  {
9300  // There is free space from lastOffset to suballoc.offset.
9301  ++unusedRangeCount;
9302  }
9303 
9304  // 2. Process this allocation.
9305  // There is allocation with suballoc.offset, suballoc.size.
9306  ++alloc1stCount;
9307  usedBytes += suballoc.size;
9308 
9309  // 3. Prepare for next iteration.
9310  lastOffset = suballoc.offset + suballoc.size;
9311  ++nextAlloc1stIndex;
9312  }
9313  // We are at the end.
9314  else
9315  {
9316  if(lastOffset < freeSpace1stTo2ndEnd)
9317  {
9318  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9319  ++unusedRangeCount;
9320  }
9321 
9322  // End of loop.
9323  lastOffset = freeSpace1stTo2ndEnd;
9324  }
9325  }
9326 
9327  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9328  {
9329  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9330  while(lastOffset < size)
9331  {
9332  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9333  while(nextAlloc2ndIndex != SIZE_MAX &&
9334  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9335  {
9336  --nextAlloc2ndIndex;
9337  }
9338 
9339  // Found non-null allocation.
9340  if(nextAlloc2ndIndex != SIZE_MAX)
9341  {
9342  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9343 
9344  // 1. Process free space before this allocation.
9345  if(lastOffset < suballoc.offset)
9346  {
9347  // There is free space from lastOffset to suballoc.offset.
9348  ++unusedRangeCount;
9349  }
9350 
9351  // 2. Process this allocation.
9352  // There is allocation with suballoc.offset, suballoc.size.
9353  ++alloc2ndCount;
9354  usedBytes += suballoc.size;
9355 
9356  // 3. Prepare for next iteration.
9357  lastOffset = suballoc.offset + suballoc.size;
9358  --nextAlloc2ndIndex;
9359  }
9360  // We are at the end.
9361  else
9362  {
9363  if(lastOffset < size)
9364  {
9365  // There is free space from lastOffset to size.
9366  ++unusedRangeCount;
9367  }
9368 
9369  // End of loop.
9370  lastOffset = size;
9371  }
9372  }
9373  }
9374 
9375  const VkDeviceSize unusedBytes = size - usedBytes;
9376  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9377 
9378  // SECOND PASS
9379  lastOffset = 0;
9380 
9381  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9382  {
9383  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9384  size_t nextAlloc2ndIndex = 0;
9385  while(lastOffset < freeSpace2ndTo1stEnd)
9386  {
9387  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9388  while(nextAlloc2ndIndex < suballoc2ndCount &&
9389  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9390  {
9391  ++nextAlloc2ndIndex;
9392  }
9393 
9394  // Found non-null allocation.
9395  if(nextAlloc2ndIndex < suballoc2ndCount)
9396  {
9397  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9398 
9399  // 1. Process free space before this allocation.
9400  if(lastOffset < suballoc.offset)
9401  {
9402  // There is free space from lastOffset to suballoc.offset.
9403  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9404  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9405  }
9406 
9407  // 2. Process this allocation.
9408  // There is allocation with suballoc.offset, suballoc.size.
9409  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9410 
9411  // 3. Prepare for next iteration.
9412  lastOffset = suballoc.offset + suballoc.size;
9413  ++nextAlloc2ndIndex;
9414  }
9415  // We are at the end.
9416  else
9417  {
9418  if(lastOffset < freeSpace2ndTo1stEnd)
9419  {
9420  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9421  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9422  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9423  }
9424 
9425  // End of loop.
9426  lastOffset = freeSpace2ndTo1stEnd;
9427  }
9428  }
9429  }
9430 
9431  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9432  while(lastOffset < freeSpace1stTo2ndEnd)
9433  {
9434  // Find next non-null allocation or move nextAllocIndex to the end.
9435  while(nextAlloc1stIndex < suballoc1stCount &&
9436  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9437  {
9438  ++nextAlloc1stIndex;
9439  }
9440 
9441  // Found non-null allocation.
9442  if(nextAlloc1stIndex < suballoc1stCount)
9443  {
9444  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9445 
9446  // 1. Process free space before this allocation.
9447  if(lastOffset < suballoc.offset)
9448  {
9449  // There is free space from lastOffset to suballoc.offset.
9450  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9451  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9452  }
9453 
9454  // 2. Process this allocation.
9455  // There is allocation with suballoc.offset, suballoc.size.
9456  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9457 
9458  // 3. Prepare for next iteration.
9459  lastOffset = suballoc.offset + suballoc.size;
9460  ++nextAlloc1stIndex;
9461  }
9462  // We are at the end.
9463  else
9464  {
9465  if(lastOffset < freeSpace1stTo2ndEnd)
9466  {
9467  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9468  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9469  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9470  }
9471 
9472  // End of loop.
9473  lastOffset = freeSpace1stTo2ndEnd;
9474  }
9475  }
9476 
9477  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9478  {
9479  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9480  while(lastOffset < size)
9481  {
9482  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9483  while(nextAlloc2ndIndex != SIZE_MAX &&
9484  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9485  {
9486  --nextAlloc2ndIndex;
9487  }
9488 
9489  // Found non-null allocation.
9490  if(nextAlloc2ndIndex != SIZE_MAX)
9491  {
9492  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9493 
9494  // 1. Process free space before this allocation.
9495  if(lastOffset < suballoc.offset)
9496  {
9497  // There is free space from lastOffset to suballoc.offset.
9498  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9499  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9500  }
9501 
9502  // 2. Process this allocation.
9503  // There is allocation with suballoc.offset, suballoc.size.
9504  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9505 
9506  // 3. Prepare for next iteration.
9507  lastOffset = suballoc.offset + suballoc.size;
9508  --nextAlloc2ndIndex;
9509  }
9510  // We are at the end.
9511  else
9512  {
9513  if(lastOffset < size)
9514  {
9515  // There is free space from lastOffset to size.
9516  const VkDeviceSize unusedRangeSize = size - lastOffset;
9517  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9518  }
9519 
9520  // End of loop.
9521  lastOffset = size;
9522  }
9523  }
9524  }
9525 
9526  PrintDetailedMap_End(json);
9527 }
9528 #endif // #if VMA_STATS_STRING_ENABLED
9529 
9530 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9531  uint32_t currentFrameIndex,
9532  uint32_t frameInUseCount,
9533  VkDeviceSize bufferImageGranularity,
9534  VkDeviceSize allocSize,
9535  VkDeviceSize allocAlignment,
9536  bool upperAddress,
9537  VmaSuballocationType allocType,
9538  bool canMakeOtherLost,
9539  uint32_t strategy,
9540  VmaAllocationRequest* pAllocationRequest)
9541 {
9542  VMA_ASSERT(allocSize > 0);
9543  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9544  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9545  VMA_HEAVY_ASSERT(Validate());
9546 
9547  const VkDeviceSize size = GetSize();
9548  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9549  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9550 
9551  if(upperAddress)
9552  {
9553  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9554  {
9555  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9556  return false;
9557  }
9558 
9559  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9560  if(allocSize > size)
9561  {
9562  return false;
9563  }
9564  VkDeviceSize resultBaseOffset = size - allocSize;
9565  if(!suballocations2nd.empty())
9566  {
9567  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9568  resultBaseOffset = lastSuballoc.offset - allocSize;
9569  if(allocSize > lastSuballoc.offset)
9570  {
9571  return false;
9572  }
9573  }
9574 
9575  // Start from offset equal to end of free space.
9576  VkDeviceSize resultOffset = resultBaseOffset;
9577 
9578  // Apply VMA_DEBUG_MARGIN at the end.
9579  if(VMA_DEBUG_MARGIN > 0)
9580  {
9581  if(resultOffset < VMA_DEBUG_MARGIN)
9582  {
9583  return false;
9584  }
9585  resultOffset -= VMA_DEBUG_MARGIN;
9586  }
9587 
9588  // Apply alignment.
9589  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9590 
9591  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9592  // Make bigger alignment if necessary.
9593  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9594  {
9595  bool bufferImageGranularityConflict = false;
9596  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9597  {
9598  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9599  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9600  {
9601  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9602  {
9603  bufferImageGranularityConflict = true;
9604  break;
9605  }
9606  }
9607  else
9608  // Already on previous page.
9609  break;
9610  }
9611  if(bufferImageGranularityConflict)
9612  {
9613  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9614  }
9615  }
9616 
9617  // There is enough free space.
9618  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9619  suballocations1st.back().offset + suballocations1st.back().size :
9620  0;
9621  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9622  {
9623  // Check previous suballocations for BufferImageGranularity conflicts.
9624  // If conflict exists, allocation cannot be made here.
9625  if(bufferImageGranularity > 1)
9626  {
9627  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9628  {
9629  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9630  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9631  {
9632  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9633  {
9634  return false;
9635  }
9636  }
9637  else
9638  {
9639  // Already on next page.
9640  break;
9641  }
9642  }
9643  }
9644 
9645  // All tests passed: Success.
9646  pAllocationRequest->offset = resultOffset;
9647  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9648  pAllocationRequest->sumItemSize = 0;
9649  // pAllocationRequest->item unused.
9650  pAllocationRequest->itemsToMakeLostCount = 0;
9651  return true;
9652  }
9653  }
9654  else // !upperAddress
9655  {
9656  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9657  {
9658  // Try to allocate at the end of 1st vector.
9659 
9660  VkDeviceSize resultBaseOffset = 0;
9661  if(!suballocations1st.empty())
9662  {
9663  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9664  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9665  }
9666 
9667  // Start from offset equal to beginning of free space.
9668  VkDeviceSize resultOffset = resultBaseOffset;
9669 
9670  // Apply VMA_DEBUG_MARGIN at the beginning.
9671  if(VMA_DEBUG_MARGIN > 0)
9672  {
9673  resultOffset += VMA_DEBUG_MARGIN;
9674  }
9675 
9676  // Apply alignment.
9677  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9678 
9679  // Check previous suballocations for BufferImageGranularity conflicts.
9680  // Make bigger alignment if necessary.
9681  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9682  {
9683  bool bufferImageGranularityConflict = false;
9684  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9685  {
9686  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9687  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9688  {
9689  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9690  {
9691  bufferImageGranularityConflict = true;
9692  break;
9693  }
9694  }
9695  else
9696  // Already on previous page.
9697  break;
9698  }
9699  if(bufferImageGranularityConflict)
9700  {
9701  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9702  }
9703  }
9704 
9705  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9706  suballocations2nd.back().offset : size;
9707 
9708  // There is enough free space at the end after alignment.
9709  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9710  {
9711  // Check next suballocations for BufferImageGranularity conflicts.
9712  // If conflict exists, allocation cannot be made here.
9713  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9714  {
9715  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9716  {
9717  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9718  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9719  {
9720  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9721  {
9722  return false;
9723  }
9724  }
9725  else
9726  {
9727  // Already on previous page.
9728  break;
9729  }
9730  }
9731  }
9732 
9733  // All tests passed: Success.
9734  pAllocationRequest->offset = resultOffset;
9735  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9736  pAllocationRequest->sumItemSize = 0;
9737  // pAllocationRequest->item unused.
9738  pAllocationRequest->itemsToMakeLostCount = 0;
9739  return true;
9740  }
9741  }
9742 
9743  // Wrap-around: try to allocate at the end of the 2nd vector, treating the
9744  // beginning of the 1st vector as the end of the available free space.
9745  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9746  {
9747  VMA_ASSERT(!suballocations1st.empty());
9748 
9749  VkDeviceSize resultBaseOffset = 0;
9750  if(!suballocations2nd.empty())
9751  {
9752  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9753  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9754  }
9755 
9756  // Start from offset equal to beginning of free space.
9757  VkDeviceSize resultOffset = resultBaseOffset;
9758 
9759  // Apply VMA_DEBUG_MARGIN at the beginning.
9760  if(VMA_DEBUG_MARGIN > 0)
9761  {
9762  resultOffset += VMA_DEBUG_MARGIN;
9763  }
9764 
9765  // Apply alignment.
9766  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9767 
9768  // Check previous suballocations for BufferImageGranularity conflicts.
9769  // Make bigger alignment if necessary.
9770  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9771  {
9772  bool bufferImageGranularityConflict = false;
9773  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9774  {
9775  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9776  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9777  {
9778  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9779  {
9780  bufferImageGranularityConflict = true;
9781  break;
9782  }
9783  }
9784  else
9785  // Already on previous page.
9786  break;
9787  }
9788  if(bufferImageGranularityConflict)
9789  {
9790  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9791  }
9792  }
9793 
9794  pAllocationRequest->itemsToMakeLostCount = 0;
9795  pAllocationRequest->sumItemSize = 0;
9796  size_t index1st = m_1stNullItemsBeginCount;
9797 
9798  if(canMakeOtherLost)
9799  {
9800  while(index1st < suballocations1st.size() &&
9801  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
9802  {
9803  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
9804  const VmaSuballocation& suballoc = suballocations1st[index1st];
9805  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9806  {
9807  // No problem.
9808  }
9809  else
9810  {
9811  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9812  if(suballoc.hAllocation->CanBecomeLost() &&
9813  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9814  {
9815  ++pAllocationRequest->itemsToMakeLostCount;
9816  pAllocationRequest->sumItemSize += suballoc.size;
9817  }
9818  else
9819  {
9820  return false;
9821  }
9822  }
9823  ++index1st;
9824  }
9825 
9826  // Check next suballocations for BufferImageGranularity conflicts.
9827  // If conflict exists, we must mark more allocations lost or fail.
9828  if(bufferImageGranularity > 1)
9829  {
9830  while(index1st < suballocations1st.size())
9831  {
9832  const VmaSuballocation& suballoc = suballocations1st[index1st];
9833  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9834  {
9835  if(suballoc.hAllocation != VK_NULL_HANDLE)
9836  {
9837  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9838  if(suballoc.hAllocation->CanBecomeLost() &&
9839  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9840  {
9841  ++pAllocationRequest->itemsToMakeLostCount;
9842  pAllocationRequest->sumItemSize += suballoc.size;
9843  }
9844  else
9845  {
9846  return false;
9847  }
9848  }
9849  }
9850  else
9851  {
9852  // Already on next page.
9853  break;
9854  }
9855  ++index1st;
9856  }
9857  }
9858  }
9859 
9860  // There is enough free space at the end after alignment.
9861  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9862  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9863  {
9864  // Check next suballocations for BufferImageGranularity conflicts.
9865  // If conflict exists, allocation cannot be made here.
9866  if(bufferImageGranularity > 1)
9867  {
9868  for(size_t nextSuballocIndex = index1st;
9869  nextSuballocIndex < suballocations1st.size();
9870  nextSuballocIndex++)
9871  {
9872  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9873  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9874  {
9875  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9876  {
9877  return false;
9878  }
9879  }
9880  else
9881  {
9882  // Already on next page.
9883  break;
9884  }
9885  }
9886  }
9887 
9888  // All tests passed: Success.
9889  pAllocationRequest->offset = resultOffset;
9890  pAllocationRequest->sumFreeSize =
9891  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9892  - resultBaseOffset
9893  - pAllocationRequest->sumItemSize;
9894  // pAllocationRequest->item unused.
9895  return true;
9896  }
9897  }
9898  }
9899 
9900  return false;
9901 }
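
// Minimal usage sketch of the two linear strategies resolved above (assumes an existing
// VmaAllocator 'allocator', a valid 'memoryTypeIndex' and VkMemoryRequirements 'memReq';
// error handling omitted):
//
//     VmaPoolCreateInfo poolInfo = {};
//     poolInfo.memoryTypeIndex = memoryTypeIndex;
//     poolInfo.blockSize = 64ull * 1024 * 1024;
//     poolInfo.maxBlockCount = 1;
//     poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//     VmaPool pool;
//     vmaCreatePool(allocator, &poolInfo, &pool);
//
//     VmaAllocationCreateInfo allocInfo = {};
//     allocInfo.pool = pool;
//     VmaAllocation lowerAlloc;
//     vmaAllocateMemory(allocator, &memReq, &allocInfo, &lowerAlloc, nullptr); // !upperAddress path
//
//     allocInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
//     VmaAllocation upperAlloc;
//     vmaAllocateMemory(allocator, &memReq, &allocInfo, &upperAlloc, nullptr); // upperAddress path (double stack)
//
// As asserted above, a pool that has already wrapped into ring-buffer mode cannot also
// be used as a double stack, and vice versa.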
9902 
9903 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9904  uint32_t currentFrameIndex,
9905  uint32_t frameInUseCount,
9906  VmaAllocationRequest* pAllocationRequest)
9907 {
9908  if(pAllocationRequest->itemsToMakeLostCount == 0)
9909  {
9910  return true;
9911  }
9912 
9913  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9914 
9915  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9916  size_t index1st = m_1stNullItemsBeginCount;
9917  size_t madeLostCount = 0;
9918  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9919  {
9920  VMA_ASSERT(index1st < suballocations1st.size());
9921  VmaSuballocation& suballoc = suballocations1st[index1st];
9922  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9923  {
9924  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9925  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
9926  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9927  {
9928  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9929  suballoc.hAllocation = VK_NULL_HANDLE;
9930  m_SumFreeSize += suballoc.size;
9931  ++m_1stNullItemsMiddleCount;
9932  ++madeLostCount;
9933  }
9934  else
9935  {
9936  return false;
9937  }
9938  }
9939  ++index1st;
9940  }
9941 
9942  CleanupAfterFree();
9943  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
9944 
9945  return true;
9946 }
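
// Lost allocations only come into play for allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and a non-zero frameInUseCount; a typical
// per-frame pattern on the application side looks roughly like this (sketch, assumes
// an existing 'allocator' and 'alloc'):
//
//     vmaSetCurrentFrameIndex(allocator, frameIndex);
//     if(!vmaTouchAllocation(allocator, alloc))
//     {
//         // The allocation was made lost by a newer request - recreate the resource.
//     }
//
// CanBecomeLost()/MakeLost() above implement the block-level side of that contract: an
// allocation may be sacrificed only when its last-use frame is more than frameInUseCount
// frames behind the current frame index.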
9947 
9948 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9949 {
9950  uint32_t lostAllocationCount = 0;
9951 
9952  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9953  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9954  {
9955  VmaSuballocation& suballoc = suballocations1st[i];
9956  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9957  suballoc.hAllocation->CanBecomeLost() &&
9958  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9959  {
9960  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9961  suballoc.hAllocation = VK_NULL_HANDLE;
9962  ++m_1stNullItemsMiddleCount;
9963  m_SumFreeSize += suballoc.size;
9964  ++lostAllocationCount;
9965  }
9966  }
9967 
9968  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9969  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9970  {
9971  VmaSuballocation& suballoc = suballocations2nd[i];
9972  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9973  suballoc.hAllocation->CanBecomeLost() &&
9974  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9975  {
9976  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9977  suballoc.hAllocation = VK_NULL_HANDLE;
9978  ++m_2ndNullItemsCount;
9979  ++lostAllocationCount;
9980  }
9981  }
9982 
9983  if(lostAllocationCount)
9984  {
9985  CleanupAfterFree();
9986  }
9987 
9988  return lostAllocationCount;
9989 }
9990 
9991 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9992 {
9993  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9994  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9995  {
9996  const VmaSuballocation& suballoc = suballocations1st[i];
9997  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9998  {
9999  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10000  {
10001  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10002  return VK_ERROR_VALIDATION_FAILED_EXT;
10003  }
10004  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10005  {
10006  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10007  return VK_ERROR_VALIDATION_FAILED_EXT;
10008  }
10009  }
10010  }
10011 
10012  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10013  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10014  {
10015  const VmaSuballocation& suballoc = suballocations2nd[i];
10016  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10017  {
10018  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10019  {
10020  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10021  return VK_ERROR_VALIDATION_FAILED_EXT;
10022  }
10023  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10024  {
10025  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10026  return VK_ERROR_VALIDATION_FAILED_EXT;
10027  }
10028  }
10029  }
10030 
10031  return VK_SUCCESS;
10032 }
10033 
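// Records the allocation described by request. Upper-address allocations go to the
// end of the 2nd vector used as a double stack; otherwise the allocation is appended
// to the 1st vector, or wraps around into the 2nd vector used as a ring buffer.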
10034 void VmaBlockMetadata_Linear::Alloc(
10035  const VmaAllocationRequest& request,
10036  VmaSuballocationType type,
10037  VkDeviceSize allocSize,
10038  bool upperAddress,
10039  VmaAllocation hAllocation)
10040 {
10041  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10042 
10043  if(upperAddress)
10044  {
10045  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10046  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10047  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10048  suballocations2nd.push_back(newSuballoc);
10049  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10050  }
10051  else
10052  {
10053  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10054 
10055  // First allocation.
10056  if(suballocations1st.empty())
10057  {
10058  suballocations1st.push_back(newSuballoc);
10059  }
10060  else
10061  {
10062  // New allocation at the end of 1st vector.
10063  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
10064  {
10065  // Check if it fits before the end of the block.
10066  VMA_ASSERT(request.offset + allocSize <= GetSize());
10067  suballocations1st.push_back(newSuballoc);
10068  }
10069  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10070  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
10071  {
10072  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10073 
10074  switch(m_2ndVectorMode)
10075  {
10076  case SECOND_VECTOR_EMPTY:
10077  // First allocation from second part ring buffer.
10078  VMA_ASSERT(suballocations2nd.empty());
10079  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10080  break;
10081  case SECOND_VECTOR_RING_BUFFER:
10082  // 2-part ring buffer is already started.
10083  VMA_ASSERT(!suballocations2nd.empty());
10084  break;
10085  case SECOND_VECTOR_DOUBLE_STACK:
10086  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10087  break;
10088  default:
10089  VMA_ASSERT(0);
10090  }
10091 
10092  suballocations2nd.push_back(newSuballoc);
10093  }
10094  else
10095  {
10096  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10097  }
10098  }
10099  }
10100 
10101  m_SumFreeSize -= newSuballoc.size;
10102 }
10103 
10104 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10105 {
10106  FreeAtOffset(allocation->GetOffset());
10107 }
10108 
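// Frees the suballocation at the given offset. Cheap cases are handled first
// (first item of the 1st vector, last item of the 2nd or 1st vector); otherwise
// the item is located with a binary search over the middle of the respective vector.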
10109 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10110 {
10111  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10112  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10113 
10114  if(!suballocations1st.empty())
10115  {
10116  // First allocation: Mark it as next empty at the beginning.
10117  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10118  if(firstSuballoc.offset == offset)
10119  {
10120  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10121  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10122  m_SumFreeSize += firstSuballoc.size;
10123  ++m_1stNullItemsBeginCount;
10124  CleanupAfterFree();
10125  return;
10126  }
10127  }
10128 
10129  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10130  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10131  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10132  {
10133  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10134  if(lastSuballoc.offset == offset)
10135  {
10136  m_SumFreeSize += lastSuballoc.size;
10137  suballocations2nd.pop_back();
10138  CleanupAfterFree();
10139  return;
10140  }
10141  }
10142  // Last allocation in 1st vector.
10143  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10144  {
10145  VmaSuballocation& lastSuballoc = suballocations1st.back();
10146  if(lastSuballoc.offset == offset)
10147  {
10148  m_SumFreeSize += lastSuballoc.size;
10149  suballocations1st.pop_back();
10150  CleanupAfterFree();
10151  return;
10152  }
10153  }
10154 
10155  // Item from the middle of 1st vector.
10156  {
10157  VmaSuballocation refSuballoc;
10158  refSuballoc.offset = offset;
10159  // The remaining members are intentionally left uninitialized for better performance.
10160  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10161  suballocations1st.begin() + m_1stNullItemsBeginCount,
10162  suballocations1st.end(),
10163  refSuballoc);
10164  if(it != suballocations1st.end())
10165  {
10166  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10167  it->hAllocation = VK_NULL_HANDLE;
10168  ++m_1stNullItemsMiddleCount;
10169  m_SumFreeSize += it->size;
10170  CleanupAfterFree();
10171  return;
10172  }
10173  }
10174 
10175  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10176  {
10177  // Item from the middle of 2nd vector.
10178  VmaSuballocation refSuballoc;
10179  refSuballoc.offset = offset;
10180  // The remaining members are intentionally left uninitialized for better performance.
10181  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10182  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10183  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10184  if(it != suballocations2nd.end())
10185  {
10186  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10187  it->hAllocation = VK_NULL_HANDLE;
10188  ++m_2ndNullItemsCount;
10189  m_SumFreeSize += it->size;
10190  CleanupAfterFree();
10191  return;
10192  }
10193  }
10194 
10195  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10196 }
10197 
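// Heuristic: compaction of the 1st vector pays off when it is reasonably large
// (more than 32 items) and null items outnumber used items by at least 3:2.
// Example: 40 suballocations with 25 null items -> 25*2 = 50 >= (40-25)*3 = 45, so compact.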
10198 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10199 {
10200  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10201  const size_t suballocCount = AccessSuballocations1st().size();
10202  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10203 }
10204 
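// Housekeeping after any free: trims null items from the ends of both vectors,
// optionally compacts the 1st vector, and when the 1st vector becomes empty while
// the ring buffer is in use, swaps the roles of the two vectors.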
10205 void VmaBlockMetadata_Linear::CleanupAfterFree()
10206 {
10207  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10208  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10209 
10210  if(IsEmpty())
10211  {
10212  suballocations1st.clear();
10213  suballocations2nd.clear();
10214  m_1stNullItemsBeginCount = 0;
10215  m_1stNullItemsMiddleCount = 0;
10216  m_2ndNullItemsCount = 0;
10217  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10218  }
10219  else
10220  {
10221  const size_t suballoc1stCount = suballocations1st.size();
10222  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10223  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10224 
10225  // Find more null items at the beginning of 1st vector.
10226  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10227  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10228  {
10229  ++m_1stNullItemsBeginCount;
10230  --m_1stNullItemsMiddleCount;
10231  }
10232 
10233  // Find more null items at the end of 1st vector.
10234  while(m_1stNullItemsMiddleCount > 0 &&
10235  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10236  {
10237  --m_1stNullItemsMiddleCount;
10238  suballocations1st.pop_back();
10239  }
10240 
10241  // Find more null items at the end of 2nd vector.
10242  while(m_2ndNullItemsCount > 0 &&
10243  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10244  {
10245  --m_2ndNullItemsCount;
10246  suballocations2nd.pop_back();
10247  }
10248 
10249  if(ShouldCompact1st())
10250  {
10251  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10252  size_t srcIndex = m_1stNullItemsBeginCount;
10253  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10254  {
10255  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10256  {
10257  ++srcIndex;
10258  }
10259  if(dstIndex != srcIndex)
10260  {
10261  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10262  }
10263  ++srcIndex;
10264  }
10265  suballocations1st.resize(nonNullItemCount);
10266  m_1stNullItemsBeginCount = 0;
10267  m_1stNullItemsMiddleCount = 0;
10268  }
10269 
10270  // 2nd vector became empty.
10271  if(suballocations2nd.empty())
10272  {
10273  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10274  }
10275 
10276  // 1st vector became empty.
10277  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10278  {
10279  suballocations1st.clear();
10280  m_1stNullItemsBeginCount = 0;
10281 
10282  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10283  {
10284  // Swap 1st with 2nd. Now 2nd is empty.
10285  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10286  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10287  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10288  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10289  {
10290  ++m_1stNullItemsBeginCount;
10291  --m_1stNullItemsMiddleCount;
10292  }
10293  m_2ndNullItemsCount = 0;
10294  m_1stVectorIndex ^= 1;
10295  }
10296  }
10297  }
10298 
10299  VMA_HEAVY_ASSERT(Validate());
10300 }
10301 
10302 
10304 // class VmaBlockMetadata_Buddy
10305 
10306 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10307  VmaBlockMetadata(hAllocator),
10308  m_Root(VMA_NULL),
10309  m_AllocationCount(0),
10310  m_FreeCount(1),
10311  m_SumFreeSize(0)
10312 {
10313  memset(m_FreeList, 0, sizeof(m_FreeList));
10314 }
10315 
10316 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10317 {
10318  DeleteNode(m_Root);
10319 }
10320 
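// Usable size is rounded down to the largest power of two not exceeding the block
// size; the remainder is reported as unusable. Level count is derived by halving
// the node size until MIN_NODE_SIZE or MAX_LEVELS is reached.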
10321 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10322 {
10323  VmaBlockMetadata::Init(size);
10324 
10325  m_UsableSize = VmaPrevPow2(size);
10326  m_SumFreeSize = m_UsableSize;
10327 
10328  // Calculate m_LevelCount.
10329  m_LevelCount = 1;
10330  while(m_LevelCount < MAX_LEVELS &&
10331  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10332  {
10333  ++m_LevelCount;
10334  }
10335 
10336  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10337  rootNode->offset = 0;
10338  rootNode->type = Node::TYPE_FREE;
10339  rootNode->parent = VMA_NULL;
10340  rootNode->buddy = VMA_NULL;
10341 
10342  m_Root = rootNode;
10343  AddToFreeListFront(0, rootNode);
10344 }
10345 
10346 bool VmaBlockMetadata_Buddy::Validate() const
10347 {
10348  // Validate tree.
10349  ValidationContext ctx;
10350  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10351  {
10352  VMA_VALIDATE(false && "ValidateNode failed.");
10353  }
10354  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10355  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10356 
10357  // Validate free node lists.
10358  for(uint32_t level = 0; level < m_LevelCount; ++level)
10359  {
10360  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10361  m_FreeList[level].front->free.prev == VMA_NULL);
10362 
10363  for(Node* node = m_FreeList[level].front;
10364  node != VMA_NULL;
10365  node = node->free.next)
10366  {
10367  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10368 
10369  if(node->free.next == VMA_NULL)
10370  {
10371  VMA_VALIDATE(m_FreeList[level].back == node);
10372  }
10373  else
10374  {
10375  VMA_VALIDATE(node->free.next->free.prev == node);
10376  }
10377  }
10378  }
10379 
10380  // Validate that free lists at higher levels are empty.
10381  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10382  {
10383  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10384  }
10385 
10386  return true;
10387 }
10388 
10389 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10390 {
10391  for(uint32_t level = 0; level < m_LevelCount; ++level)
10392  {
10393  if(m_FreeList[level].front != VMA_NULL)
10394  {
10395  return LevelToNodeSize(level);
10396  }
10397  }
10398  return 0;
10399 }
10400 
10401 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10402 {
10403  const VkDeviceSize unusableSize = GetUnusableSize();
10404 
10405  outInfo.blockCount = 1;
10406 
10407  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10408  outInfo.usedBytes = outInfo.unusedBytes = 0;
10409 
10410  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10411  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10412  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10413 
10414  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10415 
10416  if(unusableSize > 0)
10417  {
10418  ++outInfo.unusedRangeCount;
10419  outInfo.unusedBytes += unusableSize;
10420  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10421  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10422  }
10423 }
10424 
10425 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10426 {
10427  const VkDeviceSize unusableSize = GetUnusableSize();
10428 
10429  inoutStats.size += GetSize();
10430  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10431  inoutStats.allocationCount += m_AllocationCount;
10432  inoutStats.unusedRangeCount += m_FreeCount;
10433  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10434 
10435  if(unusableSize > 0)
10436  {
10437  ++inoutStats.unusedRangeCount;
10438  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10439  }
10440 }
10441 
10442 #if VMA_STATS_STRING_ENABLED
10443 
10444 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10445 {
10446  // TODO optimize
10447  VmaStatInfo stat;
10448  CalcAllocationStatInfo(stat);
10449 
10450  PrintDetailedMap_Begin(
10451  json,
10452  stat.unusedBytes,
10453  stat.allocationCount,
10454  stat.unusedRangeCount);
10455 
10456  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10457 
10458  const VkDeviceSize unusableSize = GetUnusableSize();
10459  if(unusableSize > 0)
10460  {
10461  PrintDetailedMap_UnusedRange(json,
10462  m_UsableSize, // offset
10463  unusableSize); // size
10464  }
10465 
10466  PrintDetailedMap_End(json);
10467 }
10468 
10469 #endif // #if VMA_STATS_STRING_ENABLED
10470 
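// Scans free lists from the target level toward larger nodes (lower level indices)
// and picks the first free node whose offset satisfies the requested alignment.
// The chosen level is passed to Alloc() through pAllocationRequest->customData.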
10471 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10472  uint32_t currentFrameIndex,
10473  uint32_t frameInUseCount,
10474  VkDeviceSize bufferImageGranularity,
10475  VkDeviceSize allocSize,
10476  VkDeviceSize allocAlignment,
10477  bool upperAddress,
10478  VmaSuballocationType allocType,
10479  bool canMakeOtherLost,
10480  uint32_t strategy,
10481  VmaAllocationRequest* pAllocationRequest)
10482 {
10483  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10484 
10485  // Simple way to respect bufferImageGranularity. May be optimized some day.
10486  // Whenever it might be an OPTIMAL image...
10487  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10488  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10489  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10490  {
10491  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10492  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10493  }
10494 
10495  if(allocSize > m_UsableSize)
10496  {
10497  return false;
10498  }
10499 
10500  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10501  for(uint32_t level = targetLevel + 1; level--; )
10502  {
10503  for(Node* freeNode = m_FreeList[level].front;
10504  freeNode != VMA_NULL;
10505  freeNode = freeNode->free.next)
10506  {
10507  if(freeNode->offset % allocAlignment == 0)
10508  {
10509  pAllocationRequest->offset = freeNode->offset;
10510  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10511  pAllocationRequest->sumItemSize = 0;
10512  pAllocationRequest->itemsToMakeLostCount = 0;
10513  pAllocationRequest->customData = (void*)(uintptr_t)level;
10514  return true;
10515  }
10516  }
10517  }
10518 
10519  return false;
10520 }
10521 
10522 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10523  uint32_t currentFrameIndex,
10524  uint32_t frameInUseCount,
10525  VmaAllocationRequest* pAllocationRequest)
10526 {
10527  /*
10528  Lost allocations are not supported in buddy allocator at the moment.
10529  Support might be added in the future.
10530  */
10531  return pAllocationRequest->itemsToMakeLostCount == 0;
10532 }
10533 
10534 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10535 {
10536  /*
10537  Lost allocations are not supported in buddy allocator at the moment.
10538  Support might be added in the future.
10539  */
10540  return 0;
10541 }
10542 
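// Takes the free node chosen by CreateAllocationRequest and splits it repeatedly,
// creating pairs of buddies, until a node of the target level is reached; that node
// is then converted into an allocation node.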
10543 void VmaBlockMetadata_Buddy::Alloc(
10544  const VmaAllocationRequest& request,
10545  VmaSuballocationType type,
10546  VkDeviceSize allocSize,
10547  bool upperAddress,
10548  VmaAllocation hAllocation)
10549 {
10550  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10551  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10552 
10553  Node* currNode = m_FreeList[currLevel].front;
10554  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10555  while(currNode->offset != request.offset)
10556  {
10557  currNode = currNode->free.next;
10558  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10559  }
10560 
10561  // Go down, splitting free nodes.
10562  while(currLevel < targetLevel)
10563  {
10564  // currNode is already first free node at currLevel.
10565  // Remove it from list of free nodes at this currLevel.
10566  RemoveFromFreeList(currLevel, currNode);
10567 
10568  const uint32_t childrenLevel = currLevel + 1;
10569 
10570  // Create two free sub-nodes.
10571  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10572  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10573 
10574  leftChild->offset = currNode->offset;
10575  leftChild->type = Node::TYPE_FREE;
10576  leftChild->parent = currNode;
10577  leftChild->buddy = rightChild;
10578 
10579  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10580  rightChild->type = Node::TYPE_FREE;
10581  rightChild->parent = currNode;
10582  rightChild->buddy = leftChild;
10583 
10584  // Convert currNode to split type.
10585  currNode->type = Node::TYPE_SPLIT;
10586  currNode->split.leftChild = leftChild;
10587 
10588  // Add child nodes to free list. Order is important!
10589  AddToFreeListFront(childrenLevel, rightChild);
10590  AddToFreeListFront(childrenLevel, leftChild);
10591 
10592  ++m_FreeCount;
10593  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10594  ++currLevel;
10595  currNode = m_FreeList[currLevel].front;
10596 
10597  /*
10598  We can be sure that currNode, as the left child of the node previously split,
10599  also fulfills the alignment requirement.
10600  */
10601  }
10602 
10603  // Remove from free list.
10604  VMA_ASSERT(currLevel == targetLevel &&
10605  currNode != VMA_NULL &&
10606  currNode->type == Node::TYPE_FREE);
10607  RemoveFromFreeList(currLevel, currNode);
10608 
10609  // Convert to allocation node.
10610  currNode->type = Node::TYPE_ALLOCATION;
10611  currNode->allocation.alloc = hAllocation;
10612 
10613  ++m_AllocationCount;
10614  --m_FreeCount;
10615  m_SumFreeSize -= allocSize;
10616 }
10617 
10618 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10619 {
10620  if(node->type == Node::TYPE_SPLIT)
10621  {
10622  DeleteNode(node->split.leftChild->buddy);
10623  DeleteNode(node->split.leftChild);
10624  }
10625 
10626  vma_delete(GetAllocationCallbacks(), node);
10627 }
10628 
10629 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10630 {
10631  VMA_VALIDATE(level < m_LevelCount);
10632  VMA_VALIDATE(curr->parent == parent);
10633  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10634  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10635  switch(curr->type)
10636  {
10637  case Node::TYPE_FREE:
10638  // curr->free.prev, next are validated separately.
10639  ctx.calculatedSumFreeSize += levelNodeSize;
10640  ++ctx.calculatedFreeCount;
10641  break;
10642  case Node::TYPE_ALLOCATION:
10643  ++ctx.calculatedAllocationCount;
10644  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10645  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10646  break;
10647  case Node::TYPE_SPLIT:
10648  {
10649  const uint32_t childrenLevel = level + 1;
10650  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10651  const Node* const leftChild = curr->split.leftChild;
10652  VMA_VALIDATE(leftChild != VMA_NULL);
10653  VMA_VALIDATE(leftChild->offset == curr->offset);
10654  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10655  {
10656  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10657  }
10658  const Node* const rightChild = leftChild->buddy;
10659  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10660  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10661  {
10662  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10663  }
10664  }
10665  break;
10666  default:
10667  return false;
10668  }
10669 
10670  return true;
10671 }
10672 
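// Maps an allocation size to the deepest level whose node size still fits it.
// Example with m_UsableSize = 256: level node sizes are 256, 128, 64, 32, ...;
// allocSize = 50 yields level 2 (node size 64), because 50 <= 64 but 50 > 32.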
10673 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10674 {
10675  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10676  uint32_t level = 0;
10677  VkDeviceSize currLevelNodeSize = m_UsableSize;
10678  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10679  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10680  {
10681  ++level;
10682  currLevelNodeSize = nextLevelNodeSize;
10683  nextLevelNodeSize = currLevelNodeSize >> 1;
10684  }
10685  return level;
10686 }
10687 
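// Descends from the root following the offset to find the allocation node, marks it
// free, and then merges it with its buddy repeatedly as long as both halves are free.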
10688 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10689 {
10690  // Find node and level.
10691  Node* node = m_Root;
10692  VkDeviceSize nodeOffset = 0;
10693  uint32_t level = 0;
10694  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10695  while(node->type == Node::TYPE_SPLIT)
10696  {
10697  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10698  if(offset < nodeOffset + nextLevelSize)
10699  {
10700  node = node->split.leftChild;
10701  }
10702  else
10703  {
10704  node = node->split.leftChild->buddy;
10705  nodeOffset += nextLevelSize;
10706  }
10707  ++level;
10708  levelNodeSize = nextLevelSize;
10709  }
10710 
10711  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10712  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10713 
10714  ++m_FreeCount;
10715  --m_AllocationCount;
10716  m_SumFreeSize += alloc->GetSize();
10717 
10718  node->type = Node::TYPE_FREE;
10719 
10720  // Join free nodes if possible.
10721  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10722  {
10723  RemoveFromFreeList(level, node->buddy);
10724  Node* const parent = node->parent;
10725 
10726  vma_delete(GetAllocationCallbacks(), node->buddy);
10727  vma_delete(GetAllocationCallbacks(), node);
10728  parent->type = Node::TYPE_FREE;
10729 
10730  node = parent;
10731  --level;
10732  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10733  --m_FreeCount;
10734  }
10735 
10736  AddToFreeListFront(level, node);
10737 }
10738 
10739 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10740 {
10741  switch(node->type)
10742  {
10743  case Node::TYPE_FREE:
10744  ++outInfo.unusedRangeCount;
10745  outInfo.unusedBytes += levelNodeSize;
10746  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10747  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10748  break;
10749  case Node::TYPE_ALLOCATION:
10750  {
10751  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10752  ++outInfo.allocationCount;
10753  outInfo.usedBytes += allocSize;
10754  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10755  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
10756 
10757  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10758  if(unusedRangeSize > 0)
10759  {
10760  ++outInfo.unusedRangeCount;
10761  outInfo.unusedBytes += unusedRangeSize;
10762  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10763  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10764  }
10765  }
10766  break;
10767  case Node::TYPE_SPLIT:
10768  {
10769  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10770  const Node* const leftChild = node->split.leftChild;
10771  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10772  const Node* const rightChild = leftChild->buddy;
10773  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10774  }
10775  break;
10776  default:
10777  VMA_ASSERT(0);
10778  }
10779 }
10780 
10781 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10782 {
10783  VMA_ASSERT(node->type == Node::TYPE_FREE);
10784 
10785  // List is empty.
10786  Node* const frontNode = m_FreeList[level].front;
10787  if(frontNode == VMA_NULL)
10788  {
10789  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10790  node->free.prev = node->free.next = VMA_NULL;
10791  m_FreeList[level].front = m_FreeList[level].back = node;
10792  }
10793  else
10794  {
10795  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10796  node->free.prev = VMA_NULL;
10797  node->free.next = frontNode;
10798  frontNode->free.prev = node;
10799  m_FreeList[level].front = node;
10800  }
10801 }
10802 
10803 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10804 {
10805  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10806 
10807  // It is at the front.
10808  if(node->free.prev == VMA_NULL)
10809  {
10810  VMA_ASSERT(m_FreeList[level].front == node);
10811  m_FreeList[level].front = node->free.next;
10812  }
10813  else
10814  {
10815  Node* const prevFreeNode = node->free.prev;
10816  VMA_ASSERT(prevFreeNode->free.next == node);
10817  prevFreeNode->free.next = node->free.next;
10818  }
10819 
10820  // It is at the back.
10821  if(node->free.next == VMA_NULL)
10822  {
10823  VMA_ASSERT(m_FreeList[level].back == node);
10824  m_FreeList[level].back = node->free.prev;
10825  }
10826  else
10827  {
10828  Node* const nextFreeNode = node->free.next;
10829  VMA_ASSERT(nextFreeNode->free.prev == node);
10830  nextFreeNode->free.prev = node->free.prev;
10831  }
10832 }
10833 
10834 #if VMA_STATS_STRING_ENABLED
10835 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10836 {
10837  switch(node->type)
10838  {
10839  case Node::TYPE_FREE:
10840  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10841  break;
10842  case Node::TYPE_ALLOCATION:
10843  {
10844  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10845  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10846  if(allocSize < levelNodeSize)
10847  {
10848  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10849  }
10850  }
10851  break;
10852  case Node::TYPE_SPLIT:
10853  {
10854  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10855  const Node* const leftChild = node->split.leftChild;
10856  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10857  const Node* const rightChild = leftChild->buddy;
10858  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10859  }
10860  break;
10861  default:
10862  VMA_ASSERT(0);
10863  }
10864 }
10865 #endif // #if VMA_STATS_STRING_ENABLED
10866 
10867 
10869 // class VmaDeviceMemoryBlock
10870 
10871 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10872  m_pMetadata(VMA_NULL),
10873  m_MemoryTypeIndex(UINT32_MAX),
10874  m_Id(0),
10875  m_hMemory(VK_NULL_HANDLE),
10876  m_MapCount(0),
10877  m_pMappedData(VMA_NULL)
10878 {
10879 }
10880 
10881 void VmaDeviceMemoryBlock::Init(
10882  VmaAllocator hAllocator,
10883  uint32_t newMemoryTypeIndex,
10884  VkDeviceMemory newMemory,
10885  VkDeviceSize newSize,
10886  uint32_t id,
10887  uint32_t algorithm)
10888 {
10889  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10890 
10891  m_MemoryTypeIndex = newMemoryTypeIndex;
10892  m_Id = id;
10893  m_hMemory = newMemory;
10894 
10895  switch(algorithm)
10896  {
10897  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10898  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10899  break;
10900  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10901  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10902  break;
10903  default:
10904  VMA_ASSERT(0);
10905  // Fall-through.
10906  case 0:
10907  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10908  }
10909  m_pMetadata->Init(newSize);
10910 }
10911 
10912 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10913 {
10914  // This is the most important assert in the entire library.
10915  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10916  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10917 
10918  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10919  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10920  m_hMemory = VK_NULL_HANDLE;
10921 
10922  vma_delete(allocator, m_pMetadata);
10923  m_pMetadata = VMA_NULL;
10924 }
10925 
10926 bool VmaDeviceMemoryBlock::Validate() const
10927 {
10928  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10929  (m_pMetadata->GetSize() != 0));
10930 
10931  return m_pMetadata->Validate();
10932 }
10933 
10934 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10935 {
10936  void* pData = nullptr;
10937  VkResult res = Map(hAllocator, 1, &pData);
10938  if(res != VK_SUCCESS)
10939  {
10940  return res;
10941  }
10942 
10943  res = m_pMetadata->CheckCorruption(pData);
10944 
10945  Unmap(hAllocator, 1);
10946 
10947  return res;
10948 }
10949 
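// Reference-counted mapping: vkMapMemory is called only on the transition from 0 to a
// positive map count; subsequent calls just increase the counter and return the same
// pointer. Unmap() decreases the counter and calls vkUnmapMemory when it reaches 0.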
10950 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10951 {
10952  if(count == 0)
10953  {
10954  return VK_SUCCESS;
10955  }
10956 
10957  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10958  if(m_MapCount != 0)
10959  {
10960  m_MapCount += count;
10961  VMA_ASSERT(m_pMappedData != VMA_NULL);
10962  if(ppData != VMA_NULL)
10963  {
10964  *ppData = m_pMappedData;
10965  }
10966  return VK_SUCCESS;
10967  }
10968  else
10969  {
10970  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10971  hAllocator->m_hDevice,
10972  m_hMemory,
10973  0, // offset
10974  VK_WHOLE_SIZE,
10975  0, // flags
10976  &m_pMappedData);
10977  if(result == VK_SUCCESS)
10978  {
10979  if(ppData != VMA_NULL)
10980  {
10981  *ppData = m_pMappedData;
10982  }
10983  m_MapCount = count;
10984  }
10985  return result;
10986  }
10987 }
10988 
10989 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10990 {
10991  if(count == 0)
10992  {
10993  return;
10994  }
10995 
10996  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10997  if(m_MapCount >= count)
10998  {
10999  m_MapCount -= count;
11000  if(m_MapCount == 0)
11001  {
11002  m_pMappedData = VMA_NULL;
11003  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11004  }
11005  }
11006  else
11007  {
11008  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11009  }
11010 }
11011 
11012 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11013 {
11014  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11015  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11016 
11017  void* pData;
11018  VkResult res = Map(hAllocator, 1, &pData);
11019  if(res != VK_SUCCESS)
11020  {
11021  return res;
11022  }
11023 
11024  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11025  VmaWriteMagicValue(pData, allocOffset + allocSize);
11026 
11027  Unmap(hAllocator, 1);
11028 
11029  return VK_SUCCESS;
11030 }
11031 
11032 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11033 {
11034  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11035  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11036 
11037  void* pData;
11038  VkResult res = Map(hAllocator, 1, &pData);
11039  if(res != VK_SUCCESS)
11040  {
11041  return res;
11042  }
11043 
11044  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11045  {
11046  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11047  }
11048  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11049  {
11050  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11051  }
11052 
11053  Unmap(hAllocator, 1);
11054 
11055  return VK_SUCCESS;
11056 }
11057 
11058 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11059  const VmaAllocator hAllocator,
11060  const VmaAllocation hAllocation,
11061  VkBuffer hBuffer)
11062 {
11063  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11064  hAllocation->GetBlock() == this);
11065  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11066  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11067  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11068  hAllocator->m_hDevice,
11069  hBuffer,
11070  m_hMemory,
11071  hAllocation->GetOffset());
11072 }
11073 
11074 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11075  const VmaAllocator hAllocator,
11076  const VmaAllocation hAllocation,
11077  VkImage hImage)
11078 {
11079  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11080  hAllocation->GetBlock() == this);
11081  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11082  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11083  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11084  hAllocator->m_hDevice,
11085  hImage,
11086  m_hMemory,
11087  hAllocation->GetOffset());
11088 }
11089 
11090 static void InitStatInfo(VmaStatInfo& outInfo)
11091 {
11092  memset(&outInfo, 0, sizeof(outInfo));
11093  outInfo.allocationSizeMin = UINT64_MAX;
11094  outInfo.unusedRangeSizeMin = UINT64_MAX;
11095 }
11096 
11097 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11098 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11099 {
11100  inoutInfo.blockCount += srcInfo.blockCount;
11101  inoutInfo.allocationCount += srcInfo.allocationCount;
11102  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11103  inoutInfo.usedBytes += srcInfo.usedBytes;
11104  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11105  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11106  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11107  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11108  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11109 }
11110 
11111 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11112 {
11113  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11114  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11115  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11116  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11117 }
11118 
11119 VmaPool_T::VmaPool_T(
11120  VmaAllocator hAllocator,
11121  const VmaPoolCreateInfo& createInfo,
11122  VkDeviceSize preferredBlockSize) :
11123  m_BlockVector(
11124  hAllocator,
11125  createInfo.memoryTypeIndex,
11126  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11127  createInfo.minBlockCount,
11128  createInfo.maxBlockCount,
11129  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11130  createInfo.frameInUseCount,
11131  true, // isCustomPool
11132  createInfo.blockSize != 0, // explicitBlockSize
11133  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11134  m_Id(0)
11135 {
11136 }
11137 
11138 VmaPool_T::~VmaPool_T()
11139 {
11140 }
11141 
11142 #if VMA_STATS_STRING_ENABLED
11143 
11144 #endif // #if VMA_STATS_STRING_ENABLED
11145 
11146 VmaBlockVector::VmaBlockVector(
11147  VmaAllocator hAllocator,
11148  uint32_t memoryTypeIndex,
11149  VkDeviceSize preferredBlockSize,
11150  size_t minBlockCount,
11151  size_t maxBlockCount,
11152  VkDeviceSize bufferImageGranularity,
11153  uint32_t frameInUseCount,
11154  bool isCustomPool,
11155  bool explicitBlockSize,
11156  uint32_t algorithm) :
11157  m_hAllocator(hAllocator),
11158  m_MemoryTypeIndex(memoryTypeIndex),
11159  m_PreferredBlockSize(preferredBlockSize),
11160  m_MinBlockCount(minBlockCount),
11161  m_MaxBlockCount(maxBlockCount),
11162  m_BufferImageGranularity(bufferImageGranularity),
11163  m_FrameInUseCount(frameInUseCount),
11164  m_IsCustomPool(isCustomPool),
11165  m_ExplicitBlockSize(explicitBlockSize),
11166  m_Algorithm(algorithm),
11167  m_HasEmptyBlock(false),
11168  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11169  m_NextBlockId(0)
11170 {
11171 }
11172 
11173 VmaBlockVector::~VmaBlockVector()
11174 {
11175  for(size_t i = m_Blocks.size(); i--; )
11176  {
11177  m_Blocks[i]->Destroy(m_hAllocator);
11178  vma_delete(m_hAllocator, m_Blocks[i]);
11179  }
11180 }
11181 
11182 VkResult VmaBlockVector::CreateMinBlocks()
11183 {
11184  for(size_t i = 0; i < m_MinBlockCount; ++i)
11185  {
11186  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11187  if(res != VK_SUCCESS)
11188  {
11189  return res;
11190  }
11191  }
11192  return VK_SUCCESS;
11193 }
11194 
11195 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11196 {
11197  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11198 
11199  const size_t blockCount = m_Blocks.size();
11200 
11201  pStats->size = 0;
11202  pStats->unusedSize = 0;
11203  pStats->allocationCount = 0;
11204  pStats->unusedRangeCount = 0;
11205  pStats->unusedRangeSizeMax = 0;
11206  pStats->blockCount = blockCount;
11207 
11208  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11209  {
11210  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11211  VMA_ASSERT(pBlock);
11212  VMA_HEAVY_ASSERT(pBlock->Validate());
11213  pBlock->m_pMetadata->AddPoolStats(*pStats);
11214  }
11215 }
11216 
11217 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11218 {
11219  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11220  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11221  (VMA_DEBUG_MARGIN > 0) &&
11222  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11223 }
11224 
11225 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11226 
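// Allocation proceeds in up to three phases: 1. try existing blocks without making
// other allocations lost, 2. create a new block if allowed, 3. retry existing blocks
// while making other allocations lost, up to VMA_ALLOCATION_TRY_COUNT times.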
11227 VkResult VmaBlockVector::Allocate(
11228  VmaPool hCurrentPool,
11229  uint32_t currentFrameIndex,
11230  VkDeviceSize size,
11231  VkDeviceSize alignment,
11232  const VmaAllocationCreateInfo& createInfo,
11233  VmaSuballocationType suballocType,
11234  VmaAllocation* pAllocation)
11235 {
11236  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11237  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11238  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11239  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11240  const bool canCreateNewBlock =
11241  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11242  (m_Blocks.size() < m_MaxBlockCount);
11243  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11244 
11245  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11246  // Which in turn is available only when maxBlockCount = 1.
11247  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11248  {
11249  canMakeOtherLost = false;
11250  }
11251 
11252  // Upper address can only be used with linear allocator and within single memory block.
11253  if(isUpperAddress &&
11254  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11255  {
11256  return VK_ERROR_FEATURE_NOT_PRESENT;
11257  }
11258 
11259  // Validate strategy.
11260  switch(strategy)
11261  {
11262  case 0:
11263  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11264  break;
11265  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11266  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11267  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11268  break;
11269  default:
11270  return VK_ERROR_FEATURE_NOT_PRESENT;
11271  }
11272 
11273  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11274  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11275  {
11276  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11277  }
11278 
11279  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11280 
11281  /*
11282  Under certain conditions, this whole section can be skipped for optimization, so
11283  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11284  e.g. for custom pools with linear algorithm.
11285  */
11286  if(!canMakeOtherLost || canCreateNewBlock)
11287  {
11288  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11289  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11290  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11291 
11292  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11293  {
11294  // Use only last block.
11295  if(!m_Blocks.empty())
11296  {
11297  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11298  VMA_ASSERT(pCurrBlock);
11299  VkResult res = AllocateFromBlock(
11300  pCurrBlock,
11301  hCurrentPool,
11302  currentFrameIndex,
11303  size,
11304  alignment,
11305  allocFlagsCopy,
11306  createInfo.pUserData,
11307  suballocType,
11308  strategy,
11309  pAllocation);
11310  if(res == VK_SUCCESS)
11311  {
11312  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11313  return VK_SUCCESS;
11314  }
11315  }
11316  }
11317  else
11318  {
11319  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11320  {
11321  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11322  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11323  {
11324  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11325  VMA_ASSERT(pCurrBlock);
11326  VkResult res = AllocateFromBlock(
11327  pCurrBlock,
11328  hCurrentPool,
11329  currentFrameIndex,
11330  size,
11331  alignment,
11332  allocFlagsCopy,
11333  createInfo.pUserData,
11334  suballocType,
11335  strategy,
11336  pAllocation);
11337  if(res == VK_SUCCESS)
11338  {
11339  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11340  return VK_SUCCESS;
11341  }
11342  }
11343  }
11344  else // WORST_FIT, FIRST_FIT
11345  {
11346  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11347  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11348  {
11349  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11350  VMA_ASSERT(pCurrBlock);
11351  VkResult res = AllocateFromBlock(
11352  pCurrBlock,
11353  hCurrentPool,
11354  currentFrameIndex,
11355  size,
11356  alignment,
11357  allocFlagsCopy,
11358  createInfo.pUserData,
11359  suballocType,
11360  strategy,
11361  pAllocation);
11362  if(res == VK_SUCCESS)
11363  {
11364  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11365  return VK_SUCCESS;
11366  }
11367  }
11368  }
11369  }
11370 
11371  // 2. Try to create new block.
11372  if(canCreateNewBlock)
11373  {
11374  // Calculate optimal size for new block.
11375  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11376  uint32_t newBlockSizeShift = 0;
11377  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11378 
11379  if(!m_ExplicitBlockSize)
11380  {
11381  // Allocate 1/8, 1/4, 1/2 as first blocks.
11382  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11383  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11384  {
11385  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11386  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11387  {
11388  newBlockSize = smallerNewBlockSize;
11389  ++newBlockSizeShift;
11390  }
11391  else
11392  {
11393  break;
11394  }
11395  }
11396  }
11397 
11398  size_t newBlockIndex = 0;
11399  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11400  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11401  if(!m_ExplicitBlockSize)
11402  {
11403  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11404  {
11405  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11406  if(smallerNewBlockSize >= size)
11407  {
11408  newBlockSize = smallerNewBlockSize;
11409  ++newBlockSizeShift;
11410  res = CreateBlock(newBlockSize, &newBlockIndex);
11411  }
11412  else
11413  {
11414  break;
11415  }
11416  }
11417  }
11418 
11419  if(res == VK_SUCCESS)
11420  {
11421  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11422  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11423 
11424  res = AllocateFromBlock(
11425  pBlock,
11426  hCurrentPool,
11427  currentFrameIndex,
11428  size,
11429  alignment,
11430  allocFlagsCopy,
11431  createInfo.pUserData,
11432  suballocType,
11433  strategy,
11434  pAllocation);
11435  if(res == VK_SUCCESS)
11436  {
11437  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11438  return VK_SUCCESS;
11439  }
11440  else
11441  {
11442  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11443  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11444  }
11445  }
11446  }
11447  }
11448 
11449  // 3. Try to allocate from existing blocks with making other allocations lost.
11450  if(canMakeOtherLost)
11451  {
11452  uint32_t tryIndex = 0;
11453  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11454  {
11455  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11456  VmaAllocationRequest bestRequest = {};
11457  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11458 
11459  // 1. Search existing allocations.
11460  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11461  {
11462  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11463  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11464  {
11465  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11466  VMA_ASSERT(pCurrBlock);
11467  VmaAllocationRequest currRequest = {};
11468  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11469  currentFrameIndex,
11470  m_FrameInUseCount,
11471  m_BufferImageGranularity,
11472  size,
11473  alignment,
11474  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11475  suballocType,
11476  canMakeOtherLost,
11477  strategy,
11478  &currRequest))
11479  {
11480  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11481  if(pBestRequestBlock == VMA_NULL ||
11482  currRequestCost < bestRequestCost)
11483  {
11484  pBestRequestBlock = pCurrBlock;
11485  bestRequest = currRequest;
11486  bestRequestCost = currRequestCost;
11487 
11488  if(bestRequestCost == 0)
11489  {
11490  break;
11491  }
11492  }
11493  }
11494  }
11495  }
11496  else // WORST_FIT, FIRST_FIT
11497  {
11498  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11499  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11500  {
11501  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11502  VMA_ASSERT(pCurrBlock);
11503  VmaAllocationRequest currRequest = {};
11504  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11505  currentFrameIndex,
11506  m_FrameInUseCount,
11507  m_BufferImageGranularity,
11508  size,
11509  alignment,
11510  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11511  suballocType,
11512  canMakeOtherLost,
11513  strategy,
11514  &currRequest))
11515  {
11516  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11517  if(pBestRequestBlock == VMA_NULL ||
11518  currRequestCost < bestRequestCost ||
11519  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11520  {
11521  pBestRequestBlock = pCurrBlock;
11522  bestRequest = currRequest;
11523  bestRequestCost = currRequestCost;
11524 
11525  if(bestRequestCost == 0 ||
11526  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11527  {
11528  break;
11529  }
11530  }
11531  }
11532  }
11533  }
11534 
11535  if(pBestRequestBlock != VMA_NULL)
11536  {
11537  if(mapped)
11538  {
11539  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11540  if(res != VK_SUCCESS)
11541  {
11542  return res;
11543  }
11544  }
11545 
11546  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11547  currentFrameIndex,
11548  m_FrameInUseCount,
11549  &bestRequest))
11550  {
11551  // We no longer have an empty Allocation.
11552  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11553  {
11554  m_HasEmptyBlock = false;
11555  }
11556  // Allocate from this pBlock.
11557  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11558  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11559  (*pAllocation)->InitBlockAllocation(
11560  hCurrentPool,
11561  pBestRequestBlock,
11562  bestRequest.offset,
11563  alignment,
11564  size,
11565  suballocType,
11566  mapped,
11567  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11568  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11569  VMA_DEBUG_LOG(" Returned from existing block");
11570  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11571  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11572  {
11573  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11574  }
11575  if(IsCorruptionDetectionEnabled())
11576  {
11577  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11578  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11579  }
11580  return VK_SUCCESS;
11581  }
11582  // else: Some allocations must have been touched while we are here. Next try.
11583  }
11584  else
11585  {
11586  // Could not find place in any of the blocks - break outer loop.
11587  break;
11588  }
11589  }
11590  /* Maximum number of tries exceeded - a very unlikely event when many other
11591  threads are simultaneously touching allocations, making it impossible to make
11592  them lost at the same time as we try to allocate. */
11593  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11594  {
11595  return VK_ERROR_TOO_MANY_OBJECTS;
11596  }
11597  }
11598 
11599  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11600 }
11601 
11602 void VmaBlockVector::Free(
11603  VmaAllocation hAllocation)
11604 {
11605  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11606 
11607  // Scope for lock.
11608  {
11609  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11610 
11611  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11612 
11613  if(IsCorruptionDetectionEnabled())
11614  {
11615  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11616  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11617  }
11618 
11619  if(hAllocation->IsPersistentMap())
11620  {
11621  pBlock->Unmap(m_hAllocator, 1);
11622  }
11623 
11624  pBlock->m_pMetadata->Free(hAllocation);
11625  VMA_HEAVY_ASSERT(pBlock->Validate());
11626 
11627  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11628 
11629  // pBlock became empty after this deallocation.
11630  if(pBlock->m_pMetadata->IsEmpty())
11631  {
11632  // Already has empty Allocation. We don't want to have two, so delete this one.
11633  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11634  {
11635  pBlockToDelete = pBlock;
11636  Remove(pBlock);
11637  }
11638  // We now have first empty block.
11639  else
11640  {
11641  m_HasEmptyBlock = true;
11642  }
11643  }
11644  // pBlock didn't become empty, but we have another empty block - find and free that one.
11645  // (This is optional, heuristics.)
11646  else if(m_HasEmptyBlock)
11647  {
11648  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11649  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11650  {
11651  pBlockToDelete = pLastBlock;
11652  m_Blocks.pop_back();
11653  m_HasEmptyBlock = false;
11654  }
11655  }
11656 
11657  IncrementallySortBlocks();
11658  }
11659 
11660  // Destruction of a free Allocation. Deferred until this point, outside of mutex
11661  // lock, for performance reasons.
11662  if(pBlockToDelete != VMA_NULL)
11663  {
11664  VMA_DEBUG_LOG(" Deleted empty allocation");
11665  pBlockToDelete->Destroy(m_hAllocator);
11666  vma_delete(m_hAllocator, pBlockToDelete);
11667  }
11668 }
11669 
11670 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11671 {
11672  VkDeviceSize result = 0;
11673  for(size_t i = m_Blocks.size(); i--; )
11674  {
11675  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11676  if(result >= m_PreferredBlockSize)
11677  {
11678  break;
11679  }
11680  }
11681  return result;
11682 }
11683 
11684 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11685 {
11686  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11687  {
11688  if(m_Blocks[blockIndex] == pBlock)
11689  {
11690  VmaVectorRemove(m_Blocks, blockIndex);
11691  return;
11692  }
11693  }
11694  VMA_ASSERT(0);
11695 }
11696 
11697 void VmaBlockVector::IncrementallySortBlocks()
11698 {
11699  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11700  {
11701  // Bubble sort only until first swap.
11702  for(size_t i = 1; i < m_Blocks.size(); ++i)
11703  {
11704  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11705  {
11706  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11707  return;
11708  }
11709  }
11710  }
11711 }
11712 
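// Tries to allocate from a single block without making other allocations lost.
// On success the VmaAllocation is initialized, optionally kept mapped, filled with
// a debug pattern, and surrounded with corruption-detection magic values.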
11713 VkResult VmaBlockVector::AllocateFromBlock(
11714  VmaDeviceMemoryBlock* pBlock,
11715  VmaPool hCurrentPool,
11716  uint32_t currentFrameIndex,
11717  VkDeviceSize size,
11718  VkDeviceSize alignment,
11719  VmaAllocationCreateFlags allocFlags,
11720  void* pUserData,
11721  VmaSuballocationType suballocType,
11722  uint32_t strategy,
11723  VmaAllocation* pAllocation)
11724 {
11725  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
11726  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11727  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11728  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11729 
11730  VmaAllocationRequest currRequest = {};
11731  if(pBlock->m_pMetadata->CreateAllocationRequest(
11732  currentFrameIndex,
11733  m_FrameInUseCount,
11734  m_BufferImageGranularity,
11735  size,
11736  alignment,
11737  isUpperAddress,
11738  suballocType,
11739  false, // canMakeOtherLost
11740  strategy,
11741  &currRequest))
11742  {
11743  // Allocate from pCurrBlock.
11744  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
11745 
11746  if(mapped)
11747  {
11748  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
11749  if(res != VK_SUCCESS)
11750  {
11751  return res;
11752  }
11753  }
11754 
11755  // We no longer have an empty block.
11756  if(pBlock->m_pMetadata->IsEmpty())
11757  {
11758  m_HasEmptyBlock = false;
11759  }
11760 
11761  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11762  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
11763  (*pAllocation)->InitBlockAllocation(
11764  hCurrentPool,
11765  pBlock,
11766  currRequest.offset,
11767  alignment,
11768  size,
11769  suballocType,
11770  mapped,
11771  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11772  VMA_HEAVY_ASSERT(pBlock->Validate());
11773  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
11774  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11775  {
11776  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11777  }
11778  if(IsCorruptionDetectionEnabled())
11779  {
11780  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
11781  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11782  }
11783  return VK_SUCCESS;
11784  }
11785  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11786 }
11787 
11788 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
11789 {
11790  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11791  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
11792  allocInfo.allocationSize = blockSize;
11793  VkDeviceMemory mem = VK_NULL_HANDLE;
11794  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
11795  if(res < 0)
11796  {
11797  return res;
11798  }
11799 
11800  // New VkDeviceMemory successfully created.
11801 
11802  // Create a new block object for it.
11803  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11804  pBlock->Init(
11805  m_hAllocator,
11806  m_MemoryTypeIndex,
11807  mem,
11808  allocInfo.allocationSize,
11809  m_NextBlockId++,
11810  m_Algorithm);
11811 
11812  m_Blocks.push_back(pBlock);
11813  if(pNewBlockIndex != VMA_NULL)
11814  {
11815  *pNewBlockIndex = m_Blocks.size() - 1;
11816  }
11817 
11818  return VK_SUCCESS;
11819 }
11820 
11821 void VmaBlockVector::ApplyDefragmentationMovesCpu(
11822  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11823  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
11824 {
11825  const size_t blockCount = m_Blocks.size();
11826  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
11827 
11828  enum BLOCK_FLAG
11829  {
11830  BLOCK_FLAG_USED = 0x00000001,
11831  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
11832  };
11833 
11834  struct BlockInfo
11835  {
11836  uint32_t flags;
11837  void* pMappedData;
11838  };
11839  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
11840  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
11841  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
11842 
11843  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11844  const size_t moveCount = moves.size();
11845  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11846  {
11847  const VmaDefragmentationMove& move = moves[moveIndex];
11848  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
11849  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
11850  }
11851 
11852  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
11853 
11854  // Go over all blocks. Get mapped pointer or map if necessary.
11855  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
11856  {
11857  BlockInfo& currBlockInfo = blockInfo[blockIndex];
11858  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11859  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
11860  {
11861  currBlockInfo.pMappedData = pBlock->GetMappedData();
11862  // It is not originally mapped - map it.
11863  if(currBlockInfo.pMappedData == VMA_NULL)
11864  {
11865  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
11866  if(pDefragCtx->res == VK_SUCCESS)
11867  {
11868  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
11869  }
11870  }
11871  }
11872  }
11873 
11874  // Go over all moves. Do actual data transfer.
11875  if(pDefragCtx->res == VK_SUCCESS)
11876  {
11877  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
11878  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
11879 
11880  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11881  {
11882  const VmaDefragmentationMove& move = moves[moveIndex];
11883 
11884  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
11885  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
11886 
11887  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
11888 
11889  // Invalidate source.
11890  if(isNonCoherent)
11891  {
11892  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
11893  memRange.memory = pSrcBlock->GetDeviceMemory();
11894  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
11895  memRange.size = VMA_MIN(
11896  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
11897  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
11898  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
11899  }
11900 
11901  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11902  memmove(
11903  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
11904  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
11905  static_cast<size_t>(move.size));
11906 
11907  if(IsCorruptionDetectionEnabled())
11908  {
11909  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
11910  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
11911  }
11912 
11913  // Flush destination.
11914  if(isNonCoherent)
11915  {
11916  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
11917  memRange.memory = pDstBlock->GetDeviceMemory();
11918  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
11919  memRange.size = VMA_MIN(
11920  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
11921  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
11922  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
11923  }
11924  }
11925  }
11926 
11927  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
11928  // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
11929  for(size_t blockIndex = blockCount; blockIndex--; )
11930  {
11931  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
11932  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
11933  {
11934  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11935  pBlock->Unmap(m_hAllocator, 1);
11936  }
11937  }
11938 }
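// Worked example of the nonCoherentAtomSize alignment used above when flushing or
// invalidating a sub-range of non-coherent memory (an illustrative sketch; the helper
// name and the concrete numbers are made up). With nonCoherentAtomSize = 256, a move
// of size 100 at offset 300 in a 1024-byte block yields rangeOffset = 256 and
// rangeSize = min(align_up(100 + (300 - 256), 256), 1024 - 256) = 256.
static void CalcNonCoherentRangeExample(
    VkDeviceSize moveOffset, VkDeviceSize moveSize,
    VkDeviceSize blockSize, VkDeviceSize nonCoherentAtomSize,
    VkDeviceSize& outRangeOffset, VkDeviceSize& outRangeSize)
{
    // Round the start of the range down and its size up to whole atoms,
    // then clamp so the range does not extend past the end of the block.
    outRangeOffset = VmaAlignDown(moveOffset, nonCoherentAtomSize);
    outRangeSize = VMA_MIN(
        VmaAlignUp(moveSize + (moveOffset - outRangeOffset), nonCoherentAtomSize),
        blockSize - outRangeOffset);
}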
11939 
11940 void VmaBlockVector::ApplyDefragmentationMovesGpu(
11941  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11942  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
11943  VkCommandBuffer commandBuffer)
11944 {
11945  const size_t blockCount = m_Blocks.size();
11946 
11947  pDefragCtx->blockContexts.resize(blockCount);
11948  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
11949 
11950  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11951  const size_t moveCount = moves.size();
11952  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11953  {
11954  const VmaDefragmentationMove& move = moves[moveIndex];
11955  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
11956  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
11957  }
11958 
11959  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
11960 
11961  // Go over all blocks. Create and bind buffer for whole block if necessary.
11962  {
11963  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
11964  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
11965  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
11966 
11967  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
11968  {
11969  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
11970  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11971  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
11972  {
11973  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
11974  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
11975  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
11976  if(pDefragCtx->res == VK_SUCCESS)
11977  {
11978  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
11979  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
11980  }
11981  }
11982  }
11983  }
11984 
11985  // Go over all moves. Post data transfer commands to command buffer.
11986  if(pDefragCtx->res == VK_SUCCESS)
11987  {
11988  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
11989  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
11990 
11991  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11992  {
11993  const VmaDefragmentationMove& move = moves[moveIndex];
11994 
11995  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
11996  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
11997 
11998  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
11999 
12000  VkBufferCopy region = {
12001  move.srcOffset,
12002  move.dstOffset,
12003  move.size };
12004  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12005  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12006  }
12007  }
12008 
12009  // Buffers stay in pDefragCtx for later destruction; VK_NOT_READY signals that the recorded copy commands still need to be executed.
12010  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12011  {
12012  pDefragCtx->res = VK_NOT_READY;
12013  }
12014 }
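// Usage note (a sketch under assumptions, not library code): when the function above
// leaves pDefragCtx->res == VK_NOT_READY, the copy commands have only been recorded
// into 'commandBuffer'. The caller is expected to end and submit that command buffer
// and wait for it to complete before ending the defragmentation context, e.g.:
//
//     vkEndCommandBuffer(commandBuffer);
//     VkSubmitInfo submitInfo = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
//     submitInfo.commandBufferCount = 1;
//     submitInfo.pCommandBuffers = &commandBuffer;
//     vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE); // 'queue' supplied by the app
//     vkQueueWaitIdle(queue); // or wait on a fence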
12015 
12016 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12017 {
12018  m_HasEmptyBlock = false;
12019  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12020  {
12021  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12022  if(pBlock->m_pMetadata->IsEmpty())
12023  {
12024  if(m_Blocks.size() > m_MinBlockCount)
12025  {
12026  if(pDefragmentationStats != VMA_NULL)
12027  {
12028  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12029  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12030  }
12031 
12032  VmaVectorRemove(m_Blocks, blockIndex);
12033  pBlock->Destroy(m_hAllocator);
12034  vma_delete(m_hAllocator, pBlock);
12035  }
12036  else
12037  {
12038  m_HasEmptyBlock = true;
12039  }
12040  }
12041  }
12042 }
12043 
12044 #if VMA_STATS_STRING_ENABLED
12045 
12046 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12047 {
12048  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12049 
12050  json.BeginObject();
12051 
12052  if(m_IsCustomPool)
12053  {
12054  json.WriteString("MemoryTypeIndex");
12055  json.WriteNumber(m_MemoryTypeIndex);
12056 
12057  json.WriteString("BlockSize");
12058  json.WriteNumber(m_PreferredBlockSize);
12059 
12060  json.WriteString("BlockCount");
12061  json.BeginObject(true);
12062  if(m_MinBlockCount > 0)
12063  {
12064  json.WriteString("Min");
12065  json.WriteNumber((uint64_t)m_MinBlockCount);
12066  }
12067  if(m_MaxBlockCount < SIZE_MAX)
12068  {
12069  json.WriteString("Max");
12070  json.WriteNumber((uint64_t)m_MaxBlockCount);
12071  }
12072  json.WriteString("Cur");
12073  json.WriteNumber((uint64_t)m_Blocks.size());
12074  json.EndObject();
12075 
12076  if(m_FrameInUseCount > 0)
12077  {
12078  json.WriteString("FrameInUseCount");
12079  json.WriteNumber(m_FrameInUseCount);
12080  }
12081 
12082  if(m_Algorithm != 0)
12083  {
12084  json.WriteString("Algorithm");
12085  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12086  }
12087  }
12088  else
12089  {
12090  json.WriteString("PreferredBlockSize");
12091  json.WriteNumber(m_PreferredBlockSize);
12092  }
12093 
12094  json.WriteString("Blocks");
12095  json.BeginObject();
12096  for(size_t i = 0; i < m_Blocks.size(); ++i)
12097  {
12098  json.BeginString();
12099  json.ContinueString(m_Blocks[i]->GetId());
12100  json.EndString();
12101 
12102  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12103  }
12104  json.EndObject();
12105 
12106  json.EndObject();
12107 }
12108 
12109 #endif // #if VMA_STATS_STRING_ENABLED
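// For illustration, the JSON fragment emitted above for a custom pool has roughly this
// shape (all numbers are made-up sample values; "Max", "FrameInUseCount" and "Algorithm"
// appear only conditionally, and each entry under "Blocks" is whatever
// VmaBlockMetadata::PrintDetailedMap() writes for that block):
//
// {
//   "MemoryTypeIndex": 2,
//   "BlockSize": 268435456,
//   "BlockCount": { "Min": 1, "Cur": 3 },
//   "Blocks": {
//     "0": { ... },
//     "1": { ... },
//     "2": { ... }
//   }
// }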
12110 
12111 void VmaBlockVector::Defragment(
12112  class VmaBlockVectorDefragmentationContext* pCtx,
12113  VmaDefragmentationStats* pStats,
12114  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12115  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12116  VkCommandBuffer commandBuffer)
12117 {
12118  pCtx->res = VK_SUCCESS;
12119 
12120  const VkMemoryPropertyFlags memPropFlags =
12121  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12122  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12123  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12124 
12125  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12126  isHostVisible;
12127  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12128  (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));
12129 
12130  // There is at least one way to defragment this memory type.
12131  if(canDefragmentOnCpu || canDefragmentOnGpu)
12132  {
12133  bool defragmentOnGpu;
12134  // There is only one option to defragment this memory type.
12135  if(canDefragmentOnGpu != canDefragmentOnCpu)
12136  {
12137  defragmentOnGpu = canDefragmentOnGpu;
12138  }
12139  // Both options are available: use a heuristic to choose the better one.
12140  else
12141  {
12142  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12143  m_hAllocator->IsIntegratedGpu();
12144  }
12145 
12146  bool overlappingMoveSupported = !defragmentOnGpu;
12147 
12148  if(m_hAllocator->m_UseMutex)
12149  {
12150  m_Mutex.LockWrite();
12151  pCtx->mutexLocked = true;
12152  }
12153 
12154  pCtx->Begin(overlappingMoveSupported);
12155 
12156  // Defragment.
12157 
12158  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12159  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12160  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12161  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12162  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12163 
12164  // Accumulate statistics.
12165  if(pStats != VMA_NULL)
12166  {
12167  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12168  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12169  pStats->bytesMoved += bytesMoved;
12170  pStats->allocationsMoved += allocationsMoved;
12171  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12172  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12173  if(defragmentOnGpu)
12174  {
12175  maxGpuBytesToMove -= bytesMoved;
12176  maxGpuAllocationsToMove -= allocationsMoved;
12177  }
12178  else
12179  {
12180  maxCpuBytesToMove -= bytesMoved;
12181  maxCpuAllocationsToMove -= allocationsMoved;
12182  }
12183  }
12184 
12185  if(pCtx->res >= VK_SUCCESS)
12186  {
12187  if(defragmentOnGpu)
12188  {
12189  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12190  }
12191  else
12192  {
12193  ApplyDefragmentationMovesCpu(pCtx, moves);
12194  }
12195  }
12196  }
12197 }
12198 
12199 void VmaBlockVector::DefragmentationEnd(
12200  class VmaBlockVectorDefragmentationContext* pCtx,
12201  VmaDefragmentationStats* pStats)
12202 {
12203  // Destroy buffers.
12204  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12205  {
12206  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12207  if(blockCtx.hBuffer)
12208  {
12209  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12210  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12211  }
12212  }
12213 
12214  if(pCtx->res >= VK_SUCCESS)
12215  {
12216  FreeEmptyBlocks(pStats);
12217  }
12218 
12219  if(pCtx->mutexLocked)
12220  {
12221  VMA_ASSERT(m_hAllocator->m_UseMutex);
12222  m_Mutex.UnlockWrite();
12223  }
12224 }
12225 
12226 size_t VmaBlockVector::CalcAllocationCount() const
12227 {
12228  size_t result = 0;
12229  for(size_t i = 0; i < m_Blocks.size(); ++i)
12230  {
12231  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12232  }
12233  return result;
12234 }
12235 
12236 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12237 {
12238  if(m_BufferImageGranularity == 1)
12239  {
12240  return false;
12241  }
12242  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12243  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12244  {
12245  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12246  VMA_ASSERT(m_Algorithm == 0);
12247  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12248  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12249  {
12250  return true;
12251  }
12252  }
12253  return false;
12254 }
12255 
12256 void VmaBlockVector::MakePoolAllocationsLost(
12257  uint32_t currentFrameIndex,
12258  size_t* pLostAllocationCount)
12259 {
12260  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12261  size_t lostAllocationCount = 0;
12262  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12263  {
12264  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12265  VMA_ASSERT(pBlock);
12266  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12267  }
12268  if(pLostAllocationCount != VMA_NULL)
12269  {
12270  *pLostAllocationCount = lostAllocationCount;
12271  }
12272 }
12273 
12274 VkResult VmaBlockVector::CheckCorruption()
12275 {
12276  if(!IsCorruptionDetectionEnabled())
12277  {
12278  return VK_ERROR_FEATURE_NOT_PRESENT;
12279  }
12280 
12281  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12282  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12283  {
12284  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12285  VMA_ASSERT(pBlock);
12286  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12287  if(res != VK_SUCCESS)
12288  {
12289  return res;
12290  }
12291  }
12292  return VK_SUCCESS;
12293 }
12294 
12295 void VmaBlockVector::AddStats(VmaStats* pStats)
12296 {
12297  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12298  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12299 
12300  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12301 
12302  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12303  {
12304  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12305  VMA_ASSERT(pBlock);
12306  VMA_HEAVY_ASSERT(pBlock->Validate());
12307  VmaStatInfo allocationStatInfo;
12308  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12309  VmaAddStatInfo(pStats->total, allocationStatInfo);
12310  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12311  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12312  }
12313 }
12314 
12316 // VmaDefragmentationAlgorithm_Generic members definition
12317 
12318 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12319  VmaAllocator hAllocator,
12320  VmaBlockVector* pBlockVector,
12321  uint32_t currentFrameIndex,
12322  bool overlappingMoveSupported) :
12323  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12324  m_AllAllocations(false),
12325  m_AllocationCount(0),
12326  m_BytesMoved(0),
12327  m_AllocationsMoved(0),
12328  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12329 {
12330  // Create block info for each block.
12331  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12332  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12333  {
12334  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12335  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12336  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12337  m_Blocks.push_back(pBlockInfo);
12338  }
12339 
12340  // Sort them by m_pBlock pointer value.
12341  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12342 }
12343 
12344 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12345 {
12346  for(size_t i = m_Blocks.size(); i--; )
12347  {
12348  vma_delete(m_hAllocator, m_Blocks[i]);
12349  }
12350 }
12351 
12352 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12353 {
12354  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
12355  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12356  {
12357  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12358  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12359  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12360  {
12361  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12362  (*it)->m_Allocations.push_back(allocInfo);
12363  }
12364  else
12365  {
12366  VMA_ASSERT(0);
12367  }
12368 
12369  ++m_AllocationCount;
12370  }
12371 }
12372 
12373 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12374  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12375  VkDeviceSize maxBytesToMove,
12376  uint32_t maxAllocationsToMove)
12377 {
12378  if(m_Blocks.empty())
12379  {
12380  return VK_SUCCESS;
12381  }
12382 
12383  // This is a choice based on research.
12384  // Option 1:
12385  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12386  // Option 2:
12387  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12388  // Option 3:
12389  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12390 
12391  size_t srcBlockMinIndex = 0;
12392  // With FAST_ALGORITHM, move allocations only out of the last of the blocks that contain non-movable allocations.
12393  /*
12394  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12395  {
12396  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12397  if(blocksWithNonMovableCount > 0)
12398  {
12399  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12400  }
12401  }
12402  */
12403 
12404  size_t srcBlockIndex = m_Blocks.size() - 1;
12405  size_t srcAllocIndex = SIZE_MAX;
12406  for(;;)
12407  {
12408  // 1. Find next allocation to move.
12409  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12410  // 1.2. Then start from last to first m_Allocations.
12411  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12412  {
12413  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12414  {
12415  // Finished: no more allocations to process.
12416  if(srcBlockIndex == srcBlockMinIndex)
12417  {
12418  return VK_SUCCESS;
12419  }
12420  else
12421  {
12422  --srcBlockIndex;
12423  srcAllocIndex = SIZE_MAX;
12424  }
12425  }
12426  else
12427  {
12428  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12429  }
12430  }
12431 
12432  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12433  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12434 
12435  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12436  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12437  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12438  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12439 
12440  // 2. Try to find new place for this allocation in preceding or current block.
12441  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12442  {
12443  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12444  VmaAllocationRequest dstAllocRequest;
12445  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12446  m_CurrentFrameIndex,
12447  m_pBlockVector->GetFrameInUseCount(),
12448  m_pBlockVector->GetBufferImageGranularity(),
12449  size,
12450  alignment,
12451  false, // upperAddress
12452  suballocType,
12453  false, // canMakeOtherLost
12454  strategy,
12455  &dstAllocRequest) &&
12456  MoveMakesSense(
12457  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12458  {
12459  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12460 
12461  // Reached limit on number of allocations or bytes to move.
12462  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12463  (m_BytesMoved + size > maxBytesToMove))
12464  {
12465  return VK_SUCCESS;
12466  }
12467 
12468  VmaDefragmentationMove move;
12469  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12470  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12471  move.srcOffset = srcOffset;
12472  move.dstOffset = dstAllocRequest.offset;
12473  move.size = size;
12474  moves.push_back(move);
12475 
12476  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12477  dstAllocRequest,
12478  suballocType,
12479  size,
12480  false, // upperAddress
12481  allocInfo.m_hAllocation);
12482  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12483 
12484  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12485 
12486  if(allocInfo.m_pChanged != VMA_NULL)
12487  {
12488  *allocInfo.m_pChanged = VK_TRUE;
12489  }
12490 
12491  ++m_AllocationsMoved;
12492  m_BytesMoved += size;
12493 
12494  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12495 
12496  break;
12497  }
12498  }
12499 
12500  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
12501 
12502  if(srcAllocIndex > 0)
12503  {
12504  --srcAllocIndex;
12505  }
12506  else
12507  {
12508  if(srcBlockIndex > 0)
12509  {
12510  --srcBlockIndex;
12511  srcAllocIndex = SIZE_MAX;
12512  }
12513  else
12514  {
12515  return VK_SUCCESS;
12516  }
12517  }
12518  }
12519 }
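// Worked example of one round (illustrative, with made-up numbers): suppose after
// sorting there are two blocks, block 0 (the most "destination", mostly free) and
// block 1 (the most "source") holding movable allocations A at offset 0 and B at
// offset 512. The loop starts at the last allocation of the last block (B), finds a
// place for it in block 0 via CreateAllocationRequest(), records the move, and
// immediately updates the metadata of both blocks; then it does the same for A.
// Block 1 ends up empty and can later be released by FreeEmptyBlocks(), provided the
// byte and allocation limits passed in were not exhausted first.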
12520 
12521 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12522 {
12523  size_t result = 0;
12524  for(size_t i = 0; i < m_Blocks.size(); ++i)
12525  {
12526  if(m_Blocks[i]->m_HasNonMovableAllocations)
12527  {
12528  ++result;
12529  }
12530  }
12531  return result;
12532 }
12533 
12534 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12535  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12536  VkDeviceSize maxBytesToMove,
12537  uint32_t maxAllocationsToMove)
12538 {
12539  if(!m_AllAllocations && m_AllocationCount == 0)
12540  {
12541  return VK_SUCCESS;
12542  }
12543 
12544  const size_t blockCount = m_Blocks.size();
12545  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12546  {
12547  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12548 
12549  if(m_AllAllocations)
12550  {
12551  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12552  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12553  it != pMetadata->m_Suballocations.end();
12554  ++it)
12555  {
12556  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12557  {
12558  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12559  pBlockInfo->m_Allocations.push_back(allocInfo);
12560  }
12561  }
12562  }
12563 
12564  pBlockInfo->CalcHasNonMovableAllocations();
12565 
12566  // This is a choice based on research.
12567  // Option 1:
12568  pBlockInfo->SortAllocationsByOffsetDescending();
12569  // Option 2:
12570  //pBlockInfo->SortAllocationsBySizeDescending();
12571  }
12572 
12573  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12574  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12575 
12576  // This is a choice based on research.
12577  const uint32_t roundCount = 2;
12578 
12579  // Execute defragmentation rounds (the main part).
12580  VkResult result = VK_SUCCESS;
12581  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12582  {
12583  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12584  }
12585 
12586  return result;
12587 }
12588 
12589 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12590  size_t dstBlockIndex, VkDeviceSize dstOffset,
12591  size_t srcBlockIndex, VkDeviceSize srcOffset)
12592 {
12593  if(dstBlockIndex < srcBlockIndex)
12594  {
12595  return true;
12596  }
12597  if(dstBlockIndex > srcBlockIndex)
12598  {
12599  return false;
12600  }
12601  if(dstOffset < srcOffset)
12602  {
12603  return true;
12604  }
12605  return false;
12606 }
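// Equivalently, this is a lexicographic comparison: a move makes sense only if it
// brings the allocation strictly closer to the front of the block sequence.
// A sketch of the same predicate (would require <tuple>):
//
//     return std::tie(dstBlockIndex, dstOffset) < std::tie(srcBlockIndex, srcOffset);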
12607 
12609 // VmaDefragmentationAlgorithm_Fast
12610 
12611 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12612  VmaAllocator hAllocator,
12613  VmaBlockVector* pBlockVector,
12614  uint32_t currentFrameIndex,
12615  bool overlappingMoveSupported) :
12616  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12617  m_OverlappingMoveSupported(overlappingMoveSupported),
12618  m_AllocationCount(0),
12619  m_AllAllocations(false),
12620  m_BytesMoved(0),
12621  m_AllocationsMoved(0),
12622  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12623 {
12624  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12625 
12626 }
12627 
12628 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12629 {
12630 }
12631 
12632 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12633  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12634  VkDeviceSize maxBytesToMove,
12635  uint32_t maxAllocationsToMove)
12636 {
12637  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12638 
12639  const size_t blockCount = m_pBlockVector->GetBlockCount();
12640  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12641  {
12642  return VK_SUCCESS;
12643  }
12644 
12645  PreprocessMetadata();
12646 
12647  // Sort blocks in order from most "destination" to most "source".
12648 
12649  m_BlockInfos.resize(blockCount);
12650  for(size_t i = 0; i < blockCount; ++i)
12651  {
12652  m_BlockInfos[i].origBlockIndex = i;
12653  }
12654 
12655  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12656  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12657  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12658  });
12659 
12660  // THE MAIN ALGORITHM
12661 
12662  FreeSpaceDatabase freeSpaceDb;
12663 
12664  size_t dstBlockInfoIndex = 0;
12665  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12666  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12667  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12668  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12669  VkDeviceSize dstOffset = 0;
12670 
12671  bool end = false;
12672  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12673  {
12674  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12675  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12676  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12677  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12678  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12679  {
12680  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12681  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12682  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12683  if(m_AllocationsMoved == maxAllocationsToMove ||
12684  m_BytesMoved + srcAllocSize > maxBytesToMove)
12685  {
12686  end = true;
12687  break;
12688  }
12689  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12690 
12691  // Try to place it in one of the free spaces from the database.
12692  size_t freeSpaceInfoIndex;
12693  VkDeviceSize dstAllocOffset;
12694  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12695  freeSpaceInfoIndex, dstAllocOffset))
12696  {
12697  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12698  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12699  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12700  VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
12701 
12702  // Same block
12703  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12704  {
12705  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12706 
12707  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12708 
12709  VmaSuballocation suballoc = *srcSuballocIt;
12710  suballoc.offset = dstAllocOffset;
12711  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12712  m_BytesMoved += srcAllocSize;
12713  ++m_AllocationsMoved;
12714 
12715  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12716  ++nextSuballocIt;
12717  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12718  srcSuballocIt = nextSuballocIt;
12719 
12720  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12721 
12722  VmaDefragmentationMove move = {
12723  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12724  srcAllocOffset, dstAllocOffset,
12725  srcAllocSize };
12726  moves.push_back(move);
12727  }
12728  // Different block
12729  else
12730  {
12731  // MOVE OPTION 2: Move the allocation to a different block.
12732 
12733  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
12734 
12735  VmaSuballocation suballoc = *srcSuballocIt;
12736  suballoc.offset = dstAllocOffset;
12737  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
12738  m_BytesMoved += srcAllocSize;
12739  ++m_AllocationsMoved;
12740 
12741  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12742  ++nextSuballocIt;
12743  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12744  srcSuballocIt = nextSuballocIt;
12745 
12746  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12747 
12748  VmaDefragmentationMove move = {
12749  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12750  srcAllocOffset, dstAllocOffset,
12751  srcAllocSize };
12752  moves.push_back(move);
12753  }
12754  }
12755  else
12756  {
12757  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
12758 
12759  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
12760  while(dstBlockInfoIndex < srcBlockInfoIndex &&
12761  dstAllocOffset + srcAllocSize > dstBlockSize)
12762  {
12763  // But before that, register remaining free space at the end of dst block.
12764  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
12765 
12766  ++dstBlockInfoIndex;
12767  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12768  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12769  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12770  dstBlockSize = pDstMetadata->GetSize();
12771  dstOffset = 0;
12772  dstAllocOffset = 0;
12773  }
12774 
12775  // Same block
12776  if(dstBlockInfoIndex == srcBlockInfoIndex)
12777  {
12778  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12779 
12780  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
12781 
12782  bool skipOver = overlap;
12783  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12784  {
12785  // If the destination and source regions overlap, skip the move if it would
12786  // shift the allocation by less than 1/64 of its size.
12787  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
12788  }
12789 
12790  if(skipOver)
12791  {
12792  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
12793 
12794  dstOffset = srcAllocOffset + srcAllocSize;
12795  ++srcSuballocIt;
12796  }
12797  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12798  else
12799  {
12800  srcSuballocIt->offset = dstAllocOffset;
12801  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12802  dstOffset = dstAllocOffset + srcAllocSize;
12803  m_BytesMoved += srcAllocSize;
12804  ++m_AllocationsMoved;
12805  ++srcSuballocIt;
12806  VmaDefragmentationMove move = {
12807  srcOrigBlockIndex, dstOrigBlockIndex,
12808  srcAllocOffset, dstAllocOffset,
12809  srcAllocSize };
12810  moves.push_back(move);
12811  }
12812  }
12813  // Different block
12814  else
12815  {
12816  // MOVE OPTION 2: Move the allocation to a different block.
12817 
12818  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12819  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12820 
12821  VmaSuballocation suballoc = *srcSuballocIt;
12822  suballoc.offset = dstAllocOffset;
12823  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12824  dstOffset = dstAllocOffset + srcAllocSize;
12825  m_BytesMoved += srcAllocSize;
12826  ++m_AllocationsMoved;
12827 
12828  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12829  ++nextSuballocIt;
12830  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12831  srcSuballocIt = nextSuballocIt;
12832 
12833  pDstMetadata->m_Suballocations.push_back(suballoc);
12834 
12835  VmaDefragmentationMove move = {
12836  srcOrigBlockIndex, dstOrigBlockIndex,
12837  srcAllocOffset, dstAllocOffset,
12838  srcAllocSize };
12839  moves.push_back(move);
12840  }
12841  }
12842  }
12843  }
12844 
12845  m_BlockInfos.clear();
12846 
12847  PostprocessMetadata();
12848 
12849  return VK_SUCCESS;
12850 }
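// Conceptual sketch of the role freeSpaceDb plays above. This is NOT the
// FreeSpaceDatabase class defined elsewhere in this file - just one plausible,
// simplified implementation of the same Register/Fetch contract: remember a handful
// of free regions that were skipped over, and hand them back later to place a
// smaller allocation, honoring its alignment.
struct FreeSpaceSketch
{
    struct Entry { size_t blockInfoIndex; VkDeviceSize offset, size; };
    Entry entries[4];
    size_t count = 0;

    void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
    {
        // Keep only a fixed, small number of remembered regions.
        if(count < 4)
        {
            entries[count++] = { blockInfoIndex, offset, size };
        }
    }

    bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
        size_t& outBlockInfoIndex, VkDeviceSize& outOffset)
    {
        for(size_t i = 0; i < count; ++i)
        {
            // First remembered region where the aligned allocation still fits.
            const VkDeviceSize alignedOffset = VmaAlignUp(entries[i].offset, alignment);
            if(alignedOffset + size <= entries[i].offset + entries[i].size)
            {
                outBlockInfoIndex = entries[i].blockInfoIndex;
                outOffset = alignedOffset;
                // Shrink the remembered region by the part that was just consumed.
                entries[i].size = (entries[i].offset + entries[i].size) - (alignedOffset + size);
                entries[i].offset = alignedOffset + size;
                return true;
            }
        }
        return false;
    }
};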
12851 
12852 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
12853 {
12854  const size_t blockCount = m_pBlockVector->GetBlockCount();
12855  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12856  {
12857  VmaBlockMetadata_Generic* const pMetadata =
12858  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12859  pMetadata->m_FreeCount = 0;
12860  pMetadata->m_SumFreeSize = pMetadata->GetSize();
12861  pMetadata->m_FreeSuballocationsBySize.clear();
12862  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
12863  it != pMetadata->m_Suballocations.end(); )
12864  {
12865  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
12866  {
12867  VmaSuballocationList::iterator nextIt = it;
12868  ++nextIt;
12869  pMetadata->m_Suballocations.erase(it);
12870  it = nextIt;
12871  }
12872  else
12873  {
12874  ++it;
12875  }
12876  }
12877  }
12878 }
12879 
12880 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
12881 {
12882  const size_t blockCount = m_pBlockVector->GetBlockCount();
12883  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12884  {
12885  VmaBlockMetadata_Generic* const pMetadata =
12886  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12887  const VkDeviceSize blockSize = pMetadata->GetSize();
12888 
12889  // No allocations in this block - entire area is free.
12890  if(pMetadata->m_Suballocations.empty())
12891  {
12892  pMetadata->m_FreeCount = 1;
12893  //pMetadata->m_SumFreeSize is already set to blockSize.
12894  VmaSuballocation suballoc = {
12895  0, // offset
12896  blockSize, // size
12897  VMA_NULL, // hAllocation
12898  VMA_SUBALLOCATION_TYPE_FREE };
12899  pMetadata->m_Suballocations.push_back(suballoc);
12900  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
12901  }
12902  // There are some allocations in this block.
12903  else
12904  {
12905  VkDeviceSize offset = 0;
12906  VmaSuballocationList::iterator it;
12907  for(it = pMetadata->m_Suballocations.begin();
12908  it != pMetadata->m_Suballocations.end();
12909  ++it)
12910  {
12911  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
12912  VMA_ASSERT(it->offset >= offset);
12913 
12914  // Need to insert preceding free space.
12915  if(it->offset > offset)
12916  {
12917  ++pMetadata->m_FreeCount;
12918  const VkDeviceSize freeSize = it->offset - offset;
12919  VmaSuballocation suballoc = {
12920  offset, // offset
12921  freeSize, // size
12922  VMA_NULL, // hAllocation
12923  VMA_SUBALLOCATION_TYPE_FREE };
12924  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
12925  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
12926  {
12927  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
12928  }
12929  }
12930 
12931  pMetadata->m_SumFreeSize -= it->size;
12932  offset = it->offset + it->size;
12933  }
12934 
12935  // Need to insert trailing free space.
12936  if(offset < blockSize)
12937  {
12938  ++pMetadata->m_FreeCount;
12939  const VkDeviceSize freeSize = blockSize - offset;
12940  VmaSuballocation suballoc = {
12941  offset, // offset
12942  freeSize, // size
12943  VMA_NULL, // hAllocation
12944  VMA_SUBALLOCATION_TYPE_FREE };
12945  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
12946  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
12947  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
12948  {
12949  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
12950  }
12951  }
12952 
12953  VMA_SORT(
12954  pMetadata->m_FreeSuballocationsBySize.begin(),
12955  pMetadata->m_FreeSuballocationsBySize.end(),
12956  VmaSuballocationItemSizeLess());
12957  }
12958 
12959  VMA_HEAVY_ASSERT(pMetadata->Validate());
12960  }
12961 }
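// Worked example of the rebuild above (made-up numbers): in a 512-byte block whose
// remaining suballocations after the moves are [64, 192) and [256, 320), the loop
// reinserts free pieces [0, 64), [192, 256) and [320, 512), giving m_FreeCount = 3
// and m_SumFreeSize = 512 - (128 + 64) = 320; free pieces large enough are also
// re-registered in m_FreeSuballocationsBySize, which is then re-sorted by size.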
12962 
12963 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
12964 {
12965  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
12966  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
12967  while(it != pMetadata->m_Suballocations.end())
12968  {
12969  if(it->offset < suballoc.offset)
12970  {
12971  ++it;
12972  }
12973  }
12974  pMetadata->m_Suballocations.insert(it, suballoc);
12975 }
12976 
12978 // VmaBlockVectorDefragmentationContext
12979 
12980 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
12981  VmaAllocator hAllocator,
12982  VmaPool hCustomPool,
12983  VmaBlockVector* pBlockVector,
12984  uint32_t currFrameIndex,
12985  uint32_t algorithmFlags) :
12986  res(VK_SUCCESS),
12987  mutexLocked(false),
12988  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
12989  m_hAllocator(hAllocator),
12990  m_hCustomPool(hCustomPool),
12991  m_pBlockVector(pBlockVector),
12992  m_CurrFrameIndex(currFrameIndex),
12993  m_AlgorithmFlags(algorithmFlags),
12994  m_pAlgorithm(VMA_NULL),
12995  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
12996  m_AllAllocations(false)
12997 {
12998 }
12999 
13000 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13001 {
13002  vma_delete(m_hAllocator, m_pAlgorithm);
13003 }
13004 
13005 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13006 {
13007  AllocInfo info = { hAlloc, pChanged };
13008  m_Allocations.push_back(info);
13009 }
13010 
13011 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13012 {
13013  const bool allAllocations = m_AllAllocations ||
13014  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13015 
13016  /********************************
13017  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13018  ********************************/
13019 
13020  /*
13021  Fast algorithm is supported only when certain criteria are met:
13022  - VMA_DEBUG_MARGIN is 0.
13023  - All allocations in this block vector are moveable.
13024  - There is no possibility of image/buffer granularity conflict.
13025  */
13026  if(VMA_DEBUG_MARGIN == 0 &&
13027  allAllocations &&
13028  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13029  {
13030  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13031  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13032  }
13033  else
13034  {
13035  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13036  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13037  }
13038 
13039  if(allAllocations)
13040  {
13041  m_pAlgorithm->AddAll();
13042  }
13043  else
13044  {
13045  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13046  {
13047  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13048  }
13049  }
13050 }
13051 
13053 // VmaDefragmentationContext
13054 
13055 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13056  VmaAllocator hAllocator,
13057  uint32_t currFrameIndex,
13058  uint32_t flags,
13059  VmaDefragmentationStats* pStats) :
13060  m_hAllocator(hAllocator),
13061  m_CurrFrameIndex(currFrameIndex),
13062  m_Flags(flags),
13063  m_pStats(pStats),
13064  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13065 {
13066  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13067 }
13068 
13069 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13070 {
13071  for(size_t i = m_CustomPoolContexts.size(); i--; )
13072  {
13073  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13074  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13075  vma_delete(m_hAllocator, pBlockVectorCtx);
13076  }
13077  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13078  {
13079  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13080  if(pBlockVectorCtx)
13081  {
13082  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13083  vma_delete(m_hAllocator, pBlockVectorCtx);
13084  }
13085  }
13086 }
13087 
13088 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13089 {
13090  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13091  {
13092  VmaPool pool = pPools[poolIndex];
13093  VMA_ASSERT(pool);
13094  // Pools with algorithm other than default are not defragmented.
13095  if(pool->m_BlockVector.GetAlgorithm() == 0)
13096  {
13097  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13098 
13099  for(size_t i = m_CustomPoolContexts.size(); i--; )
13100  {
13101  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13102  {
13103  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13104  break;
13105  }
13106  }
13107 
13108  if(!pBlockVectorDefragCtx)
13109  {
13110  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13111  m_hAllocator,
13112  pool,
13113  &pool->m_BlockVector,
13114  m_CurrFrameIndex,
13115  m_Flags);
13116  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13117  }
13118 
13119  pBlockVectorDefragCtx->AddAll();
13120  }
13121  }
13122 }
13123 
13124 void VmaDefragmentationContext_T::AddAllocations(
13125  uint32_t allocationCount,
13126  VmaAllocation* pAllocations,
13127  VkBool32* pAllocationsChanged)
13128 {
13129  // Dispatch pAllocations among the per-pool defragmentation contexts. Create them when necessary.
13130  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13131  {
13132  const VmaAllocation hAlloc = pAllocations[allocIndex];
13133  VMA_ASSERT(hAlloc);
13134  // DedicatedAlloc cannot be defragmented.
13135  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13136  // Lost allocation cannot be defragmented.
13137  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13138  {
13139  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13140 
13141  const VmaPool hAllocPool = hAlloc->GetPool();
13142  // This allocation belongs to a custom pool.
13143  if(hAllocPool != VK_NULL_HANDLE)
13144  {
13145  // Pools with algorithm other than default are not defragmented.
13146  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13147  {
13148  for(size_t i = m_CustomPoolContexts.size(); i--; )
13149  {
13150  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13151  {
13152  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13153  break;
13154  }
13155  }
13156  if(!pBlockVectorDefragCtx)
13157  {
13158  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13159  m_hAllocator,
13160  hAllocPool,
13161  &hAllocPool->m_BlockVector,
13162  m_CurrFrameIndex,
13163  m_Flags);
13164  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13165  }
13166  }
13167  }
13168  // This allocation belongs to the default pool.
13169  else
13170  {
13171  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13172  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13173  if(!pBlockVectorDefragCtx)
13174  {
13175  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13176  m_hAllocator,
13177  VMA_NULL, // hCustomPool
13178  m_hAllocator->m_pBlockVectors[memTypeIndex],
13179  m_CurrFrameIndex,
13180  m_Flags);
13181  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13182  }
13183  }
13184 
13185  if(pBlockVectorDefragCtx)
13186  {
13187  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13188  &pAllocationsChanged[allocIndex] : VMA_NULL;
13189  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13190  }
13191  }
13192  }
13193 }
13194 
13195 VkResult VmaDefragmentationContext_T::Defragment(
13196  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13197  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13198  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13199 {
13200  if(pStats)
13201  {
13202  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13203  }
13204 
13205  if(commandBuffer == VK_NULL_HANDLE)
13206  {
13207  maxGpuBytesToMove = 0;
13208  maxGpuAllocationsToMove = 0;
13209  }
13210 
13211  VkResult res = VK_SUCCESS;
13212 
13213  // Process default pools.
13214  for(uint32_t memTypeIndex = 0;
13215  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13216  ++memTypeIndex)
13217  {
13218  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13219  if(pBlockVectorCtx)
13220  {
13221  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13222  pBlockVectorCtx->GetBlockVector()->Defragment(
13223  pBlockVectorCtx,
13224  pStats,
13225  maxCpuBytesToMove, maxCpuAllocationsToMove,
13226  maxGpuBytesToMove, maxGpuAllocationsToMove,
13227  commandBuffer);
13228  if(pBlockVectorCtx->res != VK_SUCCESS)
13229  {
13230  res = pBlockVectorCtx->res;
13231  }
13232  }
13233  }
13234 
13235  // Process custom pools.
13236  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13237  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13238  ++customCtxIndex)
13239  {
13240  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13241  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13242  pBlockVectorCtx->GetBlockVector()->Defragment(
13243  pBlockVectorCtx,
13244  pStats,
13245  maxCpuBytesToMove, maxCpuAllocationsToMove,
13246  maxGpuBytesToMove, maxGpuAllocationsToMove,
13247  commandBuffer);
13248  if(pBlockVectorCtx->res != VK_SUCCESS)
13249  {
13250  res = pBlockVectorCtx->res;
13251  }
13252  }
13253 
13254  return res;
13255 }
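// For orientation, a sketch of how the public API reaches the code above. The field
// and function names follow the vmaDefragmentationBegin()/vmaDefragmentationEnd()
// interface of this library version; treat the exact structure layout as an
// assumption and check the declarations earlier in this file:
//
//     VmaDefragmentationInfo2 info = {};
//     info.allocationCount = allocCount;
//     info.pAllocations = allocs;             // VmaAllocation[] owned by the application
//     info.pAllocationsChanged = changed;     // optional VkBool32[] output
//     info.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     info.maxCpuAllocationsToMove = UINT32_MAX;
//     info.maxGpuBytesToMove = VK_WHOLE_SIZE;
//     info.maxGpuAllocationsToMove = UINT32_MAX;
//     info.commandBuffer = cmdBuf;            // optional, enables the GPU path
//
//     VmaDefragmentationContext defragCtx;
//     VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &defragCtx);
//     // If res == VK_NOT_READY, submit cmdBuf and wait for it to complete
//     // (see the note after ApplyDefragmentationMovesGpu() above), then:
//     vmaDefragmentationEnd(allocator, defragCtx);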
13256 
13258 // VmaRecorder
13259 
13260 #if VMA_RECORDING_ENABLED
13261 
13262 VmaRecorder::VmaRecorder() :
13263  m_UseMutex(true),
13264  m_Flags(0),
13265  m_File(VMA_NULL),
13266  m_Freq(INT64_MAX),
13267  m_StartCounter(INT64_MAX)
13268 {
13269 }
13270 
13271 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13272 {
13273  m_UseMutex = useMutex;
13274  m_Flags = settings.flags;
13275 
13276  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13277  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13278 
13279  // Open file for writing.
13280  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13281  if(err != 0)
13282  {
13283  return VK_ERROR_INITIALIZATION_FAILED;
13284  }
13285 
13286  // Write header.
13287  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13288  fprintf(m_File, "%s\n", "1,5");
13289 
13290  return VK_SUCCESS;
13291 }
13292 
13293 VmaRecorder::~VmaRecorder()
13294 {
13295  if(m_File != VMA_NULL)
13296  {
13297  fclose(m_File);
13298  }
13299 }
13300 
13301 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13302 {
13303  CallParams callParams;
13304  GetBasicParams(callParams);
13305 
13306  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13307  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13308  Flush();
13309 }
13310 
13311 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13312 {
13313  CallParams callParams;
13314  GetBasicParams(callParams);
13315 
13316  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13317  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13318  Flush();
13319 }
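// For illustration, the beginning of a recording file produced by this class looks
// like the following CSV (thread id and timestamps are made-up sample values; each
// call line follows the fprintf format of the corresponding Record* method in this
// class):
//
//     Vulkan Memory Allocator,Calls recording
//     1,5
//     12552,0.002,0,vmaCreateAllocator
//     12552,10.875,0,vmaDestroyAllocator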
13320 
13321 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13322 {
13323  CallParams callParams;
13324  GetBasicParams(callParams);
13325 
13326  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13327  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13328  createInfo.memoryTypeIndex,
13329  createInfo.flags,
13330  createInfo.blockSize,
13331  (uint64_t)createInfo.minBlockCount,
13332  (uint64_t)createInfo.maxBlockCount,
13333  createInfo.frameInUseCount,
13334  pool);
13335  Flush();
13336 }
13337 
13338 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13339 {
13340  CallParams callParams;
13341  GetBasicParams(callParams);
13342 
13343  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13344  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13345  pool);
13346  Flush();
13347 }
13348 
13349 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13350  const VkMemoryRequirements& vkMemReq,
13351  const VmaAllocationCreateInfo& createInfo,
13352  VmaAllocation allocation)
13353 {
13354  CallParams callParams;
13355  GetBasicParams(callParams);
13356 
13357  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13358  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13359  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13360  vkMemReq.size,
13361  vkMemReq.alignment,
13362  vkMemReq.memoryTypeBits,
13363  createInfo.flags,
13364  createInfo.usage,
13365  createInfo.requiredFlags,
13366  createInfo.preferredFlags,
13367  createInfo.memoryTypeBits,
13368  createInfo.pool,
13369  allocation,
13370  userDataStr.GetString());
13371  Flush();
13372 }
13373 
13374 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13375  const VkMemoryRequirements& vkMemReq,
13376  bool requiresDedicatedAllocation,
13377  bool prefersDedicatedAllocation,
13378  const VmaAllocationCreateInfo& createInfo,
13379  VmaAllocation allocation)
13380 {
13381  CallParams callParams;
13382  GetBasicParams(callParams);
13383 
13384  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13385  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13386  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13387  vkMemReq.size,
13388  vkMemReq.alignment,
13389  vkMemReq.memoryTypeBits,
13390  requiresDedicatedAllocation ? 1 : 0,
13391  prefersDedicatedAllocation ? 1 : 0,
13392  createInfo.flags,
13393  createInfo.usage,
13394  createInfo.requiredFlags,
13395  createInfo.preferredFlags,
13396  createInfo.memoryTypeBits,
13397  createInfo.pool,
13398  allocation,
13399  userDataStr.GetString());
13400  Flush();
13401 }
13402 
13403 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13404  const VkMemoryRequirements& vkMemReq,
13405  bool requiresDedicatedAllocation,
13406  bool prefersDedicatedAllocation,
13407  const VmaAllocationCreateInfo& createInfo,
13408  VmaAllocation allocation)
13409 {
13410  CallParams callParams;
13411  GetBasicParams(callParams);
13412 
13413  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13414  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13415  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13416  vkMemReq.size,
13417  vkMemReq.alignment,
13418  vkMemReq.memoryTypeBits,
13419  requiresDedicatedAllocation ? 1 : 0,
13420  prefersDedicatedAllocation ? 1 : 0,
13421  createInfo.flags,
13422  createInfo.usage,
13423  createInfo.requiredFlags,
13424  createInfo.preferredFlags,
13425  createInfo.memoryTypeBits,
13426  createInfo.pool,
13427  allocation,
13428  userDataStr.GetString());
13429  Flush();
13430 }
13431 
13432 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13433  VmaAllocation allocation)
13434 {
13435  CallParams callParams;
13436  GetBasicParams(callParams);
13437 
13438  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13439  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13440  allocation);
13441  Flush();
13442 }
13443 
13444 void VmaRecorder::RecordResizeAllocation(
13445  uint32_t frameIndex,
13446  VmaAllocation allocation,
13447  VkDeviceSize newSize)
13448 {
13449  CallParams callParams;
13450  GetBasicParams(callParams);
13451 
13452  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13453  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13454  allocation, newSize);
13455  Flush();
13456 }
13457 
13458 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13459  VmaAllocation allocation,
13460  const void* pUserData)
13461 {
13462  CallParams callParams;
13463  GetBasicParams(callParams);
13464 
13465  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13466  UserDataString userDataStr(
13467  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13468  pUserData);
13469  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13470  allocation,
13471  userDataStr.GetString());
13472  Flush();
13473 }
13474 
13475 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13476  VmaAllocation allocation)
13477 {
13478  CallParams callParams;
13479  GetBasicParams(callParams);
13480 
13481  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13482  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13483  allocation);
13484  Flush();
13485 }
13486 
13487 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13488  VmaAllocation allocation)
13489 {
13490  CallParams callParams;
13491  GetBasicParams(callParams);
13492 
13493  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13494  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13495  allocation);
13496  Flush();
13497 }
13498 
13499 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13500  VmaAllocation allocation)
13501 {
13502  CallParams callParams;
13503  GetBasicParams(callParams);
13504 
13505  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13506  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13507  allocation);
13508  Flush();
13509 }
13510 
13511 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13512  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13513 {
13514  CallParams callParams;
13515  GetBasicParams(callParams);
13516 
13517  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13518  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13519  allocation,
13520  offset,
13521  size);
13522  Flush();
13523 }
13524 
13525 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13526  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13527 {
13528  CallParams callParams;
13529  GetBasicParams(callParams);
13530 
13531  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13532  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13533  allocation,
13534  offset,
13535  size);
13536  Flush();
13537 }
13538 
13539 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13540  const VkBufferCreateInfo& bufCreateInfo,
13541  const VmaAllocationCreateInfo& allocCreateInfo,
13542  VmaAllocation allocation)
13543 {
13544  CallParams callParams;
13545  GetBasicParams(callParams);
13546 
13547  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13548  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13549  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13550  bufCreateInfo.flags,
13551  bufCreateInfo.size,
13552  bufCreateInfo.usage,
13553  bufCreateInfo.sharingMode,
13554  allocCreateInfo.flags,
13555  allocCreateInfo.usage,
13556  allocCreateInfo.requiredFlags,
13557  allocCreateInfo.preferredFlags,
13558  allocCreateInfo.memoryTypeBits,
13559  allocCreateInfo.pool,
13560  allocation,
13561  userDataStr.GetString());
13562  Flush();
13563 }
13564 
13565 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13566  const VkImageCreateInfo& imageCreateInfo,
13567  const VmaAllocationCreateInfo& allocCreateInfo,
13568  VmaAllocation allocation)
13569 {
13570  CallParams callParams;
13571  GetBasicParams(callParams);
13572 
13573  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13574  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13575  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13576  imageCreateInfo.flags,
13577  imageCreateInfo.imageType,
13578  imageCreateInfo.format,
13579  imageCreateInfo.extent.width,
13580  imageCreateInfo.extent.height,
13581  imageCreateInfo.extent.depth,
13582  imageCreateInfo.mipLevels,
13583  imageCreateInfo.arrayLayers,
13584  imageCreateInfo.samples,
13585  imageCreateInfo.tiling,
13586  imageCreateInfo.usage,
13587  imageCreateInfo.sharingMode,
13588  imageCreateInfo.initialLayout,
13589  allocCreateInfo.flags,
13590  allocCreateInfo.usage,
13591  allocCreateInfo.requiredFlags,
13592  allocCreateInfo.preferredFlags,
13593  allocCreateInfo.memoryTypeBits,
13594  allocCreateInfo.pool,
13595  allocation,
13596  userDataStr.GetString());
13597  Flush();
13598 }
13599 
13600 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13601  VmaAllocation allocation)
13602 {
13603  CallParams callParams;
13604  GetBasicParams(callParams);
13605 
13606  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13607  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13608  allocation);
13609  Flush();
13610 }
13611 
13612 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13613  VmaAllocation allocation)
13614 {
13615  CallParams callParams;
13616  GetBasicParams(callParams);
13617 
13618  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13619  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13620  allocation);
13621  Flush();
13622 }
13623 
13624 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13625  VmaAllocation allocation)
13626 {
13627  CallParams callParams;
13628  GetBasicParams(callParams);
13629 
13630  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13631  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13632  allocation);
13633  Flush();
13634 }
13635 
13636 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13637  VmaAllocation allocation)
13638 {
13639  CallParams callParams;
13640  GetBasicParams(callParams);
13641 
13642  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13643  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13644  allocation);
13645  Flush();
13646 }
13647 
13648 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13649  VmaPool pool)
13650 {
13651  CallParams callParams;
13652  GetBasicParams(callParams);
13653 
13654  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13655  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13656  pool);
13657  Flush();
13658 }
13659 
13660 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13661  const VmaDefragmentationInfo2& info,
 13662  VmaDefragmentationContext ctx)
 13663 {
13664  CallParams callParams;
13665  GetBasicParams(callParams);
13666 
13667  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13668  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13669  info.flags);
13670  PrintPointerList(info.allocationCount, info.pAllocations);
13671  fprintf(m_File, ",");
13672  PrintPointerList(info.poolCount, info.pPools);
13673  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
 13674  info.maxCpuBytesToMove,
 13675  info.maxCpuAllocationsToMove,
 13676  info.maxGpuBytesToMove,
 13677  info.maxGpuAllocationsToMove,
 13678  info.commandBuffer,
13679  ctx);
13680  Flush();
13681 }
13682 
13683 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
 13684  VmaDefragmentationContext ctx)
 13685 {
13686  CallParams callParams;
13687  GetBasicParams(callParams);
13688 
13689  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13690  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
13691  ctx);
13692  Flush();
13693 }
13694 
13695 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
13696 {
13697  if(pUserData != VMA_NULL)
13698  {
13699  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
13700  {
13701  m_Str = (const char*)pUserData;
13702  }
13703  else
13704  {
13705  sprintf_s(m_PtrStr, "%p", pUserData);
13706  m_Str = m_PtrStr;
13707  }
13708  }
13709  else
13710  {
13711  m_Str = "";
13712  }
13713 }
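// [Added note - illustrative sketch] Whether UserDataString stores pUserData verbatim or
// formats it as a pointer is controlled by VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
// on the allocation, e.g.:
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
//     allocCreateInfo.pUserData = (void*)"MyTexture"; // recorded as readable text
//
// Without the flag, pUserData is treated as an opaque pointer and written with "%p".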
13714 
13715 void VmaRecorder::WriteConfiguration(
13716  const VkPhysicalDeviceProperties& devProps,
13717  const VkPhysicalDeviceMemoryProperties& memProps,
13718  bool dedicatedAllocationExtensionEnabled)
13719 {
13720  fprintf(m_File, "Config,Begin\n");
13721 
13722  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
13723  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
13724  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
13725  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
13726  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
13727  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
13728 
13729  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
13730  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
13731  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
13732 
13733  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
13734  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
13735  {
13736  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
13737  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
13738  }
13739  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
13740  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
13741  {
13742  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
13743  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
13744  }
13745 
13746  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
13747 
13748  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
13749  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
13750  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
13751  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
13752  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
13753  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
13754  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
13755  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
13756  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13757 
13758  fprintf(m_File, "Config,End\n");
13759 }
13760 
13761 void VmaRecorder::GetBasicParams(CallParams& outParams)
13762 {
13763  outParams.threadId = GetCurrentThreadId();
13764 
13765  LARGE_INTEGER counter;
13766  QueryPerformanceCounter(&counter);
13767  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
13768 }
13769 
13770 void VmaRecorder::Flush()
13771 {
13772  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
13773  {
13774  fflush(m_File);
13775  }
13776 }
13777 
13778 #endif // #if VMA_RECORDING_ENABLED
13779 
 13780 ////////////////////////////////////////////////////////////////////////////////
 13781 // VmaAllocator_T
13782 
13783 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13784  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13785  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13786  m_hDevice(pCreateInfo->device),
13787  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
13788  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
13789  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
13790  m_PreferredLargeHeapBlockSize(0),
13791  m_PhysicalDevice(pCreateInfo->physicalDevice),
13792  m_CurrentFrameIndex(0),
13793  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
13794  m_NextPoolId(0)
 13795 #if VMA_RECORDING_ENABLED
 13796  ,m_pRecorder(VMA_NULL)
13797 #endif
13798 {
13799  if(VMA_DEBUG_DETECT_CORRUPTION)
13800  {
 13801  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
13802  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
13803  }
13804 
13805  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
13806 
13807 #if !(VMA_DEDICATED_ALLOCATION)
 13808  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
 13809  {
13810  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
13811  }
13812 #endif
13813 
13814  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
13815  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
13816  memset(&m_MemProps, 0, sizeof(m_MemProps));
13817 
13818  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
13819  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
13820 
13821  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
13822  {
13823  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
13824  }
13825 
13826  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
13827  {
13828  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
13829  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
13830  }
13831 
13832  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
13833 
13834  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
13835  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
13836 
13837  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
13838  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
13839  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
13840  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
13841 
13842  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
13843  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13844 
13845  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
13846  {
13847  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
13848  {
13849  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
13850  if(limit != VK_WHOLE_SIZE)
13851  {
13852  m_HeapSizeLimit[heapIndex] = limit;
13853  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
13854  {
13855  m_MemProps.memoryHeaps[heapIndex].size = limit;
13856  }
13857  }
13858  }
13859  }
13860 
13861  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13862  {
13863  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
13864 
13865  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
13866  this,
13867  memTypeIndex,
13868  preferredBlockSize,
13869  0,
13870  SIZE_MAX,
13871  GetBufferImageGranularity(),
13872  pCreateInfo->frameInUseCount,
13873  false, // isCustomPool
13874  false, // explicitBlockSize
13875  false); // linearAlgorithm
 13876  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
 13877  // because minBlockCount is 0.
13878  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
13879 
13880  }
13881 }
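// [Added note - illustrative sketch] The heap size limits and preferred block size consumed
// by the constructor above come from VmaAllocatorCreateInfo. A hypothetical setup that caps
// heap 0 at 512 MiB and requests 64 MiB default blocks:
//
//     VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
//     for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
//         heapLimits[i] = VK_WHOLE_SIZE;                    // "no limit" for every heap
//     heapLimits[0] = 512ull * 1024 * 1024;                 // limit heap 0 only
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.physicalDevice = physicalDevice;        // hypothetical handle
//     allocatorInfo.device = device;                        // hypothetical handle
//     allocatorInfo.preferredLargeHeapBlockSize = 64ull * 1024 * 1024;
//     allocatorInfo.pHeapSizeLimit = heapLimits;
//
//     VmaAllocator allocator = VK_NULL_HANDLE;
//     vmaCreateAllocator(&allocatorInfo, &allocator);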
13882 
13883 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
13884 {
13885  VkResult res = VK_SUCCESS;
13886 
13887  if(pCreateInfo->pRecordSettings != VMA_NULL &&
13888  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
13889  {
13890 #if VMA_RECORDING_ENABLED
13891  m_pRecorder = vma_new(this, VmaRecorder)();
13892  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
13893  if(res != VK_SUCCESS)
13894  {
13895  return res;
13896  }
13897  m_pRecorder->WriteConfiguration(
13898  m_PhysicalDeviceProperties,
13899  m_MemProps,
13900  m_UseKhrDedicatedAllocation);
13901  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
13902 #else
13903  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
13904  return VK_ERROR_FEATURE_NOT_PRESENT;
13905 #endif
13906  }
13907 
13908  return res;
13909 }
13910 
13911 VmaAllocator_T::~VmaAllocator_T()
13912 {
13913 #if VMA_RECORDING_ENABLED
13914  if(m_pRecorder != VMA_NULL)
13915  {
13916  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
13917  vma_delete(this, m_pRecorder);
13918  }
13919 #endif
13920 
13921  VMA_ASSERT(m_Pools.empty());
13922 
13923  for(size_t i = GetMemoryTypeCount(); i--; )
13924  {
13925  vma_delete(this, m_pDedicatedAllocations[i]);
13926  vma_delete(this, m_pBlockVectors[i]);
13927  }
13928 }
13929 
13930 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
13931 {
13932 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
13933  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
13934  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
13935  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
13936  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
13937  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
13938  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
13939  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
13940  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
13941  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
13942  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
13943  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
13944  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
13945  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
13946  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
13947  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
13948  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
13949  m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
13950 #if VMA_DEDICATED_ALLOCATION
13951  if(m_UseKhrDedicatedAllocation)
13952  {
13953  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
13954  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
13955  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
13956  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
13957  }
13958 #endif // #if VMA_DEDICATED_ALLOCATION
13959 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
13960 
13961 #define VMA_COPY_IF_NOT_NULL(funcName) \
13962  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
13963 
13964  if(pVulkanFunctions != VMA_NULL)
13965  {
13966  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
13967  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
13968  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
13969  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
13970  VMA_COPY_IF_NOT_NULL(vkMapMemory);
13971  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
13972  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
13973  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
13974  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
13975  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
13976  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
13977  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
13978  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
13979  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
13980  VMA_COPY_IF_NOT_NULL(vkCreateImage);
13981  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
13982  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
13983 #if VMA_DEDICATED_ALLOCATION
13984  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
13985  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
13986 #endif
13987  }
13988 
13989 #undef VMA_COPY_IF_NOT_NULL
13990 
13991  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
13992  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
13993  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
13994  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
13995  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
13996  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
13997  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
13998  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
13999  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14000  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14001  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14002  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14003  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14004  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14005  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14006  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14007  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14008  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14009  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14010 #if VMA_DEDICATED_ALLOCATION
14011  if(m_UseKhrDedicatedAllocation)
14012  {
14013  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14014  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14015  }
14016 #endif
14017 }
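// [Added note - illustrative sketch] When VMA_STATIC_VULKAN_FUNCTIONS is defined to 0, the
// pointers asserted above must be supplied through VmaAllocatorCreateInfo::pVulkanFunctions.
// A minimal example that forwards statically linked entry points (assumed to be available):
//
//     VmaVulkanFunctions vulkanFunctions = {};
//     vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
//     vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
//     vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
//     vulkanFunctions.vkFreeMemory = vkFreeMemory;
//     vulkanFunctions.vkMapMemory = vkMapMemory;
//     vulkanFunctions.vkUnmapMemory = vkUnmapMemory;
//     vulkanFunctions.vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges;
//     vulkanFunctions.vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges;
//     vulkanFunctions.vkBindBufferMemory = vkBindBufferMemory;
//     vulkanFunctions.vkBindImageMemory = vkBindImageMemory;
//     vulkanFunctions.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
//     vulkanFunctions.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
//     vulkanFunctions.vkCreateBuffer = vkCreateBuffer;
//     vulkanFunctions.vkDestroyBuffer = vkDestroyBuffer;
//     vulkanFunctions.vkCreateImage = vkCreateImage;
//     vulkanFunctions.vkDestroyImage = vkDestroyImage;
//     vulkanFunctions.vkCmdCopyBuffer = vkCmdCopyBuffer;
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.pVulkanFunctions = &vulkanFunctions;
//     // ... physicalDevice, device, etc. filled in as usual.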
14018 
14019 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14020 {
14021  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14022  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14023  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14024  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14025 }
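// [Added note - worked example] With the default VMA_SMALL_HEAP_MAX_SIZE of 1 GiB and
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE of 256 MiB, a 256 MiB heap counts as "small", so its
// memory types get 256 MiB / 8 = 32 MiB blocks, while an 8 GiB heap uses the preferred
// large-heap block size (256 MiB unless overridden via preferredLargeHeapBlockSize).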
14026 
14027 VkResult VmaAllocator_T::AllocateMemoryOfType(
14028  VkDeviceSize size,
14029  VkDeviceSize alignment,
14030  bool dedicatedAllocation,
14031  VkBuffer dedicatedBuffer,
14032  VkImage dedicatedImage,
14033  const VmaAllocationCreateInfo& createInfo,
14034  uint32_t memTypeIndex,
14035  VmaSuballocationType suballocType,
14036  VmaAllocation* pAllocation)
14037 {
14038  VMA_ASSERT(pAllocation != VMA_NULL);
 14039  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
14040 
14041  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14042 
14043  // If memory type is not HOST_VISIBLE, disable MAPPED.
14044  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14045  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14046  {
14047  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14048  }
14049 
14050  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14051  VMA_ASSERT(blockVector);
14052 
14053  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14054  bool preferDedicatedMemory =
14055  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14056  dedicatedAllocation ||
 14057  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
14058  size > preferredBlockSize / 2;
14059 
14060  if(preferDedicatedMemory &&
14061  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14062  finalCreateInfo.pool == VK_NULL_HANDLE)
14063  {
 14064  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
 14065  }
14066 
14067  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14068  {
14069  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14070  {
14071  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14072  }
14073  else
14074  {
14075  return AllocateDedicatedMemory(
14076  size,
14077  suballocType,
14078  memTypeIndex,
14079  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14080  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14081  finalCreateInfo.pUserData,
14082  dedicatedBuffer,
14083  dedicatedImage,
14084  pAllocation);
14085  }
14086  }
14087  else
14088  {
14089  VkResult res = blockVector->Allocate(
14090  VK_NULL_HANDLE, // hCurrentPool
14091  m_CurrentFrameIndex.load(),
14092  size,
14093  alignment,
14094  finalCreateInfo,
14095  suballocType,
14096  pAllocation);
14097  if(res == VK_SUCCESS)
14098  {
14099  return res;
14100  }
14101 
 14102  // Block allocation failed: Try dedicated memory.
14103  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14104  {
14105  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14106  }
14107  else
14108  {
14109  res = AllocateDedicatedMemory(
14110  size,
14111  suballocType,
14112  memTypeIndex,
14113  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14114  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14115  finalCreateInfo.pUserData,
14116  dedicatedBuffer,
14117  dedicatedImage,
14118  pAllocation);
14119  if(res == VK_SUCCESS)
14120  {
 14121  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
14122  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14123  return VK_SUCCESS;
14124  }
14125  else
14126  {
14127  // Everything failed: Return error code.
14128  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14129  return res;
14130  }
14131  }
14132  }
14133 }
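// [Added note - illustrative sketch] The heuristic above promotes any allocation larger than
// half the preferred block size to its own VkDeviceMemory. The same behaviour can be forced
// from user code with the dedicated-memory flag, e.g. for a hypothetical large buffer:
//
//     VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufCreateInfo.size = 256ull * 1024 * 1024;
//     bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
//
//     VkBuffer buffer = VK_NULL_HANDLE;
//     VmaAllocation allocation = VK_NULL_HANDLE;
//     vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);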
14134 
14135 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14136  VkDeviceSize size,
14137  VmaSuballocationType suballocType,
14138  uint32_t memTypeIndex,
14139  bool map,
14140  bool isUserDataString,
14141  void* pUserData,
14142  VkBuffer dedicatedBuffer,
14143  VkImage dedicatedImage,
14144  VmaAllocation* pAllocation)
14145 {
14146  VMA_ASSERT(pAllocation);
14147 
14148  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14149  allocInfo.memoryTypeIndex = memTypeIndex;
14150  allocInfo.allocationSize = size;
14151 
14152 #if VMA_DEDICATED_ALLOCATION
14153  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14154  if(m_UseKhrDedicatedAllocation)
14155  {
14156  if(dedicatedBuffer != VK_NULL_HANDLE)
14157  {
14158  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14159  dedicatedAllocInfo.buffer = dedicatedBuffer;
14160  allocInfo.pNext = &dedicatedAllocInfo;
14161  }
14162  else if(dedicatedImage != VK_NULL_HANDLE)
14163  {
14164  dedicatedAllocInfo.image = dedicatedImage;
14165  allocInfo.pNext = &dedicatedAllocInfo;
14166  }
14167  }
14168 #endif // #if VMA_DEDICATED_ALLOCATION
14169 
14170  // Allocate VkDeviceMemory.
14171  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14172  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14173  if(res < 0)
14174  {
14175  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14176  return res;
14177  }
14178 
14179  void* pMappedData = VMA_NULL;
14180  if(map)
14181  {
14182  res = (*m_VulkanFunctions.vkMapMemory)(
14183  m_hDevice,
14184  hMemory,
14185  0,
14186  VK_WHOLE_SIZE,
14187  0,
14188  &pMappedData);
14189  if(res < 0)
14190  {
14191  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14192  FreeVulkanMemory(memTypeIndex, size, hMemory);
14193  return res;
14194  }
14195  }
14196 
14197  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
14198  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14199  (*pAllocation)->SetUserData(this, pUserData);
14200  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14201  {
14202  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14203  }
14204 
14205  // Register it in m_pDedicatedAllocations.
14206  {
14207  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14208  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14209  VMA_ASSERT(pDedicatedAllocations);
14210  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
14211  }
14212 
14213  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
14214 
14215  return VK_SUCCESS;
14216 }
14217 
14218 void VmaAllocator_T::GetBufferMemoryRequirements(
14219  VkBuffer hBuffer,
14220  VkMemoryRequirements& memReq,
14221  bool& requiresDedicatedAllocation,
14222  bool& prefersDedicatedAllocation) const
14223 {
14224 #if VMA_DEDICATED_ALLOCATION
14225  if(m_UseKhrDedicatedAllocation)
14226  {
14227  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14228  memReqInfo.buffer = hBuffer;
14229 
14230  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14231 
14232  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14233  memReq2.pNext = &memDedicatedReq;
14234 
14235  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14236 
14237  memReq = memReq2.memoryRequirements;
14238  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14239  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14240  }
14241  else
14242 #endif // #if VMA_DEDICATED_ALLOCATION
14243  {
14244  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14245  requiresDedicatedAllocation = false;
14246  prefersDedicatedAllocation = false;
14247  }
14248 }
14249 
14250 void VmaAllocator_T::GetImageMemoryRequirements(
14251  VkImage hImage,
14252  VkMemoryRequirements& memReq,
14253  bool& requiresDedicatedAllocation,
14254  bool& prefersDedicatedAllocation) const
14255 {
14256 #if VMA_DEDICATED_ALLOCATION
14257  if(m_UseKhrDedicatedAllocation)
14258  {
14259  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14260  memReqInfo.image = hImage;
14261 
14262  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14263 
14264  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14265  memReq2.pNext = &memDedicatedReq;
14266 
14267  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14268 
14269  memReq = memReq2.memoryRequirements;
14270  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14271  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14272  }
14273  else
14274 #endif // #if VMA_DEDICATED_ALLOCATION
14275  {
14276  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14277  requiresDedicatedAllocation = false;
14278  prefersDedicatedAllocation = false;
14279  }
14280 }
14281 
14282 VkResult VmaAllocator_T::AllocateMemory(
14283  const VkMemoryRequirements& vkMemReq,
14284  bool requiresDedicatedAllocation,
14285  bool prefersDedicatedAllocation,
14286  VkBuffer dedicatedBuffer,
14287  VkImage dedicatedImage,
14288  const VmaAllocationCreateInfo& createInfo,
14289  VmaSuballocationType suballocType,
14290  VmaAllocation* pAllocation)
14291 {
14292  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14293 
14294  if(vkMemReq.size == 0)
14295  {
14296  return VK_ERROR_VALIDATION_FAILED_EXT;
14297  }
14298  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14299  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14300  {
14301  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14302  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14303  }
14304  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
 14305  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
 14306  {
14307  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14308  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14309  }
14310  if(requiresDedicatedAllocation)
14311  {
14312  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14313  {
14314  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14315  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14316  }
14317  if(createInfo.pool != VK_NULL_HANDLE)
14318  {
14319  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14320  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14321  }
14322  }
14323  if((createInfo.pool != VK_NULL_HANDLE) &&
14324  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14325  {
14326  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14327  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14328  }
14329 
14330  if(createInfo.pool != VK_NULL_HANDLE)
14331  {
14332  const VkDeviceSize alignmentForPool = VMA_MAX(
14333  vkMemReq.alignment,
14334  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14335  return createInfo.pool->m_BlockVector.Allocate(
14336  createInfo.pool,
14337  m_CurrentFrameIndex.load(),
14338  vkMemReq.size,
14339  alignmentForPool,
14340  createInfo,
14341  suballocType,
14342  pAllocation);
14343  }
14344  else
14345  {
 14346  // Bit mask of Vulkan memory types acceptable for this allocation.
14347  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14348  uint32_t memTypeIndex = UINT32_MAX;
14349  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14350  if(res == VK_SUCCESS)
14351  {
14352  VkDeviceSize alignmentForMemType = VMA_MAX(
14353  vkMemReq.alignment,
14354  GetMemoryTypeMinAlignment(memTypeIndex));
14355 
14356  res = AllocateMemoryOfType(
14357  vkMemReq.size,
14358  alignmentForMemType,
14359  requiresDedicatedAllocation || prefersDedicatedAllocation,
14360  dedicatedBuffer,
14361  dedicatedImage,
14362  createInfo,
14363  memTypeIndex,
14364  suballocType,
14365  pAllocation);
14366  // Succeeded on first try.
14367  if(res == VK_SUCCESS)
14368  {
14369  return res;
14370  }
14371  // Allocation from this memory type failed. Try other compatible memory types.
14372  else
14373  {
14374  for(;;)
14375  {
14376  // Remove old memTypeIndex from list of possibilities.
14377  memoryTypeBits &= ~(1u << memTypeIndex);
14378  // Find alternative memTypeIndex.
14379  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14380  if(res == VK_SUCCESS)
14381  {
14382  alignmentForMemType = VMA_MAX(
14383  vkMemReq.alignment,
14384  GetMemoryTypeMinAlignment(memTypeIndex));
14385 
14386  res = AllocateMemoryOfType(
14387  vkMemReq.size,
14388  alignmentForMemType,
14389  requiresDedicatedAllocation || prefersDedicatedAllocation,
14390  dedicatedBuffer,
14391  dedicatedImage,
14392  createInfo,
14393  memTypeIndex,
14394  suballocType,
14395  pAllocation);
14396  // Allocation from this alternative memory type succeeded.
14397  if(res == VK_SUCCESS)
14398  {
14399  return res;
14400  }
14401  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14402  }
14403  // No other matching memory type index could be found.
14404  else
14405  {
14406  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14407  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14408  }
14409  }
14410  }
14411  }
 14412  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14413  else
14414  return res;
14415  }
14416 }
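// [Added note - illustrative sketch] The fallback loop above retries other compatible memory
// types before giving up. The same type selection is exposed to user code through
// vmaFindMemoryTypeIndex() (names such as `allocator` and `memReq` are hypothetical):
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
//
//     uint32_t memTypeIndex = UINT32_MAX;
//     VkResult res = vmaFindMemoryTypeIndex(
//         allocator,
//         memReq.memoryTypeBits, // from vkGetBufferMemoryRequirements()
//         &allocCreateInfo,
//         &memTypeIndex);
//     // res == VK_ERROR_FEATURE_NOT_PRESENT means no memory type satisfies the request.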
14417 
14418 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
14419 {
14420  VMA_ASSERT(allocation);
14421 
14422  if(TouchAllocation(allocation))
14423  {
14424  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14425  {
14426  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14427  }
14428 
14429  switch(allocation->GetType())
14430  {
14431  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14432  {
14433  VmaBlockVector* pBlockVector = VMA_NULL;
14434  VmaPool hPool = allocation->GetPool();
14435  if(hPool != VK_NULL_HANDLE)
14436  {
14437  pBlockVector = &hPool->m_BlockVector;
14438  }
14439  else
14440  {
14441  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14442  pBlockVector = m_pBlockVectors[memTypeIndex];
14443  }
14444  pBlockVector->Free(allocation);
14445  }
14446  break;
14447  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14448  FreeDedicatedMemory(allocation);
14449  break;
14450  default:
14451  VMA_ASSERT(0);
14452  }
14453  }
14454 
14455  allocation->SetUserData(this, VMA_NULL);
14456  vma_delete(this, allocation);
14457 }
14458 
14459 VkResult VmaAllocator_T::ResizeAllocation(
14460  const VmaAllocation alloc,
14461  VkDeviceSize newSize)
14462 {
14463  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14464  {
14465  return VK_ERROR_VALIDATION_FAILED_EXT;
14466  }
14467  if(newSize == alloc->GetSize())
14468  {
14469  return VK_SUCCESS;
14470  }
14471 
14472  switch(alloc->GetType())
14473  {
14474  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14475  return VK_ERROR_FEATURE_NOT_PRESENT;
14476  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14477  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14478  {
14479  alloc->ChangeSize(newSize);
14480  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14481  return VK_SUCCESS;
14482  }
14483  else
14484  {
14485  return VK_ERROR_OUT_OF_POOL_MEMORY;
14486  }
14487  default:
14488  VMA_ASSERT(0);
14489  return VK_ERROR_VALIDATION_FAILED_EXT;
14490  }
14491 }
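// [Added note - illustrative sketch] In-place resize is only attempted for block allocations;
// dedicated allocations return VK_ERROR_FEATURE_NOT_PRESENT. From user code, assuming an
// existing `allocator` and `allocation` (hypothetical names):
//
//     VkResult res = vmaResizeAllocation(allocator, allocation, newSizeInBytes);
//     if(res == VK_ERROR_OUT_OF_POOL_MEMORY)
//     {
//         // No free space directly after this allocation inside its block -
//         // a new allocation plus a data copy is needed instead.
//     }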
14492 
14493 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14494 {
14495  // Initialize.
14496  InitStatInfo(pStats->total);
14497  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14498  InitStatInfo(pStats->memoryType[i]);
14499  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14500  InitStatInfo(pStats->memoryHeap[i]);
14501 
14502  // Process default pools.
14503  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14504  {
14505  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14506  VMA_ASSERT(pBlockVector);
14507  pBlockVector->AddStats(pStats);
14508  }
14509 
14510  // Process custom pools.
14511  {
14512  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14513  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14514  {
14515  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14516  }
14517  }
14518 
14519  // Process dedicated allocations.
14520  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14521  {
14522  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14523  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14524  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14525  VMA_ASSERT(pDedicatedAllocVector);
14526  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14527  {
14528  VmaStatInfo allocationStatInfo;
14529  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14530  VmaAddStatInfo(pStats->total, allocationStatInfo);
14531  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14532  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14533  }
14534  }
14535 
14536  // Postprocess.
14537  VmaPostprocessCalcStatInfo(pStats->total);
14538  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14539  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14540  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14541  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14542 }
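// [Added note - illustrative sketch] The aggregated numbers produced above are available to
// user code via vmaCalculateStats(), assuming an existing `allocator`:
//
//     VmaStats stats;
//     vmaCalculateStats(allocator, &stats);
//     printf("Used: %llu B in %u allocations, unused: %llu B\n",
//         stats.total.usedBytes,
//         stats.total.allocationCount,
//         stats.total.unusedBytes);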
14543 
14544 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
14545 
14546 VkResult VmaAllocator_T::DefragmentationBegin(
14547  const VmaDefragmentationInfo2& info,
14548  VmaDefragmentationStats* pStats,
14549  VmaDefragmentationContext* pContext)
14550 {
14551  if(info.pAllocationsChanged != VMA_NULL)
14552  {
14553  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
14554  }
14555 
14556  *pContext = vma_new(this, VmaDefragmentationContext_T)(
14557  this, m_CurrentFrameIndex.load(), info.flags, pStats);
14558 
14559  (*pContext)->AddPools(info.poolCount, info.pPools);
14560  (*pContext)->AddAllocations(
 14561  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
 14562 
14563  VkResult res = (*pContext)->Defragment(
 14564  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
 14565  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
 14566  info.commandBuffer, pStats);
14567 
14568  if(res != VK_NOT_READY)
14569  {
14570  vma_delete(this, *pContext);
14571  *pContext = VMA_NULL;
14572  }
14573 
14574  return res;
14575 }
14576 
14577 VkResult VmaAllocator_T::DefragmentationEnd(
14578  VmaDefragmentationContext context)
14579 {
14580  vma_delete(this, context);
14581  return VK_SUCCESS;
14582 }
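// [Added note - illustrative sketch] DefragmentationBegin()/DefragmentationEnd() back the
// public vmaDefragmentationBegin()/vmaDefragmentationEnd() pair. A CPU-only pass over a set
// of allocations (vector names are hypothetical) could look like:
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = (uint32_t)allocations.size();
//     defragInfo.pAllocations = allocations.data();
//     defragInfo.pAllocationsChanged = allocationsChanged.data(); // one VkBool32 per allocation
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//
//     VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//     vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
//     // ... recreate buffers/images bound to allocations that were moved ...
//     vmaDefragmentationEnd(allocator, defragCtx);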
14583 
14584 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
14585 {
14586  if(hAllocation->CanBecomeLost())
14587  {
14588  /*
14589  Warning: This is a carefully designed algorithm.
14590  Do not modify unless you really know what you're doing :)
14591  */
14592  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14593  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14594  for(;;)
14595  {
14596  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14597  {
14598  pAllocationInfo->memoryType = UINT32_MAX;
14599  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
14600  pAllocationInfo->offset = 0;
14601  pAllocationInfo->size = hAllocation->GetSize();
14602  pAllocationInfo->pMappedData = VMA_NULL;
14603  pAllocationInfo->pUserData = hAllocation->GetUserData();
14604  return;
14605  }
14606  else if(localLastUseFrameIndex == localCurrFrameIndex)
14607  {
14608  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14609  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14610  pAllocationInfo->offset = hAllocation->GetOffset();
14611  pAllocationInfo->size = hAllocation->GetSize();
14612  pAllocationInfo->pMappedData = VMA_NULL;
14613  pAllocationInfo->pUserData = hAllocation->GetUserData();
14614  return;
14615  }
14616  else // Last use time earlier than current time.
14617  {
14618  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14619  {
14620  localLastUseFrameIndex = localCurrFrameIndex;
14621  }
14622  }
14623  }
14624  }
14625  else
14626  {
14627 #if VMA_STATS_STRING_ENABLED
14628  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14629  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14630  for(;;)
14631  {
14632  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14633  if(localLastUseFrameIndex == localCurrFrameIndex)
14634  {
14635  break;
14636  }
14637  else // Last use time earlier than current time.
14638  {
14639  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14640  {
14641  localLastUseFrameIndex = localCurrFrameIndex;
14642  }
14643  }
14644  }
14645 #endif
14646 
14647  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14648  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14649  pAllocationInfo->offset = hAllocation->GetOffset();
14650  pAllocationInfo->size = hAllocation->GetSize();
14651  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
14652  pAllocationInfo->pUserData = hAllocation->GetUserData();
14653  }
14654 }
14655 
14656 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
14657 {
14658  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
14659  if(hAllocation->CanBecomeLost())
14660  {
14661  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14662  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14663  for(;;)
14664  {
14665  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14666  {
14667  return false;
14668  }
14669  else if(localLastUseFrameIndex == localCurrFrameIndex)
14670  {
14671  return true;
14672  }
14673  else // Last use time earlier than current time.
14674  {
14675  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14676  {
14677  localLastUseFrameIndex = localCurrFrameIndex;
14678  }
14679  }
14680  }
14681  }
14682  else
14683  {
14684 #if VMA_STATS_STRING_ENABLED
14685  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14686  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14687  for(;;)
14688  {
14689  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14690  if(localLastUseFrameIndex == localCurrFrameIndex)
14691  {
14692  break;
14693  }
14694  else // Last use time earlier than current time.
14695  {
14696  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14697  {
14698  localLastUseFrameIndex = localCurrFrameIndex;
14699  }
14700  }
14701  }
14702 #endif
14703 
14704  return true;
14705  }
14706 }
14707 
14708 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
14709 {
14710  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
14711 
14712  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
14713 
14714  if(newCreateInfo.maxBlockCount == 0)
14715  {
14716  newCreateInfo.maxBlockCount = SIZE_MAX;
14717  }
14718  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
14719  {
14720  return VK_ERROR_INITIALIZATION_FAILED;
14721  }
14722 
14723  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
14724 
14725  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
14726 
14727  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
14728  if(res != VK_SUCCESS)
14729  {
14730  vma_delete(this, *pPool);
14731  *pPool = VMA_NULL;
14732  return res;
14733  }
14734 
14735  // Add to m_Pools.
14736  {
14737  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
14738  (*pPool)->SetId(m_NextPoolId++);
14739  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
14740  }
14741 
14742  return VK_SUCCESS;
14743 }
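// [Added note - illustrative sketch] CreatePool() above is reached through vmaCreatePool().
// Assuming `memTypeIndex` was obtained from vmaFindMemoryTypeIndex():
//
//     VmaPoolCreateInfo poolCreateInfo = {};
//     poolCreateInfo.memoryTypeIndex = memTypeIndex;
//     poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 0 would mean "use the default size"
//     poolCreateInfo.minBlockCount = 1;
//     poolCreateInfo.maxBlockCount = 8;               // 0 would mean "no limit"
//
//     VmaPool pool = VK_NULL_HANDLE;
//     VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
//     // Later: vmaDestroyPool(allocator, pool);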
14744 
14745 void VmaAllocator_T::DestroyPool(VmaPool pool)
14746 {
14747  // Remove from m_Pools.
14748  {
14749  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
14750  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
14751  VMA_ASSERT(success && "Pool not found in Allocator.");
14752  }
14753 
14754  vma_delete(this, pool);
14755 }
14756 
14757 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
14758 {
14759  pool->m_BlockVector.GetPoolStats(pPoolStats);
14760 }
14761 
14762 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
14763 {
14764  m_CurrentFrameIndex.store(frameIndex);
14765 }
14766 
14767 void VmaAllocator_T::MakePoolAllocationsLost(
14768  VmaPool hPool,
14769  size_t* pLostAllocationCount)
14770 {
14771  hPool->m_BlockVector.MakePoolAllocationsLost(
14772  m_CurrentFrameIndex.load(),
14773  pLostAllocationCount);
14774 }
14775 
14776 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
14777 {
14778  return hPool->m_BlockVector.CheckCorruption();
14779 }
14780 
14781 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
14782 {
14783  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
14784 
14785  // Process default pools.
14786  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14787  {
14788  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
14789  {
14790  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14791  VMA_ASSERT(pBlockVector);
14792  VkResult localRes = pBlockVector->CheckCorruption();
14793  switch(localRes)
14794  {
14795  case VK_ERROR_FEATURE_NOT_PRESENT:
14796  break;
14797  case VK_SUCCESS:
14798  finalRes = VK_SUCCESS;
14799  break;
14800  default:
14801  return localRes;
14802  }
14803  }
14804  }
14805 
14806  // Process custom pools.
14807  {
14808  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14809  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14810  {
14811  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
14812  {
14813  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
14814  switch(localRes)
14815  {
14816  case VK_ERROR_FEATURE_NOT_PRESENT:
14817  break;
14818  case VK_SUCCESS:
14819  finalRes = VK_SUCCESS;
14820  break;
14821  default:
14822  return localRes;
14823  }
14824  }
14825  }
14826  }
14827 
14828  return finalRes;
14829 }
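// [Added note - illustrative sketch] Corruption checking only has an effect when the
// implementation is compiled with a nonzero margin and detection enabled, e.g.:
//
//     #define VMA_DEBUG_MARGIN 16
//     #define VMA_DEBUG_DETECT_CORRUPTION 1
//     #define VMA_IMPLEMENTATION
//     #include "vk_mem_alloc.h"
//
// At runtime, every memory type can then be validated:
//
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
//     // VK_ERROR_FEATURE_NOT_PRESENT: no memory type supported the check.
//     // VK_ERROR_VALIDATION_FAILED_EXT: corruption found around some allocation.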
14830 
14831 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
14832 {
14833  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
14834  (*pAllocation)->InitLost();
14835 }
14836 
14837 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
14838 {
14839  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
14840 
14841  VkResult res;
14842  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
14843  {
14844  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
14845  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
14846  {
14847  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
14848  if(res == VK_SUCCESS)
14849  {
14850  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
14851  }
14852  }
14853  else
14854  {
14855  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
14856  }
14857  }
14858  else
14859  {
14860  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
14861  }
14862 
14863  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
14864  {
14865  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
14866  }
14867 
14868  return res;
14869 }
14870 
14871 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
14872 {
14873  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
14874  {
14875  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
14876  }
14877 
14878  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
14879 
14880  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
14881  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
14882  {
14883  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
14884  m_HeapSizeLimit[heapIndex] += size;
14885  }
14886 }
14887 
14888 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
14889 {
14890  if(hAllocation->CanBecomeLost())
14891  {
14892  return VK_ERROR_MEMORY_MAP_FAILED;
14893  }
14894 
14895  switch(hAllocation->GetType())
14896  {
14897  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14898  {
14899  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
14900  char *pBytes = VMA_NULL;
14901  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
14902  if(res == VK_SUCCESS)
14903  {
14904  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
14905  hAllocation->BlockAllocMap();
14906  }
14907  return res;
14908  }
14909  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14910  return hAllocation->DedicatedAllocMap(this, ppData);
14911  default:
14912  VMA_ASSERT(0);
14913  return VK_ERROR_MEMORY_MAP_FAILED;
14914  }
14915 }
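// [Added note - illustrative sketch] Map()/Unmap() implement the public vmaMapMemory() and
// vmaUnmapMemory(). Typical short-lived mapping (hypothetical `allocation`, `srcData`, `srcSize`):
//
//     void* pData = nullptr;
//     VkResult res = vmaMapMemory(allocator, allocation, &pData);
//     if(res == VK_SUCCESS)
//     {
//         memcpy(pData, srcData, srcSize);
//         vmaUnmapMemory(allocator, allocation);
//     }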
14916 
14917 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
14918 {
14919  switch(hAllocation->GetType())
14920  {
14921  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14922  {
14923  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
14924  hAllocation->BlockAllocUnmap();
14925  pBlock->Unmap(this, 1);
14926  }
14927  break;
14928  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14929  hAllocation->DedicatedAllocUnmap(this);
14930  break;
14931  default:
14932  VMA_ASSERT(0);
14933  }
14934 }
14935 
14936 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
14937 {
14938  VkResult res = VK_SUCCESS;
14939  switch(hAllocation->GetType())
14940  {
14941  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14942  res = GetVulkanFunctions().vkBindBufferMemory(
14943  m_hDevice,
14944  hBuffer,
14945  hAllocation->GetMemory(),
14946  0); //memoryOffset
14947  break;
14948  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14949  {
14950  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
14951  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
14952  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
14953  break;
14954  }
14955  default:
14956  VMA_ASSERT(0);
14957  }
14958  return res;
14959 }
14960 
14961 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
14962 {
14963  VkResult res = VK_SUCCESS;
14964  switch(hAllocation->GetType())
14965  {
14966  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14967  res = GetVulkanFunctions().vkBindImageMemory(
14968  m_hDevice,
14969  hImage,
14970  hAllocation->GetMemory(),
14971  0); //memoryOffset
14972  break;
14973  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14974  {
14975  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
14976  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
14977  res = pBlock->BindImageMemory(this, hAllocation, hImage);
14978  break;
14979  }
14980  default:
14981  VMA_ASSERT(0);
14982  }
14983  return res;
14984 }
14985 
14986 void VmaAllocator_T::FlushOrInvalidateAllocation(
14987  VmaAllocation hAllocation,
14988  VkDeviceSize offset, VkDeviceSize size,
14989  VMA_CACHE_OPERATION op)
14990 {
14991  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
14992  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
14993  {
14994  const VkDeviceSize allocationSize = hAllocation->GetSize();
14995  VMA_ASSERT(offset <= allocationSize);
14996 
14997  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
14998 
14999  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15000  memRange.memory = hAllocation->GetMemory();
15001 
15002  switch(hAllocation->GetType())
15003  {
15004  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15005  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15006  if(size == VK_WHOLE_SIZE)
15007  {
15008  memRange.size = allocationSize - memRange.offset;
15009  }
15010  else
15011  {
15012  VMA_ASSERT(offset + size <= allocationSize);
15013  memRange.size = VMA_MIN(
15014  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15015  allocationSize - memRange.offset);
15016  }
15017  break;
15018 
15019  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15020  {
15021  // 1. Still within this allocation.
15022  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15023  if(size == VK_WHOLE_SIZE)
15024  {
15025  size = allocationSize - offset;
15026  }
15027  else
15028  {
15029  VMA_ASSERT(offset + size <= allocationSize);
15030  }
15031  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15032 
15033  // 2. Adjust to whole block.
15034  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15035  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15036  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15037  memRange.offset += allocationOffset;
15038  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15039 
15040  break;
15041  }
15042 
15043  default:
15044  VMA_ASSERT(0);
15045  }
15046 
15047  switch(op)
15048  {
15049  case VMA_CACHE_FLUSH:
15050  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15051  break;
15052  case VMA_CACHE_INVALIDATE:
15053  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15054  break;
15055  default:
15056  VMA_ASSERT(0);
15057  }
15058  }
15059  // else: Just ignore this call.
15060 }
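// Illustrative sketch (not part of vk_mem_alloc.h): the same rounding as above, written out with
// plain integer arithmetic and hypothetical values, assuming nonCoherentAtomSize is a power of two.
// VmaAlignDown/VmaAlignUp used above perform the equivalent computation.
#include <algorithm>
#include <cstdint>

static void ExampleNonCoherentRange()
{
    const uint64_t atom        = 64;       // assumed nonCoherentAtomSize
    const uint64_t allocOffset = 192;      // offset of the allocation inside its block
    const uint64_t blockSize   = 65536;    // size of the whole VkDeviceMemory block
    const uint64_t offset = 10, size = 20; // caller-requested sub-range

    uint64_t rangeOffset = offset & ~(atom - 1);                                     // align down
    uint64_t rangeSize   = (size + (offset - rangeOffset) + atom - 1) & ~(atom - 1); // align up
    rangeOffset += allocOffset;                                                      // adjust to whole block
    rangeSize = std::min(rangeSize, blockSize - rangeOffset);                        // clamp to the block
    // Result: rangeOffset == 192, rangeSize == 64 - both multiples of nonCoherentAtomSize, as
    // vkFlushMappedMemoryRanges/vkInvalidateMappedMemoryRanges require for non-coherent memory.
    (void)rangeOffset; (void)rangeSize;
}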
15061 
15062 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15063 {
15064  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15065 
15066  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15067  {
15068  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15069  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15070  VMA_ASSERT(pDedicatedAllocations);
15071  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15072  VMA_ASSERT(success);
15073  }
15074 
15075  VkDeviceMemory hMemory = allocation->GetMemory();
15076 
15077  /*
15078  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15079  before vkFreeMemory.
15080 
15081  if(allocation->GetMappedData() != VMA_NULL)
15082  {
15083  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15084  }
15085  */
15086 
15087  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15088 
15089  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15090 }
15091 
15092 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15093 {
15094  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15095  !hAllocation->CanBecomeLost() &&
15096  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15097  {
15098  void* pData = VMA_NULL;
15099  VkResult res = Map(hAllocation, &pData);
15100  if(res == VK_SUCCESS)
15101  {
15102  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15103  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15104  Unmap(hAllocation);
15105  }
15106  else
15107  {
15108  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15109  }
15110  }
15111 }
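// Illustrative configuration sketch (not part of the library source): FillAllocation has an effect
// only when VMA_DEBUG_INITIALIZE_ALLOCATIONS is defined to 1 before the implementation is compiled,
// typically in the single translation unit that provides it:
//
//   #define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"
//
// Host-visible, non-lost allocations are then filled with a bit pattern on creation and again right
// before being freed, which helps catch reads of uninitialized or already-freed memory.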
15112 
15113 #if VMA_STATS_STRING_ENABLED
15114 
15115 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15116 {
15117  bool dedicatedAllocationsStarted = false;
15118  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15119  {
15120  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15121  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15122  VMA_ASSERT(pDedicatedAllocVector);
15123  if(pDedicatedAllocVector->empty() == false)
15124  {
15125  if(dedicatedAllocationsStarted == false)
15126  {
15127  dedicatedAllocationsStarted = true;
15128  json.WriteString("DedicatedAllocations");
15129  json.BeginObject();
15130  }
15131 
15132  json.BeginString("Type ");
15133  json.ContinueString(memTypeIndex);
15134  json.EndString();
15135 
15136  json.BeginArray();
15137 
15138  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15139  {
15140  json.BeginObject(true);
15141  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15142  hAlloc->PrintParameters(json);
15143  json.EndObject();
15144  }
15145 
15146  json.EndArray();
15147  }
15148  }
15149  if(dedicatedAllocationsStarted)
15150  {
15151  json.EndObject();
15152  }
15153 
15154  {
15155  bool allocationsStarted = false;
15156  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15157  {
15158  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15159  {
15160  if(allocationsStarted == false)
15161  {
15162  allocationsStarted = true;
15163  json.WriteString("DefaultPools");
15164  json.BeginObject();
15165  }
15166 
15167  json.BeginString("Type ");
15168  json.ContinueString(memTypeIndex);
15169  json.EndString();
15170 
15171  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15172  }
15173  }
15174  if(allocationsStarted)
15175  {
15176  json.EndObject();
15177  }
15178  }
15179 
15180  // Custom pools
15181  {
15182  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15183  const size_t poolCount = m_Pools.size();
15184  if(poolCount > 0)
15185  {
15186  json.WriteString("Pools");
15187  json.BeginObject();
15188  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15189  {
15190  json.BeginString();
15191  json.ContinueString(m_Pools[poolIndex]->GetId());
15192  json.EndString();
15193 
15194  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15195  }
15196  json.EndObject();
15197  }
15198  }
15199 }
15200 
15201 #endif // #if VMA_STATS_STRING_ENABLED
15202 
15203 ////////////////////////////////////////////////////////////////////////////////
15204 // Public interface
15205 
15206 VkResult vmaCreateAllocator(
15207  const VmaAllocatorCreateInfo* pCreateInfo,
15208  VmaAllocator* pAllocator)
15209 {
15210  VMA_ASSERT(pCreateInfo && pAllocator);
15211  VMA_DEBUG_LOG("vmaCreateAllocator");
15212  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15213  return (*pAllocator)->Init(pCreateInfo);
15214 }
15215 
15216 void vmaDestroyAllocator(
15217  VmaAllocator allocator)
15218 {
15219  if(allocator != VK_NULL_HANDLE)
15220  {
15221  VMA_DEBUG_LOG("vmaDestroyAllocator");
15222  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15223  vma_delete(&allocationCallbacks, allocator);
15224  }
15225 }
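// Illustrative usage sketch (not part of the library source): creating and destroying an allocator,
// assuming the caller already owns a valid VkPhysicalDevice and VkDevice.
static VkResult ExampleCreateAllocator(
    VkPhysicalDevice physicalDevice,
    VkDevice device,
    VmaAllocator* pOutAllocator)
{
    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice; // required
    allocatorInfo.device = device;                 // required
    // Everything else (heap size limits, custom Vulkan function pointers, callbacks) is optional.
    return vmaCreateAllocator(&allocatorInfo, pOutAllocator);
    // Teardown, after all buffers/images/allocations are gone:
    //   vmaDestroyAllocator(*pOutAllocator); // a VK_NULL_HANDLE allocator is silently ignored
}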
15226 
15227 void vmaGetPhysicalDeviceProperties(
15228  VmaAllocator allocator,
15229  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15230 {
15231  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15232  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15233 }
15234 
15235 void vmaGetMemoryProperties(
15236  VmaAllocator allocator,
15237  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15238 {
15239  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15240  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15241 }
15242 
15243 void vmaGetMemoryTypeProperties(
15244  VmaAllocator allocator,
15245  uint32_t memoryTypeIndex,
15246  VkMemoryPropertyFlags* pFlags)
15247 {
15248  VMA_ASSERT(allocator && pFlags);
15249  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15250  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15251 }
15252 
15253 void vmaSetCurrentFrameIndex(
15254  VmaAllocator allocator,
15255  uint32_t frameIndex)
15256 {
15257  VMA_ASSERT(allocator);
15258  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15259 
15260  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15261 
15262  allocator->SetCurrentFrameIndex(frameIndex);
15263 }
15264 
15265 void vmaCalculateStats(
15266  VmaAllocator allocator,
15267  VmaStats* pStats)
15268 {
15269  VMA_ASSERT(allocator && pStats);
15270  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15271  allocator->CalculateStats(pStats);
15272 }
15273 
15274 #if VMA_STATS_STRING_ENABLED
15275 
15276 void vmaBuildStatsString(
15277  VmaAllocator allocator,
15278  char** ppStatsString,
15279  VkBool32 detailedMap)
15280 {
15281  VMA_ASSERT(allocator && ppStatsString);
15282  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15283 
15284  VmaStringBuilder sb(allocator);
15285  {
15286  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15287  json.BeginObject();
15288 
15289  VmaStats stats;
15290  allocator->CalculateStats(&stats);
15291 
15292  json.WriteString("Total");
15293  VmaPrintStatInfo(json, stats.total);
15294 
15295  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15296  {
15297  json.BeginString("Heap ");
15298  json.ContinueString(heapIndex);
15299  json.EndString();
15300  json.BeginObject();
15301 
15302  json.WriteString("Size");
15303  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15304 
15305  json.WriteString("Flags");
15306  json.BeginArray(true);
15307  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15308  {
15309  json.WriteString("DEVICE_LOCAL");
15310  }
15311  json.EndArray();
15312 
15313  if(stats.memoryHeap[heapIndex].blockCount > 0)
15314  {
15315  json.WriteString("Stats");
15316  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15317  }
15318 
15319  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15320  {
15321  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15322  {
15323  json.BeginString("Type ");
15324  json.ContinueString(typeIndex);
15325  json.EndString();
15326 
15327  json.BeginObject();
15328 
15329  json.WriteString("Flags");
15330  json.BeginArray(true);
15331  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15332  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15333  {
15334  json.WriteString("DEVICE_LOCAL");
15335  }
15336  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15337  {
15338  json.WriteString("HOST_VISIBLE");
15339  }
15340  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15341  {
15342  json.WriteString("HOST_COHERENT");
15343  }
15344  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15345  {
15346  json.WriteString("HOST_CACHED");
15347  }
15348  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15349  {
15350  json.WriteString("LAZILY_ALLOCATED");
15351  }
15352  json.EndArray();
15353 
15354  if(stats.memoryType[typeIndex].blockCount > 0)
15355  {
15356  json.WriteString("Stats");
15357  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15358  }
15359 
15360  json.EndObject();
15361  }
15362  }
15363 
15364  json.EndObject();
15365  }
15366  if(detailedMap == VK_TRUE)
15367  {
15368  allocator->PrintDetailedMap(json);
15369  }
15370 
15371  json.EndObject();
15372  }
15373 
15374  const size_t len = sb.GetLength();
15375  char* const pChars = vma_new_array(allocator, char, len + 1);
15376  if(len > 0)
15377  {
15378  memcpy(pChars, sb.GetData(), len);
15379  }
15380  pChars[len] = '\0';
15381  *ppStatsString = pChars;
15382 }
15383 
15384 void vmaFreeStatsString(
15385  VmaAllocator allocator,
15386  char* pStatsString)
15387 {
15388  if(pStatsString != VMA_NULL)
15389  {
15390  VMA_ASSERT(allocator);
15391  size_t len = strlen(pStatsString);
15392  vma_delete_array(allocator, pStatsString, len + 1);
15393  }
15394 }
15395 
15396 #endif // #if VMA_STATS_STRING_ENABLED
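// Illustrative usage sketch (not part of the library source): dumping allocator statistics as a
// JSON string and releasing it, available when VMA_STATS_STRING_ENABLED is 1 (the default).
static void ExampleDumpStatsJson(VmaAllocator allocator)
{
    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE /*detailedMap: include per-allocation data*/);
    // statsString now holds a null-terminated JSON document; write it to a log or file here.
    vmaFreeStatsString(allocator, statsString); // must be freed through VMA, not free()/delete
}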
15397 
15398 /*
15399 This function is not protected by any mutex because it just reads immutable data.
15400 */
15401 VkResult vmaFindMemoryTypeIndex(
15402  VmaAllocator allocator,
15403  uint32_t memoryTypeBits,
15404  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15405  uint32_t* pMemoryTypeIndex)
15406 {
15407  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15408  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15409  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15410 
15411  if(pAllocationCreateInfo->memoryTypeBits != 0)
15412  {
15413  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15414  }
15415 
15416  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15417  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15418 
15419  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15420  if(mapped)
15421  {
15422  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15423  }
15424 
15425  // Convert usage to requiredFlags and preferredFlags.
15426  switch(pAllocationCreateInfo->usage)
15427  {
15428  case VMA_MEMORY_USAGE_UNKNOWN:
15429  break;
15430  case VMA_MEMORY_USAGE_GPU_ONLY:
15431  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15432  {
15433  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15434  }
15435  break;
15436  case VMA_MEMORY_USAGE_CPU_ONLY:
15437  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15438  break;
15439  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15440  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15441  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15442  {
15443  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15444  }
15445  break;
15446  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15447  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15448  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15449  break;
15450  default:
15451  break;
15452  }
15453 
15454  *pMemoryTypeIndex = UINT32_MAX;
15455  uint32_t minCost = UINT32_MAX;
15456  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15457  memTypeIndex < allocator->GetMemoryTypeCount();
15458  ++memTypeIndex, memTypeBit <<= 1)
15459  {
15460  // This memory type is acceptable according to memoryTypeBits bitmask.
15461  if((memTypeBit & memoryTypeBits) != 0)
15462  {
15463  const VkMemoryPropertyFlags currFlags =
15464  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15465  // This memory type contains requiredFlags.
15466  if((requiredFlags & ~currFlags) == 0)
15467  {
15468  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15469  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15470  // Remember memory type with lowest cost.
15471  if(currCost < minCost)
15472  {
15473  *pMemoryTypeIndex = memTypeIndex;
15474  if(currCost == 0)
15475  {
15476  return VK_SUCCESS;
15477  }
15478  minCost = currCost;
15479  }
15480  }
15481  }
15482  }
15483  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15484 }
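// Illustrative usage sketch (not part of the library source): choosing a memory type for a CPU-side
// staging resource. The memoryTypeBits mask would normally come from vkGetBufferMemoryRequirements /
// vkGetImageMemoryRequirements, or from the ForBufferInfo/ForImageInfo helpers below, which query it
// from a temporary object.
static uint32_t ExampleFindStagingMemoryType(VmaAllocator allocator, uint32_t memoryTypeBits)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // mapped above to HOST_VISIBLE | HOST_COHERENT

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    return (res == VK_SUCCESS) ? memTypeIndex : UINT32_MAX; // VK_ERROR_FEATURE_NOT_PRESENT if none fits
}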
15485 
15486 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15487  VmaAllocator allocator,
15488  const VkBufferCreateInfo* pBufferCreateInfo,
15489  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15490  uint32_t* pMemoryTypeIndex)
15491 {
15492  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15493  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15494  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15495  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15496 
15497  const VkDevice hDev = allocator->m_hDevice;
15498  VkBuffer hBuffer = VK_NULL_HANDLE;
15499  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15500  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15501  if(res == VK_SUCCESS)
15502  {
15503  VkMemoryRequirements memReq = {};
15504  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15505  hDev, hBuffer, &memReq);
15506 
15507  res = vmaFindMemoryTypeIndex(
15508  allocator,
15509  memReq.memoryTypeBits,
15510  pAllocationCreateInfo,
15511  pMemoryTypeIndex);
15512 
15513  allocator->GetVulkanFunctions().vkDestroyBuffer(
15514  hDev, hBuffer, allocator->GetAllocationCallbacks());
15515  }
15516  return res;
15517 }
15518 
15519 VkResult vmaFindMemoryTypeIndexForImageInfo(
15520  VmaAllocator allocator,
15521  const VkImageCreateInfo* pImageCreateInfo,
15522  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15523  uint32_t* pMemoryTypeIndex)
15524 {
15525  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15526  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15527  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15528  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15529 
15530  const VkDevice hDev = allocator->m_hDevice;
15531  VkImage hImage = VK_NULL_HANDLE;
15532  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15533  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15534  if(res == VK_SUCCESS)
15535  {
15536  VkMemoryRequirements memReq = {};
15537  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15538  hDev, hImage, &memReq);
15539 
15540  res = vmaFindMemoryTypeIndex(
15541  allocator,
15542  memReq.memoryTypeBits,
15543  pAllocationCreateInfo,
15544  pMemoryTypeIndex);
15545 
15546  allocator->GetVulkanFunctions().vkDestroyImage(
15547  hDev, hImage, allocator->GetAllocationCallbacks());
15548  }
15549  return res;
15550 }
15551 
15552 VkResult vmaCreatePool(
15553  VmaAllocator allocator,
15554  const VmaPoolCreateInfo* pCreateInfo,
15555  VmaPool* pPool)
15556 {
15557  VMA_ASSERT(allocator && pCreateInfo && pPool);
15558 
15559  VMA_DEBUG_LOG("vmaCreatePool");
15560 
15561  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15562 
15563  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15564 
15565 #if VMA_RECORDING_ENABLED
15566  if(allocator->GetRecorder() != VMA_NULL)
15567  {
15568  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15569  }
15570 #endif
15571 
15572  return res;
15573 }
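// Illustrative usage sketch (not part of the library source): creating a custom pool for a memory
// type index obtained e.g. from vmaFindMemoryTypeIndexForBufferInfo(). Block size and block count
// below are hypothetical.
static VkResult ExampleCreatePool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pOutPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per block; 0 would mean "use default"
    poolCreateInfo.maxBlockCount = 8;               // cap the pool; 0 would mean "unlimited"
    return vmaCreatePool(allocator, &poolCreateInfo, pOutPool);
    // Allocations opt into the pool via VmaAllocationCreateInfo::pool, and the pool must be
    // destroyed with vmaDestroyPool() before the allocator itself.
}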
15574 
15575 void vmaDestroyPool(
15576  VmaAllocator allocator,
15577  VmaPool pool)
15578 {
15579  VMA_ASSERT(allocator);
15580 
15581  if(pool == VK_NULL_HANDLE)
15582  {
15583  return;
15584  }
15585 
15586  VMA_DEBUG_LOG("vmaDestroyPool");
15587 
15588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15589 
15590 #if VMA_RECORDING_ENABLED
15591  if(allocator->GetRecorder() != VMA_NULL)
15592  {
15593  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15594  }
15595 #endif
15596 
15597  allocator->DestroyPool(pool);
15598 }
15599 
15600 void vmaGetPoolStats(
15601  VmaAllocator allocator,
15602  VmaPool pool,
15603  VmaPoolStats* pPoolStats)
15604 {
15605  VMA_ASSERT(allocator && pool && pPoolStats);
15606 
15607  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15608 
15609  allocator->GetPoolStats(pool, pPoolStats);
15610 }
15611 
15612 void vmaMakePoolAllocationsLost(
15613  VmaAllocator allocator,
15614  VmaPool pool,
15615  size_t* pLostAllocationCount)
15616 {
15617  VMA_ASSERT(allocator && pool);
15618 
15619  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15620 
15621 #if VMA_RECORDING_ENABLED
15622  if(allocator->GetRecorder() != VMA_NULL)
15623  {
15624  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15625  }
15626 #endif
15627 
15628  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15629 }
15630 
15631 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15632 {
15633  VMA_ASSERT(allocator && pool);
15634 
15635  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15636 
15637  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15638 
15639  return allocator->CheckPoolCorruption(pool);
15640 }
15641 
15642 VkResult vmaAllocateMemory(
15643  VmaAllocator allocator,
15644  const VkMemoryRequirements* pVkMemoryRequirements,
15645  const VmaAllocationCreateInfo* pCreateInfo,
15646  VmaAllocation* pAllocation,
15647  VmaAllocationInfo* pAllocationInfo)
15648 {
15649  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15650 
15651  VMA_DEBUG_LOG("vmaAllocateMemory");
15652 
15653  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15654 
15655  VkResult result = allocator->AllocateMemory(
15656  *pVkMemoryRequirements,
15657  false, // requiresDedicatedAllocation
15658  false, // prefersDedicatedAllocation
15659  VK_NULL_HANDLE, // dedicatedBuffer
15660  VK_NULL_HANDLE, // dedicatedImage
15661  *pCreateInfo,
15662  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15663  pAllocation);
15664 
15665 #if VMA_RECORDING_ENABLED
15666  if(allocator->GetRecorder() != VMA_NULL)
15667  {
15668  allocator->GetRecorder()->RecordAllocateMemory(
15669  allocator->GetCurrentFrameIndex(),
15670  *pVkMemoryRequirements,
15671  *pCreateInfo,
15672  *pAllocation);
15673  }
15674 #endif
15675 
15676  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15677  {
15678  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15679  }
15680 
15681  return result;
15682 }
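// Illustrative usage sketch (not part of the library source): allocating raw device memory for
// requirements the caller queried (e.g. via vkGetBufferMemoryRequirements), then freeing it. When
// the VkBuffer/VkImage itself is available, the ForBuffer/ForImage variants below are preferable
// because they can honor dedicated-allocation requirements and preferences.
static VkResult ExampleAllocateRawMemory(VmaAllocator allocator, const VkMemoryRequirements& memReq)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation = VK_NULL_HANDLE;
    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);
    if(res == VK_SUCCESS)
    {
        // allocInfo.deviceMemory and allocInfo.offset identify the range to bind or use.
        vmaFreeMemory(allocator, allocation);
    }
    return res;
}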
15683 
15684 VkResult vmaAllocateMemoryForBuffer(
15685  VmaAllocator allocator,
15686  VkBuffer buffer,
15687  const VmaAllocationCreateInfo* pCreateInfo,
15688  VmaAllocation* pAllocation,
15689  VmaAllocationInfo* pAllocationInfo)
15690 {
15691  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
15692 
15693  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
15694 
15695  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15696 
15697  VkMemoryRequirements vkMemReq = {};
15698  bool requiresDedicatedAllocation = false;
15699  bool prefersDedicatedAllocation = false;
15700  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
15701  requiresDedicatedAllocation,
15702  prefersDedicatedAllocation);
15703 
15704  VkResult result = allocator->AllocateMemory(
15705  vkMemReq,
15706  requiresDedicatedAllocation,
15707  prefersDedicatedAllocation,
15708  buffer, // dedicatedBuffer
15709  VK_NULL_HANDLE, // dedicatedImage
15710  *pCreateInfo,
15711  VMA_SUBALLOCATION_TYPE_BUFFER,
15712  pAllocation);
15713 
15714 #if VMA_RECORDING_ENABLED
15715  if(allocator->GetRecorder() != VMA_NULL)
15716  {
15717  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
15718  allocator->GetCurrentFrameIndex(),
15719  vkMemReq,
15720  requiresDedicatedAllocation,
15721  prefersDedicatedAllocation,
15722  *pCreateInfo,
15723  *pAllocation);
15724  }
15725 #endif
15726 
15727  if(pAllocationInfo && result == VK_SUCCESS)
15728  {
15729  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15730  }
15731 
15732  return result;
15733 }
15734 
15735 VkResult vmaAllocateMemoryForImage(
15736  VmaAllocator allocator,
15737  VkImage image,
15738  const VmaAllocationCreateInfo* pCreateInfo,
15739  VmaAllocation* pAllocation,
15740  VmaAllocationInfo* pAllocationInfo)
15741 {
15742  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
15743 
15744  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
15745 
15746  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15747 
15748  VkMemoryRequirements vkMemReq = {};
15749  bool requiresDedicatedAllocation = false;
15750  bool prefersDedicatedAllocation = false;
15751  allocator->GetImageMemoryRequirements(image, vkMemReq,
15752  requiresDedicatedAllocation, prefersDedicatedAllocation);
15753 
15754  VkResult result = allocator->AllocateMemory(
15755  vkMemReq,
15756  requiresDedicatedAllocation,
15757  prefersDedicatedAllocation,
15758  VK_NULL_HANDLE, // dedicatedBuffer
15759  image, // dedicatedImage
15760  *pCreateInfo,
15761  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
15762  pAllocation);
15763 
15764 #if VMA_RECORDING_ENABLED
15765  if(allocator->GetRecorder() != VMA_NULL)
15766  {
15767  allocator->GetRecorder()->RecordAllocateMemoryForImage(
15768  allocator->GetCurrentFrameIndex(),
15769  vkMemReq,
15770  requiresDedicatedAllocation,
15771  prefersDedicatedAllocation,
15772  *pCreateInfo,
15773  *pAllocation);
15774  }
15775 #endif
15776 
15777  if(pAllocationInfo && result == VK_SUCCESS)
15778  {
15779  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15780  }
15781 
15782  return result;
15783 }
15784 
15785 void vmaFreeMemory(
15786  VmaAllocator allocator,
15787  VmaAllocation allocation)
15788 {
15789  VMA_ASSERT(allocator);
15790 
15791  if(allocation == VK_NULL_HANDLE)
15792  {
15793  return;
15794  }
15795 
15796  VMA_DEBUG_LOG("vmaFreeMemory");
15797 
15798  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15799 
15800 #if VMA_RECORDING_ENABLED
15801  if(allocator->GetRecorder() != VMA_NULL)
15802  {
15803  allocator->GetRecorder()->RecordFreeMemory(
15804  allocator->GetCurrentFrameIndex(),
15805  allocation);
15806  }
15807 #endif
15808 
15809  allocator->FreeMemory(allocation);
15810 }
15811 
15812 VkResult vmaResizeAllocation(
15813  VmaAllocator allocator,
15814  VmaAllocation allocation,
15815  VkDeviceSize newSize)
15816 {
15817  VMA_ASSERT(allocator && allocation);
15818 
15819  VMA_DEBUG_LOG("vmaResizeAllocation");
15820 
15821  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15822 
15823 #if VMA_RECORDING_ENABLED
15824  if(allocator->GetRecorder() != VMA_NULL)
15825  {
15826  allocator->GetRecorder()->RecordResizeAllocation(
15827  allocator->GetCurrentFrameIndex(),
15828  allocation,
15829  newSize);
15830  }
15831 #endif
15832 
15833  return allocator->ResizeAllocation(allocation, newSize);
15834 }
15835 
15836 void vmaGetAllocationInfo(
15837  VmaAllocator allocator,
15838  VmaAllocation allocation,
15839  VmaAllocationInfo* pAllocationInfo)
15840 {
15841  VMA_ASSERT(allocator && allocation && pAllocationInfo);
15842 
15843  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15844 
15845 #if VMA_RECORDING_ENABLED
15846  if(allocator->GetRecorder() != VMA_NULL)
15847  {
15848  allocator->GetRecorder()->RecordGetAllocationInfo(
15849  allocator->GetCurrentFrameIndex(),
15850  allocation);
15851  }
15852 #endif
15853 
15854  allocator->GetAllocationInfo(allocation, pAllocationInfo);
15855 }
15856 
15857 VkBool32 vmaTouchAllocation(
15858  VmaAllocator allocator,
15859  VmaAllocation allocation)
15860 {
15861  VMA_ASSERT(allocator && allocation);
15862 
15863  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15864 
15865 #if VMA_RECORDING_ENABLED
15866  if(allocator->GetRecorder() != VMA_NULL)
15867  {
15868  allocator->GetRecorder()->RecordTouchAllocation(
15869  allocator->GetCurrentFrameIndex(),
15870  allocation);
15871  }
15872 #endif
15873 
15874  return allocator->TouchAllocation(allocation);
15875 }
15876 
15877 void vmaSetAllocationUserData(
15878  VmaAllocator allocator,
15879  VmaAllocation allocation,
15880  void* pUserData)
15881 {
15882  VMA_ASSERT(allocator && allocation);
15883 
15884  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15885 
15886  allocation->SetUserData(allocator, pUserData);
15887 
15888 #if VMA_RECORDING_ENABLED
15889  if(allocator->GetRecorder() != VMA_NULL)
15890  {
15891  allocator->GetRecorder()->RecordSetAllocationUserData(
15892  allocator->GetCurrentFrameIndex(),
15893  allocation,
15894  pUserData);
15895  }
15896 #endif
15897 }
15898 
15899 void vmaCreateLostAllocation(
15900  VmaAllocator allocator,
15901  VmaAllocation* pAllocation)
15902 {
15903  VMA_ASSERT(allocator && pAllocation);
15904 
15905  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
15906 
15907  allocator->CreateLostAllocation(pAllocation);
15908 
15909 #if VMA_RECORDING_ENABLED
15910  if(allocator->GetRecorder() != VMA_NULL)
15911  {
15912  allocator->GetRecorder()->RecordCreateLostAllocation(
15913  allocator->GetCurrentFrameIndex(),
15914  *pAllocation);
15915  }
15916 #endif
15917 }
15918 
15919 VkResult vmaMapMemory(
15920  VmaAllocator allocator,
15921  VmaAllocation allocation,
15922  void** ppData)
15923 {
15924  VMA_ASSERT(allocator && allocation && ppData);
15925 
15926  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15927 
15928  VkResult res = allocator->Map(allocation, ppData);
15929 
15930 #if VMA_RECORDING_ENABLED
15931  if(allocator->GetRecorder() != VMA_NULL)
15932  {
15933  allocator->GetRecorder()->RecordMapMemory(
15934  allocator->GetCurrentFrameIndex(),
15935  allocation);
15936  }
15937 #endif
15938 
15939  return res;
15940 }
15941 
15942 void vmaUnmapMemory(
15943  VmaAllocator allocator,
15944  VmaAllocation allocation)
15945 {
15946  VMA_ASSERT(allocator && allocation);
15947 
15948  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15949 
15950 #if VMA_RECORDING_ENABLED
15951  if(allocator->GetRecorder() != VMA_NULL)
15952  {
15953  allocator->GetRecorder()->RecordUnmapMemory(
15954  allocator->GetCurrentFrameIndex(),
15955  allocation);
15956  }
15957 #endif
15958 
15959  allocator->Unmap(allocation);
15960 }
15961 
15962 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15963 {
15964  VMA_ASSERT(allocator && allocation);
15965 
15966  VMA_DEBUG_LOG("vmaFlushAllocation");
15967 
15968  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15969 
15970  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
15971 
15972 #if VMA_RECORDING_ENABLED
15973  if(allocator->GetRecorder() != VMA_NULL)
15974  {
15975  allocator->GetRecorder()->RecordFlushAllocation(
15976  allocator->GetCurrentFrameIndex(),
15977  allocation, offset, size);
15978  }
15979 #endif
15980 }
15981 
15982 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15983 {
15984  VMA_ASSERT(allocator && allocation);
15985 
15986  VMA_DEBUG_LOG("vmaInvalidateAllocation");
15987 
15988  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15989 
15990  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
15991 
15992 #if VMA_RECORDING_ENABLED
15993  if(allocator->GetRecorder() != VMA_NULL)
15994  {
15995  allocator->GetRecorder()->RecordInvalidateAllocation(
15996  allocator->GetCurrentFrameIndex(),
15997  allocation, offset, size);
15998  }
15999 #endif
16000 }
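// Illustrative usage sketch (not part of the library source): uploading data through a mapped
// host-visible allocation. The explicit flush is harmless for HOST_COHERENT memory types
// (FlushOrInvalidateAllocation above skips coherent types), so calling it unconditionally is safe.
// Assumes <cstring> for memcpy and that the allocation holds at least srcSize bytes.
static VkResult ExampleUpload(VmaAllocator allocator, VmaAllocation allocation,
    const void* srcData, size_t srcSize)
{
    void* pMapped = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &pMapped);
    if(res == VK_SUCCESS)
    {
        memcpy(pMapped, srcData, srcSize);
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE); // flush the whole allocation
        vmaUnmapMemory(allocator, allocation);                       // map/unmap calls must stay balanced
    }
    return res;
}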
16001 
16002 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16003 {
16004  VMA_ASSERT(allocator);
16005 
16006  VMA_DEBUG_LOG("vmaCheckCorruption");
16007 
16008  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16009 
16010  return allocator->CheckCorruption(memoryTypeBits);
16011 }
16012 
16013 VkResult vmaDefragment(
16014  VmaAllocator allocator,
16015  VmaAllocation* pAllocations,
16016  size_t allocationCount,
16017  VkBool32* pAllocationsChanged,
16018  const VmaDefragmentationInfo *pDefragmentationInfo,
16019  VmaDefragmentationStats* pDefragmentationStats)
16020 {
16021  // Deprecated interface, reimplemented using new one.
16022 
16023  VmaDefragmentationInfo2 info2 = {};
16024  info2.allocationCount = (uint32_t)allocationCount;
16025  info2.pAllocations = pAllocations;
16026  info2.pAllocationsChanged = pAllocationsChanged;
16027  if(pDefragmentationInfo != VMA_NULL)
16028  {
16029  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16030  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16031  }
16032  else
16033  {
16034  info2.maxCpuAllocationsToMove = UINT32_MAX;
16035  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16036  }
16037  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16038 
16039  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16040  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16041  if(res == VK_NOT_READY)
16042  {
16043  res = vmaDefragmentationEnd(allocator, ctx);
16044  }
16045  return res;
16046 }
16047 
16048 VkResult vmaDefragmentationBegin(
16049  VmaAllocator allocator,
16050  const VmaDefragmentationInfo2* pInfo,
16051  VmaDefragmentationStats* pStats,
16052  VmaDefragmentationContext *pContext)
16053 {
16054  VMA_ASSERT(allocator && pInfo && pContext);
16055 
16056  // Degenerate case: Nothing to defragment.
16057  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16058  {
16059  return VK_SUCCESS;
16060  }
16061 
16062  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16063  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16064  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16065  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16066 
16067  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16068 
16069  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16070 
16071  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16072 
16073 #if VMA_RECORDING_ENABLED
16074  if(allocator->GetRecorder() != VMA_NULL)
16075  {
16076  allocator->GetRecorder()->RecordDefragmentationBegin(
16077  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16078  }
16079 #endif
16080 
16081  return res;
16082 }
16083 
16084 VkResult vmaDefragmentationEnd(
16085  VmaAllocator allocator,
16086  VmaDefragmentationContext context)
16087 {
16088  VMA_ASSERT(allocator);
16089 
16090  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16091 
16092  if(context != VK_NULL_HANDLE)
16093  {
16094  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16095 
16096 #if VMA_RECORDING_ENABLED
16097  if(allocator->GetRecorder() != VMA_NULL)
16098  {
16099  allocator->GetRecorder()->RecordDefragmentationEnd(
16100  allocator->GetCurrentFrameIndex(), context);
16101  }
16102 #endif
16103 
16104  return allocator->DefragmentationEnd(context);
16105  }
16106  else
16107  {
16108  return VK_SUCCESS;
16109  }
16110 }
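// Illustrative usage sketch (not part of the library source): the CPU-only defragmentation path
// that the deprecated vmaDefragment() above is now a thin wrapper around. With no command buffer
// set in VmaDefragmentationInfo2, only host-visible allocations can be moved.
static VkResult ExampleDefragmentCpu(
    VmaAllocator allocator,
    VmaAllocation* pAllocations, uint32_t allocationCount,
    VkBool32* pAllocationsChanged,
    VmaDefragmentationStats* pStats)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = allocationCount;
    info.pAllocations = pAllocations;
    info.pAllocationsChanged = pAllocationsChanged;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, pStats, &ctx);
    if(res == VK_NOT_READY)
    {
        res = vmaDefragmentationEnd(allocator, ctx); // completes the CPU-side pass
    }
    return res;
}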
16111 
16112 VkResult vmaBindBufferMemory(
16113  VmaAllocator allocator,
16114  VmaAllocation allocation,
16115  VkBuffer buffer)
16116 {
16117  VMA_ASSERT(allocator && allocation && buffer);
16118 
16119  VMA_DEBUG_LOG("vmaBindBufferMemory");
16120 
16121  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16122 
16123  return allocator->BindBufferMemory(allocation, buffer);
16124 }
16125 
16126 VkResult vmaBindImageMemory(
16127  VmaAllocator allocator,
16128  VmaAllocation allocation,
16129  VkImage image)
16130 {
16131  VMA_ASSERT(allocator && allocation && image);
16132 
16133  VMA_DEBUG_LOG("vmaBindImageMemory");
16134 
16135  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16136 
16137  return allocator->BindImageMemory(allocation, image);
16138 }
16139 
16140 VkResult vmaCreateBuffer(
16141  VmaAllocator allocator,
16142  const VkBufferCreateInfo* pBufferCreateInfo,
16143  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16144  VkBuffer* pBuffer,
16145  VmaAllocation* pAllocation,
16146  VmaAllocationInfo* pAllocationInfo)
16147 {
16148  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16149 
16150  if(pBufferCreateInfo->size == 0)
16151  {
16152  return VK_ERROR_VALIDATION_FAILED_EXT;
16153  }
16154 
16155  VMA_DEBUG_LOG("vmaCreateBuffer");
16156 
16157  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16158 
16159  *pBuffer = VK_NULL_HANDLE;
16160  *pAllocation = VK_NULL_HANDLE;
16161 
16162  // 1. Create VkBuffer.
16163  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16164  allocator->m_hDevice,
16165  pBufferCreateInfo,
16166  allocator->GetAllocationCallbacks(),
16167  pBuffer);
16168  if(res >= 0)
16169  {
16170  // 2. vkGetBufferMemoryRequirements.
16171  VkMemoryRequirements vkMemReq = {};
16172  bool requiresDedicatedAllocation = false;
16173  bool prefersDedicatedAllocation = false;
16174  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16175  requiresDedicatedAllocation, prefersDedicatedAllocation);
16176 
16177  // Make sure alignment requirements for specific buffer usages reported
16178  // in Physical Device Properties are included in alignment reported by memory requirements.
16179  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16180  {
16181  VMA_ASSERT(vkMemReq.alignment %
16182  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16183  }
16184  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16185  {
16186  VMA_ASSERT(vkMemReq.alignment %
16187  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16188  }
16189  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16190  {
16191  VMA_ASSERT(vkMemReq.alignment %
16192  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16193  }
16194 
16195  // 3. Allocate memory using allocator.
16196  res = allocator->AllocateMemory(
16197  vkMemReq,
16198  requiresDedicatedAllocation,
16199  prefersDedicatedAllocation,
16200  *pBuffer, // dedicatedBuffer
16201  VK_NULL_HANDLE, // dedicatedImage
16202  *pAllocationCreateInfo,
16203  VMA_SUBALLOCATION_TYPE_BUFFER,
16204  pAllocation);
16205 
16206 #if VMA_RECORDING_ENABLED
16207  if(allocator->GetRecorder() != VMA_NULL)
16208  {
16209  allocator->GetRecorder()->RecordCreateBuffer(
16210  allocator->GetCurrentFrameIndex(),
16211  *pBufferCreateInfo,
16212  *pAllocationCreateInfo,
16213  *pAllocation);
16214  }
16215 #endif
16216 
16217  if(res >= 0)
16218  {
16219  // 4. Bind buffer with memory.
16220  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16221  if(res >= 0)
16222  {
16223  // All steps succeeded.
16224  #if VMA_STATS_STRING_ENABLED
16225  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16226  #endif
16227  if(pAllocationInfo != VMA_NULL)
16228  {
16229  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16230  }
16231 
16232  return VK_SUCCESS;
16233  }
16234  allocator->FreeMemory(*pAllocation);
16235  *pAllocation = VK_NULL_HANDLE;
16236  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16237  *pBuffer = VK_NULL_HANDLE;
16238  return res;
16239  }
16240  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16241  *pBuffer = VK_NULL_HANDLE;
16242  return res;
16243  }
16244  return res;
16245 }
16246 
16247 void vmaDestroyBuffer(
16248  VmaAllocator allocator,
16249  VkBuffer buffer,
16250  VmaAllocation allocation)
16251 {
16252  VMA_ASSERT(allocator);
16253 
16254  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16255  {
16256  return;
16257  }
16258 
16259  VMA_DEBUG_LOG("vmaDestroyBuffer");
16260 
16261  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16262 
16263 #if VMA_RECORDING_ENABLED
16264  if(allocator->GetRecorder() != VMA_NULL)
16265  {
16266  allocator->GetRecorder()->RecordDestroyBuffer(
16267  allocator->GetCurrentFrameIndex(),
16268  allocation);
16269  }
16270 #endif
16271 
16272  if(buffer != VK_NULL_HANDLE)
16273  {
16274  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16275  }
16276 
16277  if(allocation != VK_NULL_HANDLE)
16278  {
16279  allocator->FreeMemory(allocation);
16280  }
16281 }
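// Illustrative usage sketch (not part of the library source): creating a device-local vertex
// buffer together with its memory in one call, and the matching teardown.
static VkResult ExampleCreateVertexBuffer(VmaAllocator allocator, VkDeviceSize size,
    VkBuffer* pOutBuffer, VmaAllocation* pOutAllocation)
{
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = size;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
        pOutBuffer, pOutAllocation, nullptr /*pAllocationInfo - optional*/);
    // Teardown: vmaDestroyBuffer(allocator, *pOutBuffer, *pOutAllocation); destroys the buffer
    // and frees its memory; both handles may be VK_NULL_HANDLE.
}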
16282 
16283 VkResult vmaCreateImage(
16284  VmaAllocator allocator,
16285  const VkImageCreateInfo* pImageCreateInfo,
16286  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16287  VkImage* pImage,
16288  VmaAllocation* pAllocation,
16289  VmaAllocationInfo* pAllocationInfo)
16290 {
16291  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16292 
16293  if(pImageCreateInfo->extent.width == 0 ||
16294  pImageCreateInfo->extent.height == 0 ||
16295  pImageCreateInfo->extent.depth == 0 ||
16296  pImageCreateInfo->mipLevels == 0 ||
16297  pImageCreateInfo->arrayLayers == 0)
16298  {
16299  return VK_ERROR_VALIDATION_FAILED_EXT;
16300  }
16301 
16302  VMA_DEBUG_LOG("vmaCreateImage");
16303 
16304  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16305 
16306  *pImage = VK_NULL_HANDLE;
16307  *pAllocation = VK_NULL_HANDLE;
16308 
16309  // 1. Create VkImage.
16310  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16311  allocator->m_hDevice,
16312  pImageCreateInfo,
16313  allocator->GetAllocationCallbacks(),
16314  pImage);
16315  if(res >= 0)
16316  {
16317  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16318  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16319  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16320 
16321  // 2. Allocate memory using allocator.
16322  VkMemoryRequirements vkMemReq = {};
16323  bool requiresDedicatedAllocation = false;
16324  bool prefersDedicatedAllocation = false;
16325  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16326  requiresDedicatedAllocation, prefersDedicatedAllocation);
16327 
16328  res = allocator->AllocateMemory(
16329  vkMemReq,
16330  requiresDedicatedAllocation,
16331  prefersDedicatedAllocation,
16332  VK_NULL_HANDLE, // dedicatedBuffer
16333  *pImage, // dedicatedImage
16334  *pAllocationCreateInfo,
16335  suballocType,
16336  pAllocation);
16337 
16338 #if VMA_RECORDING_ENABLED
16339  if(allocator->GetRecorder() != VMA_NULL)
16340  {
16341  allocator->GetRecorder()->RecordCreateImage(
16342  allocator->GetCurrentFrameIndex(),
16343  *pImageCreateInfo,
16344  *pAllocationCreateInfo,
16345  *pAllocation);
16346  }
16347 #endif
16348 
16349  if(res >= 0)
16350  {
16351  // 3. Bind image with memory.
16352  res = allocator->BindImageMemory(*pAllocation, *pImage);
16353  if(res >= 0)
16354  {
16355  // All steps succeeded.
16356  #if VMA_STATS_STRING_ENABLED
16357  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16358  #endif
16359  if(pAllocationInfo != VMA_NULL)
16360  {
16361  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16362  }
16363 
16364  return VK_SUCCESS;
16365  }
16366  allocator->FreeMemory(*pAllocation);
16367  *pAllocation = VK_NULL_HANDLE;
16368  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16369  *pImage = VK_NULL_HANDLE;
16370  return res;
16371  }
16372  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16373  *pImage = VK_NULL_HANDLE;
16374  return res;
16375  }
16376  return res;
16377 }
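// Illustrative usage sketch (not part of the library source): creating a sampled 2D texture with
// device-local memory; format, usage and extent below are hypothetical.
static VkResult ExampleCreateTexture(VmaAllocator allocator, uint32_t width, uint32_t height,
    VkImage* pOutImage, VmaAllocation* pOutAllocation)
{
    VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imageInfo.imageType = VK_IMAGE_TYPE_2D;
    imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imageInfo.extent = { width, height, 1 };
    imageInfo.mipLevels = 1;
    imageInfo.arrayLayers = 1;
    imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // selects VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL above
    imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateImage(allocator, &imageInfo, &allocCreateInfo,
        pOutImage, pOutAllocation, nullptr /*pAllocationInfo - optional*/);
    // Teardown: vmaDestroyImage(allocator, *pOutImage, *pOutAllocation);
}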
16378 
16379 void vmaDestroyImage(
16380  VmaAllocator allocator,
16381  VkImage image,
16382  VmaAllocation allocation)
16383 {
16384  VMA_ASSERT(allocator);
16385 
16386  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16387  {
16388  return;
16389  }
16390 
16391  VMA_DEBUG_LOG("vmaDestroyImage");
16392 
16393  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16394 
16395 #if VMA_RECORDING_ENABLED
16396  if(allocator->GetRecorder() != VMA_NULL)
16397  {
16398  allocator->GetRecorder()->RecordDestroyImage(
16399  allocator->GetCurrentFrameIndex(),
16400  allocation);
16401  }
16402 #endif
16403 
16404  if(image != VK_NULL_HANDLE)
16405  {
16406  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16407  }
16408  if(allocation != VK_NULL_HANDLE)
16409  {
16410  allocator->FreeMemory(allocation);
16411  }
16412 }
16413 
16414 #endif // #ifdef VMA_IMPLEMENTATION
diff --git a/src/Tests.cpp b/src/Tests.cpp
index 0f1392e..d0b3ea2 100644
--- a/src/Tests.cpp
+++ b/src/Tests.cpp
@@ -5049,7 +5049,7 @@ void Test()
 {
     wprintf(L"TESTING:\n");
 
-    if(true)
+    if(false)
     {
         // # Temporarily insert custom tests here
         // ########################################