From 5268dbbce51f9d44346afcd3a6d0eee859dcc529 Mon Sep 17 00:00:00 2001
From: Adam Sawicki
Date: Wed, 8 Nov 2017 12:52:05 +0100
Subject: [PATCH] Version 2.0.0-alpha.5. Major change in memory mapping - now
 it's internally reference-counted. Renamed flag
 VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT to VMA_ALLOCATION_CREATE_MAPPED_BIT.
 See documentation of vmaMapMemory().
---
 README.md                                 |    2 +-
 bin/VulkanSample_Release_2015.exe         |  Bin 100864 -> 101376 bytes
 docs/html/globals.html                    |   15 +-
 docs/html/globals_eval.html               |    9 +-
 docs/html/globals_func.html               |    6 -
 docs/html/index.html                      |    4 +-
 docs/html/memory_mapping.html             |   93 ++
 docs/html/quick_start.html                |   12 +-
 docs/html/search/all_6.js                 |    1 +
 docs/html/search/all_8.js                 |    1 -
 docs/html/search/all_e.js                 |    5 +-
 docs/html/search/enumvalues_0.js          |    5 +-
 docs/html/search/functions_0.js           |    4 +-
 docs/html/search/pages_3.js               |    2 +-
 docs/html/search/searchdata.js            |    2 +-
 docs/html/struct_vma_allocation_info.html |    7 +-
 docs/html/thread_safety.html              |    2 +-
 docs/html/vk__mem__alloc_8h.html          |   90 +-
 docs/html/vk__mem__alloc_8h_source.html   |  223 +++--
 src/VulkanSample.cpp                      |    6 +-
 src/vk_mem_alloc.h                        | 1038 ++++++++++-----------
 21 files changed, 750 insertions(+), 777 deletions(-)
 create mode 100644 docs/html/memory_mapping.html

diff --git a/README.md b/README.md
index 1b5e51a..88645fd 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ Additional features:
 - Thread-safety: Library is designed to be used by multithreaded code.
 - Configuration: Fill optional members of CreateInfo structure to provide custom CPU memory allocator and other parameters.
 - Customization: Predefine appropriate macros to provide your own implementation of all external facilities used by the library, from assert, mutex, and atomic, to vector and linked list.
-- Support for persistently mapped memory: Just allocate memory with appropriate flag and you get access to mapped pointer.
+- Support for memory mapping, reference-counted internally. Support for persistently mapped memory: Just allocate with appropriate flag and you get access to mapped pointer.
 - Custom memory pools: Create a pool with desired parameters (e.g. fixed or limited maximum size) and allocate memory out of it.
 - Support for VK_KHR_dedicated_allocation extension.
 - Defragmentation: Call one function and let the library move data around to free some memory blocks and make your allocations better compacted.
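For users upgrading from alpha.4, the rename is a drop-in substitution. A minimal sketch (the allocCreateInfo variable is assumed for illustration and is not part of this patch):

    // Old flag name in 2.0.0-alpha.4:
    // allocCreateInfo.flags = VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
    // New flag name in 2.0.0-alpha.5; the allocation is still persistently
    // mapped, and mapping is now reference-counted internally:
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;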
diff --git a/bin/VulkanSample_Release_2015.exe b/bin/VulkanSample_Release_2015.exe
index e3da0c0e891ee68a32cf761001f14f67d442a71a..b8c65e68a79a531a3ec16036b81bdf57877e7cb5 100644
GIT binary patch
delta 41913
[binary delta omitted]

diff --git a/docs/html/globals.html b/docs/html/globals.html
index f2d4f80..a065686 100644
--- a/docs/html/globals.html
+++ b/docs/html/globals.html
@@ -83,12 +83,12 @@ $(function() {

   • VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM : vk_mem_alloc.h
+  • VMA_ALLOCATION_CREATE_MAPPED_BIT : vk_mem_alloc.h
   • VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT : vk_mem_alloc.h
-  • VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT : vk_mem_alloc.h
   • VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT : vk_mem_alloc.h
@@ -122,9 +122,6 @@ $(function() {
   • VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT : vk_mem_alloc.h
-  • VMA_POOL_CREATE_PERSISTENT_MAP_BIT : vk_mem_alloc.h
   • VMA_STATS_STRING_ENABLED : vk_mem_alloc.h
@@ -233,9 +230,6 @@ $(function() {
   • vmaMapMemory() : vk_mem_alloc.h
-  • vmaMapPersistentlyMappedMemory() : vk_mem_alloc.h
   • VmaMemoryUsage : vk_mem_alloc.h
@@ -266,9 +260,6 @@ $(function() {
   • vmaUnmapMemory() : vk_mem_alloc.h
-  • vmaUnmapPersistentlyMappedMemory() : vk_mem_alloc.h
   • VmaVulkanFunctions : vk_mem_alloc.h

diff --git a/docs/html/globals_eval.html b/docs/html/globals_eval.html
index 4c50ead..95180fa 100644
--- a/docs/html/globals_eval.html
+++ b/docs/html/globals_eval.html
@@ -71,12 +71,12 @@ $(function() {
   • VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM : vk_mem_alloc.h
+  • VMA_ALLOCATION_CREATE_MAPPED_BIT : vk_mem_alloc.h
   • VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT : vk_mem_alloc.h
-  • VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT : vk_mem_alloc.h
   • VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT : vk_mem_alloc.h
@@ -110,9 +110,6 @@ $(function() {
   • VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT : vk_mem_alloc.h
-  • VMA_POOL_CREATE_PERSISTENT_MAP_BIT : vk_mem_alloc.h

diff --git a/docs/html/globals_func.html b/docs/html/globals_func.html
index e1e7b51..4e529e3 100644
--- a/docs/html/globals_func.html
+++ b/docs/html/globals_func.html
@@ -134,9 +134,6 @@ $(function() {
   • vmaMapMemory() : vk_mem_alloc.h
-  • vmaMapPersistentlyMappedMemory() : vk_mem_alloc.h
   • vmaSetAllocationUserData() : vk_mem_alloc.h
@@ -146,9 +143,6 @@ $(function() {
   • vmaUnmapMemory() : vk_mem_alloc.h
-  • vmaUnmapPersistentlyMappedMemory() : vk_mem_alloc.h

diff --git a/docs/html/index.html b/docs/html/index.html
index d21f630..acf7f30 100644
--- a/docs/html/index.html
+++ b/docs/html/index.html
@@ -62,7 +62,7 @@ $(function() {
    Vulkan Memory Allocator

-   Version 2.0.0-alpha.4 (2017-10-02)
+   Version 2.0.0-alpha.5 (2017-11-08)

    Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
    License: MIT

    Documentation of all members: vk_mem_alloc.h

@@ -70,7 +70,7 @@ License: MIT

    • User guide
      • Quick start
-     • Persistently mapped memory
+     • Memory mapping
      • Custom memory pools
      • Defragmentation
      • Lost allocations

diff --git a/docs/html/memory_mapping.html b/docs/html/memory_mapping.html
new file mode 100644
index 0000000..bb32ecc
--- /dev/null
+++ b/docs/html/memory_mapping.html
@@ -0,0 +1,93 @@
+Vulkan Memory Allocator: Memory mapping
        Memory mapping
        +
        +
        +

        +Persistently mapped memory

        +

        If you need to map memory on host, it may happen that two allocations are assigned to the same VkDeviceMemory block, so if you map them both at the same time, it will cause an error, because mapping a single memory block multiple times is illegal in Vulkan.
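        This is the problem that the reference-counted mapping introduced in this version addresses. A minimal sketch, assuming two allocations alloc1 and alloc2 (hypothetical names) that happen to live in the same VkDeviceMemory block; vmaMapMemory()/vmaUnmapMemory() are the library's real functions, and the counting behavior described in the comments is what this release adds:

        void* pData1 = nullptr;
        void* pData2 = nullptr;
        // Maps the underlying VkDeviceMemory block; internal map refcount becomes 1.
        vmaMapMemory(allocator, alloc1, &pData1);
        // Same block is already mapped, so no second vkMapMemory() call; refcount becomes 2.
        vmaMapMemory(allocator, alloc2, &pData2);
        memcpy(pData1, srcData1, srcSize1); // srcData1/srcData2 are assumed host buffers
        memcpy(pData2, srcData2, srcSize2);
        vmaUnmapMemory(allocator, alloc2); // refcount drops to 1, block stays mapped
        vmaUnmapMemory(allocator, alloc1); // refcount drops to 0, block is actually unmapped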

        +

        TODO update this...

        +

        It is safer, more convenient, and more efficient to use the special feature designed for this: persistently mapped memory. Allocations made with the VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in VmaAllocationCreateInfo::flags are returned from device memory blocks that stay mapped all the time, so you can just access the CPU pointer to them. The VmaAllocationInfo::pMappedData pointer is already offset to the beginning of the particular allocation. Example:

        +
        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.size = 1024;
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
        allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; // request persistent mapping
        VkBuffer buf;
        VmaAllocation alloc;
        VmaAllocationInfo allocInfo; // receives pMappedData, among other members
        vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
        // Buffer is immediately mapped. You can access its memory.
        memcpy(allocInfo.pMappedData, myData, 1024);

        Memory in Vulkan doesn't need to be unmapped before using it e.g. for transfers, but if you are not sure whether it's HOST_COHERENT (here it surely is, because it's created with VMA_MEMORY_USAGE_CPU_ONLY), you should check it. If it's not, you should call vkInvalidateMappedMemoryRanges() before reading from and vkFlushMappedMemoryRanges() after writing to mapped memory on the CPU. Example:

        +
        VkMemoryPropertyFlags memFlags;
        vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
        if((memFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
        {
            VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
            memRange.memory = allocInfo.deviceMemory;
            memRange.offset = allocInfo.offset;
            memRange.size   = allocInfo.size;
            vkFlushMappedMemoryRanges(device, 1, &memRange);
        }
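        The read direction is symmetric. A hedged one-liner, reusing the same memRange, for the case where the GPU has written to this non-coherent memory and the CPU is about to read it:

            vkInvalidateMappedMemoryRanges(device, 1, &memRange);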

        +Note on performance

        +

        There is a situation that you should be careful about. It happens only if all of the following conditions are met:

        1. You use an AMD GPU.
        2. You use the memory type that is both DEVICE_LOCAL and HOST_VISIBLE (used when you specify VMA_MEMORY_USAGE_CPU_TO_GPU).
        3. Operating system is Windows 7 or 8.x (Windows 10 is not affected because it uses WDDM2).

        Then whenever a VkDeviceMemory block allocated from this memory type is mapped for the time of any call to vkQueueSubmit() or vkQueuePresentKHR(), this block is migrated by WDDM to system RAM, which degrades performance. It doesn't matter if that particular memory block is actually used by the command buffer being submitted.

        +

        To avoid this problem, either make sure to unmap all allocations made from this memory type before your Submit and Present, or use VMA_MEMORY_USAGE_GPU_ONLY and transfer data to it from a staging buffer allocated with VMA_MEMORY_USAGE_CPU_ONLY, which can safely stay mapped all the time, as sketched below.
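        A minimal sketch of that staging path, under stated assumptions: stagingAllocInfo comes from a VMA_MEMORY_USAGE_CPU_ONLY buffer created with VMA_ALLOCATION_CREATE_MAPPED_BIT as in the earlier example, gpuBuf is a VMA_MEMORY_USAGE_GPU_ONLY buffer, and cmdBuf is a command buffer in the recording state; submission and synchronization are omitted:

        // Write through the persistently mapped staging pointer.
        memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);

        // Record a GPU-side copy from the staging buffer to the device-local buffer.
        VkBufferCopy copyRegion = {};
        copyRegion.srcOffset = 0;
        copyRegion.dstOffset = 0;
        copyRegion.size = myDataSize;
        vkCmdCopyBuffer(cmdBuf, stagingBuf, gpuBuf, 1, &copyRegion);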

diff --git a/docs/html/quick_start.html b/docs/html/quick_start.html
index 502651a..adb1f28 100644
--- a/docs/html/quick_start.html
+++ b/docs/html/quick_start.html
@@ -66,17 +66,23 @@ $(function() {
        Quick start
    -

    In your project code:

    +

    +Project setup

    +

    In your project code:

    1. Include "vk_mem_alloc.h" file wherever you want to use the library.
    2. In exactly one C++ file define the following macro before the include to build the library implementation.
    -
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"

    At program startup:

    +
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"

    +Initialization

    +

    At program startup:

    1. Initialize Vulkan to have VkPhysicalDevice and VkDevice object.
    2. Fill VmaAllocatorCreateInfo structure and create VmaAllocator object by calling vmaCreateAllocator().
    -
    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);

    When you want to create a buffer or image:

    +
    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
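    Symmetrically, at program shutdown the allocator would be released with vmaDestroyAllocator(), a call from the library's existing API that this hunk doesn't show:

    // At program exit, after all buffers/images allocated from it are destroyed:
    vmaDestroyAllocator(allocator);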

    +Resource allocation

    +

    When you want to create a buffer or image:

    1. Fill VkBufferCreateInfo / VkImageCreateInfo structure.
    2. Fill VmaAllocationCreateInfo structure.
    3. diff --git a/docs/html/search/all_6.js b/docs/html/search/all_6.js index 1002ad2..0140fe2 100644 --- a/docs/html/search/all_6.js +++ b/docs/html/search/all_6.js @@ -3,6 +3,7 @@ var searchData= ['maxallocationstomove',['maxAllocationsToMove',['../struct_vma_defragmentation_info.html#aa7c7304e13c71f604c907196c4e28fbc',1,'VmaDefragmentationInfo']]], ['maxblockcount',['maxBlockCount',['../struct_vma_pool_create_info.html#ae41142f2834fcdc82baa4883c187b75c',1,'VmaPoolCreateInfo']]], ['maxbytestomove',['maxBytesToMove',['../struct_vma_defragmentation_info.html#acb311c940a777270e67e1b81c5ab6a1d',1,'VmaDefragmentationInfo']]], + ['memory_20mapping',['Memory mapping',['../memory_mapping.html',1,'index']]], ['memoryheap',['memoryHeap',['../struct_vma_stats.html#a0e6611508c29a187f0fd14ff1a0329c0',1,'VmaStats']]], ['memorytype',['memoryType',['../struct_vma_stats.html#a13e3caf754be79352c42408756309331',1,'VmaStats::memoryType()'],['../struct_vma_allocation_info.html#a7f6b0aa58c135e488e6b40a388dad9d5',1,'VmaAllocationInfo::memoryType()']]], ['memorytypeindex',['memoryTypeIndex',['../struct_vma_pool_create_info.html#a596fa76b685d3f1f688f84a709a5b319',1,'VmaPoolCreateInfo']]], diff --git a/docs/html/search/all_8.js b/docs/html/search/all_8.js index a53d9e0..24c8ac1 100644 --- a/docs/html/search/all_8.js +++ b/docs/html/search/all_8.js @@ -2,7 +2,6 @@ var searchData= [ ['pallocationcallbacks',['pAllocationCallbacks',['../struct_vma_allocator_create_info.html#a6e409087e3be55400d0e4ccbe43c608d',1,'VmaAllocatorCreateInfo']]], ['pdevicememorycallbacks',['pDeviceMemoryCallbacks',['../struct_vma_allocator_create_info.html#af1380969b5e1ea4c3184a877892d260e',1,'VmaAllocatorCreateInfo']]], - ['persistently_20mapped_20memory',['Persistently mapped memory',['../persistently_mapped_memory.html',1,'index']]], ['pfn_5fvmaallocatedevicememoryfunction',['PFN_vmaAllocateDeviceMemoryFunction',['../vk__mem__alloc_8h.html#ab6a6477cda1ce775b30bde96d766203b',1,'vk_mem_alloc.h']]], ['pfn_5fvmafreedevicememoryfunction',['PFN_vmaFreeDeviceMemoryFunction',['../vk__mem__alloc_8h.html#aef2545dc2e9dd4f29ab9ba6ac6fe2f49',1,'vk_mem_alloc.h']]], ['pfnallocate',['pfnAllocate',['../struct_vma_device_memory_callbacks.html#a4f17f7b255101e733b44d5633aceabfb',1,'VmaDeviceMemoryCallbacks']]], diff --git a/docs/html/search/all_e.js b/docs/html/search/all_e.js index dc7e02f..a08d105 100644 --- a/docs/html/search/all_e.js +++ b/docs/html/search/all_e.js @@ -23,8 +23,8 @@ var searchData= ['vma_5fallocation_5fcreate_5fcan_5fmake_5fother_5flost_5fbit',['VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a68686d0ce9beb0d4d1b9f2b8b1389a7e',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fdedicated_5fmemory_5fbit',['VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a3fc311d855c2ff53f1090ef5c722b38f',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597ae5633ec569f4899cf8f29e7385b2f882',1,'vk_mem_alloc.h']]], + ['vma_5fallocation_5fcreate_5fmapped_5fbit',['VMA_ALLOCATION_CREATE_MAPPED_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a11da372cc3a82931c5e5d6146cd9dd1f',1,'vk_mem_alloc.h']]], 
['vma_5fallocation_5fcreate_5fnever_5fallocate_5fbit',['VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a89759603401014eb325eb22a3839f2ff',1,'vk_mem_alloc.h']]], - ['vma_5fallocation_5fcreate_5fpersistent_5fmap_5fbit',['VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597ae443691ef3d077c0dc3de5576ac4c312',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fexternally_5fsynchronized_5fbit',['VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7ca4816ddaed324ba110172ca608a20f29d',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7cae4d5ad929caba5f23eb502b13bd5286c',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fkhr_5fdedicated_5fallocation_5fbit',['VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7cace7da7cc6e71a625dfa763c55a597878',1,'vk_mem_alloc.h']]], @@ -36,7 +36,6 @@ var searchData= ['vma_5fmemory_5fusage_5funknown',['VMA_MEMORY_USAGE_UNKNOWN',['../vk__mem__alloc_8h.html#aa5846affa1e9da3800e3e78fae2305ccaf50d27e34e0925cf3a63db8c839121dd',1,'vk_mem_alloc.h']]], ['vma_5fpool_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#a9a7c45f9c863695d98c83fa5ac940fe7a1c7312bea9ea246846b9054fd6bd6aec',1,'vk_mem_alloc.h']]], ['vma_5fpool_5fcreate_5fignore_5fbuffer_5fimage_5fgranularity_5fbit',['VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT',['../vk__mem__alloc_8h.html#a9a7c45f9c863695d98c83fa5ac940fe7a9f1a499508a8edb4e8ba40aa0290a3d2',1,'vk_mem_alloc.h']]], - ['vma_5fpool_5fcreate_5fpersistent_5fmap_5fbit',['VMA_POOL_CREATE_PERSISTENT_MAP_BIT',['../vk__mem__alloc_8h.html#a9a7c45f9c863695d98c83fa5ac940fe7a918441f7b40dca90481b114f5d224fe9',1,'vk_mem_alloc.h']]], ['vma_5fstats_5fstring_5fenabled',['VMA_STATS_STRING_ENABLED',['../vk__mem__alloc_8h.html#ae25f0d55fd91cb166f002b63244800e1',1,'vk_mem_alloc.h']]], ['vmaallocatememory',['vmaAllocateMemory',['../vk__mem__alloc_8h.html#abf28077dbf82d0908b8acbe8ee8dd9b8',1,'vk_mem_alloc.h']]], ['vmaallocatememoryforbuffer',['vmaAllocateMemoryForBuffer',['../vk__mem__alloc_8h.html#a7fdf64415b6c3d83c454f28d2c53df7b',1,'vk_mem_alloc.h']]], @@ -73,7 +72,6 @@ var searchData= ['vmagetpoolstats',['vmaGetPoolStats',['../vk__mem__alloc_8h.html#ae8bf76997b234ef68aad922616df4153',1,'vk_mem_alloc.h']]], ['vmamakepoolallocationslost',['vmaMakePoolAllocationsLost',['../vk__mem__alloc_8h.html#a736bd6cbda886f36c891727e73bd4024',1,'vk_mem_alloc.h']]], ['vmamapmemory',['vmaMapMemory',['../vk__mem__alloc_8h.html#ad5bd1243512d099706de88168992f069',1,'vk_mem_alloc.h']]], - ['vmamappersistentlymappedmemory',['vmaMapPersistentlyMappedMemory',['../vk__mem__alloc_8h.html#a03366170bb8e186605518d2f5d65b85a',1,'vk_mem_alloc.h']]], ['vmamemoryusage',['VmaMemoryUsage',['../vk__mem__alloc_8h.html#aa5846affa1e9da3800e3e78fae2305cc',1,'VmaMemoryUsage(): vk_mem_alloc.h'],['../vk__mem__alloc_8h.html#ad63b2113c0bfdbeade1cb498f5a8580d',1,'VmaMemoryUsage(): vk_mem_alloc.h']]], ['vmapoolcreateflagbits',['VmaPoolCreateFlagBits',['../vk__mem__alloc_8h.html#a9a7c45f9c863695d98c83fa5ac940fe7',1,'VmaPoolCreateFlagBits(): vk_mem_alloc.h'],['../vk__mem__alloc_8h.html#a8f93195158e0e2ac80ca352064e71c1f',1,'VmaPoolCreateFlagBits(): vk_mem_alloc.h']]], 
['vmapoolcreateflags',['VmaPoolCreateFlags',['../vk__mem__alloc_8h.html#a2770e325ea42e087c1b91fdf46d0292a',1,'vk_mem_alloc.h']]], @@ -84,6 +82,5 @@ var searchData= ['vmastatinfo',['VmaStatInfo',['../struct_vma_stat_info.html',1,'VmaStatInfo'],['../vk__mem__alloc_8h.html#a810b009a788ee8aac72a25b42ffbe31c',1,'VmaStatInfo(): vk_mem_alloc.h']]], ['vmastats',['VmaStats',['../struct_vma_stats.html',1,'VmaStats'],['../vk__mem__alloc_8h.html#a732be855fb4a7c248e6853d928a729af',1,'VmaStats(): vk_mem_alloc.h']]], ['vmaunmapmemory',['vmaUnmapMemory',['../vk__mem__alloc_8h.html#a9bc268595cb33f6ec4d519cfce81ff45',1,'vk_mem_alloc.h']]], - ['vmaunmappersistentlymappedmemory',['vmaUnmapPersistentlyMappedMemory',['../vk__mem__alloc_8h.html#a26b87244491c1fe77f11fe9ab5779c27',1,'vk_mem_alloc.h']]], ['vmavulkanfunctions',['VmaVulkanFunctions',['../struct_vma_vulkan_functions.html',1,'VmaVulkanFunctions'],['../vk__mem__alloc_8h.html#a97064a1a271b0061ebfc3a079862d0c5',1,'VmaVulkanFunctions(): vk_mem_alloc.h']]] ]; diff --git a/docs/html/search/enumvalues_0.js b/docs/html/search/enumvalues_0.js index 98b5661..a556456 100644 --- a/docs/html/search/enumvalues_0.js +++ b/docs/html/search/enumvalues_0.js @@ -4,8 +4,8 @@ var searchData= ['vma_5fallocation_5fcreate_5fcan_5fmake_5fother_5flost_5fbit',['VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a68686d0ce9beb0d4d1b9f2b8b1389a7e',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fdedicated_5fmemory_5fbit',['VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a3fc311d855c2ff53f1090ef5c722b38f',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597ae5633ec569f4899cf8f29e7385b2f882',1,'vk_mem_alloc.h']]], + ['vma_5fallocation_5fcreate_5fmapped_5fbit',['VMA_ALLOCATION_CREATE_MAPPED_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a11da372cc3a82931c5e5d6146cd9dd1f',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fnever_5fallocate_5fbit',['VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a89759603401014eb325eb22a3839f2ff',1,'vk_mem_alloc.h']]], - ['vma_5fallocation_5fcreate_5fpersistent_5fmap_5fbit',['VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597ae443691ef3d077c0dc3de5576ac4c312',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fexternally_5fsynchronized_5fbit',['VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7ca4816ddaed324ba110172ca608a20f29d',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7cae4d5ad929caba5f23eb502b13bd5286c',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fkhr_5fdedicated_5fallocation_5fbit',['VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7cace7da7cc6e71a625dfa763c55a597878',1,'vk_mem_alloc.h']]], @@ -16,6 +16,5 @@ var searchData= ['vma_5fmemory_5fusage_5fmax_5fenum',['VMA_MEMORY_USAGE_MAX_ENUM',['../vk__mem__alloc_8h.html#aa5846affa1e9da3800e3e78fae2305cca091e69437ef693e8d0d287f1c719ba6e',1,'vk_mem_alloc.h']]], 
['vma_5fmemory_5fusage_5funknown',['VMA_MEMORY_USAGE_UNKNOWN',['../vk__mem__alloc_8h.html#aa5846affa1e9da3800e3e78fae2305ccaf50d27e34e0925cf3a63db8c839121dd',1,'vk_mem_alloc.h']]], ['vma_5fpool_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#a9a7c45f9c863695d98c83fa5ac940fe7a1c7312bea9ea246846b9054fd6bd6aec',1,'vk_mem_alloc.h']]], - ['vma_5fpool_5fcreate_5fignore_5fbuffer_5fimage_5fgranularity_5fbit',['VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT',['../vk__mem__alloc_8h.html#a9a7c45f9c863695d98c83fa5ac940fe7a9f1a499508a8edb4e8ba40aa0290a3d2',1,'vk_mem_alloc.h']]], - ['vma_5fpool_5fcreate_5fpersistent_5fmap_5fbit',['VMA_POOL_CREATE_PERSISTENT_MAP_BIT',['../vk__mem__alloc_8h.html#a9a7c45f9c863695d98c83fa5ac940fe7a918441f7b40dca90481b114f5d224fe9',1,'vk_mem_alloc.h']]] + ['vma_5fpool_5fcreate_5fignore_5fbuffer_5fimage_5fgranularity_5fbit',['VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT',['../vk__mem__alloc_8h.html#a9a7c45f9c863695d98c83fa5ac940fe7a9f1a499508a8edb4e8ba40aa0290a3d2',1,'vk_mem_alloc.h']]] ]; diff --git a/docs/html/search/functions_0.js b/docs/html/search/functions_0.js index 27cbebf..9c722df 100644 --- a/docs/html/search/functions_0.js +++ b/docs/html/search/functions_0.js @@ -25,9 +25,7 @@ var searchData= ['vmagetpoolstats',['vmaGetPoolStats',['../vk__mem__alloc_8h.html#ae8bf76997b234ef68aad922616df4153',1,'vk_mem_alloc.h']]], ['vmamakepoolallocationslost',['vmaMakePoolAllocationsLost',['../vk__mem__alloc_8h.html#a736bd6cbda886f36c891727e73bd4024',1,'vk_mem_alloc.h']]], ['vmamapmemory',['vmaMapMemory',['../vk__mem__alloc_8h.html#ad5bd1243512d099706de88168992f069',1,'vk_mem_alloc.h']]], - ['vmamappersistentlymappedmemory',['vmaMapPersistentlyMappedMemory',['../vk__mem__alloc_8h.html#a03366170bb8e186605518d2f5d65b85a',1,'vk_mem_alloc.h']]], ['vmasetallocationuserdata',['vmaSetAllocationUserData',['../vk__mem__alloc_8h.html#af9147d31ffc11d62fc187bde283ed14f',1,'vk_mem_alloc.h']]], ['vmasetcurrentframeindex',['vmaSetCurrentFrameIndex',['../vk__mem__alloc_8h.html#ade56bf8dc9f5a5eaddf5f119ed525236',1,'vk_mem_alloc.h']]], - ['vmaunmapmemory',['vmaUnmapMemory',['../vk__mem__alloc_8h.html#a9bc268595cb33f6ec4d519cfce81ff45',1,'vk_mem_alloc.h']]], - ['vmaunmappersistentlymappedmemory',['vmaUnmapPersistentlyMappedMemory',['../vk__mem__alloc_8h.html#a26b87244491c1fe77f11fe9ab5779c27',1,'vk_mem_alloc.h']]] + ['vmaunmapmemory',['vmaUnmapMemory',['../vk__mem__alloc_8h.html#a9bc268595cb33f6ec4d519cfce81ff45',1,'vk_mem_alloc.h']]] ]; diff --git a/docs/html/search/pages_3.js b/docs/html/search/pages_3.js index 776e84b..125c1d3 100644 --- a/docs/html/search/pages_3.js +++ b/docs/html/search/pages_3.js @@ -1,4 +1,4 @@ var searchData= [ - ['persistently_20mapped_20memory',['Persistently mapped memory',['../persistently_mapped_memory.html',1,'index']]] + ['memory_20mapping',['Memory mapping',['../memory_mapping.html',1,'index']]] ]; diff --git a/docs/html/search/searchdata.js b/docs/html/search/searchdata.js index d2a7116..6ea3f6a 100644 --- a/docs/html/search/searchdata.js +++ b/docs/html/search/searchdata.js @@ -9,7 +9,7 @@ var indexSectionsWithContent = 6: "v", 7: "v", 8: "v", - 9: "cdlpqtv" + 9: "cdlmqtv" }; var indexSectionNames = diff --git a/docs/html/struct_vma_allocation_info.html b/docs/html/struct_vma_allocation_info.html index 028a086..52cb2f8 100644 --- a/docs/html/struct_vma_allocation_info.html +++ b/docs/html/struct_vma_allocation_info.html @@ -86,7 +86,7 @@ Public Attributes  Size of this allocation, in bytes. More...
        void * pMappedData - Pointer to the beginning of this allocation as mapped data. Null if this alloaction is not persistently mapped. More...
      + Pointer to the beginning of this allocation as mapped data. More...
        void * pUserData  Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). More...
      @@ -160,8 +160,9 @@ Public Attributes
    -

    Pointer to the beginning of this allocation as mapped data. Null if this alloaction is not persistently mapped.

    -

    It can change after call to vmaUnmapPersistentlyMappedMemory(), vmaMapPersistentlyMappedMemory(). It can also change after call to vmaDefragment() if this allocation is passed to the function.

    +

    Pointer to the beginning of this allocation as mapped data.

    +

    If the allocation hasn't been mapped using vmaMapMemory() and hasn't been created with VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.

    +

    It can change after a call to vmaMapMemory() or vmaUnmapMemory(). It can also change after a call to vmaDefragment() if this allocation is passed to the function.

    diff --git a/docs/html/thread_safety.html b/docs/html/thread_safety.html index 6c5aee7..8585ce0 100644 --- a/docs/html/thread_safety.html +++ b/docs/html/thread_safety.html @@ -70,7 +70,7 @@ $(function() {
  • The library has no global state, so separate VmaAllocator objects can be used independently.
  • By default, all calls to functions that take VmaAllocator as first parameter are safe to call from multiple threads simultaneously because they are synchronized internally when needed.
  • When the allocator is created with VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT flag, calls to functions that take such VmaAllocator object must be synchronized externally.
  • -
  • Access to a VmaAllocation object must be externally synchronized. For example, you must not call vmaGetAllocationInfo() and vmaDefragment() from different threads at the same time if you pass the same VmaAllocation object to these functions.
  • +
  • Access to a VmaAllocation object must be externally synchronized. For example, you must not call vmaGetAllocationInfo() and vmaMapMemory() from different threads at the same time if you pass the same VmaAllocation object to these functions.
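  A minimal sketch of one way to satisfy the last rule, assuming allocator and allocation exist; the std::mutex is application-owned and not part of the library:
    std::mutex allocationMutex; // guards all access to `allocation`
    // Any thread touching `allocation` takes the lock first:
    {
        std::lock_guard<std::mutex> lock(allocationMutex);
        void* pData = nullptr;
        if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
        {
            // ... read or write through pData ...
            vmaUnmapMemory(allocator, allocation);
        }
    }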
  • diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html index b160f3b..9fadafa 100644 --- a/docs/html/vk__mem__alloc_8h.html +++ b/docs/html/vk__mem__alloc_8h.html @@ -192,7 +192,7 @@ Enumerations enum  VmaAllocationCreateFlagBits {
      VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, -VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT = 0x00000004, +VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
      VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010, @@ -201,8 +201,7 @@ Enumerations } Flags to be passed as VmaAllocationCreateInfo::flags. More...
      -enum  VmaPoolCreateFlagBits { VMA_POOL_CREATE_PERSISTENT_MAP_BIT = 0x00000001, -VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, +enum  VmaPoolCreateFlagBits { VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } Flags to be passed as VmaPoolCreateInfo::flags. More...
    @@ -269,15 +268,11 @@ Functions  Creates new allocation that is in lost state from the beginning. More...
      VkResult vmaMapMemory (VmaAllocator allocator, VmaAllocation allocation, void **ppData) + Maps memory represented by given allocation and returns pointer to it. More...
      void vmaUnmapMemory (VmaAllocator allocator, VmaAllocation allocation) + Unmaps memory represented by given allocation, mapped previously using vmaMapMemory(). More...
      -void vmaUnmapPersistentlyMappedMemory (VmaAllocator allocator) - Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL. More...
    -  -VkResult vmaMapPersistentlyMappedMemory (VmaAllocator allocator) - Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL. More...
    -  VkResult vmaDefragment (VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)  Compacts memory by moving allocations. More...
      @@ -647,14 +642,15 @@ Functions

    You should not use VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.

    If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored.

    -VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT 

    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.

    -

    Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. You cannot map the memory on your own as multiple mappings of a single VkDeviceMemory are illegal.

    -

    If VmaAllocationCreateInfo::pool is not null, usage of this flag must match usage of flag VMA_POOL_CREATE_PERSISTENT_MAP_BIT used during pool creation.

    -

    Is it valid to use this flag for allocation made from memory type that is not HOST_VISIBLE. This flag is then ignored and memory is not mapped. This is useful if you need an allocation that is efficient to use on GPU (DEVICE_LOCAL) and still want to map it directly if possible on platforms that support it (e.g. Intel GPU).

    +VMA_ALLOCATION_CREATE_MAPPED_BIT 

    Set this flag to use a memory that will be persistently mapped, and to retrieve a pointer to it.

    +

    Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.

    +

    It is valid to use this flag for an allocation made from a memory type that is not HOST_VISIBLE. The flag is then ignored and memory is not mapped. This is useful if you need an allocation that is efficient to use on GPU (DEVICE_LOCAL) and still want to map it directly if possible on platforms that support it (e.g. Intel GPU). See the usage sketch after this enumerator table.

    +

    You should not use this flag together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.

    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT 

    Allocation created with this flag can become lost as a result of another allocation with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag, so you must check it before use.

    To check if allocation is not lost, call vmaGetAllocationInfo() and check if VmaAllocationInfo::deviceMemory is not VK_NULL_HANDLE.

    -

    For details about supporting lost allocations, see Lost Allocations chapter of User Guide on Main Page.

    +

    For details about supporting lost allocations, see Lost Allocations chapter of User Guide on Main Page.

    +

    You should not use this flag together with VMA_ALLOCATION_CREATE_MAPPED_BIT.

    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT 

    While creating allocation using this flag, other allocations that were created with flag VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.

    For details about supporting lost allocations, see Lost Allocations chapter of User Guide on Main Page.

    @@ -743,11 +739,7 @@ Functions

    Flags to be passed as VmaPoolCreateInfo::flags.

    - -
    • Only allocations made in memory types that have VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT flag can be compacted. You may pass other allocations but it makes no sense - these will never be moved.
    • You may pass allocations made with VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT but it makes no sense - they will never be moved.
    • -
    • Both allocations made with or without VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT flag can be compacted. If not persistently mapped, memory will be mapped temporarily inside this function if needed, so it shouldn't be mapped by you for the time of this call.
    • +
    • Allocations made both with and without VMA_ALLOCATION_CREATE_MAPPED_BIT flag can be compacted. If not persistently mapped, memory will be mapped temporarily inside this function if needed.
    • You must not pass the same VmaAllocation object multiple times in the pAllocations array.

    The function also frees empty VkDeviceMemory blocks.

    After allocation has been moved, its VmaAllocationInfo::deviceMemory and/or VmaAllocationInfo::offset changes. You must query them again using vmaGetAllocationInfo() if you need them.

    If an allocation has been moved, data in memory is copied to new place automatically, but if it was bound to a buffer or an image, you must destroy that object yourself, create new one and bind it to the new memory pointed by the allocation. You must use vkDestroyBuffer(), vkDestroyImage(), vkCreateBuffer(), vkCreateImage() for that purpose and NOT vmaDestroyBuffer(), vmaDestroyImage(), vmaCreateBuffer(), vmaCreateImage()! Example:

    -
    VkDevice device = ...;
    VmaAllocator allocator = ...;
    std::vector<VkBuffer> buffers = ...;
    std::vector<VmaAllocation> allocations = ...;
    std::vector<VkBool32> allocationsChanged(allocations.size());
    vmaDefragment(allocator, allocations.data(), allocations.size(), allocationsChanged.data(), nullptr, nullptr);
    for(size_t i = 0; i < allocations.size(); ++i)
    {
    if(allocationsChanged[i])
    {
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
    vkDestroyBuffer(device, buffers[i], nullptr);
    VkBufferCreateInfo bufferInfo = ...;
    vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
    // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
    vkBindBufferMemory(device, buffers[i], allocInfo.deviceMemory, allocInfo.offset);
    }
    }

    This function may be time-consuming, so you shouldn't call it too often (like every frame or after every resource creation/destruction), but rater you can call it on special occasions (like when reloading a game level, when you just destroyed a lot of objects).

    +
    VkDevice device = ...;
    VmaAllocator allocator = ...;
    std::vector<VkBuffer> buffers = ...;
    std::vector<VmaAllocation> allocations = ...;
    std::vector<VkBool32> allocationsChanged(allocations.size());
    vmaDefragment(allocator, allocations.data(), allocations.size(), allocationsChanged.data(), nullptr, nullptr);
    for(size_t i = 0; i < allocations.size(); ++i)
    {
    if(allocationsChanged[i])
    {
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
    vkDestroyBuffer(device, buffers[i], nullptr);
    VkBufferCreateInfo bufferInfo = ...;
    vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
    // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
    vkBindBufferMemory(device, buffers[i], allocInfo.deviceMemory, allocInfo.offset);
    }
    }

    Warning! This function is not correct according to the Vulkan specification. Use it at your own risk. That's because Vulkan doesn't guarantee that memory requirements (size and alignment) for a new buffer or image are consistent. They may be different even for subsequent calls with the same parameters. It really does happen on some platforms, especially with images.

    +


     This function may be time-consuming, so you shouldn't call it too often (like every frame or after every resource creation/destruction), but rather on special occasions (like when reloading a game level or after you have just destroyed a lot of objects).

    @@ -1777,30 +1770,15 @@ Functions
    Enumerator
    VMA_POOL_CREATE_PERSISTENT_MAP_BIT 

    Set this flag to use a memory that will be persistently mapped.

    -

    Each allocation made from this pool will have VmaAllocationInfo::pMappedData available.

    -

    Usage of this flag must match usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT flag for every allocation made from this pool.

    -
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT 

    Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.

    +
    Enumerator
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT 

    Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.

    This is an optional optimization flag.

    If you always allocate using vmaCreateBuffer(), vmaCreateImage(), vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator knows exact type of your allocations so it can handle Buffer-Image Granularity in the optimal way.

    If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), exact type of such allocations is not known, so allocator must be conservative in handling Buffer-Image Granularity, which can lead to suboptimal allocation (wasted memory). In that case, if you can make sure you always allocate only buffers and linear images or only optimal images out of this pool, use this flag to make allocator disregard Buffer-Image Granularity and so make allocations more optimal.
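    As a sketch of the buffer-only case described above, assuming allocator exists and memoryTypeBits came from vkGetBufferMemoryRequirements() for a representative buffer:
    VmaAllocationCreateInfo exampleAllocCreateInfo = {};
    exampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    uint32_t memTypeIndex;
    vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &exampleAllocCreateInfo, &memTypeIndex);
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    // Valid only because this pool will hold buffers and/or linear images exclusively:
    poolCreateInfo.flags = VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT;
    VmaPool pool;
    vmaCreatePool(allocator, &poolCreateInfo, &pool);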

    @@ -1285,13 +1277,14 @@ Functions
    -

    Feel free to use vkMapMemory on these memory blocks on you own if you want, but just for convenience and to make sure correct offset and size is always specified, usage of vmaMapMemory() / vmaUnmapMemory() is recommended.

    -

    Do not use it on memory allocated with VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT as multiple maps to same VkDeviceMemory is illegal.

    -
    - - -

    ◆ vmaMapPersistentlyMappedMemory()

    - -
    -
    - - - - - - - - -
    VkResult vmaMapPersistentlyMappedMemory (VmaAllocator allocator)
    -
    - -

    Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.

    -

    See vmaUnmapPersistentlyMappedMemory().

    -

    After this call VmaAllocationInfo::pMappedData of some allocation may have value different than before calling vmaUnmapPersistentlyMappedMemory().

    +

    Maps memory represented by given allocation and returns pointer to it.

    +


     Maps memory represented by given allocation to make it accessible to CPU code. When the call succeeds, *ppData contains a pointer to the first byte of this memory. If the allocation is part of a bigger VkDeviceMemory block, the pointer is correctly offset to the beginning of the region assigned to this particular allocation.

    +


     Mapping is internally reference-counted and synchronized, so even though the raw Vulkan function vkMapMemory() cannot be used to map the same block of VkDeviceMemory multiple times simultaneously, it is safe to call this function on allocations assigned to the same memory block. The actual Vulkan memory is mapped on the first mapping and unmapped on the last unmapping.

    +


     If the function succeeds, you must call vmaUnmapMemory() to unmap the allocation when the mapping is no longer needed or, at the latest, before freeing the allocation.

    +


     It is also safe to call this function multiple times on the same allocation. You must call vmaUnmapMemory() the same number of times as you called vmaMapMemory().

    +


     It is also safe to call this function on an allocation created with the VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped the whole time. You must still call vmaUnmapMemory() the same number of times as you called vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the "0-th" mapping made automatically due to the VMA_ALLOCATION_CREATE_MAPPED_BIT flag.

    +


     This function fails when used on an allocation made in a memory type that is not HOST_VISIBLE.

    +


     This function always fails when called for an allocation that was created with the VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be mapped.
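
     A minimal usage sketch, assuming allocator and a HOST_VISIBLE allocation exist and srcData/srcDataSize stand in for application data:
    void* pData = nullptr;
    if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcDataSize);
        // Every successful vmaMapMemory() must be balanced by one vmaUnmapMemory():
        vmaUnmapMemory(allocator, allocation);
    }
     Because mapping is reference-counted, such map/unmap pairs may be nested on the same allocation or issued for different allocations that share one VkDeviceMemory block.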

    @@ -1897,28 +1875,8 @@ Functions
    -
    - - -

    ◆ vmaUnmapPersistentlyMappedMemory()

    - -
    -
    - - - - - - - - -
    void vmaUnmapPersistentlyMappedMemory (VmaAllocator allocator)
    -
    - -

    Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.

    -

    This is optional performance optimization. On AMD GPUs on Windows, Vulkan memory from the type that has both DEVICE_LOCAL and HOST_VISIBLE flags should not be mapped for the time of any call to vkQueueSubmit() or vkQueuePresent(). Although legal, that would cause performance degradation because WDDM migrates such memory to system RAM. To ensure this, you can unmap all persistently mapped memory using this function. Example:

    -
    vkQueueSubmit(...)

    After this call VmaAllocationInfo::pMappedData of some allocations may become null.

    -

    This call is reference-counted. Memory is mapped again after you call vmaMapPersistentlyMappedMemory() same number of times that you called vmaUnmapPersistentlyMappedMemory().

    +

    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().

    +

    For details, see description of vmaMapMemory().

    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 2ca8750..529e33c 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -62,156 +62,153 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    492 #include <vulkan/vulkan.h>
    493 
    494 VK_DEFINE_HANDLE(VmaAllocator)
    495 
    496 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    498  VmaAllocator allocator,
    499  uint32_t memoryType,
    500  VkDeviceMemory memory,
    501  VkDeviceSize size);
    503 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    504  VmaAllocator allocator,
    505  uint32_t memoryType,
    506  VkDeviceMemory memory,
    507  VkDeviceSize size);
    508 
    516 typedef struct VmaDeviceMemoryCallbacks {
    522 
    558 
    561 typedef VkFlags VmaAllocatorCreateFlags;
    562 
    567 typedef struct VmaVulkanFunctions {
    568  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    569  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    570  PFN_vkAllocateMemory vkAllocateMemory;
    571  PFN_vkFreeMemory vkFreeMemory;
    572  PFN_vkMapMemory vkMapMemory;
    573  PFN_vkUnmapMemory vkUnmapMemory;
    574  PFN_vkBindBufferMemory vkBindBufferMemory;
    575  PFN_vkBindImageMemory vkBindImageMemory;
    576  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    577  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    578  PFN_vkCreateBuffer vkCreateBuffer;
    579  PFN_vkDestroyBuffer vkDestroyBuffer;
    580  PFN_vkCreateImage vkCreateImage;
    581  PFN_vkDestroyImage vkDestroyImage;
    582  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    583  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    585 
    588 {
    590  VmaAllocatorCreateFlags flags;
    592 
    593  VkPhysicalDevice physicalDevice;
    595 
    596  VkDevice device;
    598 
    601 
    604 
    605  const VkAllocationCallbacks* pAllocationCallbacks;
    607 
    622  uint32_t frameInUseCount;
    640  const VkDeviceSize* pHeapSizeLimit;
    654 
    656 VkResult vmaCreateAllocator(
    657  const VmaAllocatorCreateInfo* pCreateInfo,
    658  VmaAllocator* pAllocator);
    659 
    662  VmaAllocator allocator);
    663 
    669  VmaAllocator allocator,
    670  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    671 
    677  VmaAllocator allocator,
    678  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    679 
    687  VmaAllocator allocator,
    688  uint32_t memoryTypeIndex,
    689  VkMemoryPropertyFlags* pFlags);
    690 
    700  VmaAllocator allocator,
    701  uint32_t frameIndex);
    702 
    705 typedef struct VmaStatInfo
    706 {
    708  uint32_t blockCount;
    710  uint32_t allocationCount;
    714  VkDeviceSize usedBytes;
    716  VkDeviceSize unusedBytes;
    717  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    718  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    719 } VmaStatInfo;
    720 
    722 typedef struct VmaStats
    723 {
    724  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    725  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    727 } VmaStats;
    728 
    730 void vmaCalculateStats(
    731  VmaAllocator allocator,
    732  VmaStats* pStats);
    733 
    734 #define VMA_STATS_STRING_ENABLED 1
    735 
    736 #if VMA_STATS_STRING_ENABLED
    737 
    739 
    742  VmaAllocator allocator,
    743  char** ppStatsString,
    744  VkBool32 detailedMap);
    745 
    746 void vmaFreeStatsString(
    747  VmaAllocator allocator,
    748  char* pStatsString);
    749 
    750 #endif // #if VMA_STATS_STRING_ENABLED
    751 
    752 VK_DEFINE_HANDLE(VmaPool)
    753 
    754 typedef enum VmaMemoryUsage
    755 {
    761 
    764 
    767 
    771 
    786 
    831 
    834 typedef VkFlags VmaAllocationCreateFlags;
    835 
    837 {
    839  VmaAllocationCreateFlags flags;
    850  VkMemoryPropertyFlags requiredFlags;
    856  VkMemoryPropertyFlags preferredFlags;
    858  void* pUserData;
    863  VmaPool pool;
    865 
    880 VkResult vmaFindMemoryTypeIndex(
    881  VmaAllocator allocator,
    882  uint32_t memoryTypeBits,
    883  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    884  uint32_t* pMemoryTypeIndex);
    885 
    887 typedef enum VmaPoolCreateFlagBits {
    915 
    918 typedef VkFlags VmaPoolCreateFlags;
    919 
    922 typedef struct VmaPoolCreateInfo {
    925  uint32_t memoryTypeIndex;
    928  VmaPoolCreateFlags flags;
    933  VkDeviceSize blockSize;
    960  uint32_t frameInUseCount;
    962 
    965 typedef struct VmaPoolStats {
    968  VkDeviceSize size;
    971  VkDeviceSize unusedSize;
    984  VkDeviceSize unusedRangeSizeMax;
    985 } VmaPoolStats;
    986 
    993 VkResult vmaCreatePool(
    994  VmaAllocator allocator,
    995  const VmaPoolCreateInfo* pCreateInfo,
    996  VmaPool* pPool);
    997 
    1000 void vmaDestroyPool(
    1001  VmaAllocator allocator,
    1002  VmaPool pool);
    1003 
    1010 void vmaGetPoolStats(
    1011  VmaAllocator allocator,
    1012  VmaPool pool,
    1013  VmaPoolStats* pPoolStats);
    1014 
    1022  VmaAllocator allocator,
    1023  VmaPool pool,
    1024  size_t* pLostAllocationCount);
    1025 
    1026 VK_DEFINE_HANDLE(VmaAllocation)
    1027 
    1028 
    1030 typedef struct VmaAllocationInfo {
    1035  uint32_t memoryType;
    1044  VkDeviceMemory deviceMemory;
    1049  VkDeviceSize offset;
    1054  VkDeviceSize size;
    1065  void* pUserData;
    1067 
    1078 VkResult vmaAllocateMemory(
    1079  VmaAllocator allocator,
    1080  const VkMemoryRequirements* pVkMemoryRequirements,
    1081  const VmaAllocationCreateInfo* pCreateInfo,
    1082  VmaAllocation* pAllocation,
    1083  VmaAllocationInfo* pAllocationInfo);
    1084 
    1092  VmaAllocator allocator,
    1093  VkBuffer buffer,
    1094  const VmaAllocationCreateInfo* pCreateInfo,
    1095  VmaAllocation* pAllocation,
    1096  VmaAllocationInfo* pAllocationInfo);
    1097 
    1099 VkResult vmaAllocateMemoryForImage(
    1100  VmaAllocator allocator,
    1101  VkImage image,
    1102  const VmaAllocationCreateInfo* pCreateInfo,
    1103  VmaAllocation* pAllocation,
    1104  VmaAllocationInfo* pAllocationInfo);
    1105 
    1107 void vmaFreeMemory(
    1108  VmaAllocator allocator,
    1109  VmaAllocation allocation);
    1110 
    1113  VmaAllocator allocator,
    1114  VmaAllocation allocation,
    1115  VmaAllocationInfo* pAllocationInfo);
    1116 
    1119  VmaAllocator allocator,
    1120  VmaAllocation allocation,
    1121  void* pUserData);
    1122 
    1134  VmaAllocator allocator,
    1135  VmaAllocation* pAllocation);
    1136 
    1145 VkResult vmaMapMemory(
    1146  VmaAllocator allocator,
    1147  VmaAllocation allocation,
    1148  void** ppData);
    1149 
    1150 void vmaUnmapMemory(
    1151  VmaAllocator allocator,
    1152  VmaAllocation allocation);
    1153 
    1176 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
    1177 
    1185 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
    1186 
    1188 typedef struct VmaDefragmentationInfo {
    1193  VkDeviceSize maxBytesToMove;
    1200 
    1202 typedef struct VmaDefragmentationStats {
    1204  VkDeviceSize bytesMoved;
    1206  VkDeviceSize bytesFreed;
    1212 
    1284 VkResult vmaDefragment(
    1285  VmaAllocator allocator,
    1286  VmaAllocation* pAllocations,
    1287  size_t allocationCount,
    1288  VkBool32* pAllocationsChanged,
    1289  const VmaDefragmentationInfo *pDefragmentationInfo,
    1290  VmaDefragmentationStats* pDefragmentationStats);
    1291 
    1318 VkResult vmaCreateBuffer(
    1319  VmaAllocator allocator,
    1320  const VkBufferCreateInfo* pBufferCreateInfo,
    1321  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1322  VkBuffer* pBuffer,
    1323  VmaAllocation* pAllocation,
    1324  VmaAllocationInfo* pAllocationInfo);
    1325 
    1337 void vmaDestroyBuffer(
    1338  VmaAllocator allocator,
    1339  VkBuffer buffer,
    1340  VmaAllocation allocation);
    1341 
    1343 VkResult vmaCreateImage(
    1344  VmaAllocator allocator,
    1345  const VkImageCreateInfo* pImageCreateInfo,
    1346  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1347  VkImage* pImage,
    1348  VmaAllocation* pAllocation,
    1349  VmaAllocationInfo* pAllocationInfo);
    1350 
    1362 void vmaDestroyImage(
    1363  VmaAllocator allocator,
    1364  VkImage image,
    1365  VmaAllocation allocation);
    1366 
    1367 #ifdef __cplusplus
    1368 }
    1369 #endif
    1370 
    1371 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    1372 
    1373 // For Visual Studio IntelliSense.
    1374 #ifdef __INTELLISENSE__
    1375 #define VMA_IMPLEMENTATION
    1376 #endif
    1377 
    1378 #ifdef VMA_IMPLEMENTATION
    1379 #undef VMA_IMPLEMENTATION
    1380 
    1381 #include <cstdint>
    1382 #include <cstdlib>
    1383 #include <cstring>
    1384 
    1385 /*******************************************************************************
    1386 CONFIGURATION SECTION
    1387 
    1388 Define some of these macros before each #include of this header or change them
    1389 here if you need other than default behavior depending on your environment.
    1390 */
    1391 
    1392 /*
    1393 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    1394 internally, like:
    1395 
    1396  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    1397 
    1398 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    1399 VmaAllocatorCreateInfo::pVulkanFunctions.
    1400 */
    1401 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
    1402 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    1403 #endif
    1404 
    1405 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    1406 //#define VMA_USE_STL_CONTAINERS 1
    1407 
    1408 /* Set this macro to 1 to make the library include and use STL containers:
    1409 std::pair, std::vector, std::list, std::unordered_map.
    1410 
    1411 Set it to 0 or undefined to make the library use its own implementation of
    1412 the containers.
    1413 */
    1414 #if VMA_USE_STL_CONTAINERS
    1415  #define VMA_USE_STL_VECTOR 1
    1416  #define VMA_USE_STL_UNORDERED_MAP 1
    1417  #define VMA_USE_STL_LIST 1
    1418 #endif
    1419 
    1420 #if VMA_USE_STL_VECTOR
    1421  #include <vector>
    1422 #endif
    1423 
    1424 #if VMA_USE_STL_UNORDERED_MAP
    1425  #include <unordered_map>
    1426 #endif
    1427 
    1428 #if VMA_USE_STL_LIST
    1429  #include <list>
    1430 #endif
    1431 
    1432 /*
    1433 Following headers are used in this CONFIGURATION section only, so feel free to
    1434 remove them if not needed.
    1435 */
    1436 #include <cassert> // for assert
    1437 #include <algorithm> // for min, max
    1438 #include <mutex> // for std::mutex
    1439 #include <atomic> // for std::atomic
    1440 
    1441 #if !defined(_WIN32)
    1442  #include <malloc.h> // for aligned_alloc()
    1443 #endif
    1444 
    1445 // Normal assert to check for programmer's errors, especially in Debug configuration.
    1446 #ifndef VMA_ASSERT
    1447  #ifdef _DEBUG
    1448  #define VMA_ASSERT(expr) assert(expr)
    1449  #else
    1450  #define VMA_ASSERT(expr)
    1451  #endif
    1452 #endif
    1453 
    1454 // Assert that will be called very often, like inside data structures e.g. operator[].
    1455 // Making it non-empty can make program slow.
    1456 #ifndef VMA_HEAVY_ASSERT
    1457  #ifdef _DEBUG
    1458  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    1459  #else
    1460  #define VMA_HEAVY_ASSERT(expr)
    1461  #endif
    1462 #endif
    1463 
    1464 #ifndef VMA_NULL
    1465  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    1466  #define VMA_NULL nullptr
    1467 #endif
    1468 
    1469 #ifndef VMA_ALIGN_OF
    1470  #define VMA_ALIGN_OF(type) (__alignof(type))
    1471 #endif
    1472 
    1473 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    1474  #if defined(_WIN32)
    1475  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    1476  #else
    1477  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    1478  #endif
    1479 #endif
    1480 
    1481 #ifndef VMA_SYSTEM_FREE
    1482  #if defined(_WIN32)
    1483  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    1484  #else
    1485  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    1486  #endif
    1487 #endif
    1488 
    1489 #ifndef VMA_MIN
    1490  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    1491 #endif
    1492 
    1493 #ifndef VMA_MAX
    1494  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    1495 #endif
    1496 
    1497 #ifndef VMA_SWAP
    1498  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    1499 #endif
    1500 
    1501 #ifndef VMA_SORT
    1502  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    1503 #endif
    1504 
    1505 #ifndef VMA_DEBUG_LOG
    1506  #define VMA_DEBUG_LOG(format, ...)
    1507  /*
    1508  #define VMA_DEBUG_LOG(format, ...) do { \
    1509  printf(format, __VA_ARGS__); \
    1510  printf("\n"); \
    1511  } while(false)
    1512  */
    1513 #endif
    1514 
    1515 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    1516 #if VMA_STATS_STRING_ENABLED
    1517  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    1518  {
    1519  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    1520  }
    1521  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    1522  {
    1523  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    1524  }
    1525  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    1526  {
    1527  snprintf(outStr, strLen, "%p", ptr);
    1528  }
    1529 #endif
    1530 
    1531 #ifndef VMA_MUTEX
    1532  class VmaMutex
    1533  {
    1534  public:
    1535  VmaMutex() { }
    1536  ~VmaMutex() { }
    1537  void Lock() { m_Mutex.lock(); }
    1538  void Unlock() { m_Mutex.unlock(); }
    1539  private:
    1540  std::mutex m_Mutex;
    1541  };
    1542  #define VMA_MUTEX VmaMutex
    1543 #endif
    1544 
    1545 /*
    1546 If providing your own implementation, you need to implement a subset of std::atomic:
    1547 
    1548 - Constructor(uint32_t desired)
    1549 - uint32_t load() const
    1550 - void store(uint32_t desired)
    1551 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    1552 */
    1553 #ifndef VMA_ATOMIC_UINT32
    1554  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    1555 #endif
    1556 
    1557 #ifndef VMA_BEST_FIT
    1558 
    1570  #define VMA_BEST_FIT (1)
    1571 #endif
    1572 
    1573 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    1574 
    1578  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    1579 #endif
    1580 
    1581 #ifndef VMA_DEBUG_ALIGNMENT
    1582 
    1586  #define VMA_DEBUG_ALIGNMENT (1)
    1587 #endif
    1588 
    1589 #ifndef VMA_DEBUG_MARGIN
    1590 
    1594  #define VMA_DEBUG_MARGIN (0)
    1595 #endif
    1596 
    1597 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    1598 
    1602  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    1603 #endif
    1604 
    1605 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    1606 
    1610  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    1611 #endif
    1612 
    1613 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    1614  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
    1616 #endif
    1617 
    1618 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    1619  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
    1621 #endif
    1622 
    1623 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    1624  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
    1626 #endif
    1627 
    1628 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    1629 
    1630 /*******************************************************************************
    1631 END OF CONFIGURATION
    1632 */
    1633 
    1634 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    1635  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    1636 
    1637 // Returns number of bits set to 1 in (v).
    1638 static inline uint32_t CountBitsSet(uint32_t v)
    1639 {
    1640  uint32_t c = v - ((v >> 1) & 0x55555555);
    1641  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    1642  c = ((c >> 4) + c) & 0x0F0F0F0F;
    1643  c = ((c >> 8) + c) & 0x00FF00FF;
    1644  c = ((c >> 16) + c) & 0x0000FFFF;
    1645  return c;
    1646 }
    1647 
    1648 // Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    1649 // Use types like uint32_t, uint64_t as T.
    1650 template <typename T>
    1651 static inline T VmaAlignUp(T val, T align)
    1652 {
    1653  return (val + align - 1) / align * align;
    1654 }
    1655 
    1656 // Division with mathematical rounding to nearest number.
    1657 template <typename T>
    1658 inline T VmaRoundDiv(T x, T y)
    1659 {
    1660  return (x + (y / (T)2)) / y;
    1661 }
    1662 
    1663 #ifndef VMA_SORT
    1664 
    1665 template<typename Iterator, typename Compare>
    1666 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    1667 {
    1668  Iterator centerValue = end; --centerValue;
    1669  Iterator insertIndex = beg;
    1670  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    1671  {
    1672  if(cmp(*memTypeIndex, *centerValue))
    1673  {
    1674  if(insertIndex != memTypeIndex)
    1675  {
    1676  VMA_SWAP(*memTypeIndex, *insertIndex);
    1677  }
    1678  ++insertIndex;
    1679  }
    1680  }
    1681  if(insertIndex != centerValue)
    1682  {
    1683  VMA_SWAP(*insertIndex, *centerValue);
    1684  }
    1685  return insertIndex;
    1686 }
    1687 
    1688 template<typename Iterator, typename Compare>
    1689 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    1690 {
    1691  if(beg < end)
    1692  {
    1693  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    1694  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    1695  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    1696  }
    1697 }
    1698 
    1699 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    1700 
    1701 #endif // #ifndef VMA_SORT
    1702 
    1703 /*
    1704 Returns true if two memory blocks occupy overlapping pages.
    1705 ResourceA must be at a lower memory offset than ResourceB.
    1706 
    1707 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    1708 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    1709 */
    1710 static inline bool VmaBlocksOnSamePage(
    1711  VkDeviceSize resourceAOffset,
    1712  VkDeviceSize resourceASize,
    1713  VkDeviceSize resourceBOffset,
    1714  VkDeviceSize pageSize)
    1715 {
    1716  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    1717  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    1718  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    1719  VkDeviceSize resourceBStart = resourceBOffset;
    1720  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    1721  return resourceAEndPage == resourceBStartPage;
    1722 }
    1723 
    1724 enum VmaSuballocationType
    1725 {
    1726  VMA_SUBALLOCATION_TYPE_FREE = 0,
    1727  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    1728  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    1729  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    1730  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    1731  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    1732  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    1733 };
    1734 
    1735 /*
    1736 Returns true if given suballocation types could conflict and must respect
    1737 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    1738 or linear image and another one is optimal image. If type is unknown, behave
    1739 conservatively.
    1740 */
    1741 static inline bool VmaIsBufferImageGranularityConflict(
    1742  VmaSuballocationType suballocType1,
    1743  VmaSuballocationType suballocType2)
    1744 {
    1745  if(suballocType1 > suballocType2)
    1746  {
    1747  VMA_SWAP(suballocType1, suballocType2);
    1748  }
    1749 
    1750  switch(suballocType1)
    1751  {
    1752  case VMA_SUBALLOCATION_TYPE_FREE:
    1753  return false;
    1754  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    1755  return true;
    1756  case VMA_SUBALLOCATION_TYPE_BUFFER:
    1757  return
    1758  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1759  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1760  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    1761  return
    1762  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1763  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    1764  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1765  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    1766  return
    1767  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1768  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    1769  return false;
    1770  default:
    1771  VMA_ASSERT(0);
    1772  return true;
    1773  }
    1774 }
    1775 
    1776 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    1777 struct VmaMutexLock
    1778 {
    1779 public:
    1780  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    1781  m_pMutex(useMutex ? &mutex : VMA_NULL)
    1782  {
    1783  if(m_pMutex)
    1784  {
    1785  m_pMutex->Lock();
    1786  }
    1787  }
    1788 
    1789  ~VmaMutexLock()
    1790  {
    1791  if(m_pMutex)
    1792  {
    1793  m_pMutex->Unlock();
    1794  }
    1795  }
    1796 
    1797 private:
    1798  VMA_MUTEX* m_pMutex;
    1799 };
    1800 
    1801 #if VMA_DEBUG_GLOBAL_MUTEX
    1802  static VMA_MUTEX gDebugGlobalMutex;
    1803  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    1804 #else
    1805  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    1806 #endif
    1807 
    1808 // Minimum size of a free suballocation to register it in the free suballocation collection.
    1809 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    1810 
    1811 /*
    1812 Performs binary search and returns iterator to first element that is greater or
    1813 equal to (key), according to comparison (cmp).
    1814 
    1815 Cmp should return true if first argument is less than second argument.
    1816 
    1817 Returned value is the found element if present in the collection, or the place
    1818 where a new element with value (key) should be inserted.
    1819 */
    1820 template <typename IterT, typename KeyT, typename CmpT>
    1821 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    1822 {
    1823  size_t down = 0, up = (end - beg);
    1824  while(down < up)
    1825  {
    1826  const size_t mid = (down + up) / 2;
    1827  if(cmp(*(beg+mid), key))
    1828  {
    1829  down = mid + 1;
    1830  }
    1831  else
    1832  {
    1833  up = mid;
    1834  }
    1835  }
    1836  return beg + down;
    1837 }
    1838 
    1840 // Memory allocation
    1841 
    1842 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    1843 {
    1844  if((pAllocationCallbacks != VMA_NULL) &&
    1845  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    1846  {
    1847  return (*pAllocationCallbacks->pfnAllocation)(
    1848  pAllocationCallbacks->pUserData,
    1849  size,
    1850  alignment,
    1851  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    1852  }
    1853  else
    1854  {
    1855  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    1856  }
    1857 }
    1858 
    1859 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    1860 {
    1861  if((pAllocationCallbacks != VMA_NULL) &&
    1862  (pAllocationCallbacks->pfnFree != VMA_NULL))
    1863  {
    1864  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    1865  }
    1866  else
    1867  {
    1868  VMA_SYSTEM_FREE(ptr);
    1869  }
    1870 }
    1871 
    1872 template<typename T>
    1873 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    1874 {
    1875  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    1876 }
    1877 
    1878 template<typename T>
    1879 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    1880 {
    1881  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    1882 }
    1883 
    1884 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    1885 
    1886 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    1887 
    1888 template<typename T>
    1889 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    1890 {
    1891  ptr->~T();
    1892  VmaFree(pAllocationCallbacks, ptr);
    1893 }
    1894 
    1895 template<typename T>
    1896 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    1897 {
    1898  if(ptr != VMA_NULL)
    1899  {
    1900  for(size_t i = count; i--; )
    1901  {
    1902  ptr[i].~T();
    1903  }
    1904  VmaFree(pAllocationCallbacks, ptr);
    1905  }
    1906 }
    1907 
    1908 // STL-compatible allocator.
    1909 template<typename T>
    1910 class VmaStlAllocator
    1911 {
    1912 public:
    1913  const VkAllocationCallbacks* const m_pCallbacks;
    1914  typedef T value_type;
    1915 
    1916  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    1917  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    1918 
    1919  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    1920  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    1921 
    1922  template<typename U>
    1923  bool operator==(const VmaStlAllocator<U>& rhs) const
    1924  {
    1925  return m_pCallbacks == rhs.m_pCallbacks;
    1926  }
    1927  template<typename U>
    1928  bool operator!=(const VmaStlAllocator<U>& rhs) const
    1929  {
    1930  return m_pCallbacks != rhs.m_pCallbacks;
    1931  }
    1932 
    1933  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    1934 };
    1935 
    1936 #if VMA_USE_STL_VECTOR
    1937 
    1938 #define VmaVector std::vector
    1939 
    1940 template<typename T, typename allocatorT>
    1941 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    1942 {
    1943  vec.insert(vec.begin() + index, item);
    1944 }
    1945 
    1946 template<typename T, typename allocatorT>
    1947 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    1948 {
    1949  vec.erase(vec.begin() + index);
    1950 }
    1951 
    1952 #else // #if VMA_USE_STL_VECTOR
    1953 
    1954 /* Class with interface compatible with subset of std::vector.
    1955 T must be POD because constructors and destructors are not called and memcpy is
    1956 used for these objects. */
    1957 template<typename T, typename AllocatorT>
    1958 class VmaVector
    1959 {
    1960 public:
    1961  typedef T value_type;
    1962 
    1963  VmaVector(const AllocatorT& allocator) :
    1964  m_Allocator(allocator),
    1965  m_pArray(VMA_NULL),
    1966  m_Count(0),
    1967  m_Capacity(0)
    1968  {
    1969  }
    1970 
    1971  VmaVector(size_t count, const AllocatorT& allocator) :
    1972  m_Allocator(allocator),
    1973  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    1974  m_Count(count),
    1975  m_Capacity(count)
    1976  {
    1977  }
    1978 
    1979  VmaVector(const VmaVector<T, AllocatorT>& src) :
    1980  m_Allocator(src.m_Allocator),
    1981  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    1982  m_Count(src.m_Count),
    1983  m_Capacity(src.m_Count)
    1984  {
    1985  if(m_Count != 0)
    1986  {
    1987  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    1988  }
    1989  }
    1990 
    1991  ~VmaVector()
    1992  {
    1993  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1994  }
    1995 
    1996  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    1997  {
    1998  if(&rhs != this)
    1999  {
    2000  resize(rhs.m_Count);
    2001  if(m_Count != 0)
    2002  {
    2003  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    2004  }
    2005  }
    2006  return *this;
    2007  }
    2008 
    2009  bool empty() const { return m_Count == 0; }
    2010  size_t size() const { return m_Count; }
    2011  T* data() { return m_pArray; }
    2012  const T* data() const { return m_pArray; }
    2013 
    2014  T& operator[](size_t index)
    2015  {
    2016  VMA_HEAVY_ASSERT(index < m_Count);
    2017  return m_pArray[index];
    2018  }
    2019  const T& operator[](size_t index) const
    2020  {
    2021  VMA_HEAVY_ASSERT(index < m_Count);
    2022  return m_pArray[index];
    2023  }
    2024 
    2025  T& front()
    2026  {
    2027  VMA_HEAVY_ASSERT(m_Count > 0);
    2028  return m_pArray[0];
    2029  }
    2030  const T& front() const
    2031  {
    2032  VMA_HEAVY_ASSERT(m_Count > 0);
    2033  return m_pArray[0];
    2034  }
    2035  T& back()
    2036  {
    2037  VMA_HEAVY_ASSERT(m_Count > 0);
    2038  return m_pArray[m_Count - 1];
    2039  }
    2040  const T& back() const
    2041  {
    2042  VMA_HEAVY_ASSERT(m_Count > 0);
    2043  return m_pArray[m_Count - 1];
    2044  }
    2045 
    2046  void reserve(size_t newCapacity, bool freeMemory = false)
    2047  {
    2048  newCapacity = VMA_MAX(newCapacity, m_Count);
    2049 
    2050  if((newCapacity < m_Capacity) && !freeMemory)
    2051  {
    2052  newCapacity = m_Capacity;
    2053  }
    2054 
    2055  if(newCapacity != m_Capacity)
    2056  {
    2057  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    2058  if(m_Count != 0)
    2059  {
    2060  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    2061  }
    2062  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2063  m_Capacity = newCapacity;
    2064  m_pArray = newArray;
    2065  }
    2066  }
    2067 
    2068  void resize(size_t newCount, bool freeMemory = false)
    2069  {
    2070  size_t newCapacity = m_Capacity;
    2071  if(newCount > m_Capacity)
    2072  {
    2073  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    2074  }
    2075  else if(freeMemory)
    2076  {
    2077  newCapacity = newCount;
    2078  }
    2079 
    2080  if(newCapacity != m_Capacity)
    2081  {
    2082  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2083  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    2084  if(elementsToCopy != 0)
    2085  {
    2086  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    2087  }
    2088  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2089  m_Capacity = newCapacity;
    2090  m_pArray = newArray;
    2091  }
    2092 
    2093  m_Count = newCount;
    2094  }
    2095 
    2096  void clear(bool freeMemory = false)
    2097  {
    2098  resize(0, freeMemory);
    2099  }
    2100 
    2101  void insert(size_t index, const T& src)
    2102  {
    2103  VMA_HEAVY_ASSERT(index <= m_Count);
    2104  const size_t oldCount = size();
    2105  resize(oldCount + 1);
    2106  if(index < oldCount)
    2107  {
    2108  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    2109  }
    2110  m_pArray[index] = src;
    2111  }
    2112 
    2113  void remove(size_t index)
    2114  {
    2115  VMA_HEAVY_ASSERT(index < m_Count);
    2116  const size_t oldCount = size();
    2117  if(index < oldCount - 1)
    2118  {
    2119  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2120  }
    2121  resize(oldCount - 1);
    2122  }
    2123 
    2124  void push_back(const T& src)
    2125  {
    2126  const size_t newIndex = size();
    2127  resize(newIndex + 1);
    2128  m_pArray[newIndex] = src;
    2129  }
    2130 
    2131  void pop_back()
    2132  {
    2133  VMA_HEAVY_ASSERT(m_Count > 0);
    2134  resize(size() - 1);
    2135  }
    2136 
    2137  void push_front(const T& src)
    2138  {
    2139  insert(0, src);
    2140  }
    2141 
    2142  void pop_front()
    2143  {
    2144  VMA_HEAVY_ASSERT(m_Count > 0);
    2145  remove(0);
    2146  }
    2147 
    2148  typedef T* iterator;
    2149 
    2150  iterator begin() { return m_pArray; }
    2151  iterator end() { return m_pArray + m_Count; }
    2152 
    2153 private:
    2154  AllocatorT m_Allocator;
    2155  T* m_pArray;
    2156  size_t m_Count;
    2157  size_t m_Capacity;
    2158 };
    2159 
    2160 template<typename T, typename allocatorT>
    2161 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    2162 {
    2163  vec.insert(index, item);
    2164 }
    2165 
    2166 template<typename T, typename allocatorT>
    2167 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    2168 {
    2169  vec.remove(index);
    2170 }
    2171 
    2172 #endif // #if VMA_USE_STL_VECTOR
    2173 
    2174 template<typename CmpLess, typename VectorT>
    2175 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    2176 {
    2177  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2178  vector.data(),
    2179  vector.data() + vector.size(),
    2180  value,
    2181  CmpLess()) - vector.data();
    2182  VmaVectorInsert(vector, indexToInsert, value);
    2183  return indexToInsert;
    2184 }
    2185 
    2186 template<typename CmpLess, typename VectorT>
    2187 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    2188 {
    2189  CmpLess comparator;
    2190  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2191  vector.begin(),
    2192  vector.end(),
    2193  value,
    2194  comparator);
    2195  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    2196  {
    2197  size_t indexToRemove = it - vector.begin();
    2198  VmaVectorRemove(vector, indexToRemove);
    2199  return true;
    2200  }
    2201  return false;
    2202 }
    2203 
    2204 template<typename CmpLess, typename VectorT>
    2205 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    2206 {
    2207  CmpLess comparator;
    2208  const typename VectorT::value_type* const it = VmaBinaryFindFirstNotLess(
    2209  vector.data(),
    2210  vector.data() + vector.size(),
    2211  value,
    2212  comparator);
    2213  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    2214  {
    2215  return it - vector.data();
    2216  }
    2217  else
    2218  {
    2219  return vector.size();
    2220  }
    2221 }
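// The three helpers above keep a VmaVector sorted under a user-supplied "less"
// comparator. A minimal usage sketch (hypothetical IntLess comparator and
// values; pCallbacks is assumed to be a valid const VkAllocationCallbacks*):
#if 0
struct IntLess { bool operator()(int lhs, int rhs) const { return lhs < rhs; } };

static void SortedVectorExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaVector< int, VmaStlAllocator<int> > vec((VmaStlAllocator<int>(pCallbacks)));
    VmaVectorInsertSorted<IntLess>(vec, 42); // Binary search for position, then insert.
    VmaVectorInsertSorted<IntLess>(vec, 7);
    const size_t foundIndex = VmaVectorFindSorted<IntLess>(vec, 42); // vec.size() if absent.
    (void)foundIndex;
    VmaVectorRemoveSorted<IntLess>(vec, 7); // Returns false if the value is absent.
}
#endif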
    2222 
    2224 // class VmaPoolAllocator
    2225 
    2226 /*
    2227 Allocator for objects of type T, using a list of arrays (pools) to speed up
    2228 allocation. The number of elements that can be allocated is not bounded,
    2229 because the allocator can create multiple blocks. See the usage sketch
    2230 below the class declaration. */
    2231 template<typename T>
    2232 class VmaPoolAllocator
    2233 {
    2234 public:
    2235  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    2236  ~VmaPoolAllocator();
    2237  void Clear();
    2238  T* Alloc();
    2239  void Free(T* ptr);
    2240 
    2241 private:
    2242  union Item
    2243  {
    2244  uint32_t NextFreeIndex;
    2245  T Value;
    2246  };
    2247 
    2248  struct ItemBlock
    2249  {
    2250  Item* pItems;
    2251  uint32_t FirstFreeIndex;
    2252  };
    2253 
    2254  const VkAllocationCallbacks* m_pAllocationCallbacks;
    2255  size_t m_ItemsPerBlock;
    2256  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    2257 
    2258  ItemBlock& CreateNewBlock();
    2259 };
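// A minimal usage sketch (hypothetical Node type; pCallbacks is assumed to be
// a valid const VkAllocationCallbacks*). Note that Alloc() returns raw,
// unconstructed storage, so it is best suited to POD types:
#if 0
struct Node { int value; };

static void PoolAllocatorExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<Node> allocator(pCallbacks, 32); // 32 items per block.
    Node* const pNode = allocator.Alloc(); // O(1) unless a new block is needed.
    pNode->value = 1;
    allocator.Free(pNode); // Returns the item to its block's free list.
}
#endif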
    2260 
    2261 template<typename T>
    2262 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    2263  m_pAllocationCallbacks(pAllocationCallbacks),
    2264  m_ItemsPerBlock(itemsPerBlock),
    2265  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    2266 {
    2267  VMA_ASSERT(itemsPerBlock > 0);
    2268 }
    2269 
    2270 template<typename T>
    2271 VmaPoolAllocator<T>::~VmaPoolAllocator()
    2272 {
    2273  Clear();
    2274 }
    2275 
    2276 template<typename T>
    2277 void VmaPoolAllocator<T>::Clear()
    2278 {
    2279  for(size_t i = m_ItemBlocks.size(); i--; )
    2280  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    2281  m_ItemBlocks.clear();
    2282 }
    2283 
    2284 template<typename T>
    2285 T* VmaPoolAllocator<T>::Alloc()
    2286 {
    2287  for(size_t i = m_ItemBlocks.size(); i--; )
    2288  {
    2289  ItemBlock& block = m_ItemBlocks[i];
    2290  // This block has some free items: Use the first one.
    2291  if(block.FirstFreeIndex != UINT32_MAX)
    2292  {
    2293  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    2294  block.FirstFreeIndex = pItem->NextFreeIndex;
    2295  return &pItem->Value;
    2296  }
    2297  }
    2298 
    2299  // No block has a free item: Create a new one and use it.
    2300  ItemBlock& newBlock = CreateNewBlock();
    2301  Item* const pItem = &newBlock.pItems[0];
    2302  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    2303  return &pItem->Value;
    2304 }
    2305 
    2306 template<typename T>
    2307 void VmaPoolAllocator<T>::Free(T* ptr)
    2308 {
    2309  // Search all memory blocks to find ptr.
    2310  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    2311  {
    2312  ItemBlock& block = m_ItemBlocks[i];
    2313 
    2314  // Cast the pointer to the union type via memcpy to avoid strict-aliasing issues.
    2315  Item* pItemPtr;
    2316  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    2317 
    2318  // Check if pItemPtr is in address range of this block.
    2319  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    2320  {
    2321  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    2322  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    2323  block.FirstFreeIndex = index;
    2324  return;
    2325  }
    2326  }
    2327  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    2328 }
    2329 
    2330 template<typename T>
    2331 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    2332 {
    2333  ItemBlock newBlock = {
    2334  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    2335 
    2336  m_ItemBlocks.push_back(newBlock);
    2337 
    2338  // Set up the singly-linked list of all free items in this block.
    2339  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    2340  newBlock.pItems[i].NextFreeIndex = i + 1;
    2341  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    2342  return m_ItemBlocks.back();
    2343 }
    2344 
    2346 // class VmaRawList, VmaList
    2347 
    2348 #if VMA_USE_STL_LIST
    2349 
    2350 #define VmaList std::list
    2351 
    2352 #else // #if VMA_USE_STL_LIST
    2353 
    2354 template<typename T>
    2355 struct VmaListItem
    2356 {
    2357  VmaListItem* pPrev;
    2358  VmaListItem* pNext;
    2359  T Value;
    2360 };
    2361 
    2362 // Doubly linked list.
    2363 template<typename T>
    2364 class VmaRawList
    2365 {
    2366 public:
    2367  typedef VmaListItem<T> ItemType;
    2368 
    2369  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    2370  ~VmaRawList();
    2371  void Clear();
    2372 
    2373  size_t GetCount() const { return m_Count; }
    2374  bool IsEmpty() const { return m_Count == 0; }
    2375 
    2376  ItemType* Front() { return m_pFront; }
    2377  const ItemType* Front() const { return m_pFront; }
    2378  ItemType* Back() { return m_pBack; }
    2379  const ItemType* Back() const { return m_pBack; }
    2380 
    2381  ItemType* PushBack();
    2382  ItemType* PushFront();
    2383  ItemType* PushBack(const T& value);
    2384  ItemType* PushFront(const T& value);
    2385  void PopBack();
    2386  void PopFront();
    2387 
    2388  // pItem can be null, which means PushBack.
    2389  ItemType* InsertBefore(ItemType* pItem);
    2390  // pItem can be null, which means PushFront.
    2391  ItemType* InsertAfter(ItemType* pItem);
    2392 
    2393  ItemType* InsertBefore(ItemType* pItem, const T& value);
    2394  ItemType* InsertAfter(ItemType* pItem, const T& value);
    2395 
    2396  void Remove(ItemType* pItem);
    2397 
    2398 private:
    2399  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    2400  VmaPoolAllocator<ItemType> m_ItemAllocator;
    2401  ItemType* m_pFront;
    2402  ItemType* m_pBack;
    2403  size_t m_Count;
    2404 
    2405  // Declared but not defined, to disable the copy constructor and assignment operator.
    2406  VmaRawList(const VmaRawList<T>& src);
    2407  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
    2408 };
    2409 
    2410 template<typename T>
    2411 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    2412  m_pAllocationCallbacks(pAllocationCallbacks),
    2413  m_ItemAllocator(pAllocationCallbacks, 128),
    2414  m_pFront(VMA_NULL),
    2415  m_pBack(VMA_NULL),
    2416  m_Count(0)
    2417 {
    2418 }
    2419 
    2420 template<typename T>
    2421 VmaRawList<T>::~VmaRawList()
    2422 {
    2423  // Intentionally not calling Clear, because that would spend unnecessary
    2424  // computation returning all items to m_ItemAllocator as free.
    2425 }
    2426 
    2427 template<typename T>
    2428 void VmaRawList<T>::Clear()
    2429 {
    2430  if(!IsEmpty())
    2431  {
    2432  ItemType* pItem = m_pBack;
    2433  while(pItem != VMA_NULL)
    2434  {
    2435  ItemType* const pPrevItem = pItem->pPrev;
    2436  m_ItemAllocator.Free(pItem);
    2437  pItem = pPrevItem;
    2438  }
    2439  m_pFront = VMA_NULL;
    2440  m_pBack = VMA_NULL;
    2441  m_Count = 0;
    2442  }
    2443 }
    2444 
    2445 template<typename T>
    2446 VmaListItem<T>* VmaRawList<T>::PushBack()
    2447 {
    2448  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2449  pNewItem->pNext = VMA_NULL;
    2450  if(IsEmpty())
    2451  {
    2452  pNewItem->pPrev = VMA_NULL;
    2453  m_pFront = pNewItem;
    2454  m_pBack = pNewItem;
    2455  m_Count = 1;
    2456  }
    2457  else
    2458  {
    2459  pNewItem->pPrev = m_pBack;
    2460  m_pBack->pNext = pNewItem;
    2461  m_pBack = pNewItem;
    2462  ++m_Count;
    2463  }
    2464  return pNewItem;
    2465 }
    2466 
    2467 template<typename T>
    2468 VmaListItem<T>* VmaRawList<T>::PushFront()
    2469 {
    2470  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2471  pNewItem->pPrev = VMA_NULL;
    2472  if(IsEmpty())
    2473  {
    2474  pNewItem->pNext = VMA_NULL;
    2475  m_pFront = pNewItem;
    2476  m_pBack = pNewItem;
    2477  m_Count = 1;
    2478  }
    2479  else
    2480  {
    2481  pNewItem->pNext = m_pFront;
    2482  m_pFront->pPrev = pNewItem;
    2483  m_pFront = pNewItem;
    2484  ++m_Count;
    2485  }
    2486  return pNewItem;
    2487 }
    2488 
    2489 template<typename T>
    2490 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    2491 {
    2492  ItemType* const pNewItem = PushBack();
    2493  pNewItem->Value = value;
    2494  return pNewItem;
    2495 }
    2496 
    2497 template<typename T>
    2498 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    2499 {
    2500  ItemType* const pNewItem = PushFront();
    2501  pNewItem->Value = value;
    2502  return pNewItem;
    2503 }
    2504 
    2505 template<typename T>
    2506 void VmaRawList<T>::PopBack()
    2507 {
    2508  VMA_HEAVY_ASSERT(m_Count > 0);
    2509  ItemType* const pBackItem = m_pBack;
    2510  ItemType* const pPrevItem = pBackItem->pPrev;
    2511  if(pPrevItem != VMA_NULL)
    2512  {
    2513  pPrevItem->pNext = VMA_NULL;
    2514  }
    2515  m_pBack = pPrevItem;
    2516  m_ItemAllocator.Free(pBackItem);
    2517  --m_Count;
    2518 }
    2519 
    2520 template<typename T>
    2521 void VmaRawList<T>::PopFront()
    2522 {
    2523  VMA_HEAVY_ASSERT(m_Count > 0);
    2524  ItemType* const pFrontItem = m_pFront;
    2525  ItemType* const pNextItem = pFrontItem->pNext;
    2526  if(pNextItem != VMA_NULL)
    2527  {
    2528  pNextItem->pPrev = VMA_NULL;
    2529  }
    2530  m_pFront = pNextItem;
    2531  m_ItemAllocator.Free(pFrontItem);
    2532  --m_Count;
    2533 }
    2534 
    2535 template<typename T>
    2536 void VmaRawList<T>::Remove(ItemType* pItem)
    2537 {
    2538  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    2539  VMA_HEAVY_ASSERT(m_Count > 0);
    2540 
    2541  if(pItem->pPrev != VMA_NULL)
    2542  {
    2543  pItem->pPrev->pNext = pItem->pNext;
    2544  }
    2545  else
    2546  {
    2547  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2548  m_pFront = pItem->pNext;
    2549  }
    2550 
    2551  if(pItem->pNext != VMA_NULL)
    2552  {
    2553  pItem->pNext->pPrev = pItem->pPrev;
    2554  }
    2555  else
    2556  {
    2557  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2558  m_pBack = pItem->pPrev;
    2559  }
    2560 
    2561  m_ItemAllocator.Free(pItem);
    2562  --m_Count;
    2563 }
    2564 
    2565 template<typename T>
    2566 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    2567 {
    2568  if(pItem != VMA_NULL)
    2569  {
    2570  ItemType* const prevItem = pItem->pPrev;
    2571  ItemType* const newItem = m_ItemAllocator.Alloc();
    2572  newItem->pPrev = prevItem;
    2573  newItem->pNext = pItem;
    2574  pItem->pPrev = newItem;
    2575  if(prevItem != VMA_NULL)
    2576  {
    2577  prevItem->pNext = newItem;
    2578  }
    2579  else
    2580  {
    2581  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2582  m_pFront = newItem;
    2583  }
    2584  ++m_Count;
    2585  return newItem;
    2586  }
    2587  else
    2588  return PushBack();
    2589 }
    2590 
    2591 template<typename T>
    2592 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    2593 {
    2594  if(pItem != VMA_NULL)
    2595  {
    2596  ItemType* const nextItem = pItem->pNext;
    2597  ItemType* const newItem = m_ItemAllocator.Alloc();
    2598  newItem->pNext = nextItem;
    2599  newItem->pPrev = pItem;
    2600  pItem->pNext = newItem;
    2601  if(nextItem != VMA_NULL)
    2602  {
    2603  nextItem->pPrev = newItem;
    2604  }
    2605  else
    2606  {
    2607  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2608  m_pBack = newItem;
    2609  }
    2610  ++m_Count;
    2611  return newItem;
    2612  }
    2613  else
    2614  return PushFront();
    2615 }
    2616 
    2617 template<typename T>
    2618 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    2619 {
    2620  ItemType* const newItem = InsertBefore(pItem);
    2621  newItem->Value = value;
    2622  return newItem;
    2623 }
    2624 
    2625 template<typename T>
    2626 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    2627 {
    2628  ItemType* const newItem = InsertAfter(pItem);
    2629  newItem->Value = value;
    2630  return newItem;
    2631 }
    2632 
    2633 template<typename T, typename AllocatorT>
    2634 class VmaList
    2635 {
    2636 public:
    2637  class iterator
    2638  {
    2639  public:
    2640  iterator() :
    2641  m_pList(VMA_NULL),
    2642  m_pItem(VMA_NULL)
    2643  {
    2644  }
    2645 
    2646  T& operator*() const
    2647  {
    2648  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2649  return m_pItem->Value;
    2650  }
    2651  T* operator->() const
    2652  {
    2653  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2654  return &m_pItem->Value;
    2655  }
    2656 
    2657  iterator& operator++()
    2658  {
    2659  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2660  m_pItem = m_pItem->pNext;
    2661  return *this;
    2662  }
    2663  iterator& operator--()
    2664  {
    2665  if(m_pItem != VMA_NULL)
    2666  {
    2667  m_pItem = m_pItem->pPrev;
    2668  }
    2669  else
    2670  {
    2671  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2672  m_pItem = m_pList->Back();
    2673  }
    2674  return *this;
    2675  }
    2676 
    2677  iterator operator++(int)
    2678  {
    2679  iterator result = *this;
    2680  ++*this;
    2681  return result;
    2682  }
    2683  iterator operator--(int)
    2684  {
    2685  iterator result = *this;
    2686  --*this;
    2687  return result;
    2688  }
    2689 
    2690  bool operator==(const iterator& rhs) const
    2691  {
    2692  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2693  return m_pItem == rhs.m_pItem;
    2694  }
    2695  bool operator!=(const iterator& rhs) const
    2696  {
    2697  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2698  return m_pItem != rhs.m_pItem;
    2699  }
    2700 
    2701  private:
    2702  VmaRawList<T>* m_pList;
    2703  VmaListItem<T>* m_pItem;
    2704 
    2705  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    2706  m_pList(pList),
    2707  m_pItem(pItem)
    2708  {
    2709  }
    2710 
    2711  friend class VmaList<T, AllocatorT>;
    2712  };
    2713 
    2714  class const_iterator
    2715  {
    2716  public:
    2717  const_iterator() :
    2718  m_pList(VMA_NULL),
    2719  m_pItem(VMA_NULL)
    2720  {
    2721  }
    2722 
    2723  const_iterator(const iterator& src) :
    2724  m_pList(src.m_pList),
    2725  m_pItem(src.m_pItem)
    2726  {
    2727  }
    2728 
    2729  const T& operator*() const
    2730  {
    2731  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2732  return m_pItem->Value;
    2733  }
    2734  const T* operator->() const
    2735  {
    2736  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2737  return &m_pItem->Value;
    2738  }
    2739 
    2740  const_iterator& operator++()
    2741  {
    2742  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2743  m_pItem = m_pItem->pNext;
    2744  return *this;
    2745  }
    2746  const_iterator& operator--()
    2747  {
    2748  if(m_pItem != VMA_NULL)
    2749  {
    2750  m_pItem = m_pItem->pPrev;
    2751  }
    2752  else
    2753  {
    2754  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2755  m_pItem = m_pList->Back();
    2756  }
    2757  return *this;
    2758  }
    2759 
    2760  const_iterator operator++(int)
    2761  {
    2762  const_iterator result = *this;
    2763  ++*this;
    2764  return result;
    2765  }
    2766  const_iterator operator--(int)
    2767  {
    2768  const_iterator result = *this;
    2769  --*this;
    2770  return result;
    2771  }
    2772 
    2773  bool operator==(const const_iterator& rhs) const
    2774  {
    2775  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2776  return m_pItem == rhs.m_pItem;
    2777  }
    2778  bool operator!=(const const_iterator& rhs) const
    2779  {
    2780  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2781  return m_pItem != rhs.m_pItem;
    2782  }
    2783 
    2784  private:
    2785  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    2786  m_pList(pList),
    2787  m_pItem(pItem)
    2788  {
    2789  }
    2790 
    2791  const VmaRawList<T>* m_pList;
    2792  const VmaListItem<T>* m_pItem;
    2793 
    2794  friend class VmaList<T, AllocatorT>;
    2795  };
    2796 
    2797  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    2798 
    2799  bool empty() const { return m_RawList.IsEmpty(); }
    2800  size_t size() const { return m_RawList.GetCount(); }
    2801 
    2802  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    2803  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    2804 
    2805  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    2806  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    2807 
    2808  void clear() { m_RawList.Clear(); }
    2809  void push_back(const T& value) { m_RawList.PushBack(value); }
    2810  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    2811  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    2812 
    2813 private:
    2814  VmaRawList<T> m_RawList;
    2815 };
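// A minimal usage sketch (pCallbacks is assumed to be a valid
// const VkAllocationCallbacks*). Items live in VmaListItem nodes served by a
// VmaPoolAllocator, so push_back/erase never move other elements:
#if 0
static void ListExample(const VkAllocationCallbacks* pCallbacks)
{
    typedef VmaList< int, VmaStlAllocator<int> > IntList;
    IntList list((VmaStlAllocator<int>(pCallbacks)));
    list.push_back(1);
    list.push_back(2);
    for(IntList::iterator it = list.begin(); it != list.end(); ++it)
    {
        *it += 10;
    }
    list.erase(list.begin());
    list.clear();
}
#endif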
    2816 
    2817 #endif // #if VMA_USE_STL_LIST
    2818 
    2820 // class VmaMap
    2821 
    2822 // Unused in this version.
    2823 #if 0
    2824 
    2825 #if VMA_USE_STL_UNORDERED_MAP
    2826 
    2827 #define VmaPair std::pair
    2828 
    2829 #define VMA_MAP_TYPE(KeyT, ValueT) \
    2830  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    2831 
    2832 #else // #if VMA_USE_STL_UNORDERED_MAP
    2833 
    2834 template<typename T1, typename T2>
    2835 struct VmaPair
    2836 {
    2837  T1 first;
    2838  T2 second;
    2839 
    2840  VmaPair() : first(), second() { }
    2841  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    2842 };
    2843 
    2844 /* Class compatible with a subset of the interface of std::unordered_map.
    2845 KeyT and ValueT must be POD because they will be stored in a VmaVector.
    2846 */
    2847 template<typename KeyT, typename ValueT>
    2848 class VmaMap
    2849 {
    2850 public:
    2851  typedef VmaPair<KeyT, ValueT> PairType;
    2852  typedef PairType* iterator;
    2853 
    2854  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    2855 
    2856  iterator begin() { return m_Vector.begin(); }
    2857  iterator end() { return m_Vector.end(); }
    2858 
    2859  void insert(const PairType& pair);
    2860  iterator find(const KeyT& key);
    2861  void erase(iterator it);
    2862 
    2863 private:
    2864  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    2865 };
    2866 
    2867 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    2868 
    2869 template<typename FirstT, typename SecondT>
    2870 struct VmaPairFirstLess
    2871 {
    2872  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    2873  {
    2874  return lhs.first < rhs.first;
    2875  }
    2876  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    2877  {
    2878  return lhs.first < rhsFirst;
    2879  }
    2880 };
    2881 
    2882 template<typename KeyT, typename ValueT>
    2883 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    2884 {
    2885  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2886  m_Vector.data(),
    2887  m_Vector.data() + m_Vector.size(),
    2888  pair,
    2889  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    2890  VmaVectorInsert(m_Vector, indexToInsert, pair);
    2891 }
    2892 
    2893 template<typename KeyT, typename ValueT>
    2894 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    2895 {
    2896  PairType* it = VmaBinaryFindFirstNotLess(
    2897  m_Vector.data(),
    2898  m_Vector.data() + m_Vector.size(),
    2899  key,
    2900  VmaPairFirstLess<KeyT, ValueT>());
    2901  if((it != m_Vector.end()) && (it->first == key))
    2902  {
    2903  return it;
    2904  }
    2905  else
    2906  {
    2907  return m_Vector.end();
    2908  }
    2909 }
    2910 
    2911 template<typename KeyT, typename ValueT>
    2912 void VmaMap<KeyT, ValueT>::erase(iterator it)
    2913 {
    2914  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    2915 }
    2916 
    2917 #endif // #if VMA_USE_STL_UNORDERED_MAP
    2918 
    2919 #endif // #if 0
    2920 
    2922 
    2923 class VmaDeviceMemoryBlock;
    2924 
    2925 enum VMA_BLOCK_VECTOR_TYPE
    2926 {
    2927  VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    2928  VMA_BLOCK_VECTOR_TYPE_MAPPED,
    2929  VMA_BLOCK_VECTOR_TYPE_COUNT
    2930 };
    2931 
    2932 static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
    2933 {
    2934  return (flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 ?
    2935  VMA_BLOCK_VECTOR_TYPE_MAPPED :
    2936  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
    2937 }
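// In effect, allocations that request persistent mapping are segregated into
// VMA_BLOCK_VECTOR_TYPE_MAPPED block vectors, so mapped and unmapped
// allocations never share a single VkDeviceMemory block.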
    2938 
    2939 struct VmaAllocation_T
    2940 {
    2941 public:
    2942  enum ALLOCATION_TYPE
    2943  {
    2944  ALLOCATION_TYPE_NONE,
    2945  ALLOCATION_TYPE_BLOCK,
    2946  ALLOCATION_TYPE_DEDICATED,
    2947  };
    2948 
    2949  VmaAllocation_T(uint32_t currentFrameIndex) :
    2950  m_Alignment(1),
    2951  m_Size(0),
    2952  m_pUserData(VMA_NULL),
    2953  m_Type(ALLOCATION_TYPE_NONE),
    2954  m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
    2955  m_LastUseFrameIndex(currentFrameIndex)
    2956  {
    2957  }
    2958 
    2959  void InitBlockAllocation(
    2960  VmaPool hPool,
    2961  VmaDeviceMemoryBlock* block,
    2962  VkDeviceSize offset,
    2963  VkDeviceSize alignment,
    2964  VkDeviceSize size,
    2965  VmaSuballocationType suballocationType,
    2966  void* pUserData,
    2967  bool canBecomeLost)
    2968  {
    2969  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2970  VMA_ASSERT(block != VMA_NULL);
    2971  m_Type = ALLOCATION_TYPE_BLOCK;
    2972  m_Alignment = alignment;
    2973  m_Size = size;
    2974  m_pUserData = pUserData;
    2975  m_SuballocationType = suballocationType;
    2976  m_BlockAllocation.m_hPool = hPool;
    2977  m_BlockAllocation.m_Block = block;
    2978  m_BlockAllocation.m_Offset = offset;
    2979  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    2980  }
    2981 
    2982  void InitLost()
    2983  {
    2984  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2985  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    2986  m_Type = ALLOCATION_TYPE_BLOCK;
    2987  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    2988  m_BlockAllocation.m_Block = VMA_NULL;
    2989  m_BlockAllocation.m_Offset = 0;
    2990  m_BlockAllocation.m_CanBecomeLost = true;
    2991  }
    2992 
    2993  void ChangeBlockAllocation(
    2994  VmaDeviceMemoryBlock* block,
    2995  VkDeviceSize offset)
    2996  {
    2997  VMA_ASSERT(block != VMA_NULL);
    2998  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    2999  m_BlockAllocation.m_Block = block;
    3000  m_BlockAllocation.m_Offset = offset;
    3001  }
    3002 
    3003  void InitDedicatedAllocation(
    3004  uint32_t memoryTypeIndex,
    3005  VkDeviceMemory hMemory,
    3006  VmaSuballocationType suballocationType,
    3007  bool persistentMap,
    3008  void* pMappedData,
    3009  VkDeviceSize size,
    3010  void* pUserData)
    3011  {
    3012  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3013  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    3014  m_Type = ALLOCATION_TYPE_DEDICATED;
    3015  m_Alignment = 0;
    3016  m_Size = size;
    3017  m_pUserData = pUserData;
    3018  m_SuballocationType = suballocationType;
    3019  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    3020  m_DedicatedAllocation.m_hMemory = hMemory;
    3021  m_DedicatedAllocation.m_PersistentMap = persistentMap;
    3022  m_DedicatedAllocation.m_pMappedData = pMappedData;
    3023  }
    3024 
    3025  ALLOCATION_TYPE GetType() const { return m_Type; }
    3026  VkDeviceSize GetAlignment() const { return m_Alignment; }
    3027  VkDeviceSize GetSize() const { return m_Size; }
    3028  void* GetUserData() const { return m_pUserData; }
    3029  void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    3030  VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }
    3031 
    3032  VmaDeviceMemoryBlock* GetBlock() const
    3033  {
    3034  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    3035  return m_BlockAllocation.m_Block;
    3036  }
    3037  VkDeviceSize GetOffset() const;
    3038  VkDeviceMemory GetMemory() const;
    3039  uint32_t GetMemoryTypeIndex() const;
    3040  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    3041  void* GetMappedData() const;
    3042  bool CanBecomeLost() const;
    3043  VmaPool GetPool() const;
    3044 
    3045  VkResult DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    3046  void DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);
    3047 
    3048  uint32_t GetLastUseFrameIndex() const
    3049  {
    3050  return m_LastUseFrameIndex.load();
    3051  }
    3052  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    3053  {
    3054  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    3055  }
    3056  /*
    3057  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    3058  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    3059  - Else, returns false.
    3060 
    3061  If hAllocation is already lost, assert - you should not call it then.
    3062  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    3063  */
    3064  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
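 // Worked example (illustrative): with frameInUseCount = 2, an allocation whose
 // LastUseFrameIndex is 10 satisfies 10 + 2 < 13, so MakeLost(13, 2) would make
 // it lost, while MakeLost(12, 2) would not.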
    3065 
    3066  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    3067  {
    3068  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    3069  outInfo.blockCount = 1;
    3070  outInfo.allocationCount = 1;
    3071  outInfo.unusedRangeCount = 0;
    3072  outInfo.usedBytes = m_Size;
    3073  outInfo.unusedBytes = 0;
    3074  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    3075  outInfo.unusedRangeSizeMin = UINT64_MAX;
    3076  outInfo.unusedRangeSizeMax = 0;
    3077  }
    3078 
    3079 private:
    3080  VkDeviceSize m_Alignment;
    3081  VkDeviceSize m_Size;
    3082  void* m_pUserData;
    3083  ALLOCATION_TYPE m_Type;
    3084  VmaSuballocationType m_SuballocationType;
    3085  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    3086 
    3087  // Allocation out of VmaDeviceMemoryBlock.
    3088  struct BlockAllocation
    3089  {
    3090  VmaPool m_hPool; // Null if belongs to general memory.
    3091  VmaDeviceMemoryBlock* m_Block;
    3092  VkDeviceSize m_Offset;
    3093  bool m_CanBecomeLost;
    3094  };
    3095 
    3096  // Allocation for an object that has its own private VkDeviceMemory.
    3097  struct DedicatedAllocation
    3098  {
    3099  uint32_t m_MemoryTypeIndex;
    3100  VkDeviceMemory m_hMemory;
    3101  bool m_PersistentMap;
    3102  void* m_pMappedData;
    3103  };
    3104 
    3105  union
    3106  {
    3107  // Allocation out of VmaDeviceMemoryBlock.
    3108  BlockAllocation m_BlockAllocation;
    3109  // Allocation for an object that has its own private VkDeviceMemory.
    3110  DedicatedAllocation m_DedicatedAllocation;
    3111  };
    3112 };
    3113 
    3114 /*
    3115 Represents a region of a VmaDeviceMemoryBlock that is either free or assigned
    3116 and returned to the user as an allocated memory block.
    3117 */
    3118 struct VmaSuballocation
    3119 {
    3120  VkDeviceSize offset;
    3121  VkDeviceSize size;
    3122  VmaAllocation hAllocation;
    3123  VmaSuballocationType type;
    3124 };
    3125 
    3126 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    3127 
    3128 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
    3129 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    3130 
    3131 /*
    3132 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    3133 
    3134 If canMakeOtherLost was false:
    3135 - item points to a FREE suballocation.
    3136 - itemsToMakeLostCount is 0.
    3137 
    3138 If canMakeOtherLost was true:
    3139 - item points to first of sequence of suballocations, which are either FREE,
    3140  or point to VmaAllocations that can become lost.
    3141 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    3142  the requested allocation to succeed.
    3143 */
    3144 struct VmaAllocationRequest
    3145 {
    3146  VkDeviceSize offset;
    3147  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    3148  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    3149  VmaSuballocationList::iterator item;
    3150  size_t itemsToMakeLostCount;
    3151 
    3152  VkDeviceSize CalcCost() const
    3153  {
    3154  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    3155  }
    3156 };
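// Worked example (illustrative): a request overlapping 3 MiB of allocations
// that would be made lost (sumItemSize = 3145728) with itemsToMakeLostCount = 2
// has CalcCost() = 3145728 + 2 * 1048576 = 5242880, i.e. it is treated as being
// as expensive as wasting 5 MiB; requests with the lowest cost are preferred.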
    3157 
    3158 /*
    3159 Data structure used for bookkeeping of allocations and unused ranges of memory
    3160 in a single VkDeviceMemory block.
    3161 */
    3162 class VmaBlockMetadata
    3163 {
    3164 public:
    3165  VmaBlockMetadata(VmaAllocator hAllocator);
    3166  ~VmaBlockMetadata();
    3167  void Init(VkDeviceSize size);
    3168 
    3169  // Validates all data structures inside this object. If not valid, returns false.
    3170  bool Validate() const;
    3171  VkDeviceSize GetSize() const { return m_Size; }
    3172  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    3173  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    3174  VkDeviceSize GetUnusedRangeSizeMax() const;
    3175  // Returns true if this block is empty - contains only a single free suballocation.
    3176  bool IsEmpty() const;
    3177 
    3178  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    3179  void AddPoolStats(VmaPoolStats& inoutStats) const;
    3180 
    3181 #if VMA_STATS_STRING_ENABLED
    3182  void PrintDetailedMap(class VmaJsonWriter& json) const;
    3183 #endif
    3184 
    3185  // Creates a trivial request for the case when the block is empty.
    3186  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
    3187 
    3188  // Tries to find a place for suballocation with given parameters inside this block.
    3189  // If succeeded, fills pAllocationRequest and returns true.
    3190  // If failed, returns false.
    3191  bool CreateAllocationRequest(
    3192  uint32_t currentFrameIndex,
    3193  uint32_t frameInUseCount,
    3194  VkDeviceSize bufferImageGranularity,
    3195  VkDeviceSize allocSize,
    3196  VkDeviceSize allocAlignment,
    3197  VmaSuballocationType allocType,
    3198  bool canMakeOtherLost,
    3199  VmaAllocationRequest* pAllocationRequest);
    3200 
    3201  bool MakeRequestedAllocationsLost(
    3202  uint32_t currentFrameIndex,
    3203  uint32_t frameInUseCount,
    3204  VmaAllocationRequest* pAllocationRequest);
    3205 
    3206  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3207 
    3208  // Makes actual allocation based on request. Request must already be checked and valid.
    3209  void Alloc(
    3210  const VmaAllocationRequest& request,
    3211  VmaSuballocationType type,
    3212  VkDeviceSize allocSize,
    3213  VmaAllocation hAllocation);
    3214 
    3216  // Frees the suballocation assigned to the given allocation.
    3216  void Free(const VmaAllocation allocation);
    3217 
    3218 private:
    3219  VkDeviceSize m_Size;
    3220  uint32_t m_FreeCount;
    3221  VkDeviceSize m_SumFreeSize;
    3222  VmaSuballocationList m_Suballocations;
    3223  // Suballocations that are free and have size greater than a certain threshold.
    3224  // Sorted by size, ascending.
    3225  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    3226 
    3227  bool ValidateFreeSuballocationList() const;
    3228 
    3229  // Checks if a requested suballocation with the given parameters can be placed at suballocItem.
    3230  // If yes, fills pOffset and returns true; if not, returns false.
    3231  bool CheckAllocation(
    3232  uint32_t currentFrameIndex,
    3233  uint32_t frameInUseCount,
    3234  VkDeviceSize bufferImageGranularity,
    3235  VkDeviceSize allocSize,
    3236  VkDeviceSize allocAlignment,
    3237  VmaSuballocationType allocType,
    3238  VmaSuballocationList::const_iterator suballocItem,
    3239  bool canMakeOtherLost,
    3240  VkDeviceSize* pOffset,
    3241  size_t* itemsToMakeLostCount,
    3242  VkDeviceSize* pSumFreeSize,
    3243  VkDeviceSize* pSumItemSize) const;
    3244  // Given a free suballocation, merges it with the following one, which must also be free.
    3245  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    3246  // Releases given suballocation, making it free.
    3247  // Merges it with adjacent free suballocations if applicable.
    3248  // Returns iterator to new free suballocation at this place.
    3249  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    3250  // Given a free suballocation, inserts it into the sorted list
    3251  // m_FreeSuballocationsBySize if it qualifies.
    3252  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    3253  // Given a free suballocation, removes it from the sorted list
    3254  // m_FreeSuballocationsBySize if it is registered there.
    3255  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    3256 };
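// A minimal sketch of the intended call sequence (hypothetical sizes;
// hAllocator and hAlloc are assumed to be valid handles; lost allocations
// are not used here):
#if 0
static void BlockMetadataExample(VmaAllocator hAllocator, VmaAllocation hAlloc)
{
    VmaBlockMetadata metadata(hAllocator);
    metadata.Init(65536); // Starts as a single free suballocation of 64 KiB.

    VmaAllocationRequest request;
    if(metadata.CreateAllocationRequest(
        0,     // currentFrameIndex
        0,     // frameInUseCount
        1,     // bufferImageGranularity
        256,   // allocSize
        16,    // allocAlignment
        VMA_SUBALLOCATION_TYPE_BUFFER,
        false, // canMakeOtherLost
        &request))
    {
        metadata.Alloc(request, VMA_SUBALLOCATION_TYPE_BUFFER, 256, hAlloc);
        // ... use the region at request.offset ...
        metadata.Free(hAlloc);
    }
}
#endif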
    3257 
    3258 /*
    3259 Represents a single block of device memory (`VkDeviceMemory`) with all the
    3260 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
    3261 
    3262 Thread-safety: This class must be externally synchronized.
    3263 */
    3264 class VmaDeviceMemoryBlock
    3265 {
    3266 public:
    3267  uint32_t m_MemoryTypeIndex;
    3268  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3269  VkDeviceMemory m_hMemory;
    3270  bool m_PersistentMap;
    3271  void* m_pMappedData;
    3272  VmaBlockMetadata m_Metadata;
    3273 
    3274  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    3275 
    3276  ~VmaDeviceMemoryBlock()
    3277  {
    3278  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    3279  }
    3280 
    3281  // Always call after construction.
    3282  void Init(
    3283  uint32_t newMemoryTypeIndex,
    3284  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    3285  VkDeviceMemory newMemory,
    3286  VkDeviceSize newSize,
    3287  bool persistentMap,
    3288  void* pMappedData);
    3289  // Always call before destruction.
    3290  void Destroy(VmaAllocator allocator);
    3291 
    3292  // Validates all data structures inside this object. If not valid, returns false.
    3293  bool Validate() const;
    3294 };
    3295 
    3296 struct VmaPointerLess
    3297 {
    3298  bool operator()(const void* lhs, const void* rhs) const
    3299  {
    3300  return lhs < rhs;
    3301  }
    3302 };
    3303 
    3304 class VmaDefragmentator;
    3305 
    3306 /*
    3307 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    3308 Vulkan memory type.
    3309 
    3310 Synchronized internally with a mutex.
    3311 */
    3312 struct VmaBlockVector
    3313 {
    3314  VmaBlockVector(
    3315  VmaAllocator hAllocator,
    3316  uint32_t memoryTypeIndex,
    3317  VMA_BLOCK_VECTOR_TYPE blockVectorType,
    3318  VkDeviceSize preferredBlockSize,
    3319  size_t minBlockCount,
    3320  size_t maxBlockCount,
    3321  VkDeviceSize bufferImageGranularity,
    3322  uint32_t frameInUseCount,
    3323  bool isCustomPool);
    3324  ~VmaBlockVector();
    3325 
    3326  VkResult CreateMinBlocks();
    3327 
    3328  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    3329  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    3330  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    3331  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    3332  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
    3333 
    3334  void GetPoolStats(VmaPoolStats* pStats);
    3335 
    3336  bool IsEmpty() const { return m_Blocks.empty(); }
    3337 
    3338  VkResult Allocate(
    3339  VmaPool hCurrentPool,
    3340  uint32_t currentFrameIndex,
    3341  const VkMemoryRequirements& vkMemReq,
    3342  const VmaAllocationCreateInfo& createInfo,
    3343  VmaSuballocationType suballocType,
    3344  VmaAllocation* pAllocation);
    3345 
    3346  void Free(
    3347  VmaAllocation hAllocation);
    3348 
    3349  // Adds statistics of this BlockVector to pStats.
    3350  void AddStats(VmaStats* pStats);
    3351 
    3352 #if VMA_STATS_STRING_ENABLED
    3353  void PrintDetailedMap(class VmaJsonWriter& json);
    3354 #endif
    3355 
    3356  void UnmapPersistentlyMappedMemory();
    3357  VkResult MapPersistentlyMappedMemory();
    3358 
    3359  void MakePoolAllocationsLost(
    3360  uint32_t currentFrameIndex,
    3361  size_t* pLostAllocationCount);
    3362 
    3363  VmaDefragmentator* EnsureDefragmentator(
    3364  VmaAllocator hAllocator,
    3365  uint32_t currentFrameIndex);
    3366 
    3367  VkResult Defragment(
    3368  VmaDefragmentationStats* pDefragmentationStats,
    3369  VkDeviceSize& maxBytesToMove,
    3370  uint32_t& maxAllocationsToMove);
    3371 
    3372  void DestroyDefragmentator();
    3373 
    3374 private:
    3375  friend class VmaDefragmentator;
    3376 
    3377  const VmaAllocator m_hAllocator;
    3378  const uint32_t m_MemoryTypeIndex;
    3379  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3380  const VkDeviceSize m_PreferredBlockSize;
    3381  const size_t m_MinBlockCount;
    3382  const size_t m_MaxBlockCount;
    3383  const VkDeviceSize m_BufferImageGranularity;
    3384  const uint32_t m_FrameInUseCount;
    3385  const bool m_IsCustomPool;
    3386  VMA_MUTEX m_Mutex;
    3387  // Incrementally sorted by sumFreeSize, ascending.
    3388  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    3389  /* There can be at most one block that is completely empty - a
    3390  hysteresis to avoid the pessimistic case of alternately creating and
    3391  destroying a VkDeviceMemory. */
    3392  bool m_HasEmptyBlock;
    3393  VmaDefragmentator* m_pDefragmentator;
    3394 
    3395  // Finds and removes given block from vector.
    3396  void Remove(VmaDeviceMemoryBlock* pBlock);
    3397 
    3398  // Performs a single step of sorting m_Blocks. They may not be fully sorted
    3399  // after this call.
    3400  void IncrementallySortBlocks();
    3401 
    3402  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    3403 };
    3404 
    3405 struct VmaPool_T
    3406 {
    3407 public:
    3408  VmaBlockVector m_BlockVector;
    3409 
    3410  // Takes ownership.
    3411  VmaPool_T(
    3412  VmaAllocator hAllocator,
    3413  const VmaPoolCreateInfo& createInfo);
    3414  ~VmaPool_T();
    3415 
    3416  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
    3417 
    3418 #if VMA_STATS_STRING_ENABLED
    3419  //void PrintDetailedMap(class VmaStringBuilder& sb);
    3420 #endif
    3421 };
    3422 
    3423 class VmaDefragmentator
    3424 {
    3425  const VmaAllocator m_hAllocator;
    3426  VmaBlockVector* const m_pBlockVector;
    3427  uint32_t m_CurrentFrameIndex;
    3428  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3429  VkDeviceSize m_BytesMoved;
    3430  uint32_t m_AllocationsMoved;
    3431 
    3432  struct AllocationInfo
    3433  {
    3434  VmaAllocation m_hAllocation;
    3435  VkBool32* m_pChanged;
    3436 
    3437  AllocationInfo() :
    3438  m_hAllocation(VK_NULL_HANDLE),
    3439  m_pChanged(VMA_NULL)
    3440  {
    3441  }
    3442  };
    3443 
    3444  struct AllocationInfoSizeGreater
    3445  {
    3446  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    3447  {
    3448  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    3449  }
    3450  };
    3451 
    3452  // Used between AddAllocation and Defragment.
    3453  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3454 
    3455  struct BlockInfo
    3456  {
    3457  VmaDeviceMemoryBlock* m_pBlock;
    3458  bool m_HasNonMovableAllocations;
    3459  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3460 
    3461  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    3462  m_pBlock(VMA_NULL),
    3463  m_HasNonMovableAllocations(true),
    3464  m_Allocations(pAllocationCallbacks),
    3465  m_pMappedDataForDefragmentation(VMA_NULL)
    3466  {
    3467  }
    3468 
    3469  void CalcHasNonMovableAllocations()
    3470  {
    3471  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
    3472  const size_t defragmentAllocCount = m_Allocations.size();
    3473  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    3474  }
    3475 
    3476  void SortAllocationsBySizeDescecnding()
    3477  {
    3478  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    3479  }
    3480 
    3481  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    3482  void Unmap(VmaAllocator hAllocator);
    3483 
    3484  private:
    3485  // Not null if mapped for defragmentation only, not persistently mapped.
    3486  void* m_pMappedDataForDefragmentation;
    3487  };
    3488 
    3489  struct BlockPointerLess
    3490  {
    3491  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    3492  {
    3493  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    3494  }
    3495  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3496  {
    3497  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    3498  }
    3499  };
    3500 
    3501  // 1. Blocks with some non-movable allocations go first.
    3502  // 2. Blocks with smaller sumFreeSize go first.
    3503  struct BlockInfoCompareMoveDestination
    3504  {
    3505  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3506  {
    3507  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    3508  {
    3509  return true;
    3510  }
    3511  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    3512  {
    3513  return false;
    3514  }
    3515  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
    3516  {
    3517  return true;
    3518  }
    3519  return false;
    3520  }
    3521  };
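 // Worked example (illustrative): among block infos A (has non-movable
 // allocations, 1 MiB free), B (all movable, 512 KiB free) and C (all movable,
 // 2 MiB free), sorting with this comparator yields A, B, C - blocks that must
 // stay resident and nearly full blocks are used as destinations first.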
    3522 
    3523  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    3524  BlockInfoVector m_Blocks;
    3525 
    3526  VkResult DefragmentRound(
    3527  VkDeviceSize maxBytesToMove,
    3528  uint32_t maxAllocationsToMove);
    3529 
    3530  static bool MoveMakesSense(
    3531  size_t dstBlockIndex, VkDeviceSize dstOffset,
    3532  size_t srcBlockIndex, VkDeviceSize srcOffset);
    3533 
    3534 public:
    3535  VmaDefragmentator(
    3536  VmaAllocator hAllocator,
    3537  VmaBlockVector* pBlockVector,
    3538  uint32_t currentFrameIndex);
    3539 
    3540  ~VmaDefragmentator();
    3541 
    3542  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    3543  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    3544 
    3545  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    3546 
    3547  VkResult Defragment(
    3548  VkDeviceSize maxBytesToMove,
    3549  uint32_t maxAllocationsToMove);
    3550 };
    3551 
    3552 // Main allocator object.
    3553 struct VmaAllocator_T
    3554 {
    3555  bool m_UseMutex;
    3556  bool m_UseKhrDedicatedAllocation;
    3557  VkDevice m_hDevice;
    3558  bool m_AllocationCallbacksSpecified;
    3559  VkAllocationCallbacks m_AllocationCallbacks;
    3560  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    3561  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
    3562  // Counter to allow nested calls to these functions.
    3563  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
    3564 
    3565  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    3566  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    3567  VMA_MUTEX m_HeapSizeLimitMutex;
    3568 
    3569  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    3570  VkPhysicalDeviceMemoryProperties m_MemProps;
    3571 
    3572  // Default pools.
    3573  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    3574 
    3575  // Each vector is sorted by memory (handle value).
    3576  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    3577  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    3578  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    3579 
    3580  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    3581  ~VmaAllocator_T();
    3582 
    3583  const VkAllocationCallbacks* GetAllocationCallbacks() const
    3584  {
    3585  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    3586  }
    3587  const VmaVulkanFunctions& GetVulkanFunctions() const
    3588  {
    3589  return m_VulkanFunctions;
    3590  }
    3591 
    3592  VkDeviceSize GetBufferImageGranularity() const
    3593  {
    3594  return VMA_MAX(
    3595  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    3596  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    3597  }
    3598 
    3599  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    3600  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    3601 
    3602  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    3603  {
    3604  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    3605  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    3606  }
    3607 
    3608  void GetBufferMemoryRequirements(
    3609  VkBuffer hBuffer,
    3610  VkMemoryRequirements& memReq,
    3611  bool& requiresDedicatedAllocation,
    3612  bool& prefersDedicatedAllocation) const;
    3613  void GetImageMemoryRequirements(
    3614  VkImage hImage,
    3615  VkMemoryRequirements& memReq,
    3616  bool& requiresDedicatedAllocation,
    3617  bool& prefersDedicatedAllocation) const;
    3618 
    3619  // Main allocation function.
    3620  VkResult AllocateMemory(
    3621  const VkMemoryRequirements& vkMemReq,
    3622  bool requiresDedicatedAllocation,
    3623  bool prefersDedicatedAllocation,
    3624  VkBuffer dedicatedBuffer,
    3625  VkImage dedicatedImage,
    3626  const VmaAllocationCreateInfo& createInfo,
    3627  VmaSuballocationType suballocType,
    3628  VmaAllocation* pAllocation);
    3629 
    3630  // Main deallocation function.
    3631  void FreeMemory(const VmaAllocation allocation);
    3632 
    3633  void CalculateStats(VmaStats* pStats);
    3634 
    3635 #if VMA_STATS_STRING_ENABLED
    3636  void PrintDetailedMap(class VmaJsonWriter& json);
    3637 #endif
    3638 
    3639  void UnmapPersistentlyMappedMemory();
    3640  VkResult MapPersistentlyMappedMemory();
    3641 
    3642  VkResult Defragment(
    3643  VmaAllocation* pAllocations,
    3644  size_t allocationCount,
    3645  VkBool32* pAllocationsChanged,
    3646  const VmaDefragmentationInfo* pDefragmentationInfo,
    3647  VmaDefragmentationStats* pDefragmentationStats);
    3648 
    3649  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    3650 
    3651  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    3652  void DestroyPool(VmaPool pool);
    3653  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    3654 
    3655  void SetCurrentFrameIndex(uint32_t frameIndex);
    3656 
    3657  void MakePoolAllocationsLost(
    3658  VmaPool hPool,
    3659  size_t* pLostAllocationCount);
    3660 
    3661  void CreateLostAllocation(VmaAllocation* pAllocation);
    3662 
    3663  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    3664  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    3665 
    3666 private:
    3667  VkDeviceSize m_PreferredLargeHeapBlockSize;
    3668  VkDeviceSize m_PreferredSmallHeapBlockSize;
    3669 
    3670  VkPhysicalDevice m_PhysicalDevice;
    3671  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    3672 
    3673  VMA_MUTEX m_PoolsMutex;
    3674  // Protected by m_PoolsMutex. Sorted by pointer value.
    3675  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    3676 
    3677  VmaVulkanFunctions m_VulkanFunctions;
    3678 
    3679  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    3680 
    3681  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    3682 
    3683  VkResult AllocateMemoryOfType(
    3684  const VkMemoryRequirements& vkMemReq,
    3685  bool dedicatedAllocation,
    3686  VkBuffer dedicatedBuffer,
    3687  VkImage dedicatedImage,
    3688  const VmaAllocationCreateInfo& createInfo,
    3689  uint32_t memTypeIndex,
    3690  VmaSuballocationType suballocType,
    3691  VmaAllocation* pAllocation);
    3692 
    3693  // Allocates and registers a new VkDeviceMemory dedicated to a single allocation.
    3694  VkResult AllocateDedicatedMemory(
    3695  VkDeviceSize size,
    3696  VmaSuballocationType suballocType,
    3697  uint32_t memTypeIndex,
    3698  bool map,
    3699  void* pUserData,
    3700  VkBuffer dedicatedBuffer,
    3701  VkImage dedicatedImage,
    3702  VmaAllocation* pAllocation);
    3703 
    3704  // Frees the given allocation, which must have been created as a dedicated allocation.
    3705  void FreeDedicatedMemory(VmaAllocation allocation);
    3706 };
    3707 
    3709 // Memory allocation #2 after VmaAllocator_T definition
    3710 
    3711 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    3712 {
    3713  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    3714 }
    3715 
    3716 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    3717 {
    3718  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    3719 }
    3720 
    3721 template<typename T>
    3722 static T* VmaAllocate(VmaAllocator hAllocator)
    3723 {
    3724  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    3725 }
    3726 
    3727 template<typename T>
    3728 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    3729 {
    3730  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    3731 }
    3732 
    3733 template<typename T>
    3734 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    3735 {
    3736  if(ptr != VMA_NULL)
    3737  {
    3738  ptr->~T();
    3739  VmaFree(hAllocator, ptr);
    3740  }
    3741 }
    3742 
    3743 template<typename T>
    3744 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    3745 {
    3746  if(ptr != VMA_NULL)
    3747  {
    3748  for(size_t i = count; i--; )
    3749  ptr[i].~T();
    3750  VmaFree(hAllocator, ptr);
    3751  }
    3752 }
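// A minimal sketch of these hAllocator-based helpers (hypothetical usage; note
// that VmaAllocateArray returns raw storage, so a trivially constructible
// element type is used here):
#if 0
static void AllocationHelpersExample(VmaAllocator hAllocator)
{
    uint32_t* const pArray = VmaAllocateArray<uint32_t>(hAllocator, 16);
    pArray[0] = 42;
    vma_delete_array(hAllocator, pArray, 16); // Destroys elements, then frees.
}
#endif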
    3753 
    3755 // VmaStringBuilder
    3756 
    3757 #if VMA_STATS_STRING_ENABLED
    3758 
    3759 class VmaStringBuilder
    3760 {
    3761 public:
    3762  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    3763  size_t GetLength() const { return m_Data.size(); }
    3764  const char* GetData() const { return m_Data.data(); }
    3765 
    3766  void Add(char ch) { m_Data.push_back(ch); }
    3767  void Add(const char* pStr);
    3768  void AddNewLine() { Add('\n'); }
    3769  void AddNumber(uint32_t num);
    3770  void AddNumber(uint64_t num);
    3771  void AddPointer(const void* ptr);
    3772 
    3773 private:
    3774  VmaVector< char, VmaStlAllocator<char> > m_Data;
    3775 };
    3776 
    3777 void VmaStringBuilder::Add(const char* pStr)
    3778 {
    3779  const size_t strLen = strlen(pStr);
    3780  if(strLen > 0)
    3781  {
    3782  const size_t oldCount = m_Data.size();
    3783  m_Data.resize(oldCount + strLen);
    3784  memcpy(m_Data.data() + oldCount, pStr, strLen);
    3785  }
    3786 }
    3787 
    3788 void VmaStringBuilder::AddNumber(uint32_t num)
    3789 {
    3790  char buf[11];
    3791  VmaUint32ToStr(buf, sizeof(buf), num);
    3792  Add(buf);
    3793 }
    3794 
    3795 void VmaStringBuilder::AddNumber(uint64_t num)
    3796 {
    3797  char buf[21];
    3798  VmaUint64ToStr(buf, sizeof(buf), num);
    3799  Add(buf);
    3800 }
    3801 
    3802 void VmaStringBuilder::AddPointer(const void* ptr)
    3803 {
    3804  char buf[21];
    3805  VmaPtrToStr(buf, sizeof(buf), ptr);
    3806  Add(buf);
    3807 }
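// A minimal usage sketch (hypothetical; note that GetData() is not
// null-terminated, so it must always be paired with GetLength()):
#if 0
static void StringBuilderExample(VmaAllocator hAllocator)
{
    VmaStringBuilder sb(hAllocator);
    sb.Add("Memory heap count: ");
    sb.AddNumber(static_cast<uint32_t>(2));
    sb.AddNewLine();
    printf("%.*s", static_cast<int>(sb.GetLength()), sb.GetData()); // Requires <cstdio>.
}
#endif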
    3808 
    3809 #endif // #if VMA_STATS_STRING_ENABLED
    3810 
    3812 // VmaJsonWriter
    3813 
    3814 #if VMA_STATS_STRING_ENABLED
    3815 
    3816 class VmaJsonWriter
    3817 {
    3818 public:
    3819  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    3820  ~VmaJsonWriter();
    3821 
    3822  void BeginObject(bool singleLine = false);
    3823  void EndObject();
    3824 
    3825  void BeginArray(bool singleLine = false);
    3826  void EndArray();
    3827 
    3828  void WriteString(const char* pStr);
    3829  void BeginString(const char* pStr = VMA_NULL);
    3830  void ContinueString(const char* pStr);
    3831  void ContinueString(uint32_t n);
    3832  void ContinueString(uint64_t n);
    3833  void EndString(const char* pStr = VMA_NULL);
    3834 
    3835  void WriteNumber(uint32_t n);
    3836  void WriteNumber(uint64_t n);
    3837  void WriteBool(bool b);
    3838  void WriteNull();
    3839 
    3840 private:
    3841  static const char* const INDENT;
    3842 
    3843  enum COLLECTION_TYPE
    3844  {
    3845  COLLECTION_TYPE_OBJECT,
    3846  COLLECTION_TYPE_ARRAY,
    3847  };
    3848  struct StackItem
    3849  {
    3850  COLLECTION_TYPE type;
    3851  uint32_t valueCount;
    3852  bool singleLineMode;
    3853  };
    3854 
    3855  VmaStringBuilder& m_SB;
    3856  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    3857  bool m_InsideString;
    3858 
    3859  void BeginValue(bool isString);
    3860  void WriteIndent(bool oneLess = false);
    3861 };
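         // A minimal usage sketch of VmaJsonWriter (internal helper, assuming
         // VMA_STATS_STRING_ENABLED is in effect):
         //
         //   VmaStringBuilder sb(hAllocator);
         //   {
         //       VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
         //       json.BeginObject();
         //       json.WriteString("Name");   // key
         //       json.WriteString("value");  // value
         //       json.EndObject();
         //   } // ~VmaJsonWriter() asserts the stack is empty and no string is open.
         //   // sb.GetData() / sb.GetLength() now hold the serialized JSON text.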
    3862 
    3863 const char* const VmaJsonWriter::INDENT = " ";
    3864 
    3865 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    3866  m_SB(sb),
    3867  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    3868  m_InsideString(false)
    3869 {
    3870 }
    3871 
    3872 VmaJsonWriter::~VmaJsonWriter()
    3873 {
    3874  VMA_ASSERT(!m_InsideString);
    3875  VMA_ASSERT(m_Stack.empty());
    3876 }
    3877 
    3878 void VmaJsonWriter::BeginObject(bool singleLine)
    3879 {
    3880  VMA_ASSERT(!m_InsideString);
    3881 
    3882  BeginValue(false);
    3883  m_SB.Add('{');
    3884 
    3885  StackItem item;
    3886  item.type = COLLECTION_TYPE_OBJECT;
    3887  item.valueCount = 0;
    3888  item.singleLineMode = singleLine;
    3889  m_Stack.push_back(item);
    3890 }
    3891 
    3892 void VmaJsonWriter::EndObject()
    3893 {
    3894  VMA_ASSERT(!m_InsideString);
    3895 
    3896  WriteIndent(true);
    3897  m_SB.Add('}');
    3898 
    3899  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    3900  m_Stack.pop_back();
    3901 }
    3902 
    3903 void VmaJsonWriter::BeginArray(bool singleLine)
    3904 {
    3905  VMA_ASSERT(!m_InsideString);
    3906 
    3907  BeginValue(false);
    3908  m_SB.Add('[');
    3909 
    3910  StackItem item;
    3911  item.type = COLLECTION_TYPE_ARRAY;
    3912  item.valueCount = 0;
    3913  item.singleLineMode = singleLine;
    3914  m_Stack.push_back(item);
    3915 }
    3916 
    3917 void VmaJsonWriter::EndArray()
    3918 {
    3919  VMA_ASSERT(!m_InsideString);
    3920 
    3921  WriteIndent(true);
    3922  m_SB.Add(']');
    3923 
    3924  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    3925  m_Stack.pop_back();
    3926 }
    3927 
    3928 void VmaJsonWriter::WriteString(const char* pStr)
    3929 {
    3930  BeginString(pStr);
    3931  EndString();
    3932 }
    3933 
    3934 void VmaJsonWriter::BeginString(const char* pStr)
    3935 {
    3936  VMA_ASSERT(!m_InsideString);
    3937 
    3938  BeginValue(true);
    3939  m_SB.Add('"');
    3940  m_InsideString = true;
    3941  if(pStr != VMA_NULL && pStr[0] != '\0')
    3942  {
    3943  ContinueString(pStr);
    3944  }
    3945 }
    3946 
    3947 void VmaJsonWriter::ContinueString(const char* pStr)
    3948 {
    3949  VMA_ASSERT(m_InsideString);
    3950 
    3951  const size_t strLen = strlen(pStr);
    3952  for(size_t i = 0; i < strLen; ++i)
    3953  {
    3954  char ch = pStr[i];
    3955  if(ch == '\\')
    3956  {
    3957  m_SB.Add("\\\\");
    3958  }
    3959  else if(ch == '"')
    3960  {
    3961  m_SB.Add("\\\"");
    3962  }
    3963  else if(ch >= 32)
    3964  {
    3965  m_SB.Add(ch);
    3966  }
    3967  else switch(ch)
    3968  {
    3969  case '\n':
    3970  m_SB.Add("\\n");
    3971  break;
    3972  case '\r':
    3973  m_SB.Add("\\r");
    3974  break;
    3975  case '\t':
    3976  m_SB.Add("\\t");
    3977  break;
    3978  default:
    3979  VMA_ASSERT(0 && "Character not currently supported.");
    3980  break;
    3981  }
    3982  }
    3983 }
    3984 
    3985 void VmaJsonWriter::ContinueString(uint32_t n)
    3986 {
    3987  VMA_ASSERT(m_InsideString);
    3988  m_SB.AddNumber(n);
    3989 }
    3990 
    3991 void VmaJsonWriter::ContinueString(uint64_t n)
    3992 {
    3993  VMA_ASSERT(m_InsideString);
    3994  m_SB.AddNumber(n);
    3995 }
    3996 
    3997 void VmaJsonWriter::EndString(const char* pStr)
    3998 {
    3999  VMA_ASSERT(m_InsideString);
    4000  if(pStr != VMA_NULL && pStr[0] != '\0')
    4001  {
    4002  ContinueString(pStr);
    4003  }
    4004  m_SB.Add('"');
    4005  m_InsideString = false;
    4006 }
    4007 
    4008 void VmaJsonWriter::WriteNumber(uint32_t n)
    4009 {
    4010  VMA_ASSERT(!m_InsideString);
    4011  BeginValue(false);
    4012  m_SB.AddNumber(n);
    4013 }
    4014 
    4015 void VmaJsonWriter::WriteNumber(uint64_t n)
    4016 {
    4017  VMA_ASSERT(!m_InsideString);
    4018  BeginValue(false);
    4019  m_SB.AddNumber(n);
    4020 }
    4021 
    4022 void VmaJsonWriter::WriteBool(bool b)
    4023 {
    4024  VMA_ASSERT(!m_InsideString);
    4025  BeginValue(false);
    4026  m_SB.Add(b ? "true" : "false");
    4027 }
    4028 
    4029 void VmaJsonWriter::WriteNull()
    4030 {
    4031  VMA_ASSERT(!m_InsideString);
    4032  BeginValue(false);
    4033  m_SB.Add("null");
    4034 }
    4035 
    4036 void VmaJsonWriter::BeginValue(bool isString)
    4037 {
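          // Object members alternate key / value: an even valueCount means a key is
          // expected next (which must be a string - hence the assert), an odd one
          // means the value following a key, so ": " is written instead of a comma.
          // Array elements are simply separated with ", ".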
    4038  if(!m_Stack.empty())
    4039  {
    4040  StackItem& currItem = m_Stack.back();
    4041  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4042  currItem.valueCount % 2 == 0)
    4043  {
    4044  VMA_ASSERT(isString);
    4045  }
    4046 
    4047  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4048  currItem.valueCount % 2 != 0)
    4049  {
    4050  m_SB.Add(": ");
    4051  }
    4052  else if(currItem.valueCount > 0)
    4053  {
    4054  m_SB.Add(", ");
    4055  WriteIndent();
    4056  }
    4057  else
    4058  {
    4059  WriteIndent();
    4060  }
    4061  ++currItem.valueCount;
    4062  }
    4063 }
    4064 
    4065 void VmaJsonWriter::WriteIndent(bool oneLess)
    4066 {
    4067  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    4068  {
    4069  m_SB.AddNewLine();
    4070 
    4071  size_t count = m_Stack.size();
    4072  if(count > 0 && oneLess)
    4073  {
    4074  --count;
    4075  }
    4076  for(size_t i = 0; i < count; ++i)
    4077  {
    4078  m_SB.Add(INDENT);
    4079  }
    4080  }
    4081 }
    4082 
    4083 #endif // #if VMA_STATS_STRING_ENABLED
    4084 
    4085 ////////////////////////////////////////////////////////////////////////////////
    4086 
    4087 VkDeviceSize VmaAllocation_T::GetOffset() const
    4088 {
    4089  switch(m_Type)
    4090  {
    4091  case ALLOCATION_TYPE_BLOCK:
    4092  return m_BlockAllocation.m_Offset;
    4093  case ALLOCATION_TYPE_DEDICATED:
    4094  return 0;
    4095  default:
    4096  VMA_ASSERT(0);
    4097  return 0;
    4098  }
    4099 }
    4100 
    4101 VkDeviceMemory VmaAllocation_T::GetMemory() const
    4102 {
    4103  switch(m_Type)
    4104  {
    4105  case ALLOCATION_TYPE_BLOCK:
    4106  return m_BlockAllocation.m_Block->m_hMemory;
    4107  case ALLOCATION_TYPE_DEDICATED:
    4108  return m_DedicatedAllocation.m_hMemory;
    4109  default:
    4110  VMA_ASSERT(0);
    4111  return VK_NULL_HANDLE;
    4112  }
    4113 }
    4114 
    4115 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    4116 {
    4117  switch(m_Type)
    4118  {
    4119  case ALLOCATION_TYPE_BLOCK:
    4120  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
    4121  case ALLOCATION_TYPE_DEDICATED:
    4122  return m_DedicatedAllocation.m_MemoryTypeIndex;
    4123  default:
    4124  VMA_ASSERT(0);
    4125  return UINT32_MAX;
    4126  }
    4127 }
    4128 
    4129 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
    4130 {
    4131  switch(m_Type)
    4132  {
    4133  case ALLOCATION_TYPE_BLOCK:
    4134  return m_BlockAllocation.m_Block->m_BlockVectorType;
    4135  case ALLOCATION_TYPE_DEDICATED:
    4136  return (m_DedicatedAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
    4137  default:
    4138  VMA_ASSERT(0);
    4139  return VMA_BLOCK_VECTOR_TYPE_COUNT;
    4140  }
    4141 }
    4142 
    4143 void* VmaAllocation_T::GetMappedData() const
    4144 {
    4145  switch(m_Type)
    4146  {
    4147  case ALLOCATION_TYPE_BLOCK:
    4148  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
    4149  {
    4150  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
    4151  }
    4152  else
    4153  {
    4154  return VMA_NULL;
    4155  }
    4156  break;
    4157  case ALLOCATION_TYPE_DEDICATED:
    4158  return m_DedicatedAllocation.m_pMappedData;
    4159  default:
    4160  VMA_ASSERT(0);
    4161  return VMA_NULL;
    4162  }
    4163 }
    4164 
    4165 bool VmaAllocation_T::CanBecomeLost() const
    4166 {
    4167  switch(m_Type)
    4168  {
    4169  case ALLOCATION_TYPE_BLOCK:
    4170  return m_BlockAllocation.m_CanBecomeLost;
    4171  case ALLOCATION_TYPE_DEDICATED:
    4172  return false;
    4173  default:
    4174  VMA_ASSERT(0);
    4175  return false;
    4176  }
    4177 }
    4178 
    4179 VmaPool VmaAllocation_T::GetPool() const
    4180 {
    4181  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    4182  return m_BlockAllocation.m_hPool;
    4183 }
    4184 
    4185 VkResult VmaAllocation_T::DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4186 {
    4187  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    4188  if(m_DedicatedAllocation.m_PersistentMap)
    4189  {
    4190  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    4191  hAllocator->m_hDevice,
    4192  m_DedicatedAllocation.m_hMemory,
    4193  0,
    4194  VK_WHOLE_SIZE,
    4195  0,
    4196  &m_DedicatedAllocation.m_pMappedData);
    4197  }
    4198  return VK_SUCCESS;
    4199 }
    4200 void VmaAllocation_T::DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4201 {
    4202  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    4203  if(m_DedicatedAllocation.m_pMappedData)
    4204  {
    4205  VMA_ASSERT(m_DedicatedAllocation.m_PersistentMap);
    4206  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory);
    4207  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    4208  }
    4209 }
    4210 
    4211 
    4212 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4213 {
    4214  VMA_ASSERT(CanBecomeLost());
    4215 
    4216  /*
    4217  Warning: This is a carefully designed algorithm.
    4218  Do not modify unless you really know what you're doing :)
    4219  */
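          // This is a compare-and-swap retry loop. CompareExchangeLastUseFrameIndex
          // is expected to follow std::atomic compare_exchange semantics and refresh
          // localLastUseFrameIndex on failure, so if another thread touches the
          // allocation concurrently, the next iteration sees the newer frame index
          // and returns false instead of marking the allocation lost.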
    4220  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    4221  for(;;)
    4222  {
    4223  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    4224  {
    4225  VMA_ASSERT(0);
    4226  return false;
    4227  }
    4228  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    4229  {
    4230  return false;
    4231  }
    4232  else // Last use time earlier than current time.
    4233  {
    4234  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    4235  {
    4236  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    4237  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    4238  return true;
    4239  }
    4240  }
    4241  }
    4242 }
    4243 
    4244 #if VMA_STATS_STRING_ENABLED
    4245 
    4246 // Correspond to values of enum VmaSuballocationType.
    4247 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    4248  "FREE",
    4249  "UNKNOWN",
    4250  "BUFFER",
    4251  "IMAGE_UNKNOWN",
    4252  "IMAGE_LINEAR",
    4253  "IMAGE_OPTIMAL",
    4254 };
    4255 
    4256 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    4257 {
    4258  json.BeginObject();
    4259 
    4260  json.WriteString("Blocks");
    4261  json.WriteNumber(stat.blockCount);
    4262 
    4263  json.WriteString("Allocations");
    4264  json.WriteNumber(stat.allocationCount);
    4265 
    4266  json.WriteString("UnusedRanges");
    4267  json.WriteNumber(stat.unusedRangeCount);
    4268 
    4269  json.WriteString("UsedBytes");
    4270  json.WriteNumber(stat.usedBytes);
    4271 
    4272  json.WriteString("UnusedBytes");
    4273  json.WriteNumber(stat.unusedBytes);
    4274 
    4275  if(stat.allocationCount > 1)
    4276  {
    4277  json.WriteString("AllocationSize");
    4278  json.BeginObject(true);
    4279  json.WriteString("Min");
    4280  json.WriteNumber(stat.allocationSizeMin);
    4281  json.WriteString("Avg");
    4282  json.WriteNumber(stat.allocationSizeAvg);
    4283  json.WriteString("Max");
    4284  json.WriteNumber(stat.allocationSizeMax);
    4285  json.EndObject();
    4286  }
    4287 
    4288  if(stat.unusedRangeCount > 1)
    4289  {
    4290  json.WriteString("UnusedRangeSize");
    4291  json.BeginObject(true);
    4292  json.WriteString("Min");
    4293  json.WriteNumber(stat.unusedRangeSizeMin);
    4294  json.WriteString("Avg");
    4295  json.WriteNumber(stat.unusedRangeSizeAvg);
    4296  json.WriteString("Max");
    4297  json.WriteNumber(stat.unusedRangeSizeMax);
    4298  json.EndObject();
    4299  }
    4300 
    4301  json.EndObject();
    4302 }
    4303 
    4304 #endif // #if VMA_STATS_STRING_ENABLED
    4305 
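         // Comparator for binary search in m_FreeSuballocationsBySize. The second
         // overload lets VmaBinaryFindFirstNotLess compare stored list iterators
         // directly against a plain VkDeviceSize when searching by size, without
         // constructing a temporary suballocation.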
    4306 struct VmaSuballocationItemSizeLess
    4307 {
    4308  bool operator()(
    4309  const VmaSuballocationList::iterator lhs,
    4310  const VmaSuballocationList::iterator rhs) const
    4311  {
    4312  return lhs->size < rhs->size;
    4313  }
    4314  bool operator()(
    4315  const VmaSuballocationList::iterator lhs,
    4316  VkDeviceSize rhsSize) const
    4317  {
    4318  return lhs->size < rhsSize;
    4319  }
    4320 };
    4321 
    4322 ////////////////////////////////////////////////////////////////////////////////
    4323 // class VmaBlockMetadata
    4324 
    4325 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    4326  m_Size(0),
    4327  m_FreeCount(0),
    4328  m_SumFreeSize(0),
    4329  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    4330  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    4331 {
    4332 }
    4333 
    4334 VmaBlockMetadata::~VmaBlockMetadata()
    4335 {
    4336 }
    4337 
    4338 void VmaBlockMetadata::Init(VkDeviceSize size)
    4339 {
    4340  m_Size = size;
    4341  m_FreeCount = 1;
    4342  m_SumFreeSize = size;
    4343 
    4344  VmaSuballocation suballoc = {};
    4345  suballoc.offset = 0;
    4346  suballoc.size = size;
    4347  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4348  suballoc.hAllocation = VK_NULL_HANDLE;
    4349 
    4350  m_Suballocations.push_back(suballoc);
    4351  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    4352  --suballocItem;
    4353  m_FreeSuballocationsBySize.push_back(suballocItem);
    4354 }
    4355 
    4356 bool VmaBlockMetadata::Validate() const
    4357 {
    4358  if(m_Suballocations.empty())
    4359  {
    4360  return false;
    4361  }
    4362 
    4363  // Expected offset of new suballocation as calculated from previous ones.
    4364  VkDeviceSize calculatedOffset = 0;
    4365  // Expected number of free suballocations as calculated from traversing their list.
    4366  uint32_t calculatedFreeCount = 0;
    4367  // Expected sum size of free suballocations as calculated from traversing their list.
    4368  VkDeviceSize calculatedSumFreeSize = 0;
    4369  // Expected number of free suballocations that should be registered in
    4370  // m_FreeSuballocationsBySize calculated from traversing their list.
    4371  size_t freeSuballocationsToRegister = 0;
    4372  // True if previously visited suballocation was free.
    4373  bool prevFree = false;
    4374 
    4375  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4376  suballocItem != m_Suballocations.cend();
    4377  ++suballocItem)
    4378  {
    4379  const VmaSuballocation& subAlloc = *suballocItem;
    4380 
    4381  // Actual offset of this suballocation doesn't match expected one.
    4382  if(subAlloc.offset != calculatedOffset)
    4383  {
    4384  return false;
    4385  }
    4386 
    4387  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4388  // Two adjacent free suballocations are invalid. They should be merged.
    4389  if(prevFree && currFree)
    4390  {
    4391  return false;
    4392  }
    4393  prevFree = currFree;
    4394 
    4395  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
    4396  {
    4397  return false;
    4398  }
    4399 
    4400  if(currFree)
    4401  {
    4402  calculatedSumFreeSize += subAlloc.size;
    4403  ++calculatedFreeCount;
    4404  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4405  {
    4406  ++freeSuballocationsToRegister;
    4407  }
    4408  }
    4409 
    4410  calculatedOffset += subAlloc.size;
    4411  }
    4412 
    4413  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    4414  // match expected one.
    4415  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    4416  {
    4417  return false;
    4418  }
    4419 
    4420  VkDeviceSize lastSize = 0;
    4421  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    4422  {
    4423  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    4424 
    4425  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    4426  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    4427  {
    4428  return false;
    4429  }
    4430  // They must be sorted by size ascending.
    4431  if(suballocItem->size < lastSize)
    4432  {
    4433  return false;
    4434  }
    4435 
    4436  lastSize = suballocItem->size;
    4437  }
    4438 
    4439  // Check if totals match calculated values.
    4440  return
    4441  ValidateFreeSuballocationList() &&
    4442  (calculatedOffset == m_Size) &&
    4443  (calculatedSumFreeSize == m_SumFreeSize) &&
    4444  (calculatedFreeCount == m_FreeCount);
    4445 }
    4446 
    4447 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    4448 {
    4449  if(!m_FreeSuballocationsBySize.empty())
    4450  {
    4451  return m_FreeSuballocationsBySize.back()->size;
    4452  }
    4453  else
    4454  {
    4455  return 0;
    4456  }
    4457 }
    4458 
    4459 bool VmaBlockMetadata::IsEmpty() const
    4460 {
    4461  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    4462 }
    4463 
    4464 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    4465 {
    4466  outInfo.blockCount = 1;
    4467 
    4468  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4469  outInfo.allocationCount = rangeCount - m_FreeCount;
    4470  outInfo.unusedRangeCount = m_FreeCount;
    4471 
    4472  outInfo.unusedBytes = m_SumFreeSize;
    4473  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    4474 
    4475  outInfo.allocationSizeMin = UINT64_MAX;
    4476  outInfo.allocationSizeMax = 0;
    4477  outInfo.unusedRangeSizeMin = UINT64_MAX;
    4478  outInfo.unusedRangeSizeMax = 0;
    4479 
    4480  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4481  suballocItem != m_Suballocations.cend();
    4482  ++suballocItem)
    4483  {
    4484  const VmaSuballocation& suballoc = *suballocItem;
    4485  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    4486  {
    4487  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    4488  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    4489  }
    4490  else
    4491  {
    4492  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    4493  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    4494  }
    4495  }
    4496 }
    4497 
    4498 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    4499 {
    4500  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4501 
    4502  inoutStats.size += m_Size;
    4503  inoutStats.unusedSize += m_SumFreeSize;
    4504  inoutStats.allocationCount += rangeCount - m_FreeCount;
    4505  inoutStats.unusedRangeCount += m_FreeCount;
    4506  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    4507 }
    4508 
    4509 #if VMA_STATS_STRING_ENABLED
    4510 
    4511 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    4512 {
    4513  json.BeginObject();
    4514 
    4515  json.WriteString("TotalBytes");
    4516  json.WriteNumber(m_Size);
    4517 
    4518  json.WriteString("UnusedBytes");
    4519  json.WriteNumber(m_SumFreeSize);
    4520 
    4521  json.WriteString("Allocations");
    4522  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
    4523 
    4524  json.WriteString("UnusedRanges");
    4525  json.WriteNumber(m_FreeCount);
    4526 
    4527  json.WriteString("Suballocations");
    4528  json.BeginArray();
    4529  size_t i = 0;
    4530  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4531  suballocItem != m_Suballocations.cend();
    4532  ++suballocItem, ++i)
    4533  {
    4534  json.BeginObject(true);
    4535 
    4536  json.WriteString("Type");
    4537  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
    4538 
    4539  json.WriteString("Size");
    4540  json.WriteNumber(suballocItem->size);
    4541 
    4542  json.WriteString("Offset");
    4543  json.WriteNumber(suballocItem->offset);
    4544 
    4545  json.EndObject();
    4546  }
    4547  json.EndArray();
    4548 
    4549  json.EndObject();
    4550 }
    4551 
    4552 #endif // #if VMA_STATS_STRING_ENABLED
    4553 
    4554 /*
    4555 How many suitable free suballocations to analyze before choosing the best one.
    4556 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
    4557  will be chosen.
    4558 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
    4559  suballocations will be analyzed and the best one will be chosen.
    4560 - Any other value is also acceptable.
    4561 */
    4562 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
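         // Example: given free ranges of sizes {16, 64, 256} and allocSize = 48,
         // First-Fit walks from the front and settles on 64 after rejecting 16,
         // while the VMA_BEST_FIT path below binary-searches directly for the
         // smallest free range with size >= 48 and starts there.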
    4563 
    4564 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
    4565 {
    4566  VMA_ASSERT(IsEmpty());
    4567  pAllocationRequest->offset = 0;
    4568  pAllocationRequest->sumFreeSize = m_SumFreeSize;
    4569  pAllocationRequest->sumItemSize = 0;
    4570  pAllocationRequest->item = m_Suballocations.begin();
    4571  pAllocationRequest->itemsToMakeLostCount = 0;
    4572 }
    4573 
    4574 bool VmaBlockMetadata::CreateAllocationRequest(
    4575  uint32_t currentFrameIndex,
    4576  uint32_t frameInUseCount,
    4577  VkDeviceSize bufferImageGranularity,
    4578  VkDeviceSize allocSize,
    4579  VkDeviceSize allocAlignment,
    4580  VmaSuballocationType allocType,
    4581  bool canMakeOtherLost,
    4582  VmaAllocationRequest* pAllocationRequest)
    4583 {
    4584  VMA_ASSERT(allocSize > 0);
    4585  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4586  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    4587  VMA_HEAVY_ASSERT(Validate());
    4588 
    4589  // There is not enough total free space in this block to fulfill the request: Early return.
    4590  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    4591  {
    4592  return false;
    4593  }
    4594 
    4595  // New algorithm, efficiently searching freeSuballocationsBySize.
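          // m_FreeSuballocationsBySize is kept sorted by size ascending (see
          // RegisterFreeSuballocation / ValidateFreeSuballocationList), so
          // VmaBinaryFindFirstNotLess behaves like std::lower_bound: it returns
          // the first free range whose size is not less than allocSize.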
    4596  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    4597  if(freeSuballocCount > 0)
    4598  {
    4599  if(VMA_BEST_FIT)
    4600  {
    4601  // Find first free suballocation with size not less than allocSize.
    4602  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    4603  m_FreeSuballocationsBySize.data(),
    4604  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    4605  allocSize,
    4606  VmaSuballocationItemSizeLess());
    4607  size_t index = it - m_FreeSuballocationsBySize.data();
    4608  for(; index < freeSuballocCount; ++index)
    4609  {
    4610  if(CheckAllocation(
    4611  currentFrameIndex,
    4612  frameInUseCount,
    4613  bufferImageGranularity,
    4614  allocSize,
    4615  allocAlignment,
    4616  allocType,
    4617  m_FreeSuballocationsBySize[index],
    4618  false, // canMakeOtherLost
    4619  &pAllocationRequest->offset,
    4620  &pAllocationRequest->itemsToMakeLostCount,
    4621  &pAllocationRequest->sumFreeSize,
    4622  &pAllocationRequest->sumItemSize))
    4623  {
    4624  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4625  return true;
    4626  }
    4627  }
    4628  }
    4629  else
    4630  {
    4631  // Search starting from biggest suballocations.
    4632  for(size_t index = freeSuballocCount; index--; )
    4633  {
    4634  if(CheckAllocation(
    4635  currentFrameIndex,
    4636  frameInUseCount,
    4637  bufferImageGranularity,
    4638  allocSize,
    4639  allocAlignment,
    4640  allocType,
    4641  m_FreeSuballocationsBySize[index],
    4642  false, // canMakeOtherLost
    4643  &pAllocationRequest->offset,
    4644  &pAllocationRequest->itemsToMakeLostCount,
    4645  &pAllocationRequest->sumFreeSize,
    4646  &pAllocationRequest->sumItemSize))
    4647  {
    4648  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4649  return true;
    4650  }
    4651  }
    4652  }
    4653  }
    4654 
    4655  if(canMakeOtherLost)
    4656  {
    4657  // Brute-force algorithm. TODO: Come up with something better.
    4658 
    4659  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    4660  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
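          // VK_WHOLE_SIZE acts as "+infinity" here: the first candidate that
          // passes CheckAllocation necessarily has a lower CalcCost() (roughly,
          // the amount of live allocations that would have to be sacrificed)
          // and replaces it.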
    4661 
    4662  VmaAllocationRequest tmpAllocRequest = {};
    4663  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    4664  suballocIt != m_Suballocations.end();
    4665  ++suballocIt)
    4666  {
    4667  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    4668  suballocIt->hAllocation->CanBecomeLost())
    4669  {
    4670  if(CheckAllocation(
    4671  currentFrameIndex,
    4672  frameInUseCount,
    4673  bufferImageGranularity,
    4674  allocSize,
    4675  allocAlignment,
    4676  allocType,
    4677  suballocIt,
    4678  canMakeOtherLost,
    4679  &tmpAllocRequest.offset,
    4680  &tmpAllocRequest.itemsToMakeLostCount,
    4681  &tmpAllocRequest.sumFreeSize,
    4682  &tmpAllocRequest.sumItemSize))
    4683  {
    4684  tmpAllocRequest.item = suballocIt;
    4685 
    4686  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    4687  {
    4688  *pAllocationRequest = tmpAllocRequest;
    4689  }
    4690  }
    4691  }
    4692  }
    4693 
    4694  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    4695  {
    4696  return true;
    4697  }
    4698  }
    4699 
    4700  return false;
    4701 }
    4702 
    4703 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    4704  uint32_t currentFrameIndex,
    4705  uint32_t frameInUseCount,
    4706  VmaAllocationRequest* pAllocationRequest)
    4707 {
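          // pAllocationRequest->item walks forward over the range chosen earlier
          // by CheckAllocation: free items are skipped, live items are made lost
          // and freed immediately. FreeSuballocation may merge neighbors, so the
          // iterator it returns is adopted before continuing.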
    4708  while(pAllocationRequest->itemsToMakeLostCount > 0)
    4709  {
    4710  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    4711  {
    4712  ++pAllocationRequest->item;
    4713  }
    4714  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4715  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    4716  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    4717  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4718  {
    4719  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    4720  --pAllocationRequest->itemsToMakeLostCount;
    4721  }
    4722  else
    4723  {
    4724  return false;
    4725  }
    4726  }
    4727 
    4728  VMA_HEAVY_ASSERT(Validate());
    4729  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4730  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    4731 
    4732  return true;
    4733 }
    4734 
    4735 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4736 {
    4737  uint32_t lostAllocationCount = 0;
    4738  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    4739  it != m_Suballocations.end();
    4740  ++it)
    4741  {
    4742  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    4743  it->hAllocation->CanBecomeLost() &&
    4744  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4745  {
    4746  it = FreeSuballocation(it);
    4747  ++lostAllocationCount;
    4748  }
    4749  }
    4750  return lostAllocationCount;
    4751 }
    4752 
    4753 void VmaBlockMetadata::Alloc(
    4754  const VmaAllocationRequest& request,
    4755  VmaSuballocationType type,
    4756  VkDeviceSize allocSize,
    4757  VmaAllocation hAllocation)
    4758 {
    4759  VMA_ASSERT(request.item != m_Suballocations.end());
    4760  VmaSuballocation& suballoc = *request.item;
    4761  // Given suballocation is a free block.
    4762  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4763  // Given offset is inside this suballocation.
    4764  VMA_ASSERT(request.offset >= suballoc.offset);
    4765  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    4766  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    4767  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    4768 
    4769  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    4770  // it to become used.
    4771  UnregisterFreeSuballocation(request.item);
    4772 
    4773  suballoc.offset = request.offset;
    4774  suballoc.size = allocSize;
    4775  suballoc.type = type;
    4776  suballoc.hAllocation = hAllocation;
    4777 
    4778  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    4779  if(paddingEnd)
    4780  {
    4781  VmaSuballocation paddingSuballoc = {};
    4782  paddingSuballoc.offset = request.offset + allocSize;
    4783  paddingSuballoc.size = paddingEnd;
    4784  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4785  VmaSuballocationList::iterator next = request.item;
    4786  ++next;
    4787  const VmaSuballocationList::iterator paddingEndItem =
    4788  m_Suballocations.insert(next, paddingSuballoc);
    4789  RegisterFreeSuballocation(paddingEndItem);
    4790  }
    4791 
    4792  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    4793  if(paddingBegin)
    4794  {
    4795  VmaSuballocation paddingSuballoc = {};
    4796  paddingSuballoc.offset = request.offset - paddingBegin;
    4797  paddingSuballoc.size = paddingBegin;
    4798  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4799  const VmaSuballocationList::iterator paddingBeginItem =
    4800  m_Suballocations.insert(request.item, paddingSuballoc);
    4801  RegisterFreeSuballocation(paddingBeginItem);
    4802  }
    4803 
    4804  // Update totals.
    4805  m_FreeCount = m_FreeCount - 1;
    4806  if(paddingBegin > 0)
    4807  {
    4808  ++m_FreeCount;
    4809  }
    4810  if(paddingEnd > 0)
    4811  {
    4812  ++m_FreeCount;
    4813  }
    4814  m_SumFreeSize -= allocSize;
    4815 }
    4816 
    4817 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    4818 {
    4819  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    4820  suballocItem != m_Suballocations.end();
    4821  ++suballocItem)
    4822  {
    4823  VmaSuballocation& suballoc = *suballocItem;
    4824  if(suballoc.hAllocation == allocation)
    4825  {
    4826  FreeSuballocation(suballocItem);
    4827  VMA_HEAVY_ASSERT(Validate());
    4828  return;
    4829  }
    4830  }
    4831  VMA_ASSERT(0 && "Not found!");
    4832 }
    4833 
    4834 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    4835 {
    4836  VkDeviceSize lastSize = 0;
    4837  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    4838  {
    4839  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    4840 
    4841  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    4842  {
    4843  VMA_ASSERT(0);
    4844  return false;
    4845  }
    4846  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4847  {
    4848  VMA_ASSERT(0);
    4849  return false;
    4850  }
    4851  if(it->size < lastSize)
    4852  {
    4853  VMA_ASSERT(0);
    4854  return false;
    4855  }
    4856 
    4857  lastSize = it->size;
    4858  }
    4859  return true;
    4860 }
    4861 
    4862 bool VmaBlockMetadata::CheckAllocation(
    4863  uint32_t currentFrameIndex,
    4864  uint32_t frameInUseCount,
    4865  VkDeviceSize bufferImageGranularity,
    4866  VkDeviceSize allocSize,
    4867  VkDeviceSize allocAlignment,
    4868  VmaSuballocationType allocType,
    4869  VmaSuballocationList::const_iterator suballocItem,
    4870  bool canMakeOtherLost,
    4871  VkDeviceSize* pOffset,
    4872  size_t* itemsToMakeLostCount,
    4873  VkDeviceSize* pSumFreeSize,
    4874  VkDeviceSize* pSumItemSize) const
    4875 {
    4876  VMA_ASSERT(allocSize > 0);
    4877  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4878  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    4879  VMA_ASSERT(pOffset != VMA_NULL);
    4880 
    4881  *itemsToMakeLostCount = 0;
    4882  *pSumFreeSize = 0;
    4883  *pSumItemSize = 0;
    4884 
    4885  if(canMakeOtherLost)
    4886  {
    4887  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    4888  {
    4889  *pSumFreeSize = suballocItem->size;
    4890  }
    4891  else
    4892  {
    4893  if(suballocItem->hAllocation->CanBecomeLost() &&
    4894  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4895  {
    4896  ++*itemsToMakeLostCount;
    4897  *pSumItemSize = suballocItem->size;
    4898  }
    4899  else
    4900  {
    4901  return false;
    4902  }
    4903  }
    4904 
    4905  // Remaining size is too small for this request: Early return.
    4906  if(m_Size - suballocItem->offset < allocSize)
    4907  {
    4908  return false;
    4909  }
    4910 
    4911  // Start from offset equal to beginning of this suballocation.
    4912  *pOffset = suballocItem->offset;
    4913 
    4914  // Apply VMA_DEBUG_MARGIN at the beginning.
    4915  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    4916  {
    4917  *pOffset += VMA_DEBUG_MARGIN;
    4918  }
    4919 
    4920  // Apply alignment.
    4921  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    4922  *pOffset = VmaAlignUp(*pOffset, alignment);
    4923 
    4924  // Check previous suballocations for BufferImageGranularity conflicts.
    4925  // Make bigger alignment if necessary.
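          // Example: with bufferImageGranularity = 4096, a linear buffer and an
          // optimal-tiled image must not share the same 4 KiB "page". If a
          // previous suballocation of a conflicting type ends on our page,
          // *pOffset is bumped up to the next page boundary.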
    4926  if(bufferImageGranularity > 1)
    4927  {
    4928  bool bufferImageGranularityConflict = false;
    4929  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    4930  while(prevSuballocItem != m_Suballocations.cbegin())
    4931  {
    4932  --prevSuballocItem;
    4933  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    4934  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    4935  {
    4936  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    4937  {
    4938  bufferImageGranularityConflict = true;
    4939  break;
    4940  }
    4941  }
    4942  else
    4943  // Already on previous page.
    4944  break;
    4945  }
    4946  if(bufferImageGranularityConflict)
    4947  {
    4948  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    4949  }
    4950  }
    4951 
    4952  // Now that we have final *pOffset, check if we are past suballocItem.
    4953  // If yes, return false - this function should be called for another suballocItem as starting point.
    4954  if(*pOffset >= suballocItem->offset + suballocItem->size)
    4955  {
    4956  return false;
    4957  }
    4958 
    4959  // Calculate padding at the beginning based on current offset.
    4960  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    4961 
    4962  // Calculate required margin at the end if this is not the last suballocation.
    4963  VmaSuballocationList::const_iterator next = suballocItem;
    4964  ++next;
    4965  const VkDeviceSize requiredEndMargin =
    4966  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    4967 
    4968  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    4969  // Another early return check.
    4970  if(suballocItem->offset + totalSize > m_Size)
    4971  {
    4972  return false;
    4973  }
    4974 
    4975  // Advance lastSuballocItem until desired size is reached.
    4976  // Update itemsToMakeLostCount.
    4977  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    4978  if(totalSize > suballocItem->size)
    4979  {
    4980  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    4981  while(remainingSize > 0)
    4982  {
    4983  ++lastSuballocItem;
    4984  if(lastSuballocItem == m_Suballocations.cend())
    4985  {
    4986  return false;
    4987  }
    4988  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    4989  {
    4990  *pSumFreeSize += lastSuballocItem->size;
    4991  }
    4992  else
    4993  {
    4994  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    4995  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    4996  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4997  {
    4998  ++*itemsToMakeLostCount;
    4999  *pSumItemSize += lastSuballocItem->size;
    5000  }
    5001  else
    5002  {
    5003  return false;
    5004  }
    5005  }
    5006  remainingSize = (lastSuballocItem->size < remainingSize) ?
    5007  remainingSize - lastSuballocItem->size : 0;
    5008  }
    5009  }
    5010 
    5011  // Check next suballocations for BufferImageGranularity conflicts.
    5012  // If conflict exists, we must mark more allocations lost or fail.
    5013  if(bufferImageGranularity > 1)
    5014  {
    5015  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    5016  ++nextSuballocItem;
    5017  while(nextSuballocItem != m_Suballocations.cend())
    5018  {
    5019  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    5020  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    5021  {
    5022  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    5023  {
    5024  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    5025  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    5026  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    5027  {
    5028  ++*itemsToMakeLostCount;
    5029  }
    5030  else
    5031  {
    5032  return false;
    5033  }
    5034  }
    5035  }
    5036  else
    5037  {
    5038  // Already on next page.
    5039  break;
    5040  }
    5041  ++nextSuballocItem;
    5042  }
    5043  }
    5044  }
    5045  else
    5046  {
    5047  const VmaSuballocation& suballoc = *suballocItem;
    5048  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    5049 
    5050  *pSumFreeSize = suballoc.size;
    5051 
    5052  // Size of this suballocation is too small for this request: Early return.
    5053  if(suballoc.size < allocSize)
    5054  {
    5055  return false;
    5056  }
    5057 
    5058  // Start from offset equal to beginning of this suballocation.
    5059  *pOffset = suballoc.offset;
    5060 
    5061  // Apply VMA_DEBUG_MARGIN at the beginning.
    5062  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    5063  {
    5064  *pOffset += VMA_DEBUG_MARGIN;
    5065  }
    5066 
    5067  // Apply alignment.
    5068  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    5069  *pOffset = VmaAlignUp(*pOffset, alignment);
    5070 
    5071  // Check previous suballocations for BufferImageGranularity conflicts.
    5072  // Make bigger alignment if necessary.
    5073  if(bufferImageGranularity > 1)
    5074  {
    5075  bool bufferImageGranularityConflict = false;
    5076  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    5077  while(prevSuballocItem != m_Suballocations.cbegin())
    5078  {
    5079  --prevSuballocItem;
    5080  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    5081  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    5082  {
    5083  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    5084  {
    5085  bufferImageGranularityConflict = true;
    5086  break;
    5087  }
    5088  }
    5089  else
    5090  // Already on previous page.
    5091  break;
    5092  }
    5093  if(bufferImageGranularityConflict)
    5094  {
    5095  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    5096  }
    5097  }
    5098 
    5099  // Calculate padding at the beginning based on current offset.
    5100  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    5101 
    5102  // Calculate required margin at the end if this is not the last suballocation.
    5103  VmaSuballocationList::const_iterator next = suballocItem;
    5104  ++next;
    5105  const VkDeviceSize requiredEndMargin =
    5106  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    5107 
    5108  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    5109  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    5110  {
    5111  return false;
    5112  }
    5113 
    5114  // Check next suballocations for BufferImageGranularity conflicts.
    5115  // If conflict exists, allocation cannot be made here.
    5116  if(bufferImageGranularity > 1)
    5117  {
    5118  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    5119  ++nextSuballocItem;
    5120  while(nextSuballocItem != m_Suballocations.cend())
    5121  {
    5122  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    5123  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    5124  {
    5125  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    5126  {
    5127  return false;
    5128  }
    5129  }
    5130  else
    5131  {
    5132  // Already on next page.
    5133  break;
    5134  }
    5135  ++nextSuballocItem;
    5136  }
    5137  }
    5138  }
    5139 
    5140  // All tests passed: Success. pOffset is already filled.
    5141  return true;
    5142 }
    5143 
    5144 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    5145 {
    5146  VMA_ASSERT(item != m_Suballocations.end());
    5147  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5148 
    5149  VmaSuballocationList::iterator nextItem = item;
    5150  ++nextItem;
    5151  VMA_ASSERT(nextItem != m_Suballocations.end());
    5152  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    5153 
    5154  item->size += nextItem->size;
    5155  --m_FreeCount;
    5156  m_Suballocations.erase(nextItem);
    5157 }
    5158 
    5159 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    5160 {
    5161  // Change this suballocation to be marked as free.
    5162  VmaSuballocation& suballoc = *suballocItem;
    5163  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5164  suballoc.hAllocation = VK_NULL_HANDLE;
    5165 
    5166  // Update totals.
    5167  ++m_FreeCount;
    5168  m_SumFreeSize += suballoc.size;
    5169 
    5170  // Merge with previous and/or next suballocation if it's also free.
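          // At most one merge in each direction can be needed, because Validate()
          // guarantees that no two adjacent suballocations are free to begin with.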
    5171  bool mergeWithNext = false;
    5172  bool mergeWithPrev = false;
    5173 
    5174  VmaSuballocationList::iterator nextItem = suballocItem;
    5175  ++nextItem;
    5176  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    5177  {
    5178  mergeWithNext = true;
    5179  }
    5180 
    5181  VmaSuballocationList::iterator prevItem = suballocItem;
    5182  if(suballocItem != m_Suballocations.begin())
    5183  {
    5184  --prevItem;
    5185  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5186  {
    5187  mergeWithPrev = true;
    5188  }
    5189  }
    5190 
    5191  if(mergeWithNext)
    5192  {
    5193  UnregisterFreeSuballocation(nextItem);
    5194  MergeFreeWithNext(suballocItem);
    5195  }
    5196 
    5197  if(mergeWithPrev)
    5198  {
    5199  UnregisterFreeSuballocation(prevItem);
    5200  MergeFreeWithNext(prevItem);
    5201  RegisterFreeSuballocation(prevItem);
    5202  return prevItem;
    5203  }
    5204  else
    5205  {
    5206  RegisterFreeSuballocation(suballocItem);
    5207  return suballocItem;
    5208  }
    5209 }
    5210 
    5211 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    5212 {
    5213  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5214  VMA_ASSERT(item->size > 0);
    5215 
    5216  // You may want to enable this validation at the beginning or at the end of
    5217  // this function, depending on what you want to check.
    5218  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5219 
    5220  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5221  {
    5222  if(m_FreeSuballocationsBySize.empty())
    5223  {
    5224  m_FreeSuballocationsBySize.push_back(item);
    5225  }
    5226  else
    5227  {
    5228  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    5229  }
    5230  }
    5231 
    5232  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5233 }
    5234 
    5235 
    5236 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    5237 {
    5238  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5239  VMA_ASSERT(item->size > 0);
    5240 
    5241  // You may want to enable this validation at the beginning or at the end of
    5242  // this function, depending on what you want to check.
    5243  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5244 
    5245  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5246  {
    5247  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5248  m_FreeSuballocationsBySize.data(),
    5249  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    5250  item,
    5251  VmaSuballocationItemSizeLess());
    5252  for(size_t index = it - m_FreeSuballocationsBySize.data();
    5253  index < m_FreeSuballocationsBySize.size();
    5254  ++index)
    5255  {
    5256  if(m_FreeSuballocationsBySize[index] == item)
    5257  {
    5258  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    5259  return;
    5260  }
    5261  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    5262  }
    5263  VMA_ASSERT(0 && "Not found.");
    5264  }
    5265 
    5266  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5267 }
    5268 
    5269 ////////////////////////////////////////////////////////////////////////////////
    5270 // class VmaDeviceMemoryBlock
    5271 
    5272 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    5273  m_MemoryTypeIndex(UINT32_MAX),
    5274  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
    5275  m_hMemory(VK_NULL_HANDLE),
    5276  m_PersistentMap(false),
    5277  m_pMappedData(VMA_NULL),
    5278  m_Metadata(hAllocator)
    5279 {
    5280 }
    5281 
    5282 void VmaDeviceMemoryBlock::Init(
    5283  uint32_t newMemoryTypeIndex,
    5284  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    5285  VkDeviceMemory newMemory,
    5286  VkDeviceSize newSize,
    5287  bool persistentMap,
    5288  void* pMappedData)
    5289 {
    5290  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5291 
    5292  m_MemoryTypeIndex = newMemoryTypeIndex;
    5293  m_BlockVectorType = newBlockVectorType;
    5294  m_hMemory = newMemory;
    5295  m_PersistentMap = persistentMap;
    5296  m_pMappedData = pMappedData;
    5297 
    5298  m_Metadata.Init(newSize);
    5299 }
    5300 
    5301 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    5302 {
    5303  // This is the most important assert in the entire library.
    5304  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    5305  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    5306 
    5307  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    5308  if(m_pMappedData != VMA_NULL)
    5309  {
    5310  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
    5311  m_pMappedData = VMA_NULL;
    5312  }
    5313 
    5314  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    5315  m_hMemory = VK_NULL_HANDLE;
    5316 }
    5317 
    5318 bool VmaDeviceMemoryBlock::Validate() const
    5319 {
    5320  if((m_hMemory == VK_NULL_HANDLE) ||
    5321  (m_Metadata.GetSize() == 0))
    5322  {
    5323  return false;
    5324  }
    5325 
    5326  return m_Metadata.Validate();
    5327 }
    5328 
    5329 static void InitStatInfo(VmaStatInfo& outInfo)
    5330 {
    5331  memset(&outInfo, 0, sizeof(outInfo));
    5332  outInfo.allocationSizeMin = UINT64_MAX;
    5333  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5334 }
    5335 
    5336 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    5337 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    5338 {
    5339  inoutInfo.blockCount += srcInfo.blockCount;
    5340  inoutInfo.allocationCount += srcInfo.allocationCount;
    5341  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    5342  inoutInfo.usedBytes += srcInfo.usedBytes;
    5343  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    5344  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    5345  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    5346  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    5347  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    5348 }
    5349 
    5350 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    5351 {
    5352  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    5353  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    5354  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    5355  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    5356 }
    5357 
    5358 VmaPool_T::VmaPool_T(
    5359  VmaAllocator hAllocator,
    5360  const VmaPoolCreateInfo& createInfo) :
    5361  m_BlockVector(
    5362  hAllocator,
    5363  createInfo.memoryTypeIndex,
    5364  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
    5365  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    5366  createInfo.blockSize,
    5367  createInfo.minBlockCount,
    5368  createInfo.maxBlockCount,
    5369  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    5370  createInfo.frameInUseCount,
    5371  true) // isCustomPool
    5372 {
    5373 }
    5374 
    5375 VmaPool_T::~VmaPool_T()
    5376 {
    5377 }
    5378 
    5379 #if VMA_STATS_STRING_ENABLED
    5380 
    5381 #endif // #if VMA_STATS_STRING_ENABLED
    5382 
    5383 VmaBlockVector::VmaBlockVector(
    5384  VmaAllocator hAllocator,
    5385  uint32_t memoryTypeIndex,
    5386  VMA_BLOCK_VECTOR_TYPE blockVectorType,
    5387  VkDeviceSize preferredBlockSize,
    5388  size_t minBlockCount,
    5389  size_t maxBlockCount,
    5390  VkDeviceSize bufferImageGranularity,
    5391  uint32_t frameInUseCount,
    5392  bool isCustomPool) :
    5393  m_hAllocator(hAllocator),
    5394  m_MemoryTypeIndex(memoryTypeIndex),
    5395  m_BlockVectorType(blockVectorType),
    5396  m_PreferredBlockSize(preferredBlockSize),
    5397  m_MinBlockCount(minBlockCount),
    5398  m_MaxBlockCount(maxBlockCount),
    5399  m_BufferImageGranularity(bufferImageGranularity),
    5400  m_FrameInUseCount(frameInUseCount),
    5401  m_IsCustomPool(isCustomPool),
    5402  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    5403  m_HasEmptyBlock(false),
    5404  m_pDefragmentator(VMA_NULL)
    5405 {
    5406 }
    5407 
    5408 VmaBlockVector::~VmaBlockVector()
    5409 {
    5410  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    5411 
    5412  for(size_t i = m_Blocks.size(); i--; )
    5413  {
    5414  m_Blocks[i]->Destroy(m_hAllocator);
    5415  vma_delete(m_hAllocator, m_Blocks[i]);
    5416  }
    5417 }
    5418 
    5419 VkResult VmaBlockVector::CreateMinBlocks()
    5420 {
    5421  for(size_t i = 0; i < m_MinBlockCount; ++i)
    5422  {
    5423  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    5424  if(res != VK_SUCCESS)
    5425  {
    5426  return res;
    5427  }
    5428  }
    5429  return VK_SUCCESS;
    5430 }
    5431 
    5432 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    5433 {
    5434  pStats->size = 0;
    5435  pStats->unusedSize = 0;
    5436  pStats->allocationCount = 0;
    5437  pStats->unusedRangeCount = 0;
    5438  pStats->unusedRangeSizeMax = 0;
    5439 
    5440  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5441 
    5442  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5443  {
    5444  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5445  VMA_ASSERT(pBlock);
    5446  VMA_HEAVY_ASSERT(pBlock->Validate());
    5447  pBlock->m_Metadata.AddPoolStats(*pStats);
    5448  }
    5449 }
    5450 
    5451 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    5452 
    5453 VkResult VmaBlockVector::Allocate(
    5454  VmaPool hCurrentPool,
    5455  uint32_t currentFrameIndex,
    5456  const VkMemoryRequirements& vkMemReq,
    5457  const VmaAllocationCreateInfo& createInfo,
    5458  VmaSuballocationType suballocType,
    5459  VmaAllocation* pAllocation)
    5460 {
    5461  // Validate flags.
    5462  if(createInfo.pool != VK_NULL_HANDLE &&
    5463  ((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) != (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
    5464  {
    5465  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
    5466  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5467  }
    5468 
    5469  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5470 
    5471  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    5472  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5473  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5474  {
    5475  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5476  VMA_ASSERT(pCurrBlock);
    5477  VmaAllocationRequest currRequest = {};
    5478  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5479  currentFrameIndex,
    5480  m_FrameInUseCount,
    5481  m_BufferImageGranularity,
    5482  vkMemReq.size,
    5483  vkMemReq.alignment,
    5484  suballocType,
    5485  false, // canMakeOtherLost
    5486  &currRequest))
    5487  {
    5488  // Allocate from pCurrBlock.
    5489  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    5490 
    5491  // We no longer have an empty block.
    5492  if(pCurrBlock->m_Metadata.IsEmpty())
    5493  {
    5494  m_HasEmptyBlock = false;
    5495  }
    5496 
    5497  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5498  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
    5499  (*pAllocation)->InitBlockAllocation(
    5500  hCurrentPool,
    5501  pCurrBlock,
    5502  currRequest.offset,
    5503  vkMemReq.alignment,
    5504  vkMemReq.size,
    5505  suballocType,
    5506  createInfo.pUserData,
    5507  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5508  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
    5509  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    5510  return VK_SUCCESS;
    5511  }
    5512  }
    5513 
    5514  const bool canCreateNewBlock =
    5515  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    5516  (m_Blocks.size() < m_MaxBlockCount);
    5517 
    5518  // 2. Try to create new block.
    5519  if(canCreateNewBlock)
    5520  {
    5521  // 2.1. Start with full preferredBlockSize.
    5522  VkDeviceSize blockSize = m_PreferredBlockSize;
    5523  size_t newBlockIndex = 0;
    5524  VkResult res = CreateBlock(blockSize, &newBlockIndex);
    5525  // Allocating blocks of other sizes is allowed only in default pools.
    5526  // In custom pools block size is fixed.
    5527  if(res < 0 && m_IsCustomPool == false)
    5528  {
    5529  // 2.2. Try half the size.
    5530  blockSize /= 2;
    5531  if(blockSize >= vkMemReq.size)
    5532  {
    5533  res = CreateBlock(blockSize, &newBlockIndex);
    5534  if(res < 0)
    5535  {
    5536  // 2.3. Try quarter the size.
    5537  blockSize /= 2;
    5538  if(blockSize >= vkMemReq.size)
    5539  {
    5540  res = CreateBlock(blockSize, &newBlockIndex);
    5541  }
    5542  }
    5543  }
    5544  }
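 // For example, with preferredBlockSize = 256 MiB and a 40 MiB request, a failed
 // vkAllocateMemory is retried with a 128 MiB and then a 64 MiB block before giving up.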
    5545  if(res == VK_SUCCESS)
    5546  {
    5547  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    5548  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
    5549 
    5550  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
    5551  VmaAllocationRequest allocRequest;
    5552  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    5553  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5554  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
    5555  (*pAllocation)->InitBlockAllocation(
    5556  hCurrentPool,
    5557  pBlock,
    5558  allocRequest.offset,
    5559  vkMemReq.alignment,
    5560  vkMemReq.size,
    5561  suballocType,
    5562  createInfo.pUserData,
    5563  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5564  VMA_HEAVY_ASSERT(pBlock->Validate());
    5565  VMA_DEBUG_LOG("    Created new allocation Size=%llu", blockSize);
    5566 
    5567  return VK_SUCCESS;
    5568  }
    5569  }
    5570 
    5571  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    5572 
    5573  // 3. Try to allocate from existing blocks with making other allocations lost.
    5574  if(canMakeOtherLost)
    5575  {
    5576  uint32_t tryIndex = 0;
    5577  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    5578  {
    5579  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    5580  VmaAllocationRequest bestRequest = {};
    5581  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
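 // The cost of a request measures how expensive it is to satisfy: how much has to be
 // made lost to free the needed range. A cost of 0 means the request fits without
 // making any other allocation lost, so no better candidate can exist.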
    5582 
    5583  // 1. Search existing allocations.
    5584  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5585  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5586  {
    5587  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5588  VMA_ASSERT(pCurrBlock);
    5589  VmaAllocationRequest currRequest = {};
    5590  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5591  currentFrameIndex,
    5592  m_FrameInUseCount,
    5593  m_BufferImageGranularity,
    5594  vkMemReq.size,
    5595  vkMemReq.alignment,
    5596  suballocType,
    5597  canMakeOtherLost,
    5598  &currRequest))
    5599  {
    5600  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    5601  if(pBestRequestBlock == VMA_NULL ||
    5602  currRequestCost < bestRequestCost)
    5603  {
    5604  pBestRequestBlock = pCurrBlock;
    5605  bestRequest = currRequest;
    5606  bestRequestCost = currRequestCost;
    5607 
    5608  if(bestRequestCost == 0)
    5609  {
    5610  break;
    5611  }
    5612  }
    5613  }
    5614  }
    5615 
    5616  if(pBestRequestBlock != VMA_NULL)
    5617  {
    5618  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    5619  currentFrameIndex,
    5620  m_FrameInUseCount,
    5621  &bestRequest))
    5622  {
    5623  // This block was empty; after this allocation we no longer have an empty block.
    5624  if(pBestRequestBlock->m_Metadata.IsEmpty())
    5625  {
    5626  m_HasEmptyBlock = false;
    5627  }
    5628  // Allocate from this pBlock.
    5629  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5630  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
    5631  (*pAllocation)->InitBlockAllocation(
    5632  hCurrentPool,
    5633  pBestRequestBlock,
    5634  bestRequest.offset,
    5635  vkMemReq.alignment,
    5636  vkMemReq.size,
    5637  suballocType,
    5638  createInfo.pUserData,
    5639  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5640  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    5641  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    5642  return VK_SUCCESS;
    5643  }
    5644  // else: Some allocations must have been touched while we are here. Next try.
    5645  }
    5646  else
    5647  {
    5648  // Could not find place in any of the blocks - break outer loop.
    5649  break;
    5650  }
    5651  }
    5652  /* Maximum number of tries exceeded - a very unlikely event: many other
    5653  threads were simultaneously touching allocations, making it impossible to make
    5654  them lost at the same time as we tried to allocate. */
    5655  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    5656  {
    5657  return VK_ERROR_TOO_MANY_OBJECTS;
    5658  }
    5659  }
    5660 
    5661  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5662 }
    5663 
    5664 void VmaBlockVector::Free(
    5665  VmaAllocation hAllocation)
    5666 {
    5667  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    5668 
    5669  // Scope for lock.
    5670  {
    5671  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5672 
    5673  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    5674 
    5675  pBlock->m_Metadata.Free(hAllocation);
    5676  VMA_HEAVY_ASSERT(pBlock->Validate());
    5677 
    5678  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    5679 
    5680  // pBlock became empty after this deallocation.
    5681  if(pBlock->m_Metadata.IsEmpty())
    5682  {
    5683  // We already have an empty block - we don't want two, so delete this one.
    5684  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    5685  {
    5686  pBlockToDelete = pBlock;
    5687  Remove(pBlock);
    5688  }
    5689  // This is now our first empty block.
    5690  else
    5691  {
    5692  m_HasEmptyBlock = true;
    5693  }
    5694  }
    5695  // pBlock didn't become empty, but we have another empty block - find and free that one.
    5696  // (This is optional, heuristics.)
    5697  else if(m_HasEmptyBlock)
    5698  {
    5699  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    5700  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    5701  {
    5702  pBlockToDelete = pLastBlock;
    5703  m_Blocks.pop_back();
    5704  m_HasEmptyBlock = false;
    5705  }
    5706  }
    5707 
    5708  IncrementallySortBlocks();
    5709  }
    5710 
    5711  // Destruction of the freed, empty block. Deferred until this point, outside of the
    5712  // mutex lock, for performance reasons.
    5713  if(pBlockToDelete != VMA_NULL)
    5714  {
    5715  VMA_DEBUG_LOG(" Deleted empty allocation");
    5716  pBlockToDelete->Destroy(m_hAllocator);
    5717  vma_delete(m_hAllocator, pBlockToDelete);
    5718  }
    5719 }
    5720 
    5721 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    5722 {
    5723  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5724  {
    5725  if(m_Blocks[blockIndex] == pBlock)
    5726  {
    5727  VmaVectorRemove(m_Blocks, blockIndex);
    5728  return;
    5729  }
    5730  }
    5731  VMA_ASSERT(0);
    5732 }
    5733 
    5734 void VmaBlockVector::IncrementallySortBlocks()
    5735 {
    5736  // Bubble sort only until first swap.
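 // A single swap per call is enough: each allocation or free changes the free size of
 // only one block, so the ordering degrades gradually and is repaired incrementally
 // over successive calls, keeping this function O(n) in the worst case.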
    5737  for(size_t i = 1; i < m_Blocks.size(); ++i)
    5738  {
    5739  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    5740  {
    5741  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    5742  return;
    5743  }
    5744  }
    5745 }
    5746 
    5747 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    5748 {
    5749  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    5750  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    5751  allocInfo.allocationSize = blockSize;
    5752  VkDeviceMemory mem = VK_NULL_HANDLE;
    5753  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    5754  if(res < 0)
    5755  {
    5756  return res;
    5757  }
    5758 
    5759  // New VkDeviceMemory successfully created.
    5760 
    5761  // Map memory if needed.
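 // Blocks in the MAPPED vector are mapped immediately, unless persistently mapped
 // memory is currently unmapped (m_UnmapPersistentlyMappedMemoryCounter > 0) - in
 // that case MapPersistentlyMappedMemory() will map this block later.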
    5762  void* pMappedData = VMA_NULL;
    5763  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
    5764  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
    5765  {
    5766  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5767  m_hAllocator->m_hDevice,
    5768  mem,
    5769  0,
    5770  VK_WHOLE_SIZE,
    5771  0,
    5772  &pMappedData);
    5773  if(res < 0)
    5774  {
    5775  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    5776  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
    5777  return res;
    5778  }
    5779  }
    5780 
    5781  // Create a new block object for it.
    5782  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    5783  pBlock->Init(
    5784  m_MemoryTypeIndex,
    5785  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
    5786  mem,
    5787  allocInfo.allocationSize,
    5788  persistentMap,
    5789  pMappedData);
    5790 
    5791  m_Blocks.push_back(pBlock);
    5792  if(pNewBlockIndex != VMA_NULL)
    5793  {
    5794  *pNewBlockIndex = m_Blocks.size() - 1;
    5795  }
    5796 
    5797  return VK_SUCCESS;
    5798 }
    5799 
    5800 #if VMA_STATS_STRING_ENABLED
    5801 
    5802 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    5803 {
    5804  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5805 
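 // For a custom pool the emitted JSON has, for example, the shape:
 // { "MemoryTypeIndex": 7, "Mapped": true, "BlockSize": 268435456,
 //   "BlockCount": { "Min": 1, "Max": 16, "Cur": 2 },
 //   "FrameInUseCount": 1, "Blocks": [ ... ] }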
    5806  json.BeginObject();
    5807 
    5808  if(m_IsCustomPool)
    5809  {
    5810  json.WriteString("MemoryTypeIndex");
    5811  json.WriteNumber(m_MemoryTypeIndex);
    5812 
    5813  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    5814  {
    5815  json.WriteString("Mapped");
    5816  json.WriteBool(true);
    5817  }
    5818 
    5819  json.WriteString("BlockSize");
    5820  json.WriteNumber(m_PreferredBlockSize);
    5821 
    5822  json.WriteString("BlockCount");
    5823  json.BeginObject(true);
    5824  if(m_MinBlockCount > 0)
    5825  {
    5826  json.WriteString("Min");
    5827  json.WriteNumber(m_MinBlockCount);
    5828  }
    5829  if(m_MaxBlockCount < SIZE_MAX)
    5830  {
    5831  json.WriteString("Max");
    5832  json.WriteNumber(m_MaxBlockCount);
    5833  }
    5834  json.WriteString("Cur");
    5835  json.WriteNumber(m_Blocks.size());
    5836  json.EndObject();
    5837 
    5838  if(m_FrameInUseCount > 0)
    5839  {
    5840  json.WriteString("FrameInUseCount");
    5841  json.WriteNumber(m_FrameInUseCount);
    5842  }
    5843  }
    5844  else
    5845  {
    5846  json.WriteString("PreferredBlockSize");
    5847  json.WriteNumber(m_PreferredBlockSize);
    5848  }
    5849 
    5850  json.WriteString("Blocks");
    5851  json.BeginArray();
    5852  for(size_t i = 0; i < m_Blocks.size(); ++i)
    5853  {
    5854  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    5855  }
    5856  json.EndArray();
    5857 
    5858  json.EndObject();
    5859 }
    5860 
    5861 #endif // #if VMA_STATS_STRING_ENABLED
    5862 
    5863 void VmaBlockVector::UnmapPersistentlyMappedMemory()
    5864 {
    5865  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5866 
    5867  for(size_t i = m_Blocks.size(); i--; )
    5868  {
    5869  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5870  if(pBlock->m_pMappedData != VMA_NULL)
    5871  {
    5872  VMA_ASSERT(pBlock->m_PersistentMap != false);
    5873  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
    5874  pBlock->m_pMappedData = VMA_NULL;
    5875  }
    5876  }
    5877 }
    5878 
    5879 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
    5880 {
    5881  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5882 
    5883  VkResult finalResult = VK_SUCCESS;
    5884  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    5885  {
    5886  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5887  if(pBlock->m_PersistentMap)
    5888  {
    5889  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
    5890  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5891  m_hAllocator->m_hDevice,
    5892  pBlock->m_hMemory,
    5893  0,
    5894  VK_WHOLE_SIZE,
    5895  0,
    5896  &pBlock->m_pMappedData);
    5897  if(localResult != VK_SUCCESS)
    5898  {
    5899  finalResult = localResult;
    5900  }
    5901  }
    5902  }
    5903  return finalResult;
    5904 }
    5905 
    5906 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    5907  VmaAllocator hAllocator,
    5908  uint32_t currentFrameIndex)
    5909 {
    5910  if(m_pDefragmentator == VMA_NULL)
    5911  {
    5912  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    5913  hAllocator,
    5914  this,
    5915  currentFrameIndex);
    5916  }
    5917 
    5918  return m_pDefragmentator;
    5919 }
    5920 
    5921 VkResult VmaBlockVector::Defragment(
    5922  VmaDefragmentationStats* pDefragmentationStats,
    5923  VkDeviceSize& maxBytesToMove,
    5924  uint32_t& maxAllocationsToMove)
    5925 {
    5926  if(m_pDefragmentator == VMA_NULL)
    5927  {
    5928  return VK_SUCCESS;
    5929  }
    5930 
    5931  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5932 
    5933  // Defragment.
    5934  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    5935 
    5936  // Accumulate statistics.
    5937  if(pDefragmentationStats != VMA_NULL)
    5938  {
    5939  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    5940  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    5941  pDefragmentationStats->bytesMoved += bytesMoved;
    5942  pDefragmentationStats->allocationsMoved += allocationsMoved;
    5943  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    5944  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    5945  maxBytesToMove -= bytesMoved;
    5946  maxAllocationsToMove -= allocationsMoved;
    5947  }
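 // maxBytesToMove and maxAllocationsToMove are in-out parameters: decrementing them
 // here lets the caller spread one global budget across the defragmentators of all
 // block vectors processed in a single vmaDefragment() call.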
    5948 
    5949  // Free empty blocks.
    5950  m_HasEmptyBlock = false;
    5951  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    5952  {
    5953  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    5954  if(pBlock->m_Metadata.IsEmpty())
    5955  {
    5956  if(m_Blocks.size() > m_MinBlockCount)
    5957  {
    5958  if(pDefragmentationStats != VMA_NULL)
    5959  {
    5960  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    5961  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
    5962  }
    5963 
    5964  VmaVectorRemove(m_Blocks, blockIndex);
    5965  pBlock->Destroy(m_hAllocator);
    5966  vma_delete(m_hAllocator, pBlock);
    5967  }
    5968  else
    5969  {
    5970  m_HasEmptyBlock = true;
    5971  }
    5972  }
    5973  }
    5974 
    5975  return result;
    5976 }
    5977 
    5978 void VmaBlockVector::DestroyDefragmentator()
    5979 {
    5980  if(m_pDefragmentator != VMA_NULL)
    5981  {
    5982  vma_delete(m_hAllocator, m_pDefragmentator);
    5983  m_pDefragmentator = VMA_NULL;
    5984  }
    5985 }
    5986 
    5987 void VmaBlockVector::MakePoolAllocationsLost(
    5988  uint32_t currentFrameIndex,
    5989  size_t* pLostAllocationCount)
    5990 {
    5991  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5992 
    5993  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5994  {
    5995  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5996  VMA_ASSERT(pBlock);
    5997  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    5998  }
    5999 }
    6000 
    6001 void VmaBlockVector::AddStats(VmaStats* pStats)
    6002 {
    6003  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    6004  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    6005 
    6006  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6007 
    6008  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6009  {
    6010  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6011  VMA_ASSERT(pBlock);
    6012  VMA_HEAVY_ASSERT(pBlock->Validate());
    6013  VmaStatInfo allocationStatInfo;
    6014  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    6015  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6016  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6017  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6018  }
    6019 }
    6020 
    6021 ////////////////////////////////////////////////////////////////////////////////
    6022 // VmaDefragmentator members definition
    6023 
    6024 VmaDefragmentator::VmaDefragmentator(
    6025  VmaAllocator hAllocator,
    6026  VmaBlockVector* pBlockVector,
    6027  uint32_t currentFrameIndex) :
    6028  m_hAllocator(hAllocator),
    6029  m_pBlockVector(pBlockVector),
    6030  m_CurrentFrameIndex(currentFrameIndex),
    6031  m_BytesMoved(0),
    6032  m_AllocationsMoved(0),
    6033  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    6034  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    6035 {
    6036 }
    6037 
    6038 VmaDefragmentator::~VmaDefragmentator()
    6039 {
    6040  for(size_t i = m_Blocks.size(); i--; )
    6041  {
    6042  vma_delete(m_hAllocator, m_Blocks[i]);
    6043  }
    6044 }
    6045 
    6046 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    6047 {
    6048  AllocationInfo allocInfo;
    6049  allocInfo.m_hAllocation = hAlloc;
    6050  allocInfo.m_pChanged = pChanged;
    6051  m_Allocations.push_back(allocInfo);
    6052 }
    6053 
    6054 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    6055 {
    6056  // It has already been mapped for defragmentation.
    6057  if(m_pMappedDataForDefragmentation)
    6058  {
    6059  *ppMappedData = m_pMappedDataForDefragmentation;
    6060  return VK_SUCCESS;
    6061  }
    6062 
    6063  // It is persistently mapped.
    6064  if(m_pBlock->m_PersistentMap)
    6065  {
    6066  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
    6067  *ppMappedData = m_pBlock->m_pMappedData;
    6068  return VK_SUCCESS;
    6069  }
    6070 
    6071  // Map on first usage.
    6072  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6073  hAllocator->m_hDevice,
    6074  m_pBlock->m_hMemory,
    6075  0,
    6076  VK_WHOLE_SIZE,
    6077  0,
    6078  &m_pMappedDataForDefragmentation);
    6079  *ppMappedData = m_pMappedDataForDefragmentation;
    6080  return res;
    6081 }
    6082 
    6083 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    6084 {
    6085  if(m_pMappedDataForDefragmentation != VMA_NULL)
    6086  {
    6087  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
    6088  }
    6089 }
    6090 
    6091 VkResult VmaDefragmentator::DefragmentRound(
    6092  VkDeviceSize maxBytesToMove,
    6093  uint32_t maxAllocationsToMove)
    6094 {
    6095  if(m_Blocks.empty())
    6096  {
    6097  return VK_SUCCESS;
    6098  }
    6099 
    6100  size_t srcBlockIndex = m_Blocks.size() - 1;
    6101  size_t srcAllocIndex = SIZE_MAX;
    6102  for(;;)
    6103  {
    6104  // 1. Find next allocation to move.
    6105  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    6106  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
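 // Taking the largest allocations first packs them into the most "destination"
 // blocks while contiguous free space is still plentiful; smaller allocations can
 // then fill the remaining holes.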
    6107  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    6108  {
    6109  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    6110  {
    6111  // Finished: no more allocations to process.
    6112  if(srcBlockIndex == 0)
    6113  {
    6114  return VK_SUCCESS;
    6115  }
    6116  else
    6117  {
    6118  --srcBlockIndex;
    6119  srcAllocIndex = SIZE_MAX;
    6120  }
    6121  }
    6122  else
    6123  {
    6124  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    6125  }
    6126  }
    6127 
    6128  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    6129  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    6130 
    6131  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    6132  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    6133  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    6134  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    6135 
    6136  // 2. Try to find new place for this allocation in preceding or current block.
    6137  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    6138  {
    6139  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    6140  VmaAllocationRequest dstAllocRequest;
    6141  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
    6142  m_CurrentFrameIndex,
    6143  m_pBlockVector->GetFrameInUseCount(),
    6144  m_pBlockVector->GetBufferImageGranularity(),
    6145  size,
    6146  alignment,
    6147  suballocType,
    6148  false, // canMakeOtherLost
    6149  &dstAllocRequest) &&
    6150  MoveMakesSense(
    6151  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    6152  {
    6153  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    6154 
    6155  // Reached limit on number of allocations or bytes to move.
    6156  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    6157  (m_BytesMoved + size > maxBytesToMove))
    6158  {
    6159  return VK_INCOMPLETE;
    6160  }
    6161 
    6162  void* pDstMappedData = VMA_NULL;
    6163  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    6164  if(res != VK_SUCCESS)
    6165  {
    6166  return res;
    6167  }
    6168 
    6169  void* pSrcMappedData = VMA_NULL;
    6170  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    6171  if(res != VK_SUCCESS)
    6172  {
    6173  return res;
    6174  }
    6175 
    6176  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
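 // Both ranges are in HOST_VISIBLE, currently mapped memory. The destination range
 // is a free region (possibly in the same block), so it cannot overlap the still-live
 // source allocation and a plain memcpy is safe.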
    6177  memcpy(
    6178  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    6179  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    6180  static_cast<size_t>(size));
    6181 
    6182  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
    6183  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
    6184 
    6185  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    6186 
    6187  if(allocInfo.m_pChanged != VMA_NULL)
    6188  {
    6189  *allocInfo.m_pChanged = VK_TRUE;
    6190  }
    6191 
    6192  ++m_AllocationsMoved;
    6193  m_BytesMoved += size;
    6194 
    6195  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    6196 
    6197  break;
    6198  }
    6199  }
    6200 
    6201  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
    6202 
    6203  if(srcAllocIndex > 0)
    6204  {
    6205  --srcAllocIndex;
    6206  }
    6207  else
    6208  {
    6209  if(srcBlockIndex > 0)
    6210  {
    6211  --srcBlockIndex;
    6212  srcAllocIndex = SIZE_MAX;
    6213  }
    6214  else
    6215  {
    6216  return VK_SUCCESS;
    6217  }
    6218  }
    6219  }
    6220 }
    6221 
    6222 VkResult VmaDefragmentator::Defragment(
    6223  VkDeviceSize maxBytesToMove,
    6224  uint32_t maxAllocationsToMove)
    6225 {
    6226  if(m_Allocations.empty())
    6227  {
    6228  return VK_SUCCESS;
    6229  }
    6230 
    6231  // Create block info for each block.
    6232  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    6233  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6234  {
    6235  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    6236  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    6237  m_Blocks.push_back(pBlockInfo);
    6238  }
    6239 
    6240  // Sort them by m_pBlock pointer value.
    6241  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
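 // With m_Blocks ordered by pointer value, VmaBinaryFindFirstNotLess below can locate
 // the BlockInfo of a given block in O(log n) instead of a linear search.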
    6242 
    6243  // Move allocation infos from m_Allocations to the m_Allocations of the matching BlockInfo.
    6244  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
    6245  {
    6246  AllocationInfo& allocInfo = m_Allocations[allocIndex];
    6247  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    6248  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6249  {
    6250  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    6251  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    6252  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    6253  {
    6254  (*it)->m_Allocations.push_back(allocInfo);
    6255  }
    6256  else
    6257  {
    6258  VMA_ASSERT(0);
    6259  }
    6260  }
    6261  }
    6262  m_Allocations.clear();
    6263 
    6264  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6265  {
    6266  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    6267  pBlockInfo->CalcHasNonMovableAllocations();
    6268  pBlockInfo->SortAllocationsBySizeDescecnding();
    6269  }
    6270 
    6271  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    6272  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    6273 
    6274  // Execute defragmentation rounds (the main part).
    6275  VkResult result = VK_SUCCESS;
    6276  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    6277  {
    6278  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    6279  }
    6280 
    6281  // Unmap blocks that were mapped for defragmentation.
    6282  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6283  {
    6284  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    6285  }
    6286 
    6287  return result;
    6288 }
    6289 
    6290 bool VmaDefragmentator::MoveMakesSense(
    6291  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6292  size_t srcBlockIndex, VkDeviceSize srcOffset)
    6293 {
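 // A move makes sense only if it compacts: into an earlier (more "destination")
 // block, or to a lower offset within the same block.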
    6294  if(dstBlockIndex < srcBlockIndex)
    6295  {
    6296  return true;
    6297  }
    6298  if(dstBlockIndex > srcBlockIndex)
    6299  {
    6300  return false;
    6301  }
    6302  if(dstOffset < srcOffset)
    6303  {
    6304  return true;
    6305  }
    6306  return false;
    6307 }
    6308 
    6309 ////////////////////////////////////////////////////////////////////////////////
    6310 // VmaAllocator_T
    6311 
    6312 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    6313  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    6314  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    6315  m_PhysicalDevice(pCreateInfo->physicalDevice),
    6316  m_hDevice(pCreateInfo->device),
    6317  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    6318  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    6319  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    6320  m_UnmapPersistentlyMappedMemoryCounter(0),
    6321  m_PreferredLargeHeapBlockSize(0),
    6322  m_PreferredSmallHeapBlockSize(0),
    6323  m_CurrentFrameIndex(0),
    6324  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
    6325 {
    6326  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    6327 
    6328  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    6329  memset(&m_MemProps, 0, sizeof(m_MemProps));
    6330  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    6331 
    6332  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    6333  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    6334 
    6335  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6336  {
    6337  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    6338  }
    6339 
    6340  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    6341  {
    6342  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    6343  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    6344  }
    6345 
    6346  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    6347 
    6348  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    6349  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    6350 
    6351  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    6352  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    6353  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
    6354  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
    6355 
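 // A user-provided heap size limit is emulated by clamping the heap size reported in
 // m_MemProps, so every later computation (e.g. CalcPreferredBlockSize) already sees
 // the limited value.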
    6356  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    6357  {
    6358  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    6359  {
    6360  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    6361  if(limit != VK_WHOLE_SIZE)
    6362  {
    6363  m_HeapSizeLimit[heapIndex] = limit;
    6364  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    6365  {
    6366  m_MemProps.memoryHeaps[heapIndex].size = limit;
    6367  }
    6368  }
    6369  }
    6370  }
    6371 
    6372  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6373  {
    6374  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    6375 
    6376  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
    6377  {
    6378  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
    6379  this,
    6380  memTypeIndex,
    6381  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
    6382  preferredBlockSize,
    6383  0,
    6384  SIZE_MAX,
    6385  GetBufferImageGranularity(),
    6386  pCreateInfo->frameInUseCount,
    6387  false); // isCustomPool
    6388  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    6389  // because minBlockCount is 0.
    6390  m_pDedicatedAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    6391  }
    6392  }
    6393 }
    6394 
    6395 VmaAllocator_T::~VmaAllocator_T()
    6396 {
    6397  VMA_ASSERT(m_Pools.empty());
    6398 
    6399  for(size_t i = GetMemoryTypeCount(); i--; )
    6400  {
    6401  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
    6402  {
    6403  vma_delete(this, m_pDedicatedAllocations[i][j]);
    6404  vma_delete(this, m_pBlockVectors[i][j]);
    6405  }
    6406  }
    6407 }
    6408 
    6409 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    6410 {
    6411 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6412  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    6413  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    6414  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    6415  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    6416  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    6417  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    6418  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    6419  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    6420  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    6421  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    6422  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    6423  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    6424  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    6425  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    6426  // Ignoring vkGetBufferMemoryRequirements2KHR.
    6427  // Ignoring vkGetImageMemoryRequirements2KHR.
    6428 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6429 
    6430 #define VMA_COPY_IF_NOT_NULL(funcName) \
    6431  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
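 // A user-provided pointer overrides the (possibly statically imported) one; entries
 // the user leaves null keep their previous value and are verified by the asserts below.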
    6432 
    6433  if(pVulkanFunctions != VMA_NULL)
    6434  {
    6435  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    6436  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    6437  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    6438  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    6439  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    6440  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    6441  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    6442  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    6443  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    6444  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    6445  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    6446  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    6447  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    6448  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    6449  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    6450  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    6451  }
    6452 
    6453 #undef VMA_COPY_IF_NOT_NULL
    6454 
    6455  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    6456  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    6457  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    6458  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    6459  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    6460  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    6461  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    6462  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    6463  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    6464  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    6465  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    6466  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    6467  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    6468  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    6469  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    6470  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    6471  if(m_UseKhrDedicatedAllocation)
    6472  {
    6473  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    6474  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    6475  }
    6476 }
    6477 
    6478 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    6479 {
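 // Heaps no larger than VMA_SMALL_HEAP_MAX_SIZE get the smaller default block size,
 // so that a single block does not consume a large fraction of such a heap.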
    6480  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6481  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    6482  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
    6483  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
    6484 }
    6485 
    6486 VkResult VmaAllocator_T::AllocateMemoryOfType(
    6487  const VkMemoryRequirements& vkMemReq,
    6488  bool dedicatedAllocation,
    6489  VkBuffer dedicatedBuffer,
    6490  VkImage dedicatedImage,
    6491  const VmaAllocationCreateInfo& createInfo,
    6492  uint32_t memTypeIndex,
    6493  VmaSuballocationType suballocType,
    6494  VmaAllocation* pAllocation)
    6495 {
    6496  VMA_ASSERT(pAllocation != VMA_NULL);
    6497  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    6498 
    6499  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
    6500  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6501  VMA_ASSERT(blockVector);
    6502 
    6503  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    6504 
    6505  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    6506  bool preferDedicatedMemory =
    6507  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    6508  dedicatedAllocation ||
    6509  // Heuristics: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
    6510  vkMemReq.size > preferredBlockSize / 2;
    6511 
    6512  if(preferDedicatedMemory &&
    6513  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    6514  finalCreateInfo.pool == VK_NULL_HANDLE)
    6515  {
    6516  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    6517  }
    6518 
    6519  // If memory type is not HOST_VISIBLE, disable PERSISTENT_MAP.
    6520  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 &&
    6521  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    6522  {
    6523  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
    6524  }
    6525 
    6526  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    6527  {
    6528  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6529  {
    6530  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6531  }
    6532  else
    6533  {
    6534  return AllocateDedicatedMemory(
    6535  vkMemReq.size,
    6536  suballocType,
    6537  memTypeIndex,
    6538  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6539  finalCreateInfo.pUserData,
    6540  dedicatedBuffer,
    6541  dedicatedImage,
    6542  pAllocation);
    6543  }
    6544  }
    6545  else
    6546  {
    6547  VkResult res = blockVector->Allocate(
    6548  VK_NULL_HANDLE, // hCurrentPool
    6549  m_CurrentFrameIndex.load(),
    6550  vkMemReq,
    6551  finalCreateInfo,
    6552  suballocType,
    6553  pAllocation);
    6554  if(res == VK_SUCCESS)
    6555  {
    6556  return res;
    6557  }
    6558 
    6559  // Allocation from a block failed - try dedicated memory.
    6560  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6561  {
    6562  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6563  }
    6564  else
    6565  {
    6566  res = AllocateDedicatedMemory(
    6567  vkMemReq.size,
    6568  suballocType,
    6569  memTypeIndex,
    6570  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6571  finalCreateInfo.pUserData,
    6572  dedicatedBuffer,
    6573  dedicatedImage,
    6574  pAllocation);
    6575  if(res == VK_SUCCESS)
    6576  {
    6577  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
    6578  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    6579  return VK_SUCCESS;
    6580  }
    6581  else
    6582  {
    6583  // Everything failed: Return error code.
    6584  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6585  return res;
    6586  }
    6587  }
    6588  }
    6589 }
    6590 
    6591 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    6592  VkDeviceSize size,
    6593  VmaSuballocationType suballocType,
    6594  uint32_t memTypeIndex,
    6595  bool map,
    6596  void* pUserData,
    6597  VkBuffer dedicatedBuffer,
    6598  VkImage dedicatedImage,
    6599  VmaAllocation* pAllocation)
    6600 {
    6601  VMA_ASSERT(pAllocation);
    6602 
    6603  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    6604  allocInfo.memoryTypeIndex = memTypeIndex;
    6605  allocInfo.allocationSize = size;
    6606 
    6607  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    6608  if(m_UseKhrDedicatedAllocation)
    6609  {
    6610  if(dedicatedBuffer != VK_NULL_HANDLE)
    6611  {
    6612  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    6613  dedicatedAllocInfo.buffer = dedicatedBuffer;
    6614  allocInfo.pNext = &dedicatedAllocInfo;
    6615  }
    6616  else if(dedicatedImage != VK_NULL_HANDLE)
    6617  {
    6618  dedicatedAllocInfo.image = dedicatedImage;
    6619  allocInfo.pNext = &dedicatedAllocInfo;
    6620  }
    6621  }
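 // Chaining VkMemoryDedicatedAllocateInfoKHR ties this VkDeviceMemory to one specific
 // buffer or image, which allows the driver to choose a more optimal placement
 // (the point of VK_KHR_dedicated_allocation).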
    6622 
    6623  // Allocate VkDeviceMemory.
    6624  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    6625  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    6626  if(res < 0)
    6627  {
    6628  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6629  return res;
    6630  }
    6631 
    6632  void* pMappedData = VMA_NULL;
    6633  if(map)
    6634  {
    6635  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
    6636  {
    6637  res = (*m_VulkanFunctions.vkMapMemory)(
    6638  m_hDevice,
    6639  hMemory,
    6640  0,
    6641  VK_WHOLE_SIZE,
    6642  0,
    6643  &pMappedData);
    6644  if(res < 0)
    6645  {
    6646  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    6647  FreeVulkanMemory(memTypeIndex, size, hMemory);
    6648  return res;
    6649  }
    6650  }
    6651  }
    6652 
    6653  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
    6654  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
    6655 
    6656  // Register it in m_pDedicatedAllocations.
    6657  {
    6658  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6659  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
    6660  VMA_ASSERT(pDedicatedAllocations);
    6661  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    6662  }
    6663 
    6664  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    6665 
    6666  return VK_SUCCESS;
    6667 }
    6668 
    6669 void VmaAllocator_T::GetBufferMemoryRequirements(
    6670  VkBuffer hBuffer,
    6671  VkMemoryRequirements& memReq,
    6672  bool& requiresDedicatedAllocation,
    6673  bool& prefersDedicatedAllocation) const
    6674 {
    6675  if(m_UseKhrDedicatedAllocation)
    6676  {
    6677  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6678  memReqInfo.buffer = hBuffer;
    6679 
    6680  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6681 
    6682  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6683  memReq2.pNext = &memDedicatedReq;
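 // vkGetBufferMemoryRequirements2KHR fills the whole pNext chain, so memDedicatedReq
 // receives requiresDedicatedAllocation/prefersDedicatedAllocation in the same call.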
    6684 
    6685  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6686 
    6687  memReq = memReq2.memoryRequirements;
    6688  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6689  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6690  }
    6691  else
    6692  {
    6693  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    6694  requiresDedicatedAllocation = false;
    6695  prefersDedicatedAllocation = false;
    6696  }
    6697 }
    6698 
    6699 void VmaAllocator_T::GetImageMemoryRequirements(
    6700  VkImage hImage,
    6701  VkMemoryRequirements& memReq,
    6702  bool& requiresDedicatedAllocation,
    6703  bool& prefersDedicatedAllocation) const
    6704 {
    6705  if(m_UseKhrDedicatedAllocation)
    6706  {
    6707  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6708  memReqInfo.image = hImage;
    6709 
    6710  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6711 
    6712  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6713  memReq2.pNext = &memDedicatedReq;
    6714 
    6715  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6716 
    6717  memReq = memReq2.memoryRequirements;
    6718  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6719  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6720  }
    6721  else
    6722  {
    6723  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    6724  requiresDedicatedAllocation = false;
    6725  prefersDedicatedAllocation = false;
    6726  }
    6727 }
    6728 
    6729 VkResult VmaAllocator_T::AllocateMemory(
    6730  const VkMemoryRequirements& vkMemReq,
    6731  bool requiresDedicatedAllocation,
    6732  bool prefersDedicatedAllocation,
    6733  VkBuffer dedicatedBuffer,
    6734  VkImage dedicatedImage,
    6735  const VmaAllocationCreateInfo& createInfo,
    6736  VmaSuballocationType suballocType,
    6737  VmaAllocation* pAllocation)
    6738 {
    6739  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    6740  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6741  {
    6742  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    6743  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6744  }
    6745  if(requiresDedicatedAllocation)
    6746  {
    6747  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6748  {
    6749  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    6750  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6751  }
    6752  if(createInfo.pool != VK_NULL_HANDLE)
    6753  {
    6754  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    6755  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6756  }
    6757  }
    6758  if((createInfo.pool != VK_NULL_HANDLE) &&
    6759  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    6760  {
    6761  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    6762  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6763  }
    6764 
    6765  if(createInfo.pool != VK_NULL_HANDLE)
    6766  {
    6767  return createInfo.pool->m_BlockVector.Allocate(
    6768  createInfo.pool,
    6769  m_CurrentFrameIndex.load(),
    6770  vkMemReq,
    6771  createInfo,
    6772  suballocType,
    6773  pAllocation);
    6774  }
    6775  else
    6776  {
    6777  // Bit mask of Vulkan memory types acceptable for this allocation.
    6778  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    6779  uint32_t memTypeIndex = UINT32_MAX;
    6780  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6781  if(res == VK_SUCCESS)
    6782  {
    6783  res = AllocateMemoryOfType(
    6784  vkMemReq,
    6785  requiresDedicatedAllocation || prefersDedicatedAllocation,
    6786  dedicatedBuffer,
    6787  dedicatedImage,
    6788  createInfo,
    6789  memTypeIndex,
    6790  suballocType,
    6791  pAllocation);
    6792  // Succeeded on first try.
    6793  if(res == VK_SUCCESS)
    6794  {
    6795  return res;
    6796  }
    6797  // Allocation from this memory type failed. Try other compatible memory types.
    6798  else
    6799  {
    6800  for(;;)
    6801  {
    6802  // Remove old memTypeIndex from list of possibilities.
    6803  memoryTypeBits &= ~(1u << memTypeIndex);
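 // vmaFindMemoryTypeIndex only considers types whose bit is set in memoryTypeBits,
 // so clearing the bit guarantees the next search returns a different type or fails.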
    6804  // Find alternative memTypeIndex.
    6805  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6806  if(res == VK_SUCCESS)
    6807  {
    6808  res = AllocateMemoryOfType(
    6809  vkMemReq,
    6810  requiresDedicatedAllocation || prefersDedicatedAllocation,
    6811  dedicatedBuffer,
    6812  dedicatedImage,
    6813  createInfo,
    6814  memTypeIndex,
    6815  suballocType,
    6816  pAllocation);
    6817  // Allocation from this alternative memory type succeeded.
    6818  if(res == VK_SUCCESS)
    6819  {
    6820  return res;
    6821  }
    6822  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    6823  }
    6824  // No other matching memory type index could be found.
    6825  else
    6826  {
    6827  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    6828  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6829  }
    6830  }
    6831  }
    6832  }
    6833  // Can't find any single memory type matching the requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    6834  else
    6835  return res;
    6836  }
    6837 }
    6838 
    6839 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    6840 {
    6841  VMA_ASSERT(allocation);
    6842 
    6843  if(allocation->CanBecomeLost() == false ||
    6844  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6845  {
    6846  switch(allocation->GetType())
    6847  {
    6848  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    6849  {
    6850  VmaBlockVector* pBlockVector = VMA_NULL;
    6851  VmaPool hPool = allocation->GetPool();
    6852  if(hPool != VK_NULL_HANDLE)
    6853  {
    6854  pBlockVector = &hPool->m_BlockVector;
    6855  }
    6856  else
    6857  {
    6858  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    6859  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
    6860  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6861  }
    6862  pBlockVector->Free(allocation);
    6863  }
    6864  break;
    6865  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    6866  FreeDedicatedMemory(allocation);
    6867  break;
    6868  default:
    6869  VMA_ASSERT(0);
    6870  }
    6871  }
    6872 
    6873  vma_delete(this, allocation);
    6874 }
    6875 
    6876 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    6877 {
    6878  // Initialize.
    6879  InitStatInfo(pStats->total);
    6880  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    6881  InitStatInfo(pStats->memoryType[i]);
    6882  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6883  InitStatInfo(pStats->memoryHeap[i]);
    6884 
    6885  // Process default pools.
    6886  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6887  {
    6888  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6889  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6890  {
    6891  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6892  VMA_ASSERT(pBlockVector);
    6893  pBlockVector->AddStats(pStats);
    6894  }
    6895  }
    6896 
    6897  // Process custom pools.
    6898  {
    6899  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6900  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6901  {
    6902  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    6903  }
    6904  }
    6905 
    6906  // Process dedicated allocations.
    6907  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6908  {
    6909  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6910  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6911  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6912  {
    6913  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
    6914  VMA_ASSERT(pDedicatedAllocVector);
    6915  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    6916  {
    6917  VmaStatInfo allocationStatInfo;
    6918  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    6919  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6920  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6921  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6922  }
    6923  }
    6924  }
    6925 
    6926  // Postprocess.
    6927  VmaPostprocessCalcStatInfo(pStats->total);
    6928  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    6929  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    6930  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    6931  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    6932 }
    6933 
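// 4098 == 0x1002 - the PCI vendor ID of AMD.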
    6934 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    6935 
    6936 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
    6937 {
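 // Unmap/map calls are reference-counted: only the 0 -> 1 transition here actually
 // unmaps, and only the matching 1 -> 0 transition in MapPersistentlyMappedMemory()
 // maps again, so nested pairs of calls are harmless.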
    6938  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
    6939  {
    6940  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
    6941  {
    6942  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
    6943  {
    6944  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    6945  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
    6946  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    6947  {
    6948  // Process DedicatedAllocations.
    6949  {
    6950  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6951  AllocationVectorType* pDedicatedAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6952  for(size_t dedicatedAllocIndex = pDedicatedAllocationsVector->size(); dedicatedAllocIndex--; )
    6953  {
    6954  VmaAllocation hAlloc = (*pDedicatedAllocationsVector)[dedicatedAllocIndex];
    6955  hAlloc->DedicatedAllocUnmapPersistentlyMappedMemory(this);
    6956  }
    6957  }
    6958 
    6959  // Process normal Allocations.
    6960  {
    6961  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6962  pBlockVector->UnmapPersistentlyMappedMemory();
    6963  }
    6964  }
    6965  }
    6966 
    6967  // Process custom pools.
    6968  {
    6969  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6970  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6971  {
    6972  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
    6973  }
    6974  }
    6975  }
    6976  }
    6977 }
    6978 
    6979 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
    6980 {
    6981  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
    6982  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
    6983  {
    6984  VkResult finalResult = VK_SUCCESS;
    6985  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
    6986  {
    6987  // Process custom pools.
    6988  {
    6989  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6990  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6991  {
    6992  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
    6993  }
    6994  }
    6995 
    6996  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
    6997  {
    6998  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    6999  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
    7000  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    7001  {
    7002  // Process DedicatedAllocations.
    7003  {
    7004  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7005  AllocationVectorType* pAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    7006  for(size_t dedicatedAllocIndex = 0, dedicatedAllocCount = pAllocationsVector->size(); dedicatedAllocIndex < dedicatedAllocCount; ++dedicatedAllocIndex)
    7007  {
    7008  VmaAllocation hAlloc = (*pAllocationsVector)[dedicatedAllocIndex];
    7009  hAlloc->DedicatedAllocMapPersistentlyMappedMemory(this);
    7010  }
    7011  }
    7012 
    7013  // Process normal Allocations.
    7014  {
    7015  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    7016  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
    7017  if(localResult != VK_SUCCESS)
    7018  {
    7019  finalResult = localResult;
    7020  }
    7021  }
    7022  }
    7023  }
    7024  }
    7025  return finalResult;
    7026  }
    7027  else
    7028  return VK_SUCCESS;
    7029 }
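
Because m_UnmapPersistentlyMappedMemoryCounter makes this pair of functions reference-counted, nested unmap/map pairs are legal and only the outermost pair actually touches Vulkan memory. A sketch of the intended call pattern, assuming a valid `allocator`:

    // Outermost pair: really unmaps (counter 0 -> 1) and remaps (1 -> 0).
    vmaUnmapPersistentlyMappedMemory(allocator);
    {
        // Nested pair: only adjusts the counter; no vkUnmapMemory/vkMapMemory calls.
        vmaUnmapPersistentlyMappedMemory(allocator);
        vmaMapPersistentlyMappedMemory(allocator);
    }
    VkResult res = vmaMapPersistentlyMappedMemory(allocator); // remapping happens here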
    7030 
    7031 VkResult VmaAllocator_T::Defragment(
    7032  VmaAllocation* pAllocations,
    7033  size_t allocationCount,
    7034  VkBool32* pAllocationsChanged,
    7035  const VmaDefragmentationInfo* pDefragmentationInfo,
    7036  VmaDefragmentationStats* pDefragmentationStats)
    7037 {
    7038  if(pAllocationsChanged != VMA_NULL)
    7039  {
    7040  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
    7041  }
    7042  if(pDefragmentationStats != VMA_NULL)
    7043  {
    7044  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    7045  }
    7046 
    7047  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
    7048  {
    7049  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
    7050  return VK_ERROR_MEMORY_MAP_FAILED;
    7051  }
    7052 
    7053  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    7054 
    7055  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    7056 
    7057  const size_t poolCount = m_Pools.size();
    7058 
    7059  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    7060  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    7061  {
    7062  VmaAllocation hAlloc = pAllocations[allocIndex];
    7063  VMA_ASSERT(hAlloc);
    7064  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    7065  // DedicatedAlloc cannot be defragmented.
    7066  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    7067  // Only HOST_VISIBLE memory types can be defragmented.
    7068  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    7069  // Lost allocation cannot be defragmented.
    7070  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    7071  {
    7072  VmaBlockVector* pAllocBlockVector = nullptr;
    7073 
    7074  const VmaPool hAllocPool = hAlloc->GetPool();
    7075  // This allocation belongs to custom pool.
    7076  if(hAllocPool != VK_NULL_HANDLE)
    7077  {
    7078  pAllocBlockVector = &hAllocPool->GetBlockVector();
    7079  }
    7080  // This allocation belongs to general pool.
    7081  else
    7082  {
    7083  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
    7084  }
    7085 
    7086  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    7087 
    7088  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    7089  &pAllocationsChanged[allocIndex] : VMA_NULL;
    7090  pDefragmentator->AddAllocation(hAlloc, pChanged);
    7091  }
    7092  }
    7093 
    7094  VkResult result = VK_SUCCESS;
    7095 
    7096  // ======== Main processing.
    7097 
    7098  VkDeviceSize maxBytesToMove = SIZE_MAX;
    7099  uint32_t maxAllocationsToMove = UINT32_MAX;
    7100  if(pDefragmentationInfo != VMA_NULL)
    7101  {
    7102  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    7103  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    7104  }
    7105 
    7106  // Process standard memory.
    7107  for(uint32_t memTypeIndex = 0;
    7108  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    7109  ++memTypeIndex)
    7110  {
    7111  // Only HOST_VISIBLE memory types can be defragmented.
    7112  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7113  {
    7114  for(uint32_t blockVectorType = 0;
    7115  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
    7116  ++blockVectorType)
    7117  {
    7118  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
    7119  pDefragmentationStats,
    7120  maxBytesToMove,
    7121  maxAllocationsToMove);
    7122  }
    7123  }
    7124  }
    7125 
    7126  // Process custom pools.
    7127  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    7128  {
    7129  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    7130  pDefragmentationStats,
    7131  maxBytesToMove,
    7132  maxAllocationsToMove);
    7133  }
    7134 
    7135  // ======== Destroy defragmentators.
    7136 
    7137  // Process custom pools.
    7138  for(size_t poolIndex = poolCount; poolIndex--; )
    7139  {
    7140  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    7141  }
    7142 
    7143  // Process standard memory.
    7144  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    7145  {
    7146  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7147  {
    7148  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
    7149  {
    7150  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
    7151  }
    7152  }
    7153  }
    7154 
    7155  return result;
    7156 }
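
A sketch of driving this from user code through vmaDefragment(): collect candidate allocations, let the library move them, then recreate whatever was bound to an allocation that changed. The `allocations` vector and the recreation step are illustrative, not part of the library:

    std::vector<VmaAllocation> allocations; // filled with block allocations in HOST_VISIBLE memory
    std::vector<VkBool32> changed(allocations.size());

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;      // effectively no byte limit
    defragInfo.maxAllocationsToMove = UINT32_MAX;   // effectively no count limit

    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(allocator, allocations.data(), allocations.size(),
        changed.data(), &defragInfo, &defragStats);

    // Any allocation with changed[i] == VK_TRUE now lives at a different
    // offset/memory, so buffers or images bound to it must be recreated and rebound.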
    7157 
    7158 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    7159 {
    7160  if(hAllocation->CanBecomeLost())
    7161  {
    7162  /*
    7163  Warning: This is a carefully designed algorithm.
    7164  Do not modify unless you really know what you're doing :)
    7165  */
    7166  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    7167  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    7168  for(;;)
    7169  {
    7170  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7171  {
    7172  pAllocationInfo->memoryType = UINT32_MAX;
    7173  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    7174  pAllocationInfo->offset = 0;
    7175  pAllocationInfo->size = hAllocation->GetSize();
    7176  pAllocationInfo->pMappedData = VMA_NULL;
    7177  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7178  return;
    7179  }
    7180  else if(localLastUseFrameIndex == localCurrFrameIndex)
    7181  {
    7182  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7183  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7184  pAllocationInfo->offset = hAllocation->GetOffset();
    7185  pAllocationInfo->size = hAllocation->GetSize();
    7186  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    7187  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7188  return;
    7189  }
    7190  else // Last use time earlier than current time.
    7191  {
    7192  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    7193  {
    7194  localLastUseFrameIndex = localCurrFrameIndex;
    7195  }
    7196  }
    7197  }
    7198  }
    7199  // We could use the same code here, but for performance reasons this path skips the hAllocation.LastUseFrameIndex atomic.
    7200  else
    7201  {
    7202  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7203  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7204  pAllocationInfo->offset = hAllocation->GetOffset();
    7205  pAllocationInfo->size = hAllocation->GetSize();
    7206  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    7207  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7208  }
    7209 }
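
The lost-allocation branch above is what makes the usual per-frame pattern work: querying the info both retrieves it and marks the allocation as used in the current frame. A sketch, where recreateResource() is a hypothetical application function:

    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocation, &allocInfo);
    if(allocInfo.deviceMemory == VK_NULL_HANDLE)
    {
        // The allocation became lost: its memory was reused for another allocation.
        vmaFreeMemory(allocator, allocation);
        recreateResource(); // hypothetical: allocate and fill the resource again
    }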
    7210 
    7211 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    7212 {
    7213  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    7214 
    7215  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    7216 
    7217  if(newCreateInfo.maxBlockCount == 0)
    7218  {
    7219  newCreateInfo.maxBlockCount = SIZE_MAX;
    7220  }
    7221  if(newCreateInfo.blockSize == 0)
    7222  {
    7223  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    7224  }
    7225 
    7226  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    7227 
    7228  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    7229  if(res != VK_SUCCESS)
    7230  {
    7231  vma_delete(this, *pPool);
    7232  *pPool = VMA_NULL;
    7233  return res;
    7234  }
    7235 
    7236  // Add to m_Pools.
    7237  {
    7238  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7239  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    7240  }
    7241 
    7242  return VK_SUCCESS;
    7243 }
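
A sketch of exercising this from the public API; leaving blockSize and maxBlockCount at zero picks the defaults computed above, and memoryTypeIndex would normally come from vmaFindMemoryTypeIndex():

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex()
    poolCreateInfo.blockSize = 0;                  // 0 = CalcPreferredBlockSize() default
    poolCreateInfo.maxBlockCount = 0;              // 0 = unlimited (SIZE_MAX)

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);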
    7244 
    7245 void VmaAllocator_T::DestroyPool(VmaPool pool)
    7246 {
    7247  // Remove from m_Pools.
    7248  {
    7249  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7250  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    7251  VMA_ASSERT(success && "Pool not found in Allocator.");
    7252  }
    7253 
    7254  vma_delete(this, pool);
    7255 }
    7256 
    7257 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    7258 {
    7259  pool->m_BlockVector.GetPoolStats(pPoolStats);
    7260 }
    7261 
    7262 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    7263 {
    7264  m_CurrentFrameIndex.store(frameIndex);
    7265 }
    7266 
    7267 void VmaAllocator_T::MakePoolAllocationsLost(
    7268  VmaPool hPool,
    7269  size_t* pLostAllocationCount)
    7270 {
    7271  hPool->m_BlockVector.MakePoolAllocationsLost(
    7272  m_CurrentFrameIndex.load(),
    7273  pLostAllocationCount);
    7274 }
    7275 
    7276 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    7277 {
    7278  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
    7279  (*pAllocation)->InitLost();
    7280 }
    7281 
    7282 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    7283 {
    7284  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    7285 
    7286  VkResult res;
    7287  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7288  {
    7289  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7290  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    7291  {
    7292  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7293  if(res == VK_SUCCESS)
    7294  {
    7295  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    7296  }
    7297  }
    7298  else
    7299  {
    7300  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7301  }
    7302  }
    7303  else
    7304  {
    7305  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7306  }
    7307 
    7308  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    7309  {
    7310  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    7311  }
    7312 
    7313  return res;
    7314 }
    7315 
    7316 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    7317 {
    7318  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    7319  {
    7320  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    7321  }
    7322 
    7323  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    7324 
    7325  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    7326  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7327  {
    7328  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7329  m_HeapSizeLimit[heapIndex] += size;
    7330  }
    7331 }
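
The m_HeapSizeLimit accounting in these two functions is driven by VmaAllocatorCreateInfo::pHeapSizeLimit. A sketch of capping heap 0 at allocator creation; the 256 MiB figure is arbitrary:

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE;        // VK_WHOLE_SIZE = no limit on this heap
    heapLimits[0] = 256ull * 1024 * 1024;     // limit heap 0 to 256 MiB

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice; // valid VkPhysicalDevice
    allocatorInfo.device = device;                 // valid VkDevice
    allocatorInfo.pHeapSizeLimit = heapLimits;

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);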
    7332 
    7333 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    7334 {
    7335  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    7336 
    7337  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    7338  {
    7339  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7340  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][allocation->GetBlockVectorType()];
    7341  VMA_ASSERT(pDedicatedAllocations);
    7342  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    7343  VMA_ASSERT(success);
    7344  }
    7345 
    7346  VkDeviceMemory hMemory = allocation->GetMemory();
    7347 
    7348  if(allocation->GetMappedData() != VMA_NULL)
    7349  {
    7350  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    7351  }
    7352 
    7353  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    7354 
    7355  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    7356 }
    7357 
    7358 #if VMA_STATS_STRING_ENABLED
    7359 
    7360 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    7361 {
    7362  bool dedicatedAllocationsStarted = false;
    7363  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7364  {
    7365  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7366  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    7367  {
    7368  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
    7369  VMA_ASSERT(pDedicatedAllocVector);
    7370  if(pDedicatedAllocVector->empty() == false)
    7371  {
    7372  if(dedicatedAllocationsStarted == false)
    7373  {
    7374  dedicatedAllocationsStarted = true;
    7375  json.WriteString("DedicatedAllocations");
    7376  json.BeginObject();
    7377  }
    7378 
    7379  json.BeginString("Type ");
    7380  json.ContinueString(memTypeIndex);
    7381  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    7382  {
    7383  json.ContinueString(" Mapped");
    7384  }
    7385  json.EndString();
    7386 
    7387  json.BeginArray();
    7388 
    7389  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    7390  {
    7391  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    7392  json.BeginObject(true);
    7393 
    7394  json.WriteString("Size");
    7395  json.WriteNumber(hAlloc->GetSize());
    7396 
    7397  json.WriteString("Type");
    7398  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
    7399 
    7400  json.EndObject();
    7401  }
    7402 
    7403  json.EndArray();
    7404  }
    7405  }
    7406  }
    7407  if(dedicatedAllocationsStarted)
    7408  {
    7409  json.EndObject();
    7410  }
    7411 
    7412  {
    7413  bool allocationsStarted = false;
    7414  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7415  {
    7416  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    7417  {
    7418  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
    7419  {
    7420  if(allocationsStarted == false)
    7421  {
    7422  allocationsStarted = true;
    7423  json.WriteString("DefaultPools");
    7424  json.BeginObject();
    7425  }
    7426 
    7427  json.BeginString("Type ");
    7428  json.ContinueString(memTypeIndex);
    7429  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    7430  {
    7431  json.ContinueString(" Mapped");
    7432  }
    7433  json.EndString();
    7434 
    7435  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
    7436  }
    7437  }
    7438  }
    7439  if(allocationsStarted)
    7440  {
    7441  json.EndObject();
    7442  }
    7443  }
    7444 
    7445  {
    7446  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7447  const size_t poolCount = m_Pools.size();
    7448  if(poolCount > 0)
    7449  {
    7450  json.WriteString("Pools");
    7451  json.BeginArray();
    7452  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    7453  {
    7454  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    7455  }
    7456  json.EndArray();
    7457  }
    7458  }
    7459 }
    7460 
    7461 #endif // #if VMA_STATS_STRING_ENABLED
    7462 
    7463 static VkResult AllocateMemoryForImage(
    7464  VmaAllocator allocator,
    7465  VkImage image,
    7466  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7467  VmaSuballocationType suballocType,
    7468  VmaAllocation* pAllocation)
    7469 {
    7470  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    7471 
    7472  VkMemoryRequirements vkMemReq = {};
    7473  bool requiresDedicatedAllocation = false;
    7474  bool prefersDedicatedAllocation = false;
    7475  allocator->GetImageMemoryRequirements(image, vkMemReq,
    7476  requiresDedicatedAllocation, prefersDedicatedAllocation);
    7477 
    7478  return allocator->AllocateMemory(
    7479  vkMemReq,
    7480  requiresDedicatedAllocation,
    7481  prefersDedicatedAllocation,
    7482  VK_NULL_HANDLE, // dedicatedBuffer
    7483  image, // dedicatedImage
    7484  *pAllocationCreateInfo,
    7485  suballocType,
    7486  pAllocation);
    7487 }
    7488 
    7489 ////////////////////////////////////////////////////////////////////////////////
    7490 // Public interface
    7491 
    7492 VkResult vmaCreateAllocator(
    7493  const VmaAllocatorCreateInfo* pCreateInfo,
    7494  VmaAllocator* pAllocator)
    7495 {
    7496  VMA_ASSERT(pCreateInfo && pAllocator);
    7497  VMA_DEBUG_LOG("vmaCreateAllocator");
    7498  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    7499  return VK_SUCCESS;
    7500 }
    7501 
    7502 void vmaDestroyAllocator(
    7503  VmaAllocator allocator)
    7504 {
    7505  if(allocator != VK_NULL_HANDLE)
    7506  {
    7507  VMA_DEBUG_LOG("vmaDestroyAllocator");
    7508  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    7509  vma_delete(&allocationCallbacks, allocator);
    7510  }
    7511 }
    7512 
    7513 void vmaGetPhysicalDeviceProperties(
    7514  VmaAllocator allocator,
    7515  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    7516 {
    7517  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    7518  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    7519 }
    7520 
    7521 void vmaGetMemoryProperties(
    7522  VmaAllocator allocator,
    7523  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    7524 {
    7525  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    7526  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    7527 }
    7528 
    7529 void vmaGetMemoryTypeProperties(
    7530  VmaAllocator allocator,
    7531  uint32_t memoryTypeIndex,
    7532  VkMemoryPropertyFlags* pFlags)
    7533 {
    7534  VMA_ASSERT(allocator && pFlags);
    7535  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    7536  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    7537 }
    7538 
    7539 void vmaSetCurrentFrameIndex(
    7540  VmaAllocator allocator,
    7541  uint32_t frameIndex)
    7542 {
    7543  VMA_ASSERT(allocator);
    7544  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    7545 
    7546  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7547 
    7548  allocator->SetCurrentFrameIndex(frameIndex);
    7549 }
    7550 
    7551 void vmaCalculateStats(
    7552  VmaAllocator allocator,
    7553  VmaStats* pStats)
    7554 {
    7555  VMA_ASSERT(allocator && pStats);
    7556  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7557  allocator->CalculateStats(pStats);
    7558 }
    7559 
    7560 #if VMA_STATS_STRING_ENABLED
    7561 
    7562 void vmaBuildStatsString(
    7563  VmaAllocator allocator,
    7564  char** ppStatsString,
    7565  VkBool32 detailedMap)
    7566 {
    7567  VMA_ASSERT(allocator && ppStatsString);
    7568  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7569 
    7570  VmaStringBuilder sb(allocator);
    7571  {
    7572  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    7573  json.BeginObject();
    7574 
    7575  VmaStats stats;
    7576  allocator->CalculateStats(&stats);
    7577 
    7578  json.WriteString("Total");
    7579  VmaPrintStatInfo(json, stats.total);
    7580 
    7581  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    7582  {
    7583  json.BeginString("Heap ");
    7584  json.ContinueString(heapIndex);
    7585  json.EndString();
    7586  json.BeginObject();
    7587 
    7588  json.WriteString("Size");
    7589  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    7590 
    7591  json.WriteString("Flags");
    7592  json.BeginArray(true);
    7593  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    7594  {
    7595  json.WriteString("DEVICE_LOCAL");
    7596  }
    7597  json.EndArray();
    7598 
    7599  if(stats.memoryHeap[heapIndex].blockCount > 0)
    7600  {
    7601  json.WriteString("Stats");
    7602  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    7603  }
    7604 
    7605  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    7606  {
    7607  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    7608  {
    7609  json.BeginString("Type ");
    7610  json.ContinueString(typeIndex);
    7611  json.EndString();
    7612 
    7613  json.BeginObject();
    7614 
    7615  json.WriteString("Flags");
    7616  json.BeginArray(true);
    7617  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    7618  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    7619  {
    7620  json.WriteString("DEVICE_LOCAL");
    7621  }
    7622  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7623  {
    7624  json.WriteString("HOST_VISIBLE");
    7625  }
    7626  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    7627  {
    7628  json.WriteString("HOST_COHERENT");
    7629  }
    7630  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    7631  {
    7632  json.WriteString("HOST_CACHED");
    7633  }
    7634  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    7635  {
    7636  json.WriteString("LAZILY_ALLOCATED");
    7637  }
    7638  json.EndArray();
    7639 
    7640  if(stats.memoryType[typeIndex].blockCount > 0)
    7641  {
    7642  json.WriteString("Stats");
    7643  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    7644  }
    7645 
    7646  json.EndObject();
    7647  }
    7648  }
    7649 
    7650  json.EndObject();
    7651  }
    7652  if(detailedMap == VK_TRUE)
    7653  {
    7654  allocator->PrintDetailedMap(json);
    7655  }
    7656 
    7657  json.EndObject();
    7658  }
    7659 
    7660  const size_t len = sb.GetLength();
    7661  char* const pChars = vma_new_array(allocator, char, len + 1);
    7662  if(len > 0)
    7663  {
    7664  memcpy(pChars, sb.GetData(), len);
    7665  }
    7666  pChars[len] = '\0';
    7667  *ppStatsString = pChars;
    7668 }
    7669 
    7670 void vmaFreeStatsString(
    7671  VmaAllocator allocator,
    7672  char* pStatsString)
    7673 {
    7674  if(pStatsString != VMA_NULL)
    7675  {
    7676  VMA_ASSERT(allocator);
    7677  size_t len = strlen(pStatsString);
    7678  vma_delete_array(allocator, pStatsString, len + 1);
    7679  }
    7680 }
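
A sketch of the intended build/print/free cycle around the JSON writer above:

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);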
    7681 
    7682 #endif // #if VMA_STATS_STRING_ENABLED
    7683 
    7686 VkResult vmaFindMemoryTypeIndex(
    7687  VmaAllocator allocator,
    7688  uint32_t memoryTypeBits,
    7689  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7690  uint32_t* pMemoryTypeIndex)
    7691 {
    7692  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    7693  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    7694  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    7695 
    7696  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    7697  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    7698  if(preferredFlags == 0)
    7699  {
    7700  preferredFlags = requiredFlags;
    7701  }
    7702  // preferredFlags, if not 0, must be a superset of requiredFlags.
    7703  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
    7704 
    7705  // Convert usage to requiredFlags and preferredFlags.
    7706  switch(pAllocationCreateInfo->usage)
    7707  {
    7708  case VMA_MEMORY_USAGE_UNKNOWN:
    7709  break;
    7710  case VMA_MEMORY_USAGE_GPU_ONLY:
    7711  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7712  break;
    7713  case VMA_MEMORY_USAGE_CPU_ONLY:
    7714  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    7715  break;
    7716  case VMA_MEMORY_USAGE_CPU_TO_GPU:
    7717  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7718  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7719  break;
    7720  case VMA_MEMORY_USAGE_GPU_TO_CPU:
    7721  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7722  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    7723  break;
    7724  default:
    7725  break;
    7726  }
    7727 
    7728  *pMemoryTypeIndex = UINT32_MAX;
    7729  uint32_t minCost = UINT32_MAX;
    7730  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    7731  memTypeIndex < allocator->GetMemoryTypeCount();
    7732  ++memTypeIndex, memTypeBit <<= 1)
    7733  {
    7734  // This memory type is acceptable according to memoryTypeBits bitmask.
    7735  if((memTypeBit & memoryTypeBits) != 0)
    7736  {
    7737  const VkMemoryPropertyFlags currFlags =
    7738  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    7739  // This memory type contains requiredFlags.
    7740  if((requiredFlags & ~currFlags) == 0)
    7741  {
    7742  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    7743  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
    7744  // Remember memory type with lowest cost.
    7745  if(currCost < minCost)
    7746  {
    7747  *pMemoryTypeIndex = memTypeIndex;
    7748  if(currCost == 0)
    7749  {
    7750  return VK_SUCCESS;
    7751  }
    7752  minCost = currCost;
    7753  }
    7754  }
    7755  }
    7756  }
    7757  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    7758 }
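
A sketch of using this cost-based search to pick a memory type, e.g. for a staging buffer; memoryTypeBits would come from vkGetBufferMemoryRequirements() on the real buffer:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // requires HOST_VISIBLE | HOST_COHERENT

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator,
        memReq.memoryTypeBits, // VkMemoryRequirements from vkGetBufferMemoryRequirements()
        &allocCreateInfo,
        &memTypeIndex);
    // VK_ERROR_FEATURE_NOT_PRESENT means no acceptable type satisfies requiredFlags.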
    7759 
    7760 VkResult vmaCreatePool(
    7761  VmaAllocator allocator,
    7762  const VmaPoolCreateInfo* pCreateInfo,
    7763  VmaPool* pPool)
    7764 {
    7765  VMA_ASSERT(allocator && pCreateInfo && pPool);
    7766 
    7767  VMA_DEBUG_LOG("vmaCreatePool");
    7768 
    7769  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7770 
    7771  return allocator->CreatePool(pCreateInfo, pPool);
    7772 }
    7773 
    7774 void vmaDestroyPool(
    7775  VmaAllocator allocator,
    7776  VmaPool pool)
    7777 {
    7778  VMA_ASSERT(allocator && pool);
    7779 
    7780  VMA_DEBUG_LOG("vmaDestroyPool");
    7781 
    7782  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7783 
    7784  allocator->DestroyPool(pool);
    7785 }
    7786 
    7787 void vmaGetPoolStats(
    7788  VmaAllocator allocator,
    7789  VmaPool pool,
    7790  VmaPoolStats* pPoolStats)
    7791 {
    7792  VMA_ASSERT(allocator && pool && pPoolStats);
    7793 
    7794  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7795 
    7796  allocator->GetPoolStats(pool, pPoolStats);
    7797 }
    7798 
    7799 void vmaMakePoolAllocationsLost(
    7800  VmaAllocator allocator,
    7801  VmaPool pool,
    7802  size_t* pLostAllocationCount)
    7803 {
    7804  VMA_ASSERT(allocator && pool);
    7805 
    7806  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7807 
    7808  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    7809 }
    7810 
    7811 VkResult vmaAllocateMemory(
    7812  VmaAllocator allocator,
    7813  const VkMemoryRequirements* pVkMemoryRequirements,
    7814  const VmaAllocationCreateInfo* pCreateInfo,
    7815  VmaAllocation* pAllocation,
    7816  VmaAllocationInfo* pAllocationInfo)
    7817 {
    7818  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    7819 
    7820  VMA_DEBUG_LOG("vmaAllocateMemory");
    7821 
    7822  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7823 
    7824  VkResult result = allocator->AllocateMemory(
    7825  *pVkMemoryRequirements,
    7826  false, // requiresDedicatedAllocation
    7827  false, // prefersDedicatedAllocation
    7828  VK_NULL_HANDLE, // dedicatedBuffer
    7829  VK_NULL_HANDLE, // dedicatedImage
    7830  *pCreateInfo,
    7831  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    7832  pAllocation);
    7833 
    7834  if(pAllocationInfo && result == VK_SUCCESS)
    7835  {
    7836  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7837  }
    7838 
    7839  return result;
    7840 }
    7841 
    7842 VkResult vmaAllocateMemoryForBuffer(
    7843  VmaAllocator allocator,
    7844  VkBuffer buffer,
    7845  const VmaAllocationCreateInfo* pCreateInfo,
    7846  VmaAllocation* pAllocation,
    7847  VmaAllocationInfo* pAllocationInfo)
    7848 {
    7849  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7850 
    7851  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    7852 
    7853  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7854 
    7855  VkMemoryRequirements vkMemReq = {};
    7856  bool requiresDedicatedAllocation = false;
    7857  bool prefersDedicatedAllocation = false;
    7858  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    7859  requiresDedicatedAllocation,
    7860  prefersDedicatedAllocation);
    7861 
    7862  VkResult result = allocator->AllocateMemory(
    7863  vkMemReq,
    7864  requiresDedicatedAllocation,
    7865  prefersDedicatedAllocation,
    7866  buffer, // dedicatedBuffer
    7867  VK_NULL_HANDLE, // dedicatedImage
    7868  *pCreateInfo,
    7869  VMA_SUBALLOCATION_TYPE_BUFFER,
    7870  pAllocation);
    7871 
    7872  if(pAllocationInfo && result == VK_SUCCESS)
    7873  {
    7874  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7875  }
    7876 
    7877  return result;
    7878 }
    7879 
    7880 VkResult vmaAllocateMemoryForImage(
    7881  VmaAllocator allocator,
    7882  VkImage image,
    7883  const VmaAllocationCreateInfo* pCreateInfo,
    7884  VmaAllocation* pAllocation,
    7885  VmaAllocationInfo* pAllocationInfo)
    7886 {
    7887  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7888 
    7889  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    7890 
    7891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7892 
    7893  VkResult result = AllocateMemoryForImage(
    7894  allocator,
    7895  image,
    7896  pCreateInfo,
    7897  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    7898  pAllocation);
    7899 
    7900  if(pAllocationInfo && result == VK_SUCCESS)
    7901  {
    7902  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7903  }
    7904 
    7905  return result;
    7906 }
    7907 
    7908 void vmaFreeMemory(
    7909  VmaAllocator allocator,
    7910  VmaAllocation allocation)
    7911 {
    7912  VMA_ASSERT(allocator && allocation);
    7913 
    7914  VMA_DEBUG_LOG("vmaFreeMemory");
    7915 
    7916  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7917 
    7918  allocator->FreeMemory(allocation);
    7919 }
    7920 
    7921 void vmaGetAllocationInfo(
    7922  VmaAllocator allocator,
    7923  VmaAllocation allocation,
    7924  VmaAllocationInfo* pAllocationInfo)
    7925 {
    7926  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    7927 
    7928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7929 
    7930  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    7931 }
    7932 
    7933 void vmaSetAllocationUserData(
    7934  VmaAllocator allocator,
    7935  VmaAllocation allocation,
    7936  void* pUserData)
    7937 {
    7938  VMA_ASSERT(allocator && allocation);
    7939 
    7940  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7941 
    7942  allocation->SetUserData(pUserData);
    7943 }
    7944 
    7945 void vmaCreateLostAllocation(
    7946  VmaAllocator allocator,
    7947  VmaAllocation* pAllocation)
    7948 {
    7949  VMA_ASSERT(allocator && pAllocation);
    7950 
    7951  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    7952 
    7953  allocator->CreateLostAllocation(pAllocation);
    7954 }
    7955 
    7956 VkResult vmaMapMemory(
    7957  VmaAllocator allocator,
    7958  VmaAllocation allocation,
    7959  void** ppData)
    7960 {
    7961  VMA_ASSERT(allocator && allocation && ppData);
    7962 
    7963  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7964 
    7965  return (*allocator->GetVulkanFunctions().vkMapMemory)(
    7966  allocator->m_hDevice,
    7967  allocation->GetMemory(),
    7968  allocation->GetOffset(),
    7969  allocation->GetSize(),
    7970  0,
    7971  ppData);
    7972 }
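
As the implementation shows, this version forwards straight to vkMapMemory() at the allocation's offset and size, so calls must be paired with vmaUnmapMemory() and must not target a VkDeviceMemory that is already mapped (e.g. a persistently mapped block). A minimal upload sketch:

    void* pMapped = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &pMapped);
    if(res == VK_SUCCESS)
    {
        memcpy(pMapped, srcData, (size_t)srcSize); // srcData/srcSize provided by the app
        vmaUnmapMemory(allocator, allocation);
    }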
    7973 
    7974 void vmaUnmapMemory(
    7975  VmaAllocator allocator,
    7976  VmaAllocation allocation)
    7977 {
    7978  VMA_ASSERT(allocator && allocation);
    7979 
    7980  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7981 
    7982  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
    7983 }
    7984 
    7985 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
    7986 {
    7987  VMA_ASSERT(allocator);
    7988 
    7989  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7990 
    7991  allocator->UnmapPersistentlyMappedMemory();
    7992 }
    7993 
    7994 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
    7995 {
    7996  VMA_ASSERT(allocator);
    7997 
    7998  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7999 
    8000  return allocator->MapPersistentlyMappedMemory();
    8001 }
    8002 
    8003 VkResult vmaDefragment(
    8004  VmaAllocator allocator,
    8005  VmaAllocation* pAllocations,
    8006  size_t allocationCount,
    8007  VkBool32* pAllocationsChanged,
    8008  const VmaDefragmentationInfo *pDefragmentationInfo,
    8009  VmaDefragmentationStats* pDefragmentationStats)
    8010 {
    8011  VMA_ASSERT(allocator && pAllocations);
    8012 
    8013  VMA_DEBUG_LOG("vmaDefragment");
    8014 
    8015  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8016 
    8017  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    8018 }
    8019 
    8020 VkResult vmaCreateBuffer(
    8021  VmaAllocator allocator,
    8022  const VkBufferCreateInfo* pBufferCreateInfo,
    8023  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8024  VkBuffer* pBuffer,
    8025  VmaAllocation* pAllocation,
    8026  VmaAllocationInfo* pAllocationInfo)
    8027 {
    8028  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    8029 
    8030  VMA_DEBUG_LOG("vmaCreateBuffer");
    8031 
    8032  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8033 
    8034  *pBuffer = VK_NULL_HANDLE;
    8035  *pAllocation = VK_NULL_HANDLE;
    8036 
    8037  // 1. Create VkBuffer.
    8038  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    8039  allocator->m_hDevice,
    8040  pBufferCreateInfo,
    8041  allocator->GetAllocationCallbacks(),
    8042  pBuffer);
    8043  if(res >= 0)
    8044  {
    8045  // 2. vkGetBufferMemoryRequirements.
    8046  VkMemoryRequirements vkMemReq = {};
    8047  bool requiresDedicatedAllocation = false;
    8048  bool prefersDedicatedAllocation = false;
    8049  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    8050  requiresDedicatedAllocation, prefersDedicatedAllocation);
    8051 
    8052  // 3. Allocate memory using allocator.
    8053  res = allocator->AllocateMemory(
    8054  vkMemReq,
    8055  requiresDedicatedAllocation,
    8056  prefersDedicatedAllocation,
    8057  *pBuffer, // dedicatedBuffer
    8058  VK_NULL_HANDLE, // dedicatedImage
    8059  *pAllocationCreateInfo,
    8060  VMA_SUBALLOCATION_TYPE_BUFFER,
    8061  pAllocation);
    8062  if(res >= 0)
    8063  {
    8064  // 3. Bind buffer with memory.
    8065  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
    8066  allocator->m_hDevice,
    8067  *pBuffer,
    8068  (*pAllocation)->GetMemory(),
    8069  (*pAllocation)->GetOffset());
    8070  if(res >= 0)
    8071  {
    8072  // All steps succeeded.
    8073  if(pAllocationInfo != VMA_NULL)
    8074  {
    8075  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8076  }
    8077  return VK_SUCCESS;
    8078  }
    8079  allocator->FreeMemory(*pAllocation);
    8080  *pAllocation = VK_NULL_HANDLE;
    8081  return res;
    8082  }
    8083  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    8084  *pBuffer = VK_NULL_HANDLE;
    8085  return res;
    8086  }
    8087  return res;
    8088 }
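
A sketch of the whole create-allocate-bind sequence from the caller's side, using the persistently mapped flag this release renames to VMA_ALLOCATION_CREATE_MAPPED_BIT:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; // keep it mapped

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, &allocInfo);
    // On success, allocInfo.pMappedData points at this allocation's mapped bytes.
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);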
    8089 
    8090 void vmaDestroyBuffer(
    8091  VmaAllocator allocator,
    8092  VkBuffer buffer,
    8093  VmaAllocation allocation)
    8094 {
    8095  if(buffer != VK_NULL_HANDLE)
    8096  {
    8097  VMA_ASSERT(allocator);
    8098 
    8099  VMA_DEBUG_LOG("vmaDestroyBuffer");
    8100 
    8101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8102 
    8103  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    8104 
    8105  allocator->FreeMemory(allocation);
    8106  }
    8107 }
    8108 
    8109 VkResult vmaCreateImage(
    8110  VmaAllocator allocator,
    8111  const VkImageCreateInfo* pImageCreateInfo,
    8112  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8113  VkImage* pImage,
    8114  VmaAllocation* pAllocation,
    8115  VmaAllocationInfo* pAllocationInfo)
    8116 {
    8117  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    8118 
    8119  VMA_DEBUG_LOG("vmaCreateImage");
    8120 
    8121  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8122 
    8123  *pImage = VK_NULL_HANDLE;
    8124  *pAllocation = VK_NULL_HANDLE;
    8125 
    8126  // 1. Create VkImage.
    8127  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    8128  allocator->m_hDevice,
    8129  pImageCreateInfo,
    8130  allocator->GetAllocationCallbacks(),
    8131  pImage);
    8132  if(res >= 0)
    8133  {
    8134  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    8135  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    8136  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    8137 
    8138  // 2. Allocate memory using allocator.
    8139  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    8140  if(res >= 0)
    8141  {
    8142  // 3. Bind image with memory.
    8143  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
    8144  allocator->m_hDevice,
    8145  *pImage,
    8146  (*pAllocation)->GetMemory(),
    8147  (*pAllocation)->GetOffset());
    8148  if(res >= 0)
    8149  {
    8150  // All steps succeeded.
    8151  if(pAllocationInfo != VMA_NULL)
    8152  {
    8153  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8154  }
    8155  return VK_SUCCESS;
    8156  }
    8157  allocator->FreeMemory(*pAllocation);
    8158  *pAllocation = VK_NULL_HANDLE;
    8159  return res;
    8160  }
    8161  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    8162  *pImage = VK_NULL_HANDLE;
    8163  return res;
    8164  }
    8165  return res;
    8166 }
    8167 
    8168 void vmaDestroyImage(
    8169  VmaAllocator allocator,
    8170  VkImage image,
    8171  VmaAllocation allocation)
    8172 {
    8173  if(image != VK_NULL_HANDLE)
    8174  {
    8175  VMA_ASSERT(allocator);
    8176 
    8177  VMA_DEBUG_LOG("vmaDestroyImage");
    8178 
    8179  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8180 
    8181  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    8182 
    8183  allocator->FreeMemory(allocation);
    8184  }
    8185 }
    8186 
    8187 #endif // #ifdef VMA_IMPLEMENTATION
    1 //
    2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    516 #include <vulkan/vulkan.h>
    517 
    518 VK_DEFINE_HANDLE(VmaAllocator)
    519 
    520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    522  VmaAllocator allocator,
    523  uint32_t memoryType,
    524  VkDeviceMemory memory,
    525  VkDeviceSize size);
    527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    528  VmaAllocator allocator,
    529  uint32_t memoryType,
    530  VkDeviceMemory memory,
    531  VkDeviceSize size);
    532 
    540 typedef struct VmaDeviceMemoryCallbacks {
    542  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    544  PFN_vmaFreeDeviceMemoryFunction pfnFree;
    545 } VmaDeviceMemoryCallbacks;
    546 
    582 
    585 typedef VkFlags VmaAllocatorCreateFlags;
    586 
    591 typedef struct VmaVulkanFunctions {
    592  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    593  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    594  PFN_vkAllocateMemory vkAllocateMemory;
    595  PFN_vkFreeMemory vkFreeMemory;
    596  PFN_vkMapMemory vkMapMemory;
    597  PFN_vkUnmapMemory vkUnmapMemory;
    598  PFN_vkBindBufferMemory vkBindBufferMemory;
    599  PFN_vkBindImageMemory vkBindImageMemory;
    600  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    601  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    602  PFN_vkCreateBuffer vkCreateBuffer;
    603  PFN_vkDestroyBuffer vkDestroyBuffer;
    604  PFN_vkCreateImage vkCreateImage;
    605  PFN_vkDestroyImage vkDestroyImage;
    606  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    607  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    608 } VmaVulkanFunctions;
    609 
    611 typedef struct VmaAllocatorCreateInfo
    612 {
    614  VmaAllocatorCreateFlags flags;
    616 
    617  VkPhysicalDevice physicalDevice;
    619 
    620  VkDevice device;
    622 
    625 
    628 
    629  const VkAllocationCallbacks* pAllocationCallbacks;
    631 
    646  uint32_t frameInUseCount;
    664  const VkDeviceSize* pHeapSizeLimit;
    677 } VmaAllocatorCreateInfo;
    678 
    680 VkResult vmaCreateAllocator(
    681  const VmaAllocatorCreateInfo* pCreateInfo,
    682  VmaAllocator* pAllocator);
    683 
    685 void vmaDestroyAllocator(
    686  VmaAllocator allocator);
    687 
    692 void vmaGetPhysicalDeviceProperties(
    693  VmaAllocator allocator,
    694  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    695 
    700 void vmaGetMemoryProperties(
    701  VmaAllocator allocator,
    702  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    703 
    710 void vmaGetMemoryTypeProperties(
    711  VmaAllocator allocator,
    712  uint32_t memoryTypeIndex,
    713  VkMemoryPropertyFlags* pFlags);
    714 
    723 void vmaSetCurrentFrameIndex(
    724  VmaAllocator allocator,
    725  uint32_t frameIndex);
    726 
    729 typedef struct VmaStatInfo
    730 {
    732  uint32_t blockCount;
    734  uint32_t allocationCount;
    736  uint32_t unusedRangeCount;
    738  VkDeviceSize usedBytes;
    740  VkDeviceSize unusedBytes;
    741  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    742  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    743 } VmaStatInfo;
    744 
    746 typedef struct VmaStats
    747 {
    748  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    749  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    750  VmaStatInfo total;
    751 } VmaStats;
    752 
    754 void vmaCalculateStats(
    755  VmaAllocator allocator,
    756  VmaStats* pStats);
    757 
    758 #define VMA_STATS_STRING_ENABLED 1
    759 
    760 #if VMA_STATS_STRING_ENABLED
    761 
    763 
    765 void vmaBuildStatsString(
    766  VmaAllocator allocator,
    767  char** ppStatsString,
    768  VkBool32 detailedMap);
    769 
    770 void vmaFreeStatsString(
    771  VmaAllocator allocator,
    772  char* pStatsString);
    773 
    774 #endif // #if VMA_STATS_STRING_ENABLED
    775 
    776 VK_DEFINE_HANDLE(VmaPool)
    777 
    778 typedef enum VmaMemoryUsage
    779 {
    785 
    788 
    791 
    795 
    810 
    854 
    857 typedef VkFlags VmaAllocationCreateFlags;
    858 
    859 typedef struct VmaAllocationCreateInfo
    860 {
    862  VmaAllocationCreateFlags flags;
    873  VkMemoryPropertyFlags requiredFlags;
    879  VkMemoryPropertyFlags preferredFlags;
    881  void* pUserData;
    886  VmaPool pool;
    887 } VmaAllocationCreateInfo;
    888 
    903 VkResult vmaFindMemoryTypeIndex(
    904  VmaAllocator allocator,
    905  uint32_t memoryTypeBits,
    906  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    907  uint32_t* pMemoryTypeIndex);
    908 
    910 typedef enum VmaPoolCreateFlagBits {
    928 } VmaPoolCreateFlagBits;
    929 
    932 typedef VkFlags VmaPoolCreateFlags;
    933 
    936 typedef struct VmaPoolCreateInfo {
    939  uint32_t memoryTypeIndex;
    942  VmaPoolCreateFlags flags;
    947  VkDeviceSize blockSize;
    974  uint32_t frameInUseCount;
    975 } VmaPoolCreateInfo;
    976 
    979 typedef struct VmaPoolStats {
    982  VkDeviceSize size;
    985  VkDeviceSize unusedSize;
    998  VkDeviceSize unusedRangeSizeMax;
    999 } VmaPoolStats;
    1000 
    1007 VkResult vmaCreatePool(
    1008  VmaAllocator allocator,
    1009  const VmaPoolCreateInfo* pCreateInfo,
    1010  VmaPool* pPool);
    1011 
    1014 void vmaDestroyPool(
    1015  VmaAllocator allocator,
    1016  VmaPool pool);
    1017 
    1024 void vmaGetPoolStats(
    1025  VmaAllocator allocator,
    1026  VmaPool pool,
    1027  VmaPoolStats* pPoolStats);
    1028 
    1035 void vmaMakePoolAllocationsLost(
    1036  VmaAllocator allocator,
    1037  VmaPool pool,
    1038  size_t* pLostAllocationCount);
    1039 
    1040 VK_DEFINE_HANDLE(VmaAllocation)
    1041 
    1042 
    1044 typedef struct VmaAllocationInfo {
    1049  uint32_t memoryType;
    1058  VkDeviceMemory deviceMemory;
    1063  VkDeviceSize offset;
    1068  VkDeviceSize size;
    1075  void* pMappedData;
    1082  void* pUserData;
    1083 } VmaAllocationInfo;
    1084 
    1095 VkResult vmaAllocateMemory(
    1096  VmaAllocator allocator,
    1097  const VkMemoryRequirements* pVkMemoryRequirements,
    1098  const VmaAllocationCreateInfo* pCreateInfo,
    1099  VmaAllocation* pAllocation,
    1100  VmaAllocationInfo* pAllocationInfo);
    1101 
    1108 VkResult vmaAllocateMemoryForBuffer(
    1109  VmaAllocator allocator,
    1110  VkBuffer buffer,
    1111  const VmaAllocationCreateInfo* pCreateInfo,
    1112  VmaAllocation* pAllocation,
    1113  VmaAllocationInfo* pAllocationInfo);
    1114 
    1116 VkResult vmaAllocateMemoryForImage(
    1117  VmaAllocator allocator,
    1118  VkImage image,
    1119  const VmaAllocationCreateInfo* pCreateInfo,
    1120  VmaAllocation* pAllocation,
    1121  VmaAllocationInfo* pAllocationInfo);
    1122 
    1124 void vmaFreeMemory(
    1125  VmaAllocator allocator,
    1126  VmaAllocation allocation);
    1127 
    1129 void vmaGetAllocationInfo(
    1130  VmaAllocator allocator,
    1131  VmaAllocation allocation,
    1132  VmaAllocationInfo* pAllocationInfo);
    1133 
    1135 void vmaSetAllocationUserData(
    1136  VmaAllocator allocator,
    1137  VmaAllocation allocation,
    1138  void* pUserData);
    1139 
    1150 void vmaCreateLostAllocation(
    1151  VmaAllocator allocator,
    1152  VmaAllocation* pAllocation);
    1153 
    1188 VkResult vmaMapMemory(
    1189  VmaAllocator allocator,
    1190  VmaAllocation allocation,
    1191  void** ppData);
    1192 
    1197 void vmaUnmapMemory(
    1198  VmaAllocator allocator,
    1199  VmaAllocation allocation);
    1200 
    1202 typedef struct VmaDefragmentationInfo {
    1207  VkDeviceSize maxBytesToMove;
    1212  uint32_t maxAllocationsToMove;
    1213 } VmaDefragmentationInfo;
    1214 
    1216 typedef struct VmaDefragmentationStats {
    1218  VkDeviceSize bytesMoved;
    1220  VkDeviceSize bytesFreed;
    1222  uint32_t allocationsMoved;
    1224  uint32_t deviceMemoryBlocksFreed;
    1225 } VmaDefragmentationStats;
    1226 
    1303 VkResult vmaDefragment(
    1304  VmaAllocator allocator,
    1305  VmaAllocation* pAllocations,
    1306  size_t allocationCount,
    1307  VkBool32* pAllocationsChanged,
    1308  const VmaDefragmentationInfo *pDefragmentationInfo,
    1309  VmaDefragmentationStats* pDefragmentationStats);
    1310 
    1337 VkResult vmaCreateBuffer(
    1338  VmaAllocator allocator,
    1339  const VkBufferCreateInfo* pBufferCreateInfo,
    1340  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1341  VkBuffer* pBuffer,
    1342  VmaAllocation* pAllocation,
    1343  VmaAllocationInfo* pAllocationInfo);
    1344 
    1356 void vmaDestroyBuffer(
    1357  VmaAllocator allocator,
    1358  VkBuffer buffer,
    1359  VmaAllocation allocation);
    1360 
    1362 VkResult vmaCreateImage(
    1363  VmaAllocator allocator,
    1364  const VkImageCreateInfo* pImageCreateInfo,
    1365  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1366  VkImage* pImage,
    1367  VmaAllocation* pAllocation,
    1368  VmaAllocationInfo* pAllocationInfo);
    1369 
    1381 void vmaDestroyImage(
    1382  VmaAllocator allocator,
    1383  VkImage image,
    1384  VmaAllocation allocation);
    1385 
    1386 #ifdef __cplusplus
    1387 }
    1388 #endif
    1389 
    1390 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    1391 
    1392 // For Visual Studio IntelliSense.
    1393 #ifdef __INTELLISENSE__
    1394 #define VMA_IMPLEMENTATION
    1395 #endif
    1396 
    1397 #ifdef VMA_IMPLEMENTATION
    1398 #undef VMA_IMPLEMENTATION
    1399 
    1400 #include <cstdint>
    1401 #include <cstdlib>
    1402 #include <cstring>
    1403 
    1404 /*******************************************************************************
    1405 CONFIGURATION SECTION
    1406 
    1407 Define some of these macros before each #include of this header or change them
    1408 here if you need behavior other than the default, depending on your environment.
    1409 */
    1410 
    1411 /*
    1412 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    1413 internally, like:
    1414 
    1415  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    1416 
    1417 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    1418 VmaAllocatorCreateInfo::pVulkanFunctions.
    1419 */
    1420 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
    1421 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    1422 #endif
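
A sketch of the alternative (VMA_STATIC_VULKAN_FUNCTIONS defined to 0): filling VmaVulkanFunctions by hand before allocator creation. Only the core entry points are shown; the two *2KHR members are presumably only needed when the VK_KHR_dedicated_allocation extension is in use (an assumption here):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    vulkanFunctions.vkMapMemory = vkMapMemory;
    vulkanFunctions.vkUnmapMemory = vkUnmapMemory;
    vulkanFunctions.vkBindBufferMemory = vkBindBufferMemory;
    vulkanFunctions.vkBindImageMemory = vkBindImageMemory;
    vulkanFunctions.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
    vulkanFunctions.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
    vulkanFunctions.vkCreateBuffer = vkCreateBuffer;
    vulkanFunctions.vkDestroyBuffer = vkDestroyBuffer;
    vulkanFunctions.vkCreateImage = vkCreateImage;
    vulkanFunctions.vkDestroyImage = vkDestroyImage;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions; // see VmaAllocatorCreateInfo above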
    1423 
    1424 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    1425 //#define VMA_USE_STL_CONTAINERS 1
    1426 
    1427 /* Set this macro to 1 to make the library include and use STL containers:
    1428 std::pair, std::vector, std::list, std::unordered_map.
    1429 
    1430 Set it to 0 or leave it undefined to make the library use its own implementation of
    1431 the containers.
    1432 */
    1433 #if VMA_USE_STL_CONTAINERS
    1434  #define VMA_USE_STL_VECTOR 1
    1435  #define VMA_USE_STL_UNORDERED_MAP 1
    1436  #define VMA_USE_STL_LIST 1
    1437 #endif
    1438 
    1439 #if VMA_USE_STL_VECTOR
    1440  #include <vector>
    1441 #endif
    1442 
    1443 #if VMA_USE_STL_UNORDERED_MAP
    1444  #include <unordered_map>
    1445 #endif
    1446 
    1447 #if VMA_USE_STL_LIST
    1448  #include <list>
    1449 #endif
    1450 
    1451 /*
    1452 Following headers are used in this CONFIGURATION section only, so feel free to
    1453 remove them if not needed.
    1454 */
    1455 #include <cassert> // for assert
    1456 #include <algorithm> // for min, max
    1457 #include <mutex> // for std::mutex
    1458 #include <atomic> // for std::atomic
    1459 
    1460 #if !defined(_WIN32)
    1461  #include <malloc.h> // for aligned_alloc()
    1462 #endif
    1463 
    1464 // Normal assert to check for programmer's errors, especially in Debug configuration.
    1465 #ifndef VMA_ASSERT
    1466  #ifdef _DEBUG
    1467  #define VMA_ASSERT(expr) assert(expr)
    1468  #else
    1469  #define VMA_ASSERT(expr)
    1470  #endif
    1471 #endif
    1472 
    1473 // Assert that will be called very often, like inside data structures e.g. operator[].
    1474 // Making it non-empty can make program slow.
    1475 #ifndef VMA_HEAVY_ASSERT
    1476  #ifdef _DEBUG
    1477  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    1478  #else
    1479  #define VMA_HEAVY_ASSERT(expr)
    1480  #endif
    1481 #endif
    1482 
    1483 #ifndef VMA_NULL
    1484  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    1485  #define VMA_NULL nullptr
    1486 #endif
    1487 
    1488 #ifndef VMA_ALIGN_OF
    1489  #define VMA_ALIGN_OF(type) (__alignof(type))
    1490 #endif
    1491 
    1492 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    1493  #if defined(_WIN32)
    1494  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    1495  #else
    1496  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    1497  #endif
    1498 #endif
    1499 
    1500 #ifndef VMA_SYSTEM_FREE
    1501  #if defined(_WIN32)
    1502  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    1503  #else
    1504  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    1505  #endif
    1506 #endif
    1507 
    1508 #ifndef VMA_MIN
    1509  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    1510 #endif
    1511 
    1512 #ifndef VMA_MAX
    1513  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    1514 #endif
    1515 
    1516 #ifndef VMA_SWAP
    1517  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    1518 #endif
    1519 
    1520 #ifndef VMA_SORT
    1521  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    1522 #endif
    1523 
    1524 #ifndef VMA_DEBUG_LOG
    1525  #define VMA_DEBUG_LOG(format, ...)
    1526  /*
    1527  #define VMA_DEBUG_LOG(format, ...) do { \
    1528  printf(format, __VA_ARGS__); \
    1529  printf("\n"); \
    1530  } while(false)
    1531  */
    1532 #endif
    1533 
    1534 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    1535 #if VMA_STATS_STRING_ENABLED
    1536  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    1537  {
    1538  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    1539  }
    1540  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    1541  {
    1542  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    1543  }
    1544  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    1545  {
    1546  snprintf(outStr, strLen, "%p", ptr);
    1547  }
    1548 #endif
    1549 
    1550 #ifndef VMA_MUTEX
    1551  class VmaMutex
    1552  {
    1553  public:
    1554  VmaMutex() { }
    1555  ~VmaMutex() { }
    1556  void Lock() { m_Mutex.lock(); }
    1557  void Unlock() { m_Mutex.unlock(); }
    1558  private:
    1559  std::mutex m_Mutex;
    1560  };
    1561  #define VMA_MUTEX VmaMutex
    1562 #endif
    1563 
    1564 /*
    1565 If providing your own implementation, you need to implement a subset of std::atomic:
    1566 
    1567 - Constructor(uint32_t desired)
    1568 - uint32_t load() const
    1569 - void store(uint32_t desired)
    1570 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    1571 */
    1572 #ifndef VMA_ATOMIC_UINT32
    1573  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    1574 #endif
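
A sketch of a conforming replacement implementing exactly the subset listed above; this one simply wraps std::atomic, but a lock-based version exposing the same four operations would work equally:

    #include <atomic>
    #include <cstdint>

    class MyAtomicUint32
    {
    public:
        MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value.load(); }
        void store(uint32_t desired) { m_Value.store(desired); }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
            { return m_Value.compare_exchange_weak(expected, desired); }
    private:
        std::atomic<uint32_t> m_Value;
    };
    #define VMA_ATOMIC_UINT32 MyAtomicUint32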
    1575 
    1576 #ifndef VMA_BEST_FIT
    1577 
    1589  #define VMA_BEST_FIT (1)
    1590 #endif
    1591 
    1592 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    1593  /**
    1594  Every allocation will have its own dedicated memory block.
    1595  Define to 1 for debugging purposes only.
    1596  */
    1597  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    1598 #endif
    1599 
    1600 #ifndef VMA_DEBUG_ALIGNMENT
    1601  /**
    1602  Minimum alignment of all suballocations, in bytes.
    1603  Set to more than 1 for debugging purposes only. Must be power of two.
    1604  */
    1605  #define VMA_DEBUG_ALIGNMENT (1)
    1606 #endif
    1607 
    1608 #ifndef VMA_DEBUG_MARGIN
    1609  /**
    1610  Minimum margin between suballocations, in bytes.
    1611  Set nonzero for debugging purposes only.
    1612  */
    1613  #define VMA_DEBUG_MARGIN (0)
    1614 #endif
    1615 
    1616 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    1617  /**
    1618  Set this to 1 for debugging purposes only, to enable single mutex protecting all
    1619  entry calls to the library. Can be useful for debugging multithreading issues.
    1620  */
    1621  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    1622 #endif
    1623 
    1624 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    1625  /**
    1626  Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
    1627  Set to more than 1 for debugging purposes only. Must be power of two.
    1628  */
    1629  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    1630 #endif
    1631 
    1632 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    1633  /// Maximum size of a memory heap in Vulkan to consider it "small".
    1634  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
    1635 #endif
    1636 
    1637 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    1638  /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
    1639  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
    1640 #endif
    1641 
    1642 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    1643  /// Default size of a block allocated as single VkDeviceMemory from a "small" heap.
    1644  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
    1645 #endif
    1646 
    1647 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    1648 
    1649 /*******************************************************************************
    1650 END OF CONFIGURATION
    1651 */
    1652 
    1653 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    1654  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    1655 
    1656 // Returns number of bits set to 1 in (v).
    1657 static inline uint32_t CountBitsSet(uint32_t v)
    1658 {
    1659  uint32_t c = v - ((v >> 1) & 0x55555555);
    1660  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    1661  c = ((c >> 4) + c) & 0x0F0F0F0F;
    1662  c = ((c >> 8) + c) & 0x00FF00FF;
    1663  c = ((c >> 16) + c) & 0x0000FFFF;
    1664  return c;
    1665 }
    1666 
    1667 // Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    1668 // Use types like uint32_t, uint64_t as T.
    1669 template <typename T>
    1670 static inline T VmaAlignUp(T val, T align)
    1671 {
    1672  return (val + align - 1) / align * align;
    1673 }
    1674 
    1675 // Division with mathematical rounding to nearest integer.
    1676 template <typename T>
    1677 inline T VmaRoundDiv(T x, T y)
    1678 {
    1679  return (x + (y / (T)2)) / y;
    1680 }
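// Worked examples: VmaRoundDiv(10, 3) = (10 + 1) / 3 = 3 and
// VmaRoundDiv(11, 3) = (11 + 1) / 3 = 4. Intended for non-negative integers.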
    1681 
    1682 #ifndef VMA_SORT
    1683 
    1684 template<typename Iterator, typename Compare>
    1685 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    1686 {
    1687  Iterator centerValue = end; --centerValue;
    1688  Iterator insertIndex = beg;
    1689  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    1690  {
    1691  if(cmp(*memTypeIndex, *centerValue))
    1692  {
    1693  if(insertIndex != memTypeIndex)
    1694  {
    1695  VMA_SWAP(*memTypeIndex, *insertIndex);
    1696  }
    1697  ++insertIndex;
    1698  }
    1699  }
    1700  if(insertIndex != centerValue)
    1701  {
    1702  VMA_SWAP(*insertIndex, *centerValue);
    1703  }
    1704  return insertIndex;
    1705 }
    1706 
    1707 template<typename Iterator, typename Compare>
    1708 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    1709 {
    1710  if(beg < end)
    1711  {
    1712  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    1713  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    1714  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    1715  }
    1716 }
    1717 
    1718 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    1719 
    1720 #endif // #ifndef VMA_SORT
    1721 
    1722 /*
    1723 Returns true if two memory blocks occupy overlapping pages.
    1724 ResourceA must be at a lower memory offset than ResourceB.
    1725 
    1726 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    1727 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    1728 */
    1729 static inline bool VmaBlocksOnSamePage(
    1730  VkDeviceSize resourceAOffset,
    1731  VkDeviceSize resourceASize,
    1732  VkDeviceSize resourceBOffset,
    1733  VkDeviceSize pageSize)
    1734 {
    1735  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    1736  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    1737  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    1738  VkDeviceSize resourceBStart = resourceBOffset;
    1739  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    1740  return resourceAEndPage == resourceBStartPage;
    1741 }
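
/*
Worked example, assuming bufferImageGranularity (pageSize) = 4096: a resource
at offset 0 with size 100 ends on page 0, and a resource starting at offset
2048 also begins on page 0, so VmaBlocksOnSamePage(0, 100, 2048, 4096) returns
true. With the second resource moved to offset 4096 the pages differ and the
function returns false.
*/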
    1742 
    1743 enum VmaSuballocationType
    1744 {
    1745  VMA_SUBALLOCATION_TYPE_FREE = 0,
    1746  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    1747  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    1748  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    1749  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    1750  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    1751  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    1752 };
    1753 
    1754 /*
    1755 Returns true if given suballocation types could conflict and must respect
    1756 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    1757 or linear image and the other one is optimal image. If the type is unknown, behave
    1758 conservatively.
    1759 */
    1760 static inline bool VmaIsBufferImageGranularityConflict(
    1761  VmaSuballocationType suballocType1,
    1762  VmaSuballocationType suballocType2)
    1763 {
    1764  if(suballocType1 > suballocType2)
    1765  {
    1766  VMA_SWAP(suballocType1, suballocType2);
    1767  }
    1768 
    1769  switch(suballocType1)
    1770  {
    1771  case VMA_SUBALLOCATION_TYPE_FREE:
    1772  return false;
    1773  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    1774  return true;
    1775  case VMA_SUBALLOCATION_TYPE_BUFFER:
    1776  return
    1777  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1778  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1779  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    1780  return
    1781  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1782  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    1783  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1784  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    1785  return
    1786  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1787  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    1788  return false;
    1789  default:
    1790  VMA_ASSERT(0);
    1791  return true;
    1792  }
    1793 }
    1794 
    1795 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    1796 struct VmaMutexLock
    1797 {
    1798 public:
    1799  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    1800  m_pMutex(useMutex ? &mutex : VMA_NULL)
    1801  {
    1802  if(m_pMutex)
    1803  {
    1804  m_pMutex->Lock();
    1805  }
    1806  }
    1807 
    1808  ~VmaMutexLock()
    1809  {
    1810  if(m_pMutex)
    1811  {
    1812  m_pMutex->Unlock();
    1813  }
    1814  }
    1815 
    1816 private:
    1817  VMA_MUTEX* m_pMutex;
    1818 };
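
/*
Usage sketch: the second parameter lets the lock degrade to a no-op when the
user disabled internal synchronization. Assuming members m_Mutex and
m_UseMutex of the enclosing class:

    {
        VmaMutexLock lock(m_Mutex, m_UseMutex); // Locks only if m_UseMutex is true.
        // ... access state shared between threads ...
    } // Unlocks automatically at end of scope.
*/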
    1819 
    1820 #if VMA_DEBUG_GLOBAL_MUTEX
    1821  static VMA_MUTEX gDebugGlobalMutex;
    1822  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    1823 #else
    1824  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    1825 #endif
    1826 
    1827 // Minimum size of a free suballocation to register it in the free suballocation collection.
    1828 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    1829 
    1830 /*
    1831 Performs binary search and returns iterator to first element that is greater or
    1832 equal to (key), according to comparison (cmp).
    1833 
    1834 Cmp should return true if first argument is less than second argument.
    1835 
    1836 Returned iterator points to the found element, if present in the collection,
    1837 or to the place where a new element with value (key) should be inserted.
    1838 */
    1839 template <typename IterT, typename KeyT, typename CmpT>
    1840 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    1841 {
    1842  size_t down = 0, up = (end - beg);
    1843  while(down < up)
    1844  {
    1845  const size_t mid = (down + up) / 2;
    1846  if(cmp(*(beg+mid), key))
    1847  {
    1848  down = mid + 1;
    1849  }
    1850  else
    1851  {
    1852  up = mid;
    1853  }
    1854  }
    1855  return beg + down;
    1856 }
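
/*
Worked example: for a sorted array {1, 3, 3, 7} with cmp being operator<,
searching for key 3 returns an iterator to the first 3 (index 1), and
searching for key 4 returns an iterator to 7 (index 3) - the position where 4
would have to be inserted to keep the array sorted.
*/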
    1857 
    1858 ////////////////////////////////////////////////////////////////////////////////
    1859 // Memory allocation
    1860 
    1861 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    1862 {
    1863  if((pAllocationCallbacks != VMA_NULL) &&
    1864  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    1865  {
    1866  return (*pAllocationCallbacks->pfnAllocation)(
    1867  pAllocationCallbacks->pUserData,
    1868  size,
    1869  alignment,
    1870  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    1871  }
    1872  else
    1873  {
    1874  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    1875  }
    1876 }
    1877 
    1878 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    1879 {
    1880  if((pAllocationCallbacks != VMA_NULL) &&
    1881  (pAllocationCallbacks->pfnFree != VMA_NULL))
    1882  {
    1883  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    1884  }
    1885  else
    1886  {
    1887  VMA_SYSTEM_FREE(ptr);
    1888  }
    1889 }
    1890 
    1891 template<typename T>
    1892 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    1893 {
    1894  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    1895 }
    1896 
    1897 template<typename T>
    1898 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    1899 {
    1900  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    1901 }
    1902 
    1903 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    1904 
    1905 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    1906 
    1907 template<typename T>
    1908 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    1909 {
    1910  ptr->~T();
    1911  VmaFree(pAllocationCallbacks, ptr);
    1912 }
    1913 
    1914 template<typename T>
    1915 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    1916 {
    1917  if(ptr != VMA_NULL)
    1918  {
    1919  for(size_t i = count; i--; )
    1920  {
    1921  ptr[i].~T();
    1922  }
    1923  VmaFree(pAllocationCallbacks, ptr);
    1924  }
    1925 }
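
/*
Usage sketch: vma_new/vma_delete pair placement-new with the callback-aware
allocation functions above. Assuming a const VkAllocationCallbacks* named
pAllocationCallbacks:

    VmaMutex* pMutex = vma_new(pAllocationCallbacks, VmaMutex)();
    vma_delete(pAllocationCallbacks, pMutex);

    // The array variant allocates raw memory for (count) elements and
    // constructs at most the first one, so element types should be POD:
    uint32_t* pIndices = vma_new_array(pAllocationCallbacks, uint32_t, 16);
    vma_delete_array(pAllocationCallbacks, pIndices, 16);
*/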
    1926 
    1927 // STL-compatible allocator.
    1928 template<typename T>
    1929 class VmaStlAllocator
    1930 {
    1931 public:
    1932  const VkAllocationCallbacks* const m_pCallbacks;
    1933  typedef T value_type;
    1934 
    1935  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    1936  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    1937 
    1938  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    1939  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    1940 
    1941  template<typename U>
    1942  bool operator==(const VmaStlAllocator<U>& rhs) const
    1943  {
    1944  return m_pCallbacks == rhs.m_pCallbacks;
    1945  }
    1946  template<typename U>
    1947  bool operator!=(const VmaStlAllocator<U>& rhs) const
    1948  {
    1949  return m_pCallbacks != rhs.m_pCallbacks;
    1950  }
    1951 
    1952  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    1953 };
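
/*
Usage sketch: the allocator plugs into the STL-style containers used below
(and, with VMA_USE_STL_VECTOR, into std::vector itself). Assuming a
const VkAllocationCallbacks* named pAllocationCallbacks:

    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(
        VmaStlAllocator<uint32_t>(pAllocationCallbacks));
    v.push_back(42);
*/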
    1954 
    1955 #if VMA_USE_STL_VECTOR
    1956 
    1957 #define VmaVector std::vector
    1958 
    1959 template<typename T, typename allocatorT>
    1960 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    1961 {
    1962  vec.insert(vec.begin() + index, item);
    1963 }
    1964 
    1965 template<typename T, typename allocatorT>
    1966 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    1967 {
    1968  vec.erase(vec.begin() + index);
    1969 }
    1970 
    1971 #else // #if VMA_USE_STL_VECTOR
    1972 
    1973 /* Class with interface compatible with a subset of std::vector.
    1974 T must be POD because constructors and destructors are not called and memcpy is
    1975 used for these objects. */
    1976 template<typename T, typename AllocatorT>
    1977 class VmaVector
    1978 {
    1979 public:
    1980  typedef T value_type;
    1981 
    1982  VmaVector(const AllocatorT& allocator) :
    1983  m_Allocator(allocator),
    1984  m_pArray(VMA_NULL),
    1985  m_Count(0),
    1986  m_Capacity(0)
    1987  {
    1988  }
    1989 
    1990  VmaVector(size_t count, const AllocatorT& allocator) :
    1991  m_Allocator(allocator),
    1992  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    1993  m_Count(count),
    1994  m_Capacity(count)
    1995  {
    1996  }
    1997 
    1998  VmaVector(const VmaVector<T, AllocatorT>& src) :
    1999  m_Allocator(src.m_Allocator),
    2000  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    2001  m_Count(src.m_Count),
    2002  m_Capacity(src.m_Count)
    2003  {
    2004  if(m_Count != 0)
    2005  {
    2006  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    2007  }
    2008  }
    2009 
    2010  ~VmaVector()
    2011  {
    2012  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2013  }
    2014 
    2015  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    2016  {
    2017  if(&rhs != this)
    2018  {
    2019  resize(rhs.m_Count);
    2020  if(m_Count != 0)
    2021  {
    2022  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    2023  }
    2024  }
    2025  return *this;
    2026  }
    2027 
    2028  bool empty() const { return m_Count == 0; }
    2029  size_t size() const { return m_Count; }
    2030  T* data() { return m_pArray; }
    2031  const T* data() const { return m_pArray; }
    2032 
    2033  T& operator[](size_t index)
    2034  {
    2035  VMA_HEAVY_ASSERT(index < m_Count);
    2036  return m_pArray[index];
    2037  }
    2038  const T& operator[](size_t index) const
    2039  {
    2040  VMA_HEAVY_ASSERT(index < m_Count);
    2041  return m_pArray[index];
    2042  }
    2043 
    2044  T& front()
    2045  {
    2046  VMA_HEAVY_ASSERT(m_Count > 0);
    2047  return m_pArray[0];
    2048  }
    2049  const T& front() const
    2050  {
    2051  VMA_HEAVY_ASSERT(m_Count > 0);
    2052  return m_pArray[0];
    2053  }
    2054  T& back()
    2055  {
    2056  VMA_HEAVY_ASSERT(m_Count > 0);
    2057  return m_pArray[m_Count - 1];
    2058  }
    2059  const T& back() const
    2060  {
    2061  VMA_HEAVY_ASSERT(m_Count > 0);
    2062  return m_pArray[m_Count - 1];
    2063  }
    2064 
    2065  void reserve(size_t newCapacity, bool freeMemory = false)
    2066  {
    2067  newCapacity = VMA_MAX(newCapacity, m_Count);
    2068 
    2069  if((newCapacity < m_Capacity) && !freeMemory)
    2070  {
    2071  newCapacity = m_Capacity;
    2072  }
    2073 
    2074  if(newCapacity != m_Capacity)
    2075  {
    2076  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2077  if(m_Count != 0)
    2078  {
    2079  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    2080  }
    2081  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2082  m_Capacity = newCapacity;
    2083  m_pArray = newArray;
    2084  }
    2085  }
    2086 
    2087  void resize(size_t newCount, bool freeMemory = false)
    2088  {
    2089  size_t newCapacity = m_Capacity;
    2090  if(newCount > m_Capacity)
    2091  {
    2092  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    2093  }
    2094  else if(freeMemory)
    2095  {
    2096  newCapacity = newCount;
    2097  }
    2098 
    2099  if(newCapacity != m_Capacity)
    2100  {
    2101  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2102  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    2103  if(elementsToCopy != 0)
    2104  {
    2105  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    2106  }
    2107  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2108  m_Capacity = newCapacity;
    2109  m_pArray = newArray;
    2110  }
    2111 
    2112  m_Count = newCount;
    2113  }
    2114 
    2115  void clear(bool freeMemory = false)
    2116  {
    2117  resize(0, freeMemory);
    2118  }
    2119 
    2120  void insert(size_t index, const T& src)
    2121  {
    2122  VMA_HEAVY_ASSERT(index <= m_Count);
    2123  const size_t oldCount = size();
    2124  resize(oldCount + 1);
    2125  if(index < oldCount)
    2126  {
    2127  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    2128  }
    2129  m_pArray[index] = src;
    2130  }
    2131 
    2132  void remove(size_t index)
    2133  {
    2134  VMA_HEAVY_ASSERT(index < m_Count);
    2135  const size_t oldCount = size();
    2136  if(index < oldCount - 1)
    2137  {
    2138  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2139  }
    2140  resize(oldCount - 1);
    2141  }
    2142 
    2143  void push_back(const T& src)
    2144  {
    2145  const size_t newIndex = size();
    2146  resize(newIndex + 1);
    2147  m_pArray[newIndex] = src;
    2148  }
    2149 
    2150  void pop_back()
    2151  {
    2152  VMA_HEAVY_ASSERT(m_Count > 0);
    2153  resize(size() - 1);
    2154  }
    2155 
    2156  void push_front(const T& src)
    2157  {
    2158  insert(0, src);
    2159  }
    2160 
    2161  void pop_front()
    2162  {
    2163  VMA_HEAVY_ASSERT(m_Count > 0);
    2164  remove(0);
    2165  }
    2166 
    2167  typedef T* iterator;
    2168 
    2169  iterator begin() { return m_pArray; }
    2170  iterator end() { return m_pArray + m_Count; }
    2171 
    2172 private:
    2173  AllocatorT m_Allocator;
    2174  T* m_pArray;
    2175  size_t m_Count;
    2176  size_t m_Capacity;
    2177 };
    2178 
    2179 template<typename T, typename allocatorT>
    2180 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    2181 {
    2182  vec.insert(index, item);
    2183 }
    2184 
    2185 template<typename T, typename allocatorT>
    2186 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    2187 {
    2188  vec.remove(index);
    2189 }
    2190 
    2191 #endif // #if VMA_USE_STL_VECTOR
    2192 
    2193 template<typename CmpLess, typename VectorT>
    2194 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    2195 {
    2196  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2197  vector.data(),
    2198  vector.data() + vector.size(),
    2199  value,
    2200  CmpLess()) - vector.data();
    2201  VmaVectorInsert(vector, indexToInsert, value);
    2202  return indexToInsert;
    2203 }
    2204 
    2205 template<typename CmpLess, typename VectorT>
    2206 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    2207 {
    2208  CmpLess comparator;
    2209  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2210  vector.begin(),
    2211  vector.end(),
    2212  value,
    2213  comparator);
    2214  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    2215  {
    2216  size_t indexToRemove = it - vector.begin();
    2217  VmaVectorRemove(vector, indexToRemove);
    2218  return true;
    2219  }
    2220  return false;
    2221 }
    2222 
    2223 template<typename CmpLess, typename VectorT>
    2224 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    2225 {
    2226  CmpLess comparator;
    2227  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
    2228  vector.data(),
    2229  vector.data() + vector.size(),
    2230  value,
    2231  comparator);
    2232  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    2233  {
    2234  return size_t(it - vector.data());
    2235  }
    2235  }
    2236  else
    2237  {
    2238  return vector.size();
    2239  }
    2240 }
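
/*
Usage sketch: the comparison is passed as an explicit template parameter.
U32Less is a hypothetical comparator for a VmaVector<uint32_t, ...> named vec
that is kept sorted ascending:

    struct U32Less
    {
        bool operator()(uint32_t lhs, uint32_t rhs) const { return lhs < rhs; }
    };

    VmaVectorInsertSorted<U32Less>(vec, 42); // Inserts at the position found by binary search.
    VmaVectorRemoveSorted<U32Less>(vec, 42); // Finds by binary search, removes if present.
*/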
    2241 
    2242 ////////////////////////////////////////////////////////////////////////////////
    2243 // class VmaPoolAllocator
    2244 
    2245 /*
    2246 Allocator for objects of type T using a list of arrays (pools) to speed up
    2247 allocation. Number of elements that can be allocated is not bounded because
    2248 allocator can create multiple blocks.
    2249 */
    2250 template<typename T>
    2251 class VmaPoolAllocator
    2252 {
    2253 public:
    2254  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    2255  ~VmaPoolAllocator();
    2256  void Clear();
    2257  T* Alloc();
    2258  void Free(T* ptr);
    2259 
    2260 private:
    2261  union Item
    2262  {
    2263  uint32_t NextFreeIndex;
    2264  T Value;
    2265  };
    2266 
    2267  struct ItemBlock
    2268  {
    2269  Item* pItems;
    2270  uint32_t FirstFreeIndex;
    2271  };
    2272 
    2273  const VkAllocationCallbacks* m_pAllocationCallbacks;
    2274  size_t m_ItemsPerBlock;
    2275  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    2276 
    2277  ItemBlock& CreateNewBlock();
    2278 };
    2279 
    2280 template<typename T>
    2281 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    2282  m_pAllocationCallbacks(pAllocationCallbacks),
    2283  m_ItemsPerBlock(itemsPerBlock),
    2284  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    2285 {
    2286  VMA_ASSERT(itemsPerBlock > 0);
    2287 }
    2288 
    2289 template<typename T>
    2290 VmaPoolAllocator<T>::~VmaPoolAllocator()
    2291 {
    2292  Clear();
    2293 }
    2294 
    2295 template<typename T>
    2296 void VmaPoolAllocator<T>::Clear()
    2297 {
    2298  for(size_t i = m_ItemBlocks.size(); i--; )
    2299  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    2300  m_ItemBlocks.clear();
    2301 }
    2302 
    2303 template<typename T>
    2304 T* VmaPoolAllocator<T>::Alloc()
    2305 {
    2306  for(size_t i = m_ItemBlocks.size(); i--; )
    2307  {
    2308  ItemBlock& block = m_ItemBlocks[i];
    2309  // This block has some free items: Use first one.
    2310  if(block.FirstFreeIndex != UINT32_MAX)
    2311  {
    2312  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    2313  block.FirstFreeIndex = pItem->NextFreeIndex;
    2314  return &pItem->Value;
    2315  }
    2316  }
    2317 
    2318  // No block has free item: Create new one and use it.
    2319  ItemBlock& newBlock = CreateNewBlock();
    2320  Item* const pItem = &newBlock.pItems[0];
    2321  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    2322  return &pItem->Value;
    2323 }
    2324 
    2325 template<typename T>
    2326 void VmaPoolAllocator<T>::Free(T* ptr)
    2327 {
    2328  // Search all memory blocks to find ptr.
    2329  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    2330  {
    2331  ItemBlock& block = m_ItemBlocks[i];
    2332 
    2333  // Casting to union.
    2334  Item* pItemPtr;
    2335  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    2336 
    2337  // Check if pItemPtr is in address range of this block.
    2338  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    2339  {
    2340  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    2341  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    2342  block.FirstFreeIndex = index;
    2343  return;
    2344  }
    2345  }
    2346  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    2347 }
    2348 
    2349 template<typename T>
    2350 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    2351 {
    2352  ItemBlock newBlock = {
    2353  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    2354 
    2355  m_ItemBlocks.push_back(newBlock);
    2356 
    2357  // Setup singly-linked list of all free items in this block.
    2358  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    2359  newBlock.pItems[i].NextFreeIndex = i + 1;
    2360  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    2361  return m_ItemBlocks.back();
    2362 }
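
/*
Usage sketch: Alloc() pops the head of a per-block singly-linked free list
(the next-free index is stored inside each unused item), so taking an item
from a block that has a free slot is O(1). Node is a hypothetical POD payload:

    struct Node { uint32_t a, b; };
    VmaPoolAllocator<Node> pool(pAllocationCallbacks, 128);
    Node* n = pool.Alloc(); // Returns an uninitialized slot - no constructor is run.
    n->a = 1; n->b = 2;
    pool.Free(n);           // Pushes the slot back onto its block's free list.
*/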
    2363 
    2364 ////////////////////////////////////////////////////////////////////////////////
    2365 // class VmaRawList, VmaList
    2366 
    2367 #if VMA_USE_STL_LIST
    2368 
    2369 #define VmaList std::list
    2370 
    2371 #else // #if VMA_USE_STL_LIST
    2372 
    2373 template<typename T>
    2374 struct VmaListItem
    2375 {
    2376  VmaListItem* pPrev;
    2377  VmaListItem* pNext;
    2378  T Value;
    2379 };
    2380 
    2381 // Doubly linked list.
    2382 template<typename T>
    2383 class VmaRawList
    2384 {
    2385 public:
    2386  typedef VmaListItem<T> ItemType;
    2387 
    2388  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    2389  ~VmaRawList();
    2390  void Clear();
    2391 
    2392  size_t GetCount() const { return m_Count; }
    2393  bool IsEmpty() const { return m_Count == 0; }
    2394 
    2395  ItemType* Front() { return m_pFront; }
    2396  const ItemType* Front() const { return m_pFront; }
    2397  ItemType* Back() { return m_pBack; }
    2398  const ItemType* Back() const { return m_pBack; }
    2399 
    2400  ItemType* PushBack();
    2401  ItemType* PushFront();
    2402  ItemType* PushBack(const T& value);
    2403  ItemType* PushFront(const T& value);
    2404  void PopBack();
    2405  void PopFront();
    2406 
    2407  // Item can be null - it means PushBack.
    2408  ItemType* InsertBefore(ItemType* pItem);
    2409  // Item can be null - it means PushFront.
    2410  ItemType* InsertAfter(ItemType* pItem);
    2411 
    2412  ItemType* InsertBefore(ItemType* pItem, const T& value);
    2413  ItemType* InsertAfter(ItemType* pItem, const T& value);
    2414 
    2415  void Remove(ItemType* pItem);
    2416 
    2417 private:
    2418  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    2419  VmaPoolAllocator<ItemType> m_ItemAllocator;
    2420  ItemType* m_pFront;
    2421  ItemType* m_pBack;
    2422  size_t m_Count;
    2423 
    2424  // Declared but not defined, to block copy constructor and assignment operator.
    2425  VmaRawList(const VmaRawList<T>& src);
    2426  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
    2427 };
    2428 
    2429 template<typename T>
    2430 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    2431  m_pAllocationCallbacks(pAllocationCallbacks),
    2432  m_ItemAllocator(pAllocationCallbacks, 128),
    2433  m_pFront(VMA_NULL),
    2434  m_pBack(VMA_NULL),
    2435  m_Count(0)
    2436 {
    2437 }
    2438 
    2439 template<typename T>
    2440 VmaRawList<T>::~VmaRawList()
    2441 {
    2442  // Intentionally not calling Clear, because that would waste computation
    2443  // just to return all items to m_ItemAllocator as free.
    2444 }
    2445 
    2446 template<typename T>
    2447 void VmaRawList<T>::Clear()
    2448 {
    2449  if(IsEmpty() == false)
    2450  {
    2451  ItemType* pItem = m_pBack;
    2452  while(pItem != VMA_NULL)
    2453  {
    2454  ItemType* const pPrevItem = pItem->pPrev;
    2455  m_ItemAllocator.Free(pItem);
    2456  pItem = pPrevItem;
    2457  }
    2458  m_pFront = VMA_NULL;
    2459  m_pBack = VMA_NULL;
    2460  m_Count = 0;
    2461  }
    2462 }
    2463 
    2464 template<typename T>
    2465 VmaListItem<T>* VmaRawList<T>::PushBack()
    2466 {
    2467  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2468  pNewItem->pNext = VMA_NULL;
    2469  if(IsEmpty())
    2470  {
    2471  pNewItem->pPrev = VMA_NULL;
    2472  m_pFront = pNewItem;
    2473  m_pBack = pNewItem;
    2474  m_Count = 1;
    2475  }
    2476  else
    2477  {
    2478  pNewItem->pPrev = m_pBack;
    2479  m_pBack->pNext = pNewItem;
    2480  m_pBack = pNewItem;
    2481  ++m_Count;
    2482  }
    2483  return pNewItem;
    2484 }
    2485 
    2486 template<typename T>
    2487 VmaListItem<T>* VmaRawList<T>::PushFront()
    2488 {
    2489  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2490  pNewItem->pPrev = VMA_NULL;
    2491  if(IsEmpty())
    2492  {
    2493  pNewItem->pNext = VMA_NULL;
    2494  m_pFront = pNewItem;
    2495  m_pBack = pNewItem;
    2496  m_Count = 1;
    2497  }
    2498  else
    2499  {
    2500  pNewItem->pNext = m_pFront;
    2501  m_pFront->pPrev = pNewItem;
    2502  m_pFront = pNewItem;
    2503  ++m_Count;
    2504  }
    2505  return pNewItem;
    2506 }
    2507 
    2508 template<typename T>
    2509 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    2510 {
    2511  ItemType* const pNewItem = PushBack();
    2512  pNewItem->Value = value;
    2513  return pNewItem;
    2514 }
    2515 
    2516 template<typename T>
    2517 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    2518 {
    2519  ItemType* const pNewItem = PushFront();
    2520  pNewItem->Value = value;
    2521  return pNewItem;
    2522 }
    2523 
    2524 template<typename T>
    2525 void VmaRawList<T>::PopBack()
    2526 {
    2527  VMA_HEAVY_ASSERT(m_Count > 0);
    2528  ItemType* const pBackItem = m_pBack;
    2529  ItemType* const pPrevItem = pBackItem->pPrev;
    2530  if(pPrevItem != VMA_NULL)
    2531  {
    2532  pPrevItem->pNext = VMA_NULL;
    2533  }
    2534  m_pBack = pPrevItem;
    2535  m_ItemAllocator.Free(pBackItem);
    2536  --m_Count;
    2537 }
    2538 
    2539 template<typename T>
    2540 void VmaRawList<T>::PopFront()
    2541 {
    2542  VMA_HEAVY_ASSERT(m_Count > 0);
    2543  ItemType* const pFrontItem = m_pFront;
    2544  ItemType* const pNextItem = pFrontItem->pNext;
    2545  if(pNextItem != VMA_NULL)
    2546  {
    2547  pNextItem->pPrev = VMA_NULL;
    2548  }
    2549  m_pFront = pNextItem;
    2550  m_ItemAllocator.Free(pFrontItem);
    2551  --m_Count;
    2552 }
    2553 
    2554 template<typename T>
    2555 void VmaRawList<T>::Remove(ItemType* pItem)
    2556 {
    2557  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    2558  VMA_HEAVY_ASSERT(m_Count > 0);
    2559 
    2560  if(pItem->pPrev != VMA_NULL)
    2561  {
    2562  pItem->pPrev->pNext = pItem->pNext;
    2563  }
    2564  else
    2565  {
    2566  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2567  m_pFront = pItem->pNext;
    2568  }
    2569 
    2570  if(pItem->pNext != VMA_NULL)
    2571  {
    2572  pItem->pNext->pPrev = pItem->pPrev;
    2573  }
    2574  else
    2575  {
    2576  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2577  m_pBack = pItem->pPrev;
    2578  }
    2579 
    2580  m_ItemAllocator.Free(pItem);
    2581  --m_Count;
    2582 }
    2583 
    2584 template<typename T>
    2585 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    2586 {
    2587  if(pItem != VMA_NULL)
    2588  {
    2589  ItemType* const prevItem = pItem->pPrev;
    2590  ItemType* const newItem = m_ItemAllocator.Alloc();
    2591  newItem->pPrev = prevItem;
    2592  newItem->pNext = pItem;
    2593  pItem->pPrev = newItem;
    2594  if(prevItem != VMA_NULL)
    2595  {
    2596  prevItem->pNext = newItem;
    2597  }
    2598  else
    2599  {
    2600  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2601  m_pFront = newItem;
    2602  }
    2603  ++m_Count;
    2604  return newItem;
    2605  }
    2606  else
    2607  return PushBack();
    2608 }
    2609 
    2610 template<typename T>
    2611 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    2612 {
    2613  if(pItem != VMA_NULL)
    2614  {
    2615  ItemType* const nextItem = pItem->pNext;
    2616  ItemType* const newItem = m_ItemAllocator.Alloc();
    2617  newItem->pNext = nextItem;
    2618  newItem->pPrev = pItem;
    2619  pItem->pNext = newItem;
    2620  if(nextItem != VMA_NULL)
    2621  {
    2622  nextItem->pPrev = newItem;
    2623  }
    2624  else
    2625  {
    2626  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2627  m_pBack = newItem;
    2628  }
    2629  ++m_Count;
    2630  return newItem;
    2631  }
    2632  else
    2633  return PushFront();
    2634 }
    2635 
    2636 template<typename T>
    2637 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    2638 {
    2639  ItemType* const newItem = InsertBefore(pItem);
    2640  newItem->Value = value;
    2641  return newItem;
    2642 }
    2643 
    2644 template<typename T>
    2645 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    2646 {
    2647  ItemType* const newItem = InsertAfter(pItem);
    2648  newItem->Value = value;
    2649  return newItem;
    2650 }
    2651 
    2652 template<typename T, typename AllocatorT>
    2653 class VmaList
    2654 {
    2655 public:
    2656  class iterator
    2657  {
    2658  public:
    2659  iterator() :
    2660  m_pList(VMA_NULL),
    2661  m_pItem(VMA_NULL)
    2662  {
    2663  }
    2664 
    2665  T& operator*() const
    2666  {
    2667  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2668  return m_pItem->Value;
    2669  }
    2670  T* operator->() const
    2671  {
    2672  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2673  return &m_pItem->Value;
    2674  }
    2675 
    2676  iterator& operator++()
    2677  {
    2678  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2679  m_pItem = m_pItem->pNext;
    2680  return *this;
    2681  }
    2682  iterator& operator--()
    2683  {
    2684  if(m_pItem != VMA_NULL)
    2685  {
    2686  m_pItem = m_pItem->pPrev;
    2687  }
    2688  else
    2689  {
    2690  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2691  m_pItem = m_pList->Back();
    2692  }
    2693  return *this;
    2694  }
    2695 
    2696  iterator operator++(int)
    2697  {
    2698  iterator result = *this;
    2699  ++*this;
    2700  return result;
    2701  }
    2702  iterator operator--(int)
    2703  {
    2704  iterator result = *this;
    2705  --*this;
    2706  return result;
    2707  }
    2708 
    2709  bool operator==(const iterator& rhs) const
    2710  {
    2711  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2712  return m_pItem == rhs.m_pItem;
    2713  }
    2714  bool operator!=(const iterator& rhs) const
    2715  {
    2716  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2717  return m_pItem != rhs.m_pItem;
    2718  }
    2719 
    2720  private:
    2721  VmaRawList<T>* m_pList;
    2722  VmaListItem<T>* m_pItem;
    2723 
    2724  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    2725  m_pList(pList),
    2726  m_pItem(pItem)
    2727  {
    2728  }
    2729 
    2730  friend class VmaList<T, AllocatorT>;
    2731  };
    2732 
    2733  class const_iterator
    2734  {
    2735  public:
    2736  const_iterator() :
    2737  m_pList(VMA_NULL),
    2738  m_pItem(VMA_NULL)
    2739  {
    2740  }
    2741 
    2742  const_iterator(const iterator& src) :
    2743  m_pList(src.m_pList),
    2744  m_pItem(src.m_pItem)
    2745  {
    2746  }
    2747 
    2748  const T& operator*() const
    2749  {
    2750  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2751  return m_pItem->Value;
    2752  }
    2753  const T* operator->() const
    2754  {
    2755  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2756  return &m_pItem->Value;
    2757  }
    2758 
    2759  const_iterator& operator++()
    2760  {
    2761  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2762  m_pItem = m_pItem->pNext;
    2763  return *this;
    2764  }
    2765  const_iterator& operator--()
    2766  {
    2767  if(m_pItem != VMA_NULL)
    2768  {
    2769  m_pItem = m_pItem->pPrev;
    2770  }
    2771  else
    2772  {
    2773  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2774  m_pItem = m_pList->Back();
    2775  }
    2776  return *this;
    2777  }
    2778 
    2779  const_iterator operator++(int)
    2780  {
    2781  const_iterator result = *this;
    2782  ++*this;
    2783  return result;
    2784  }
    2785  const_iterator operator--(int)
    2786  {
    2787  const_iterator result = *this;
    2788  --*this;
    2789  return result;
    2790  }
    2791 
    2792  bool operator==(const const_iterator& rhs) const
    2793  {
    2794  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2795  return m_pItem == rhs.m_pItem;
    2796  }
    2797  bool operator!=(const const_iterator& rhs) const
    2798  {
    2799  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2800  return m_pItem != rhs.m_pItem;
    2801  }
    2802 
    2803  private:
    2804  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    2805  m_pList(pList),
    2806  m_pItem(pItem)
    2807  {
    2808  }
    2809 
    2810  const VmaRawList<T>* m_pList;
    2811  const VmaListItem<T>* m_pItem;
    2812 
    2813  friend class VmaList<T, AllocatorT>;
    2814  };
    2815 
    2816  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    2817 
    2818  bool empty() const { return m_RawList.IsEmpty(); }
    2819  size_t size() const { return m_RawList.GetCount(); }
    2820 
    2821  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    2822  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    2823 
    2824  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    2825  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    2826 
    2827  void clear() { m_RawList.Clear(); }
    2828  void push_back(const T& value) { m_RawList.PushBack(value); }
    2829  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    2830  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    2831 
    2832 private:
    2833  VmaRawList<T> m_RawList;
    2834 };
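
/*
Usage sketch: VmaList mirrors the std::list subset that the library uses.
Assuming a const VkAllocationCallbacks* named pAllocationCallbacks:

    typedef VmaList< int, VmaStlAllocator<int> > IntList;
    IntList list(VmaStlAllocator<int>(pAllocationCallbacks));
    list.push_back(1);
    list.push_back(2);
    for(IntList::iterator it = list.begin(); it != list.end(); ++it)
    {
        // *it visits 1, then 2.
    }
*/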
    2835 
    2836 #endif // #if VMA_USE_STL_LIST
    2837 
    2839 ////////////////////////////////////////////////////////////////////////////////
    2840 // class VmaMap
    2840 
    2841 // Unused in this version.
    2842 #if 0
    2843 
    2844 #if VMA_USE_STL_UNORDERED_MAP
    2845 
    2846 #define VmaPair std::pair
    2847 
    2848 #define VMA_MAP_TYPE(KeyT, ValueT) \
    2849  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    2850 
    2851 #else // #if VMA_USE_STL_UNORDERED_MAP
    2852 
    2853 template<typename T1, typename T2>
    2854 struct VmaPair
    2855 {
    2856  T1 first;
    2857  T2 second;
    2858 
    2859  VmaPair() : first(), second() { }
    2860  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    2861 };
    2862 
    2863 /* Class compatible with a subset of the interface of std::unordered_map.
    2864 KeyT, ValueT must be POD because they will be stored in VmaVector.
    2865 */
    2866 template<typename KeyT, typename ValueT>
    2867 class VmaMap
    2868 {
    2869 public:
    2870  typedef VmaPair<KeyT, ValueT> PairType;
    2871  typedef PairType* iterator;
    2872 
    2873  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    2874 
    2875  iterator begin() { return m_Vector.begin(); }
    2876  iterator end() { return m_Vector.end(); }
    2877 
    2878  void insert(const PairType& pair);
    2879  iterator find(const KeyT& key);
    2880  void erase(iterator it);
    2881 
    2882 private:
    2883  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    2884 };
    2885 
    2886 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    2887 
    2888 template<typename FirstT, typename SecondT>
    2889 struct VmaPairFirstLess
    2890 {
    2891  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    2892  {
    2893  return lhs.first < rhs.first;
    2894  }
    2895  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    2896  {
    2897  return lhs.first < rhsFirst;
    2898  }
    2899 };
    2900 
    2901 template<typename KeyT, typename ValueT>
    2902 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    2903 {
    2904  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2905  m_Vector.data(),
    2906  m_Vector.data() + m_Vector.size(),
    2907  pair,
    2908  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    2909  VmaVectorInsert(m_Vector, indexToInsert, pair);
    2910 }
    2911 
    2912 template<typename KeyT, typename ValueT>
    2913 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    2914 {
    2915  PairType* it = VmaBinaryFindFirstNotLess(
    2916  m_Vector.data(),
    2917  m_Vector.data() + m_Vector.size(),
    2918  key,
    2919  VmaPairFirstLess<KeyT, ValueT>());
    2920  if((it != m_Vector.end()) && (it->first == key))
    2921  {
    2922  return it;
    2923  }
    2924  else
    2925  {
    2926  return m_Vector.end();
    2927  }
    2928 }
    2929 
    2930 template<typename KeyT, typename ValueT>
    2931 void VmaMap<KeyT, ValueT>::erase(iterator it)
    2932 {
    2933  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    2934 }
    2935 
    2936 #endif // #if VMA_USE_STL_UNORDERED_MAP
    2937 
    2938 #endif // #if 0
    2939 
    2940 ////////////////////////////////////////////////////////////////////////////////
    2941 
    2942 class VmaDeviceMemoryBlock;
    2943 
    2944 struct VmaAllocation_T
    2945 {
    2946 private:
    2947  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    2948 
    2949 public:
    2950  enum ALLOCATION_TYPE
    2951  {
    2952  ALLOCATION_TYPE_NONE,
    2953  ALLOCATION_TYPE_BLOCK,
    2954  ALLOCATION_TYPE_DEDICATED,
    2955  };
    2956 
    2957  VmaAllocation_T(uint32_t currentFrameIndex) :
    2958  m_Alignment(1),
    2959  m_Size(0),
    2960  m_pUserData(VMA_NULL),
    2961  m_LastUseFrameIndex(currentFrameIndex),
    2962  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
    2963  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
    2964  m_MapCount(0)
    2965  {
    2966  }
    2967 
    2968  ~VmaAllocation_T()
    2969  {
    2970  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    2971  }
    2972 
    2973  void InitBlockAllocation(
    2974  VmaPool hPool,
    2975  VmaDeviceMemoryBlock* block,
    2976  VkDeviceSize offset,
    2977  VkDeviceSize alignment,
    2978  VkDeviceSize size,
    2979  VmaSuballocationType suballocationType,
    2980  bool mapped,
    2981  void* pUserData,
    2982  bool canBecomeLost)
    2983  {
    2984  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2985  VMA_ASSERT(block != VMA_NULL);
    2986  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    2987  m_Alignment = alignment;
    2988  m_Size = size;
    2989  m_pUserData = pUserData;
    2990  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    2991  m_SuballocationType = (uint8_t)suballocationType;
    2992  m_BlockAllocation.m_hPool = hPool;
    2993  m_BlockAllocation.m_Block = block;
    2994  m_BlockAllocation.m_Offset = offset;
    2995  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    2996  }
    2997 
    2998  void InitLost()
    2999  {
    3000  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3001  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    3002  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    3003  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    3004  m_BlockAllocation.m_Block = VMA_NULL;
    3005  m_BlockAllocation.m_Offset = 0;
    3006  m_BlockAllocation.m_CanBecomeLost = true;
    3007  }
    3008 
    3009  void ChangeBlockAllocation(
    3010  VmaDeviceMemoryBlock* block,
    3011  VkDeviceSize offset)
    3012  {
    3013  VMA_ASSERT(block != VMA_NULL);
    3014  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    3015  m_BlockAllocation.m_Block = block;
    3016  m_BlockAllocation.m_Offset = offset;
    3017  }
    3018 
    3019  // pMappedData not null means allocation is created with MAPPED flag.
    3020  void InitDedicatedAllocation(
    3021  uint32_t memoryTypeIndex,
    3022  VkDeviceMemory hMemory,
    3023  VmaSuballocationType suballocationType,
    3024  void* pMappedData,
    3025  VkDeviceSize size,
    3026  void* pUserData)
    3027  {
    3028  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3029  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    3030  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    3031  m_Alignment = 0;
    3032  m_Size = size;
    3033  m_pUserData = pUserData;
    3034  m_SuballocationType = (uint8_t)suballocationType;
    3035  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    3036  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    3037  m_DedicatedAllocation.m_hMemory = hMemory;
    3038  m_DedicatedAllocation.m_pMappedData = pMappedData;
    3039  }
    3040 
    3041  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    3042  VkDeviceSize GetAlignment() const { return m_Alignment; }
    3043  VkDeviceSize GetSize() const { return m_Size; }
    3044  void* GetUserData() const { return m_pUserData; }
    3045  void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    3046  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    3047 
    3048  VmaDeviceMemoryBlock* GetBlock() const
    3049  {
    3050  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    3051  return m_BlockAllocation.m_Block;
    3052  }
    3053  VkDeviceSize GetOffset() const;
    3054  VkDeviceMemory GetMemory() const;
    3055  uint32_t GetMemoryTypeIndex() const;
    3056  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    3057  void* GetMappedData() const;
    3058  bool CanBecomeLost() const;
    3059  VmaPool GetPool() const;
    3060 
    3061  uint32_t GetLastUseFrameIndex() const
    3062  {
    3063  return m_LastUseFrameIndex.load();
    3064  }
    3065  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    3066  {
    3067  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    3068  }
    3069  /*
    3070  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    3071  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    3072  - Else, returns false.
    3073 
    3074  If hAllocation is already lost, assert - you should not call it then.
    3075  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    3076  */
    3077  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3078 
    3079  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    3080  {
    3081  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    3082  outInfo.blockCount = 1;
    3083  outInfo.allocationCount = 1;
    3084  outInfo.unusedRangeCount = 0;
    3085  outInfo.usedBytes = m_Size;
    3086  outInfo.unusedBytes = 0;
    3087  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    3088  outInfo.unusedRangeSizeMin = UINT64_MAX;
    3089  outInfo.unusedRangeSizeMax = 0;
    3090  }
    3091 
    3092  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    3093  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    3094 
    3095 private:
    3096  VkDeviceSize m_Alignment;
    3097  VkDeviceSize m_Size;
    3098  void* m_pUserData;
    3099  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    3100  uint8_t m_Type; // ALLOCATION_TYPE
    3101  uint8_t m_SuballocationType; // VmaSuballocationType
    3102  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    3103  // Bits with mask 0x7F, used only when ALLOCATION_TYPE_DEDICATED, are reference counter for vmaMapMemory()/vmaUnmapMemory().
    3104  uint8_t m_MapCount;
    3105 
    3106  // Allocation out of VmaDeviceMemoryBlock.
    3107  struct BlockAllocation
    3108  {
    3109  VmaPool m_hPool; // Null if belongs to general memory.
    3110  VmaDeviceMemoryBlock* m_Block;
    3111  VkDeviceSize m_Offset;
    3112  bool m_CanBecomeLost;
    3113  };
    3114 
    3115  // Allocation for an object that has its own private VkDeviceMemory.
    3116  struct DedicatedAllocation
    3117  {
    3118  uint32_t m_MemoryTypeIndex;
    3119  VkDeviceMemory m_hMemory;
    3120  void* m_pMappedData; // Not null means memory is mapped.
    3121  };
    3122 
    3123  union
    3124  {
    3125  // Allocation out of VmaDeviceMemoryBlock.
    3126  BlockAllocation m_BlockAllocation;
    3127  // Allocation for an object that has its own private VkDeviceMemory.
    3128  DedicatedAllocation m_DedicatedAllocation;
    3129  };
    3130 };
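
/*
Worked example of the m_MapCount encoding for a dedicated allocation: created
with VMA_ALLOCATION_CREATE_MAPPED_BIT, the field starts at 0x80. Two
vmaMapMemory() calls raise it to 0x82 and the matching vmaUnmapMemory() calls
bring it back to 0x80 - the memory stays mapped because the persistent bit is
still set. Without the flag, the same sequence goes 0x00 -> 0x02 -> 0x00 and
the last unmap really unmaps the memory.
*/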
    3131 
    3132 /*
    3133 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
    3134 returned as an allocated memory block, or free.
    3135 */
    3136 struct VmaSuballocation
    3137 {
    3138  VkDeviceSize offset;
    3139  VkDeviceSize size;
    3140  VmaAllocation hAllocation;
    3141  VmaSuballocationType type;
    3142 };
    3143 
    3144 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    3145 
    3146 // Cost of making one additional allocation lost, expressed as an equivalent size in bytes.
    3147 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    3148 
    3149 /*
    3150 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    3151 
    3152 If canMakeOtherLost was false:
    3153 - item points to a FREE suballocation.
    3154 - itemsToMakeLostCount is 0.
    3155 
    3156 If canMakeOtherLost was true:
    3157 - item points to first of sequence of suballocations, which are either FREE,
    3158  or point to VmaAllocations that can become lost.
    3159 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    3160  the requested allocation to succeed.
    3161 */
    3162 struct VmaAllocationRequest
    3163 {
    3164  VkDeviceSize offset;
    3165  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    3166  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    3167  VmaSuballocationList::iterator item;
    3168  size_t itemsToMakeLostCount;
    3169 
    3170  VkDeviceSize CalcCost() const
    3171  {
    3172  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    3173  }
    3174 };
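// Worked example: a request that would make 2 allocations totaling 3 MB lost
// has CalcCost() = 3 MB + 2 * VMA_LOST_ALLOCATION_COST = 5 MB, so a candidate
// block where a plain FREE range suffices (cost 0) wins the comparison.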
    3175 
    3176 /*
    3177 Data structure used for bookkeeping of allocations and unused ranges of memory
    3178 in a single VkDeviceMemory block.
    3179 */
    3180 class VmaBlockMetadata
    3181 {
    3182 public:
    3183  VmaBlockMetadata(VmaAllocator hAllocator);
    3184  ~VmaBlockMetadata();
    3185  void Init(VkDeviceSize size);
    3186 
    3187  // Validates all data structures inside this object. If not valid, returns false.
    3188  bool Validate() const;
    3189  VkDeviceSize GetSize() const { return m_Size; }
    3190  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    3191  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    3192  VkDeviceSize GetUnusedRangeSizeMax() const;
    3193  // Returns true if this block is empty - contains only a single free suballocation.
    3194  bool IsEmpty() const;
    3195 
    3196  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    3197  void AddPoolStats(VmaPoolStats& inoutStats) const;
    3198 
    3199 #if VMA_STATS_STRING_ENABLED
    3200  void PrintDetailedMap(class VmaJsonWriter& json) const;
    3201 #endif
    3202 
    3203  // Creates trivial request for case when block is empty.
    3204  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
    3205 
    3206  // Tries to find a place for suballocation with given parameters inside this block.
    3207  // If succeeded, fills pAllocationRequest and returns true.
    3208  // If failed, returns false.
    3209  bool CreateAllocationRequest(
    3210  uint32_t currentFrameIndex,
    3211  uint32_t frameInUseCount,
    3212  VkDeviceSize bufferImageGranularity,
    3213  VkDeviceSize allocSize,
    3214  VkDeviceSize allocAlignment,
    3215  VmaSuballocationType allocType,
    3216  bool canMakeOtherLost,
    3217  VmaAllocationRequest* pAllocationRequest);
    3218 
    3219  bool MakeRequestedAllocationsLost(
    3220  uint32_t currentFrameIndex,
    3221  uint32_t frameInUseCount,
    3222  VmaAllocationRequest* pAllocationRequest);
    3223 
    3224  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3225 
    3226  // Makes actual allocation based on request. Request must already be checked and valid.
    3227  void Alloc(
    3228  const VmaAllocationRequest& request,
    3229  VmaSuballocationType type,
    3230  VkDeviceSize allocSize,
    3231  VmaAllocation hAllocation);
    3232 
    3233  // Frees suballocation assigned to given memory region.
    3234  void Free(const VmaAllocation allocation);
    3235 
    3236 private:
    3237  VkDeviceSize m_Size;
    3238  uint32_t m_FreeCount;
    3239  VkDeviceSize m_SumFreeSize;
    3240  VmaSuballocationList m_Suballocations;
    3241  // Suballocations that are free and have size greater than certain threshold.
    3242  // Sorted by size, ascending.
    3243  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    3244 
    3245  bool ValidateFreeSuballocationList() const;
    3246 
    3247  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    3248  // If yes, fills pOffset and returns true. If no, returns false.
    3249  bool CheckAllocation(
    3250  uint32_t currentFrameIndex,
    3251  uint32_t frameInUseCount,
    3252  VkDeviceSize bufferImageGranularity,
    3253  VkDeviceSize allocSize,
    3254  VkDeviceSize allocAlignment,
    3255  VmaSuballocationType allocType,
    3256  VmaSuballocationList::const_iterator suballocItem,
    3257  bool canMakeOtherLost,
    3258  VkDeviceSize* pOffset,
    3259  size_t* itemsToMakeLostCount,
    3260  VkDeviceSize* pSumFreeSize,
    3261  VkDeviceSize* pSumItemSize) const;
    3262  // Given a free suballocation, merges it with the following one, which must also be free.
    3263  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    3264  // Releases given suballocation, making it free.
    3265  // Merges it with adjacent free suballocations if applicable.
    3266  // Returns iterator to new free suballocation at this place.
    3267  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    3268  // Given a free suballocation, inserts it into the sorted list
    3269  // m_FreeSuballocationsBySize if it is large enough to be registered.
    3270  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    3271  // Given a free suballocation, removes it from the sorted list
    3272  // m_FreeSuballocationsBySize if it was registered there.
    3273  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    3274 };
    3275 
    3276 // Helper class that represents mapped memory. Synchronized internally.
    3277 class VmaDeviceMemoryMapping
    3278 {
    3279 public:
    3280  VmaDeviceMemoryMapping();
    3281  ~VmaDeviceMemoryMapping();
    3282 
    3283  void* GetMappedData() const { return m_pMappedData; }
    3284 
    3285  // ppData can be null.
    3286  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData);
    3287  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory);
    3288 
    3289 private:
    3290  VMA_MUTEX m_Mutex;
    3291  uint32_t m_MapCount;
    3292  void* m_pMappedData;
    3293 };
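
/*
Sketch of the reference-counted Map() contract introduced in this version
(simplified: error paths and dispatch through the allocator's Vulkan function
pointers are omitted; `device` stands for the allocator's VkDevice and
`useMutex` for its synchronization setting):

    VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void** ppData)
    {
        VmaMutexLock lock(m_Mutex, useMutex);
        if(m_MapCount != 0)
        {
            ++m_MapCount;       // Already mapped: just add one more reference.
        }
        else
        {
            VkResult res = vkMapMemory(device, hMemory, 0, VK_WHOLE_SIZE, 0, &m_pMappedData);
            if(res != VK_SUCCESS)
            {
                return res;
            }
            m_MapCount = 1;     // First user performs the actual vkMapMemory().
        }
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }

Unmap() is symmetric: it decrements m_MapCount and calls vkUnmapMemory() only
when the counter reaches zero, which is what allows multiple allocations to
keep the same VkDeviceMemory block mapped concurrently.
*/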
    3294 
    3295 /*
    3296 Represents a single block of device memory (`VkDeviceMemory`) with all the
    3297 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
    3298 
    3299 Thread-safety: This class must be externally synchronized.
    3300 */
    3301 class VmaDeviceMemoryBlock
    3302 {
    3303 public:
    3304  uint32_t m_MemoryTypeIndex;
    3305  VkDeviceMemory m_hMemory;
    3306  VmaDeviceMemoryMapping m_Mapping;
    3307  VmaBlockMetadata m_Metadata;
    3308 
    3309  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    3310 
    3311  ~VmaDeviceMemoryBlock()
    3312  {
    3313  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    3314  }
    3315 
    3316  // Always call after construction.
    3317  void Init(
    3318  uint32_t newMemoryTypeIndex,
    3319  VkDeviceMemory newMemory,
    3320  VkDeviceSize newSize);
    3321  // Always call before destruction.
    3322  void Destroy(VmaAllocator allocator);
    3323 
    3324  // Validates all data structures inside this object. If not valid, returns false.
    3325  bool Validate() const;
    3326 
    3327  // ppData can be null.
    3328  VkResult Map(VmaAllocator hAllocator, void** ppData);
    3329  void Unmap(VmaAllocator hAllocator);
    3330 };
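/*
Lifecycle sketch for VmaDeviceMemoryBlock (illustrative; newMemory is assumed
to come from a successful vkAllocateMemory call):

    VmaDeviceMemoryBlock block(hAllocator);
    block.Init(memTypeIndex, newMemory, size); // always right after construction
    VMA_HEAVY_ASSERT(block.Validate());
    // ... suballocate via block.m_Metadata, map via block.Map()/Unmap() ...
    block.Destroy(hAllocator); // always right before destruction: frees m_hMemory,
                               // so the assert in ~VmaDeviceMemoryBlock() holds
*/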
    3331 
    3332 struct VmaPointerLess
    3333 {
    3334  bool operator()(const void* lhs, const void* rhs) const
    3335  {
    3336  return lhs < rhs;
    3337  }
    3338 };
    3339 
    3340 class VmaDefragmentator;
    3341 
    3342 /*
    3343 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    3344 Vulkan memory type.
    3345 
    3346 Synchronized internally with a mutex.
    3347 */
    3348 struct VmaBlockVector
    3349 {
    3350  VmaBlockVector(
    3351  VmaAllocator hAllocator,
    3352  uint32_t memoryTypeIndex,
    3353  VkDeviceSize preferredBlockSize,
    3354  size_t minBlockCount,
    3355  size_t maxBlockCount,
    3356  VkDeviceSize bufferImageGranularity,
    3357  uint32_t frameInUseCount,
    3358  bool isCustomPool);
    3359  ~VmaBlockVector();
    3360 
    3361  VkResult CreateMinBlocks();
    3362 
    3363  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    3364  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    3365  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    3366  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    3367 
    3368  void GetPoolStats(VmaPoolStats* pStats);
    3369 
    3370  bool IsEmpty() const { return m_Blocks.empty(); }
    3371 
    3372  VkResult Allocate(
    3373  VmaPool hCurrentPool,
    3374  uint32_t currentFrameIndex,
    3375  const VkMemoryRequirements& vkMemReq,
    3376  const VmaAllocationCreateInfo& createInfo,
    3377  VmaSuballocationType suballocType,
    3378  VmaAllocation* pAllocation);
    3379 
    3380  void Free(
    3381  VmaAllocation hAllocation);
    3382 
    3383  // Adds statistics of this BlockVector to pStats.
    3384  void AddStats(VmaStats* pStats);
    3385 
    3386 #if VMA_STATS_STRING_ENABLED
    3387  void PrintDetailedMap(class VmaJsonWriter& json);
    3388 #endif
    3389 
    3390  void MakePoolAllocationsLost(
    3391  uint32_t currentFrameIndex,
    3392  size_t* pLostAllocationCount);
    3393 
    3394  VmaDefragmentator* EnsureDefragmentator(
    3395  VmaAllocator hAllocator,
    3396  uint32_t currentFrameIndex);
    3397 
    3398  VkResult Defragment(
    3399  VmaDefragmentationStats* pDefragmentationStats,
    3400  VkDeviceSize& maxBytesToMove,
    3401  uint32_t& maxAllocationsToMove);
    3402 
    3403  void DestroyDefragmentator();
    3404 
    3405 private:
    3406  friend class VmaDefragmentator;
    3407 
    3408  const VmaAllocator m_hAllocator;
    3409  const uint32_t m_MemoryTypeIndex;
    3410  const VkDeviceSize m_PreferredBlockSize;
    3411  const size_t m_MinBlockCount;
    3412  const size_t m_MaxBlockCount;
    3413  const VkDeviceSize m_BufferImageGranularity;
    3414  const uint32_t m_FrameInUseCount;
    3415  const bool m_IsCustomPool;
    3416  VMA_MUTEX m_Mutex;
    3417  // Incrementally sorted by sumFreeSize, ascending.
    3418  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    3419  /* There can be at most one block that is completely empty - a
    3420  hysteresis to avoid the pessimistic case of alternating creation and destruction
    3421  of a VkDeviceMemory. */
    3422  bool m_HasEmptyBlock;
    3423  VmaDefragmentator* m_pDefragmentator;
    3424 
    3425  // Finds and removes given block from vector.
    3426  void Remove(VmaDeviceMemoryBlock* pBlock);
    3427 
    3428  // Performs single step in sorting m_Blocks. They may not be fully sorted
    3429  // after this call.
    3430  void IncrementallySortBlocks();
    3431 
    3432  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    3433 };
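/*
Sketch of the idea behind IncrementallySortBlocks() (the actual definition
appears later in this file; VMA_SWAP is assumed to be the library's swap
macro): perform at most one adjacent swap per call, so m_Blocks converges
toward ascending order by sumFreeSize without paying for a full sort on every
allocation.

    for(size_t i = 1; i < m_Blocks.size(); ++i)
    {
        if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() >
            m_Blocks[i]->m_Metadata.GetSumFreeSize())
        {
            VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
            break; // single step only
        }
    }
*/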
    3434 
    3435 struct VmaPool_T
    3436 {
    3437 public:
    3438  VmaBlockVector m_BlockVector;
    3439 
    3440  // Takes ownership.
    3441  VmaPool_T(
    3442  VmaAllocator hAllocator,
    3443  const VmaPoolCreateInfo& createInfo);
    3444  ~VmaPool_T();
    3445 
    3446  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
    3447 
    3448 #if VMA_STATS_STRING_ENABLED
    3449  //void PrintDetailedMap(class VmaStringBuilder& sb);
    3450 #endif
    3451 };
    3452 
    3453 class VmaDefragmentator
    3454 {
    3455  const VmaAllocator m_hAllocator;
    3456  VmaBlockVector* const m_pBlockVector;
    3457  uint32_t m_CurrentFrameIndex;
    3458  VkDeviceSize m_BytesMoved;
    3459  uint32_t m_AllocationsMoved;
    3460 
    3461  struct AllocationInfo
    3462  {
    3463  VmaAllocation m_hAllocation;
    3464  VkBool32* m_pChanged;
    3465 
    3466  AllocationInfo() :
    3467  m_hAllocation(VK_NULL_HANDLE),
    3468  m_pChanged(VMA_NULL)
    3469  {
    3470  }
    3471  };
    3472 
    3473  struct AllocationInfoSizeGreater
    3474  {
    3475  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    3476  {
    3477  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    3478  }
    3479  };
    3480 
    3481  // Used between AddAllocation and Defragment.
    3482  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3483 
    3484  struct BlockInfo
    3485  {
    3486  VmaDeviceMemoryBlock* m_pBlock;
    3487  bool m_HasNonMovableAllocations;
    3488  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3489 
    3490  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    3491  m_pBlock(VMA_NULL),
    3492  m_HasNonMovableAllocations(true),
    3493  m_Allocations(pAllocationCallbacks),
    3494  m_pMappedDataForDefragmentation(VMA_NULL)
    3495  {
    3496  }
    3497 
    3498  void CalcHasNonMovableAllocations()
    3499  {
    3500  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
    3501  const size_t defragmentAllocCount = m_Allocations.size();
    3502  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    3503  }
    3504 
    3505  void SortAllocationsBySizeDescecnding()
    3506  {
    3507  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    3508  }
    3509 
    3510  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    3511  void Unmap(VmaAllocator hAllocator);
    3512 
    3513  private:
    3514  // Not null if mapped for defragmentation only, not originally mapped.
    3515  void* m_pMappedDataForDefragmentation;
    3516  };
    3517 
    3518  struct BlockPointerLess
    3519  {
    3520  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    3521  {
    3522  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    3523  }
    3524  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3525  {
    3526  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    3527  }
    3528  };
    3529 
    3530  // 1. Blocks with some non-movable allocations go first.
    3531  // 2. Blocks with smaller sumFreeSize go first.
    3532  struct BlockInfoCompareMoveDestination
    3533  {
    3534  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3535  {
    3536  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    3537  {
    3538  return true;
    3539  }
    3540  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    3541  {
    3542  return false;
    3543  }
    3544  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
    3545  {
    3546  return true;
    3547  }
    3548  return false;
    3549  }
    3550  };
    3551 
    3552  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    3553  BlockInfoVector m_Blocks;
    3554 
    3555  VkResult DefragmentRound(
    3556  VkDeviceSize maxBytesToMove,
    3557  uint32_t maxAllocationsToMove);
    3558 
    3559  static bool MoveMakesSense(
    3560  size_t dstBlockIndex, VkDeviceSize dstOffset,
    3561  size_t srcBlockIndex, VkDeviceSize srcOffset);
    3562 
    3563 public:
    3564  VmaDefragmentator(
    3565  VmaAllocator hAllocator,
    3566  VmaBlockVector* pBlockVector,
    3567  uint32_t currentFrameIndex);
    3568 
    3569  ~VmaDefragmentator();
    3570 
    3571  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    3572  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    3573 
    3574  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    3575 
    3576  VkResult Defragment(
    3577  VkDeviceSize maxBytesToMove,
    3578  uint32_t maxAllocationsToMove);
    3579 };
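/*
Example of the ordering produced by BlockInfoCompareMoveDestination for three
hypothetical blocks (sizes illustrative):

    block A: has non-movable allocations, sumFreeSize = 64 KiB
    block B: fully movable,               sumFreeSize = 16 KiB
    block C: fully movable,               sumFreeSize = 128 KiB

Sorted as move destinations: A, B, C. Blocks pinned by non-movable allocations
are filled first, then the fullest (smallest sumFreeSize) movable blocks, so
data migrates toward already-occupied blocks and the emptiest ones can be
released.
*/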
    3580 
    3581 // Main allocator object.
    3582 struct VmaAllocator_T
    3583 {
    3584  bool m_UseMutex;
    3585  bool m_UseKhrDedicatedAllocation;
    3586  VkDevice m_hDevice;
    3587  bool m_AllocationCallbacksSpecified;
    3588  VkAllocationCallbacks m_AllocationCallbacks;
    3589  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    3590 
    3591  // Number of bytes free out of limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    3592  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    3593  VMA_MUTEX m_HeapSizeLimitMutex;
    3594 
    3595  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    3596  VkPhysicalDeviceMemoryProperties m_MemProps;
    3597 
    3598  // Default pools.
    3599  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    3600 
    3601  // Each vector is sorted by memory (handle value).
    3602  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    3603  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    3604  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    3605 
    3606  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    3607  ~VmaAllocator_T();
    3608 
    3609  const VkAllocationCallbacks* GetAllocationCallbacks() const
    3610  {
    3611  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    3612  }
    3613  const VmaVulkanFunctions& GetVulkanFunctions() const
    3614  {
    3615  return m_VulkanFunctions;
    3616  }
    3617 
    3618  VkDeviceSize GetBufferImageGranularity() const
    3619  {
    3620  return VMA_MAX(
    3621  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    3622  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    3623  }
    3624 
    3625  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    3626  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    3627 
    3628  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    3629  {
    3630  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    3631  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    3632  }
    3633 
    3634  void GetBufferMemoryRequirements(
    3635  VkBuffer hBuffer,
    3636  VkMemoryRequirements& memReq,
    3637  bool& requiresDedicatedAllocation,
    3638  bool& prefersDedicatedAllocation) const;
    3639  void GetImageMemoryRequirements(
    3640  VkImage hImage,
    3641  VkMemoryRequirements& memReq,
    3642  bool& requiresDedicatedAllocation,
    3643  bool& prefersDedicatedAllocation) const;
    3644 
    3645  // Main allocation function.
    3646  VkResult AllocateMemory(
    3647  const VkMemoryRequirements& vkMemReq,
    3648  bool requiresDedicatedAllocation,
    3649  bool prefersDedicatedAllocation,
    3650  VkBuffer dedicatedBuffer,
    3651  VkImage dedicatedImage,
    3652  const VmaAllocationCreateInfo& createInfo,
    3653  VmaSuballocationType suballocType,
    3654  VmaAllocation* pAllocation);
    3655 
    3656  // Main deallocation function.
    3657  void FreeMemory(const VmaAllocation allocation);
    3658 
    3659  void CalculateStats(VmaStats* pStats);
    3660 
    3661 #if VMA_STATS_STRING_ENABLED
    3662  void PrintDetailedMap(class VmaJsonWriter& json);
    3663 #endif
    3664 
    3665  VkResult Defragment(
    3666  VmaAllocation* pAllocations,
    3667  size_t allocationCount,
    3668  VkBool32* pAllocationsChanged,
    3669  const VmaDefragmentationInfo* pDefragmentationInfo,
    3670  VmaDefragmentationStats* pDefragmentationStats);
    3671 
    3672  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    3673 
    3674  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    3675  void DestroyPool(VmaPool pool);
    3676  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    3677 
    3678  void SetCurrentFrameIndex(uint32_t frameIndex);
    3679 
    3680  void MakePoolAllocationsLost(
    3681  VmaPool hPool,
    3682  size_t* pLostAllocationCount);
    3683 
    3684  void CreateLostAllocation(VmaAllocation* pAllocation);
    3685 
    3686  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    3687  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    3688 
    3689  VkResult Map(VmaAllocation hAllocation, void** ppData);
    3690  void Unmap(VmaAllocation hAllocation);
    3691 
    3692 private:
    3693  VkDeviceSize m_PreferredLargeHeapBlockSize;
    3694  VkDeviceSize m_PreferredSmallHeapBlockSize;
    3695 
    3696  VkPhysicalDevice m_PhysicalDevice;
    3697  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    3698 
    3699  VMA_MUTEX m_PoolsMutex;
    3700  // Protected by m_PoolsMutex. Sorted by pointer value.
    3701  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    3702 
    3703  VmaVulkanFunctions m_VulkanFunctions;
    3704 
    3705  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    3706 
    3707  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    3708 
    3709  VkResult AllocateMemoryOfType(
    3710  const VkMemoryRequirements& vkMemReq,
    3711  bool dedicatedAllocation,
    3712  VkBuffer dedicatedBuffer,
    3713  VkImage dedicatedImage,
    3714  const VmaAllocationCreateInfo& createInfo,
    3715  uint32_t memTypeIndex,
    3716  VmaSuballocationType suballocType,
    3717  VmaAllocation* pAllocation);
    3718 
    3719  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
    3720  VkResult AllocateDedicatedMemory(
    3721  VkDeviceSize size,
    3722  VmaSuballocationType suballocType,
    3723  uint32_t memTypeIndex,
    3724  bool map,
    3725  void* pUserData,
    3726  VkBuffer dedicatedBuffer,
    3727  VkImage dedicatedImage,
    3728  VmaAllocation* pAllocation);
    3729 
    3730  // Frees the given allocation, which must have been created as a dedicated allocation.
    3731  void FreeDedicatedMemory(VmaAllocation allocation);
    3732 };
    3733 
    3734 ////////////////////////////////////////////////////////////////////////////////
    3735 // Memory allocation #2 after VmaAllocator_T definition
    3736 
    3737 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    3738 {
    3739  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    3740 }
    3741 
    3742 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    3743 {
    3744  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    3745 }
    3746 
    3747 template<typename T>
    3748 static T* VmaAllocate(VmaAllocator hAllocator)
    3749 {
    3750  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    3751 }
    3752 
    3753 template<typename T>
    3754 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    3755 {
    3756  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    3757 }
    3758 
    3759 template<typename T>
    3760 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    3761 {
    3762  if(ptr != VMA_NULL)
    3763  {
    3764  ptr->~T();
    3765  VmaFree(hAllocator, ptr);
    3766  }
    3767 }
    3768 
    3769 template<typename T>
    3770 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    3771 {
    3772  if(ptr != VMA_NULL)
    3773  {
    3774  for(size_t i = count; i--; )
    3775  ptr[i].~T();
    3776  VmaFree(hAllocator, ptr);
    3777  }
    3778 }
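/*
These helpers pair with placement-new construction through the same allocation
callbacks, e.g. (a sketch; T and hAllocator are assumed):

    T* p = new(VmaAllocate<T>(hAllocator)) T(); // allocate raw memory + construct
    vma_delete(hAllocator, p);                  // destruct + free

Note that vma_delete_array destroys elements in reverse order of construction,
matching the usual C++ rule for arrays.
*/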
    3779 
    3780 ////////////////////////////////////////////////////////////////////////////////
    3781 // VmaStringBuilder
    3782 
    3783 #if VMA_STATS_STRING_ENABLED
    3784 
    3785 class VmaStringBuilder
    3786 {
    3787 public:
    3788  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    3789  size_t GetLength() const { return m_Data.size(); }
    3790  const char* GetData() const { return m_Data.data(); }
    3791 
    3792  void Add(char ch) { m_Data.push_back(ch); }
    3793  void Add(const char* pStr);
    3794  void AddNewLine() { Add('\n'); }
    3795  void AddNumber(uint32_t num);
    3796  void AddNumber(uint64_t num);
    3797  void AddPointer(const void* ptr);
    3798 
    3799 private:
    3800  VmaVector< char, VmaStlAllocator<char> > m_Data;
    3801 };
    3802 
    3803 void VmaStringBuilder::Add(const char* pStr)
    3804 {
    3805  const size_t strLen = strlen(pStr);
    3806  if(strLen > 0)
    3807  {
    3808  const size_t oldCount = m_Data.size();
    3809  m_Data.resize(oldCount + strLen);
    3810  memcpy(m_Data.data() + oldCount, pStr, strLen);
    3811  }
    3812 }
    3813 
    3814 void VmaStringBuilder::AddNumber(uint32_t num)
    3815 {
    3816  char buf[11];
    3817  VmaUint32ToStr(buf, sizeof(buf), num);
    3818  Add(buf);
    3819 }
    3820 
    3821 void VmaStringBuilder::AddNumber(uint64_t num)
    3822 {
    3823  char buf[21];
    3824  VmaUint64ToStr(buf, sizeof(buf), num);
    3825  Add(buf);
    3826 }
    3827 
    3828 void VmaStringBuilder::AddPointer(const void* ptr)
    3829 {
    3830  char buf[21];
    3831  VmaPtrToStr(buf, sizeof(buf), ptr);
    3832  Add(buf);
    3833 }
    3834 
    3835 #endif // #if VMA_STATS_STRING_ENABLED
    3836 
    3837 ////////////////////////////////////////////////////////////////////////////////
    3838 // VmaJsonWriter
    3839 
    3840 #if VMA_STATS_STRING_ENABLED
    3841 
    3842 class VmaJsonWriter
    3843 {
    3844 public:
    3845  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    3846  ~VmaJsonWriter();
    3847 
    3848  void BeginObject(bool singleLine = false);
    3849  void EndObject();
    3850 
    3851  void BeginArray(bool singleLine = false);
    3852  void EndArray();
    3853 
    3854  void WriteString(const char* pStr);
    3855  void BeginString(const char* pStr = VMA_NULL);
    3856  void ContinueString(const char* pStr);
    3857  void ContinueString(uint32_t n);
    3858  void ContinueString(uint64_t n);
    3859  void EndString(const char* pStr = VMA_NULL);
    3860 
    3861  void WriteNumber(uint32_t n);
    3862  void WriteNumber(uint64_t n);
    3863  void WriteBool(bool b);
    3864  void WriteNull();
    3865 
    3866 private:
    3867  static const char* const INDENT;
    3868 
    3869  enum COLLECTION_TYPE
    3870  {
    3871  COLLECTION_TYPE_OBJECT,
    3872  COLLECTION_TYPE_ARRAY,
    3873  };
    3874  struct StackItem
    3875  {
    3876  COLLECTION_TYPE type;
    3877  uint32_t valueCount;
    3878  bool singleLineMode;
    3879  };
    3880 
    3881  VmaStringBuilder& m_SB;
    3882  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    3883  bool m_InsideString;
    3884 
    3885  void BeginValue(bool isString);
    3886  void WriteIndent(bool oneLess = false);
    3887 };
    3888 
    3889 const char* const VmaJsonWriter::INDENT = " ";
    3890 
    3891 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    3892  m_SB(sb),
    3893  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    3894  m_InsideString(false)
    3895 {
    3896 }
    3897 
    3898 VmaJsonWriter::~VmaJsonWriter()
    3899 {
    3900  VMA_ASSERT(!m_InsideString);
    3901  VMA_ASSERT(m_Stack.empty());
    3902 }
    3903 
    3904 void VmaJsonWriter::BeginObject(bool singleLine)
    3905 {
    3906  VMA_ASSERT(!m_InsideString);
    3907 
    3908  BeginValue(false);
    3909  m_SB.Add('{');
    3910 
    3911  StackItem item;
    3912  item.type = COLLECTION_TYPE_OBJECT;
    3913  item.valueCount = 0;
    3914  item.singleLineMode = singleLine;
    3915  m_Stack.push_back(item);
    3916 }
    3917 
    3918 void VmaJsonWriter::EndObject()
    3919 {
    3920  VMA_ASSERT(!m_InsideString);
    3921 
    3922  WriteIndent(true);
    3923  m_SB.Add('}');
    3924 
    3925  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    3926  m_Stack.pop_back();
    3927 }
    3928 
    3929 void VmaJsonWriter::BeginArray(bool singleLine)
    3930 {
    3931  VMA_ASSERT(!m_InsideString);
    3932 
    3933  BeginValue(false);
    3934  m_SB.Add('[');
    3935 
    3936  StackItem item;
    3937  item.type = COLLECTION_TYPE_ARRAY;
    3938  item.valueCount = 0;
    3939  item.singleLineMode = singleLine;
    3940  m_Stack.push_back(item);
    3941 }
    3942 
    3943 void VmaJsonWriter::EndArray()
    3944 {
    3945  VMA_ASSERT(!m_InsideString);
    3946 
    3947  WriteIndent(true);
    3948  m_SB.Add(']');
    3949 
    3950  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    3951  m_Stack.pop_back();
    3952 }
    3953 
    3954 void VmaJsonWriter::WriteString(const char* pStr)
    3955 {
    3956  BeginString(pStr);
    3957  EndString();
    3958 }
    3959 
    3960 void VmaJsonWriter::BeginString(const char* pStr)
    3961 {
    3962  VMA_ASSERT(!m_InsideString);
    3963 
    3964  BeginValue(true);
    3965  m_SB.Add('"');
    3966  m_InsideString = true;
    3967  if(pStr != VMA_NULL && pStr[0] != '\0')
    3968  {
    3969  ContinueString(pStr);
    3970  }
    3971 }
    3972 
    3973 void VmaJsonWriter::ContinueString(const char* pStr)
    3974 {
    3975  VMA_ASSERT(m_InsideString);
    3976 
    3977  const size_t strLen = strlen(pStr);
    3978  for(size_t i = 0; i < strLen; ++i)
    3979  {
    3980  char ch = pStr[i];
    3981  if(ch == '\\')
    3982  {
    3983  m_SB.Add("\\\\");
    3984  }
    3985  else if(ch == '"')
    3986  {
    3987  m_SB.Add("\\\"");
    3988  }
    3989  else if(ch >= 32)
    3990  {
    3991  m_SB.Add(ch);
    3992  }
    3993  else switch(ch)
    3994  {
    3995  case '\n':
    3996  m_SB.Add("\\n");
    3997  break;
    3998  case '\r':
    3999  m_SB.Add("\\r");
    4000  break;
    4001  case '\t':
    4002  m_SB.Add("\\t");
    4003  break;
    4004  default:
    4005  VMA_ASSERT(0 && "Character not currently supported.");
    4006  break;
    4007  }
    4008  }
    4009 }
    4010 
    4011 void VmaJsonWriter::ContinueString(uint32_t n)
    4012 {
    4013  VMA_ASSERT(m_InsideString);
    4014  m_SB.AddNumber(n);
    4015 }
    4016 
    4017 void VmaJsonWriter::ContinueString(uint64_t n)
    4018 {
    4019  VMA_ASSERT(m_InsideString);
    4020  m_SB.AddNumber(n);
    4021 }
    4022 
    4023 void VmaJsonWriter::EndString(const char* pStr)
    4024 {
    4025  VMA_ASSERT(m_InsideString);
    4026  if(pStr != VMA_NULL && pStr[0] != '\0')
    4027  {
    4028  ContinueString(pStr);
    4029  }
    4030  m_SB.Add('"');
    4031  m_InsideString = false;
    4032 }
    4033 
    4034 void VmaJsonWriter::WriteNumber(uint32_t n)
    4035 {
    4036  VMA_ASSERT(!m_InsideString);
    4037  BeginValue(false);
    4038  m_SB.AddNumber(n);
    4039 }
    4040 
    4041 void VmaJsonWriter::WriteNumber(uint64_t n)
    4042 {
    4043  VMA_ASSERT(!m_InsideString);
    4044  BeginValue(false);
    4045  m_SB.AddNumber(n);
    4046 }
    4047 
    4048 void VmaJsonWriter::WriteBool(bool b)
    4049 {
    4050  VMA_ASSERT(!m_InsideString);
    4051  BeginValue(false);
    4052  m_SB.Add(b ? "true" : "false");
    4053 }
    4054 
    4055 void VmaJsonWriter::WriteNull()
    4056 {
    4057  VMA_ASSERT(!m_InsideString);
    4058  BeginValue(false);
    4059  m_SB.Add("null");
    4060 }
    4061 
    4062 void VmaJsonWriter::BeginValue(bool isString)
    4063 {
    4064  if(!m_Stack.empty())
    4065  {
    4066  StackItem& currItem = m_Stack.back();
    4067  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4068  currItem.valueCount % 2 == 0)
    4069  {
    4070  VMA_ASSERT(isString);
    4071  }
    4072 
    4073  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4074  currItem.valueCount % 2 != 0)
    4075  {
    4076  m_SB.Add(": ");
    4077  }
    4078  else if(currItem.valueCount > 0)
    4079  {
    4080  m_SB.Add(", ");
    4081  WriteIndent();
    4082  }
    4083  else
    4084  {
    4085  WriteIndent();
    4086  }
    4087  ++currItem.valueCount;
    4088  }
    4089 }
    4090 
    4091 void VmaJsonWriter::WriteIndent(bool oneLess)
    4092 {
    4093  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    4094  {
    4095  m_SB.AddNewLine();
    4096 
    4097  size_t count = m_Stack.size();
    4098  if(count > 0 && oneLess)
    4099  {
    4100  --count;
    4101  }
    4102  for(size_t i = 0; i < count; ++i)
    4103  {
    4104  m_SB.Add(INDENT);
    4105  }
    4106  }
    4107 }
    4108 
    4109 #endif // #if VMA_STATS_STRING_ENABLED
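/*
Sketch of how VmaJsonWriter is driven (illustrative; sb is assumed to be a
VmaStringBuilder built for the same allocator):

    VmaJsonWriter json(pAllocationCallbacks, sb);
    json.BeginObject();
    json.WriteString("Blocks"); // key - must be a string (asserted in BeginValue)
    json.WriteNumber(1u);       // value
    json.EndObject();

This produces, with the writer's one-space INDENT:

    {
     "Blocks": 1
    }
*/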
    4110 
    4111 ////////////////////////////////////////////////////////////////////////////////
    4112 
    4113 VkDeviceSize VmaAllocation_T::GetOffset() const
    4114 {
    4115  switch(m_Type)
    4116  {
    4117  case ALLOCATION_TYPE_BLOCK:
    4118  return m_BlockAllocation.m_Offset;
    4119  case ALLOCATION_TYPE_DEDICATED:
    4120  return 0;
    4121  default:
    4122  VMA_ASSERT(0);
    4123  return 0;
    4124  }
    4125 }
    4126 
    4127 VkDeviceMemory VmaAllocation_T::GetMemory() const
    4128 {
    4129  switch(m_Type)
    4130  {
    4131  case ALLOCATION_TYPE_BLOCK:
    4132  return m_BlockAllocation.m_Block->m_hMemory;
    4133  case ALLOCATION_TYPE_DEDICATED:
    4134  return m_DedicatedAllocation.m_hMemory;
    4135  default:
    4136  VMA_ASSERT(0);
    4137  return VK_NULL_HANDLE;
    4138  }
    4139 }
    4140 
    4141 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    4142 {
    4143  switch(m_Type)
    4144  {
    4145  case ALLOCATION_TYPE_BLOCK:
    4146  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
    4147  case ALLOCATION_TYPE_DEDICATED:
    4148  return m_DedicatedAllocation.m_MemoryTypeIndex;
    4149  default:
    4150  VMA_ASSERT(0);
    4151  return UINT32_MAX;
    4152  }
    4153 }
    4154 
    4155 void* VmaAllocation_T::GetMappedData() const
    4156 {
    4157  switch(m_Type)
    4158  {
    4159  case ALLOCATION_TYPE_BLOCK:
    4160  if(m_MapCount != 0)
    4161  {
    4162  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
    4163  VMA_ASSERT(pBlockData != VMA_NULL);
    4164  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    4165  }
    4166  else
    4167  {
    4168  return VMA_NULL;
    4169  }
    4170  break;
    4171  case ALLOCATION_TYPE_DEDICATED:
    4172  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    4173  return m_DedicatedAllocation.m_pMappedData;
    4174  default:
    4175  VMA_ASSERT(0);
    4176  return VMA_NULL;
    4177  }
    4178 }
    4179 
    4180 bool VmaAllocation_T::CanBecomeLost() const
    4181 {
    4182  switch(m_Type)
    4183  {
    4184  case ALLOCATION_TYPE_BLOCK:
    4185  return m_BlockAllocation.m_CanBecomeLost;
    4186  case ALLOCATION_TYPE_DEDICATED:
    4187  return false;
    4188  default:
    4189  VMA_ASSERT(0);
    4190  return false;
    4191  }
    4192 }
    4193 
    4194 VmaPool VmaAllocation_T::GetPool() const
    4195 {
    4196  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    4197  return m_BlockAllocation.m_hPool;
    4198 }
    4199 
    4200 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4201 {
    4202  VMA_ASSERT(CanBecomeLost());
    4203 
    4204  /*
    4205  Warning: This is a carefully designed algorithm.
    4206  Do not modify unless you really know what you're doing :)
    4207  */
    4208  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    4209  for(;;)
    4210  {
    4211  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    4212  {
    4213  VMA_ASSERT(0);
    4214  return false;
    4215  }
    4216  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    4217  {
    4218  return false;
    4219  }
    4220  else // Last use time earlier than current time.
    4221  {
    4222  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    4223  {
    4224  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    4225  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    4226  return true;
    4227  }
    4228  }
    4229  }
    4230 }
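/*
Worked example of the condition above: with frameInUseCount = 2 and an
allocation last used in frame 10, frames 11 and 12 still satisfy
localLastUseFrameIndex + frameInUseCount >= currentFrameIndex, so MakeLost()
returns false; from frame 13 on, the compare-exchange can retire the
allocation. The loop retries only when another thread changed
LastUseFrameIndex between the read and the exchange.
*/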
    4231 
    4232 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    4233 {
    4234  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    4235 
    4236  if(m_MapCount != 0)
    4237  {
    4238  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    4239  {
    4240  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    4241  *ppData = m_DedicatedAllocation.m_pMappedData;
    4242  ++m_MapCount;
    4243  return VK_SUCCESS;
    4244  }
    4245  else
    4246  {
    4247  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    4248  return VK_ERROR_MEMORY_MAP_FAILED;
    4249  }
    4250  }
    4251  else
    4252  {
    4253  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    4254  hAllocator->m_hDevice,
    4255  m_DedicatedAllocation.m_hMemory,
    4256  0, // offset
    4257  VK_WHOLE_SIZE,
    4258  0, // flags
    4259  ppData);
    4260  if(result == VK_SUCCESS)
    4261  {
    4262  m_DedicatedAllocation.m_pMappedData = *ppData;
    4263  m_MapCount = 1;
    4264  }
    4265  return result;
    4266  }
    4267 }
    4268 
    4269 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    4270 {
    4271  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    4272 
    4273  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    4274  {
    4275  --m_MapCount;
    4276  if(m_MapCount == 0)
    4277  {
    4278  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    4279  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    4280  hAllocator->m_hDevice,
    4281  m_DedicatedAllocation.m_hMemory);
    4282  }
    4283  }
    4284  else
    4285  {
    4286  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    4287  }
    4288 }
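/*
Sketch of the m_MapCount encoding used above (MAP_COUNT_FLAG_PERSISTENT_MAP is
defined earlier in VmaAllocation_T; the concrete value 0x80 is an assumption
for illustration):

    created with VMA_ALLOCATION_CREATE_MAPPED_BIT: m_MapCount = 0x80
    + vmaMapMemory():                              m_MapCount = 0x81
    + vmaUnmapMemory():                            m_MapCount = 0x80 (stays mapped)

Masking with ~MAP_COUNT_FLAG_PERSISTENT_MAP yields the plain reference count,
and the < 0x7F check keeps it from overflowing into the flag bit.
*/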
    4289 
    4290 #if VMA_STATS_STRING_ENABLED
    4291 
    4292 // These correspond to the values of enum VmaSuballocationType.
    4293 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    4294  "FREE",
    4295  "UNKNOWN",
    4296  "BUFFER",
    4297  "IMAGE_UNKNOWN",
    4298  "IMAGE_LINEAR",
    4299  "IMAGE_OPTIMAL",
    4300 };
    4301 
    4302 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    4303 {
    4304  json.BeginObject();
    4305 
    4306  json.WriteString("Blocks");
    4307  json.WriteNumber(stat.blockCount);
    4308 
    4309  json.WriteString("Allocations");
    4310  json.WriteNumber(stat.allocationCount);
    4311 
    4312  json.WriteString("UnusedRanges");
    4313  json.WriteNumber(stat.unusedRangeCount);
    4314 
    4315  json.WriteString("UsedBytes");
    4316  json.WriteNumber(stat.usedBytes);
    4317 
    4318  json.WriteString("UnusedBytes");
    4319  json.WriteNumber(stat.unusedBytes);
    4320 
    4321  if(stat.allocationCount > 1)
    4322  {
    4323  json.WriteString("AllocationSize");
    4324  json.BeginObject(true);
    4325  json.WriteString("Min");
    4326  json.WriteNumber(stat.allocationSizeMin);
    4327  json.WriteString("Avg");
    4328  json.WriteNumber(stat.allocationSizeAvg);
    4329  json.WriteString("Max");
    4330  json.WriteNumber(stat.allocationSizeMax);
    4331  json.EndObject();
    4332  }
    4333 
    4334  if(stat.unusedRangeCount > 1)
    4335  {
    4336  json.WriteString("UnusedRangeSize");
    4337  json.BeginObject(true);
    4338  json.WriteString("Min");
    4339  json.WriteNumber(stat.unusedRangeSizeMin);
    4340  json.WriteString("Avg");
    4341  json.WriteNumber(stat.unusedRangeSizeAvg);
    4342  json.WriteString("Max");
    4343  json.WriteNumber(stat.unusedRangeSizeMax);
    4344  json.EndObject();
    4345  }
    4346 
    4347  json.EndObject();
    4348 }
    4349 
    4350 #endif // #if VMA_STATS_STRING_ENABLED
    4351 
    4352 struct VmaSuballocationItemSizeLess
    4353 {
    4354  bool operator()(
    4355  const VmaSuballocationList::iterator lhs,
    4356  const VmaSuballocationList::iterator rhs) const
    4357  {
    4358  return lhs->size < rhs->size;
    4359  }
    4360  bool operator()(
    4361  const VmaSuballocationList::iterator lhs,
    4362  VkDeviceSize rhsSize) const
    4363  {
    4364  return lhs->size < rhsSize;
    4365  }
    4366 };
    4367 
    4368 ////////////////////////////////////////////////////////////////////////////////
    4369 // class VmaBlockMetadata
    4370 
    4371 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    4372  m_Size(0),
    4373  m_FreeCount(0),
    4374  m_SumFreeSize(0),
    4375  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    4376  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    4377 {
    4378 }
    4379 
    4380 VmaBlockMetadata::~VmaBlockMetadata()
    4381 {
    4382 }
    4383 
    4384 void VmaBlockMetadata::Init(VkDeviceSize size)
    4385 {
    4386  m_Size = size;
    4387  m_FreeCount = 1;
    4388  m_SumFreeSize = size;
    4389 
    4390  VmaSuballocation suballoc = {};
    4391  suballoc.offset = 0;
    4392  suballoc.size = size;
    4393  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4394  suballoc.hAllocation = VK_NULL_HANDLE;
    4395 
    4396  m_Suballocations.push_back(suballoc);
    4397  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    4398  --suballocItem;
    4399  m_FreeSuballocationsBySize.push_back(suballocItem);
    4400 }
    4401 
    4402 bool VmaBlockMetadata::Validate() const
    4403 {
    4404  if(m_Suballocations.empty())
    4405  {
    4406  return false;
    4407  }
    4408 
    4409  // Expected offset of new suballocation as calculated from previous ones.
    4410  VkDeviceSize calculatedOffset = 0;
    4411  // Expected number of free suballocations as calculated from traversing their list.
    4412  uint32_t calculatedFreeCount = 0;
    4413  // Expected sum size of free suballocations as calculated from traversing their list.
    4414  VkDeviceSize calculatedSumFreeSize = 0;
    4415  // Expected number of free suballocations that should be registered in
    4416  // m_FreeSuballocationsBySize calculated from traversing their list.
    4417  size_t freeSuballocationsToRegister = 0;
    4418  // True if previously visited suballocation was free.
    4419  bool prevFree = false;
    4420 
    4421  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4422  suballocItem != m_Suballocations.cend();
    4423  ++suballocItem)
    4424  {
    4425  const VmaSuballocation& subAlloc = *suballocItem;
    4426 
    4427  // Actual offset of this suballocation doesn't match expected one.
    4428  if(subAlloc.offset != calculatedOffset)
    4429  {
    4430  return false;
    4431  }
    4432 
    4433  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4434  // Two adjacent free suballocations are invalid. They should be merged.
    4435  if(prevFree && currFree)
    4436  {
    4437  return false;
    4438  }
    4439  prevFree = currFree;
    4440 
    4441  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
    4442  {
    4443  return false;
    4444  }
    4445 
    4446  if(currFree)
    4447  {
    4448  calculatedSumFreeSize += subAlloc.size;
    4449  ++calculatedFreeCount;
    4450  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4451  {
    4452  ++freeSuballocationsToRegister;
    4453  }
    4454  }
    4455 
    4456  calculatedOffset += subAlloc.size;
    4457  }
    4458 
    4459  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    4460  // match expected one.
    4461  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    4462  {
    4463  return false;
    4464  }
    4465 
    4466  VkDeviceSize lastSize = 0;
    4467  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    4468  {
    4469  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    4470 
    4471  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    4472  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    4473  {
    4474  return false;
    4475  }
    4476  // They must be sorted by size ascending.
    4477  if(suballocItem->size < lastSize)
    4478  {
    4479  return false;
    4480  }
    4481 
    4482  lastSize = suballocItem->size;
    4483  }
    4484 
    4485  // Check if totals match calculated values.
    4486  return
    4487  ValidateFreeSuballocationList() &&
    4488  (calculatedOffset == m_Size) &&
    4489  (calculatedSumFreeSize == m_SumFreeSize) &&
    4490  (calculatedFreeCount == m_FreeCount);
    4491 }
    4492 
    4493 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    4494 {
    4495  if(!m_FreeSuballocationsBySize.empty())
    4496  {
    4497  return m_FreeSuballocationsBySize.back()->size;
    4498  }
    4499  else
    4500  {
    4501  return 0;
    4502  }
    4503 }
    4504 
    4505 bool VmaBlockMetadata::IsEmpty() const
    4506 {
    4507  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    4508 }
    4509 
    4510 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    4511 {
    4512  outInfo.blockCount = 1;
    4513 
    4514  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4515  outInfo.allocationCount = rangeCount - m_FreeCount;
    4516  outInfo.unusedRangeCount = m_FreeCount;
    4517 
    4518  outInfo.unusedBytes = m_SumFreeSize;
    4519  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    4520 
    4521  outInfo.allocationSizeMin = UINT64_MAX;
    4522  outInfo.allocationSizeMax = 0;
    4523  outInfo.unusedRangeSizeMin = UINT64_MAX;
    4524  outInfo.unusedRangeSizeMax = 0;
    4525 
    4526  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4527  suballocItem != m_Suballocations.cend();
    4528  ++suballocItem)
    4529  {
    4530  const VmaSuballocation& suballoc = *suballocItem;
    4531  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    4532  {
    4533  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    4534  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    4535  }
    4536  else
    4537  {
    4538  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    4539  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    4540  }
    4541  }
    4542 }
    4543 
    4544 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    4545 {
    4546  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4547 
    4548  inoutStats.size += m_Size;
    4549  inoutStats.unusedSize += m_SumFreeSize;
    4550  inoutStats.allocationCount += rangeCount - m_FreeCount;
    4551  inoutStats.unusedRangeCount += m_FreeCount;
    4552  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    4553 }
    4554 
    4555 #if VMA_STATS_STRING_ENABLED
    4556 
    4557 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    4558 {
    4559  json.BeginObject();
    4560 
    4561  json.WriteString("TotalBytes");
    4562  json.WriteNumber(m_Size);
    4563 
    4564  json.WriteString("UnusedBytes");
    4565  json.WriteNumber(m_SumFreeSize);
    4566 
    4567  json.WriteString("Allocations");
    4568  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
    4569 
    4570  json.WriteString("UnusedRanges");
    4571  json.WriteNumber(m_FreeCount);
    4572 
    4573  json.WriteString("Suballocations");
    4574  json.BeginArray();
    4575  size_t i = 0;
    4576  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4577  suballocItem != m_Suballocations.cend();
    4578  ++suballocItem, ++i)
    4579  {
    4580  json.BeginObject(true);
    4581 
    4582  json.WriteString("Type");
    4583  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
    4584 
    4585  json.WriteString("Size");
    4586  json.WriteNumber(suballocItem->size);
    4587 
    4588  json.WriteString("Offset");
    4589  json.WriteNumber(suballocItem->offset);
    4590 
    4591  json.EndObject();
    4592  }
    4593  json.EndArray();
    4594 
    4595  json.EndObject();
    4596 }
    4597 
    4598 #endif // #if VMA_STATS_STRING_ENABLED
    4599 
    4600 /*
    4601 How many suitable free suballocations to analyze before choosing the best one.
    4602 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation will
    4603  be chosen.
    4604 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
    4605  suballocations will be analyzed and the best one will be chosen.
    4606 - Any other value is also acceptable.
    4607 */
    4608 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
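/*
The best-fit path below relies on VmaBinaryFindFirstNotLess (defined earlier in
this file), which behaves like std::lower_bound: given the size-sorted
m_FreeSuballocationsBySize, it finds the first free suballocation whose size is
not less than allocSize. A sketch of the equivalent logic:

    size_t lo = 0, hi = freeSuballocCount;
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(m_FreeSuballocationsBySize[mid]->size < allocSize)
            lo = mid + 1;
        else
            hi = mid;
    }
    // lo is the index of the first candidate to try with CheckAllocation().
*/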
    4609 
    4610 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
    4611 {
    4612  VMA_ASSERT(IsEmpty());
    4613  pAllocationRequest->offset = 0;
    4614  pAllocationRequest->sumFreeSize = m_SumFreeSize;
    4615  pAllocationRequest->sumItemSize = 0;
    4616  pAllocationRequest->item = m_Suballocations.begin();
    4617  pAllocationRequest->itemsToMakeLostCount = 0;
    4618 }
    4619 
    4620 bool VmaBlockMetadata::CreateAllocationRequest(
    4621  uint32_t currentFrameIndex,
    4622  uint32_t frameInUseCount,
    4623  VkDeviceSize bufferImageGranularity,
    4624  VkDeviceSize allocSize,
    4625  VkDeviceSize allocAlignment,
    4626  VmaSuballocationType allocType,
    4627  bool canMakeOtherLost,
    4628  VmaAllocationRequest* pAllocationRequest)
    4629 {
    4630  VMA_ASSERT(allocSize > 0);
    4631  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4632  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    4633  VMA_HEAVY_ASSERT(Validate());
    4634 
    4635  // There is not enough total free space in this block to fulfill the request: Early return.
    4636  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    4637  {
    4638  return false;
    4639  }
    4640 
    4641  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
    4642  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    4643  if(freeSuballocCount > 0)
    4644  {
    4645  if(VMA_BEST_FIT)
    4646  {
    4647  // Find first free suballocation with size not less than allocSize.
    4648  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    4649  m_FreeSuballocationsBySize.data(),
    4650  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    4651  allocSize,
    4652  VmaSuballocationItemSizeLess());
    4653  size_t index = it - m_FreeSuballocationsBySize.data();
    4654  for(; index < freeSuballocCount; ++index)
    4655  {
    4656  if(CheckAllocation(
    4657  currentFrameIndex,
    4658  frameInUseCount,
    4659  bufferImageGranularity,
    4660  allocSize,
    4661  allocAlignment,
    4662  allocType,
    4663  m_FreeSuballocationsBySize[index],
    4664  false, // canMakeOtherLost
    4665  &pAllocationRequest->offset,
    4666  &pAllocationRequest->itemsToMakeLostCount,
    4667  &pAllocationRequest->sumFreeSize,
    4668  &pAllocationRequest->sumItemSize))
    4669  {
    4670  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4671  return true;
    4672  }
    4673  }
    4674  }
    4675  else
    4676  {
    4677  // Search starting from the biggest suballocations.
    4678  for(size_t index = freeSuballocCount; index--; )
    4679  {
    4680  if(CheckAllocation(
    4681  currentFrameIndex,
    4682  frameInUseCount,
    4683  bufferImageGranularity,
    4684  allocSize,
    4685  allocAlignment,
    4686  allocType,
    4687  m_FreeSuballocationsBySize[index],
    4688  false, // canMakeOtherLost
    4689  &pAllocationRequest->offset,
    4690  &pAllocationRequest->itemsToMakeLostCount,
    4691  &pAllocationRequest->sumFreeSize,
    4692  &pAllocationRequest->sumItemSize))
    4693  {
    4694  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4695  return true;
    4696  }
    4697  }
    4698  }
    4699  }
    4700 
    4701  if(canMakeOtherLost)
    4702  {
    4703  // Brute-force algorithm. TODO: Come up with something better.
    4704 
    4705  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    4706  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    4707 
    4708  VmaAllocationRequest tmpAllocRequest = {};
    4709  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    4710  suballocIt != m_Suballocations.end();
    4711  ++suballocIt)
    4712  {
    4713  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    4714  suballocIt->hAllocation->CanBecomeLost())
    4715  {
    4716  if(CheckAllocation(
    4717  currentFrameIndex,
    4718  frameInUseCount,
    4719  bufferImageGranularity,
    4720  allocSize,
    4721  allocAlignment,
    4722  allocType,
    4723  suballocIt,
    4724  canMakeOtherLost,
    4725  &tmpAllocRequest.offset,
    4726  &tmpAllocRequest.itemsToMakeLostCount,
    4727  &tmpAllocRequest.sumFreeSize,
    4728  &tmpAllocRequest.sumItemSize))
    4729  {
    4730  tmpAllocRequest.item = suballocIt;
    4731 
    4732  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    4733  {
    4734  *pAllocationRequest = tmpAllocRequest;
    4735  }
    4736  }
    4737  }
    4738  }
    4739 
    4740  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    4741  {
    4742  return true;
    4743  }
    4744  }
    4745 
    4746  return false;
    4747 }
    4748 
    4749 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    4750  uint32_t currentFrameIndex,
    4751  uint32_t frameInUseCount,
    4752  VmaAllocationRequest* pAllocationRequest)
    4753 {
    4754  while(pAllocationRequest->itemsToMakeLostCount > 0)
    4755  {
    4756  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    4757  {
    4758  ++pAllocationRequest->item;
    4759  }
    4760  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4761  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    4762  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    4763  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4764  {
    4765  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    4766  --pAllocationRequest->itemsToMakeLostCount;
    4767  }
    4768  else
    4769  {
    4770  return false;
    4771  }
    4772  }
    4773 
    4774  VMA_HEAVY_ASSERT(Validate());
    4775  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4776  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    4777 
    4778  return true;
    4779 }
    4780 
    4781 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4782 {
    4783  uint32_t lostAllocationCount = 0;
    4784  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    4785  it != m_Suballocations.end();
    4786  ++it)
    4787  {
    4788  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    4789  it->hAllocation->CanBecomeLost() &&
    4790  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4791  {
    4792  it = FreeSuballocation(it);
    4793  ++lostAllocationCount;
    4794  }
    4795  }
    4796  return lostAllocationCount;
    4797 }
    4798 
    4799 void VmaBlockMetadata::Alloc(
    4800  const VmaAllocationRequest& request,
    4801  VmaSuballocationType type,
    4802  VkDeviceSize allocSize,
    4803  VmaAllocation hAllocation)
    4804 {
    4805  VMA_ASSERT(request.item != m_Suballocations.end());
    4806  VmaSuballocation& suballoc = *request.item;
    4807  // Given suballocation is a free block.
    4808  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4809  // Given offset is inside this suballocation.
    4810  VMA_ASSERT(request.offset >= suballoc.offset);
    4811  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    4812  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    4813  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    4814 
    4815  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    4816  // it to become used.
    4817  UnregisterFreeSuballocation(request.item);
    4818 
    4819  suballoc.offset = request.offset;
    4820  suballoc.size = allocSize;
    4821  suballoc.type = type;
    4822  suballoc.hAllocation = hAllocation;
    4823 
    4824  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    4825  if(paddingEnd)
    4826  {
    4827  VmaSuballocation paddingSuballoc = {};
    4828  paddingSuballoc.offset = request.offset + allocSize;
    4829  paddingSuballoc.size = paddingEnd;
    4830  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4831  VmaSuballocationList::iterator next = request.item;
    4832  ++next;
    4833  const VmaSuballocationList::iterator paddingEndItem =
    4834  m_Suballocations.insert(next, paddingSuballoc);
    4835  RegisterFreeSuballocation(paddingEndItem);
    4836  }
    4837 
    4838  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    4839  if(paddingBegin)
    4840  {
    4841  VmaSuballocation paddingSuballoc = {};
    4842  paddingSuballoc.offset = request.offset - paddingBegin;
    4843  paddingSuballoc.size = paddingBegin;
    4844  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4845  const VmaSuballocationList::iterator paddingBeginItem =
    4846  m_Suballocations.insert(request.item, paddingSuballoc);
    4847  RegisterFreeSuballocation(paddingBeginItem);
    4848  }
    4849 
    4850  // Update totals.
    4851  m_FreeCount = m_FreeCount - 1;
    4852  if(paddingBegin > 0)
    4853  {
    4854  ++m_FreeCount;
    4855  }
    4856  if(paddingEnd > 0)
    4857  {
    4858  ++m_FreeCount;
    4859  }
    4860  m_SumFreeSize -= allocSize;
    4861 }
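/*
Worked example of the split above: take a free suballocation at offset 100 with
size 200, and a request with offset 112 and allocSize 50 (values illustrative).
Then paddingBegin = 12 and paddingEnd = 138, so the single free range becomes:

    [100..112) free  (the paddingBegin item)
    [112..162) used  (the new allocation)
    [162..300) free  (the paddingEnd item)

m_FreeCount changes by -1 (the consumed range) +2 (both paddings), and
m_SumFreeSize drops by exactly allocSize = 50.
*/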
    4862 
    4863 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    4864 {
    4865  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    4866  suballocItem != m_Suballocations.end();
    4867  ++suballocItem)
    4868  {
    4869  VmaSuballocation& suballoc = *suballocItem;
    4870  if(suballoc.hAllocation == allocation)
    4871  {
    4872  FreeSuballocation(suballocItem);
    4873  VMA_HEAVY_ASSERT(Validate());
    4874  return;
    4875  }
    4876  }
    4877  VMA_ASSERT(0 && "Not found!");
    4878 }
    4879 
    4880 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    4881 {
    4882  VkDeviceSize lastSize = 0;
    4883  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    4884  {
    4885  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    4886 
    4887  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    4888  {
    4889  VMA_ASSERT(0);
    4890  return false;
    4891  }
    4892  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4893  {
    4894  VMA_ASSERT(0);
    4895  return false;
    4896  }
    4897  if(it->size < lastSize)
    4898  {
    4899  VMA_ASSERT(0);
    4900  return false;
    4901  }
    4902 
    4903  lastSize = it->size;
    4904  }
    4905  return true;
    4906 }
    4907 
    4908 bool VmaBlockMetadata::CheckAllocation(
    4909  uint32_t currentFrameIndex,
    4910  uint32_t frameInUseCount,
    4911  VkDeviceSize bufferImageGranularity,
    4912  VkDeviceSize allocSize,
    4913  VkDeviceSize allocAlignment,
    4914  VmaSuballocationType allocType,
    4915  VmaSuballocationList::const_iterator suballocItem,
    4916  bool canMakeOtherLost,
    4917  VkDeviceSize* pOffset,
    4918  size_t* itemsToMakeLostCount,
    4919  VkDeviceSize* pSumFreeSize,
    4920  VkDeviceSize* pSumItemSize) const
    4921 {
    4922  VMA_ASSERT(allocSize > 0);
    4923  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4924  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    4925  VMA_ASSERT(pOffset != VMA_NULL);
    4926 
    4927  *itemsToMakeLostCount = 0;
    4928  *pSumFreeSize = 0;
    4929  *pSumItemSize = 0;
    4930 
    4931  if(canMakeOtherLost)
    4932  {
    4933  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    4934  {
    4935  *pSumFreeSize = suballocItem->size;
    4936  }
    4937  else
    4938  {
    4939  if(suballocItem->hAllocation->CanBecomeLost() &&
    4940  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4941  {
    4942  ++*itemsToMakeLostCount;
    4943  *pSumItemSize = suballocItem->size;
    4944  }
    4945  else
    4946  {
    4947  return false;
    4948  }
    4949  }
    4950 
    4951  // Remaining size is too small for this request: Early return.
    4952  if(m_Size - suballocItem->offset < allocSize)
    4953  {
    4954  return false;
    4955  }
    4956 
    4957  // Start from offset equal to beginning of this suballocation.
    4958  *pOffset = suballocItem->offset;
    4959 
    4960  // Apply VMA_DEBUG_MARGIN at the beginning.
    4961  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    4962  {
    4963  *pOffset += VMA_DEBUG_MARGIN;
    4964  }
    4965 
    4966  // Apply alignment.
    4967  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    4968  *pOffset = VmaAlignUp(*pOffset, alignment);
    4969 
    4970  // Check previous suballocations for BufferImageGranularity conflicts.
    4971  // Make bigger alignment if necessary.
    4972  if(bufferImageGranularity > 1)
    4973  {
    4974  bool bufferImageGranularityConflict = false;
    4975  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    4976  while(prevSuballocItem != m_Suballocations.cbegin())
    4977  {
    4978  --prevSuballocItem;
    4979  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    4980  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    4981  {
    4982  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    4983  {
    4984  bufferImageGranularityConflict = true;
    4985  break;
    4986  }
    4987  }
    4988  else
    4989  // Already on previous page.
    4990  break;
    4991  }
    4992  if(bufferImageGranularityConflict)
    4993  {
    4994  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    4995  }
    4996  }
    4997 
    4998  // Now that we have final *pOffset, check if we are past suballocItem.
    4999  // If yes, return false - this function should be called for another suballocItem as starting point.
    5000  if(*pOffset >= suballocItem->offset + suballocItem->size)
    5001  {
    5002  return false;
    5003  }
    5004 
    5005  // Calculate padding at the beginning based on current offset.
    5006  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    5007 
    5008  // Calculate required margin at the end if this is not last suballocation.
    5009  VmaSuballocationList::const_iterator next = suballocItem;
    5010  ++next;
    5011  const VkDeviceSize requiredEndMargin =
    5012  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    5013 
    5014  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    5015  // Another early return check.
    5016  if(suballocItem->offset + totalSize > m_Size)
    5017  {
    5018  return false;
    5019  }
    5020 
    5021  // Advance lastSuballocItem until desired size is reached.
    5022  // Update itemsToMakeLostCount.
    5023  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    5024  if(totalSize > suballocItem->size)
    5025  {
    5026  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    5027  while(remainingSize > 0)
    5028  {
    5029  ++lastSuballocItem;
    5030  if(lastSuballocItem == m_Suballocations.cend())
    5031  {
    5032  return false;
    5033  }
    5034  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5035  {
    5036  *pSumFreeSize += lastSuballocItem->size;
    5037  }
    5038  else
    5039  {
    5040  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    5041  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    5042  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    5043  {
    5044  ++*itemsToMakeLostCount;
    5045  *pSumItemSize += lastSuballocItem->size;
    5046  }
    5047  else
    5048  {
    5049  return false;
    5050  }
    5051  }
    5052  remainingSize = (lastSuballocItem->size < remainingSize) ?
    5053  remainingSize - lastSuballocItem->size : 0;
    5054  }
    5055  }
    5056 
    5057  // Check next suballocations for BufferImageGranularity conflicts.
    5058  // If conflict exists, we must mark more allocations lost or fail.
    5059  if(bufferImageGranularity > 1)
    5060  {
    5061  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    5062  ++nextSuballocItem;
    5063  while(nextSuballocItem != m_Suballocations.cend())
    5064  {
    5065  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    5066  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    5067  {
    5068  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    5069  {
    5070  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    5071  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    5072  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    5073  {
    5074  ++*itemsToMakeLostCount;
    5075  }
    5076  else
    5077  {
    5078  return false;
    5079  }
    5080  }
    5081  }
    5082  else
    5083  {
    5084  // Already on next page.
    5085  break;
    5086  }
    5087  ++nextSuballocItem;
    5088  }
    5089  }
    5090  }
    5091  else
    5092  {
    5093  const VmaSuballocation& suballoc = *suballocItem;
    5094  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    5095 
    5096  *pSumFreeSize = suballoc.size;
    5097 
    5098  // Size of this suballocation is too small for this request: Early return.
    5099  if(suballoc.size < allocSize)
    5100  {
    5101  return false;
    5102  }
    5103 
    5104  // Start from offset equal to beginning of this suballocation.
    5105  *pOffset = suballoc.offset;
    5106 
    5107  // Apply VMA_DEBUG_MARGIN at the beginning.
    5108  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    5109  {
    5110  *pOffset += VMA_DEBUG_MARGIN;
    5111  }
    5112 
    5113  // Apply alignment.
    5114  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    5115  *pOffset = VmaAlignUp(*pOffset, alignment);
    5116 
    5117  // Check previous suballocations for BufferImageGranularity conflicts.
    5118  // Make bigger alignment if necessary.
    5119  if(bufferImageGranularity > 1)
    5120  {
    5121  bool bufferImageGranularityConflict = false;
    5122  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    5123  while(prevSuballocItem != m_Suballocations.cbegin())
    5124  {
    5125  --prevSuballocItem;
    5126  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    5127  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    5128  {
    5129  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    5130  {
    5131  bufferImageGranularityConflict = true;
    5132  break;
    5133  }
    5134  }
    5135  else
    5136  // Already on previous page.
    5137  break;
    5138  }
    5139  if(bufferImageGranularityConflict)
    5140  {
    5141  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    5142  }
    5143  }
    5144 
    5145  // Calculate padding at the beginning based on current offset.
    5146  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    5147 
    5148  // Calculate required margin at the end if this is not last suballocation.
    5149  VmaSuballocationList::const_iterator next = suballocItem;
    5150  ++next;
    5151  const VkDeviceSize requiredEndMargin =
    5152  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    5153 
    5154  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    5155  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    5156  {
    5157  return false;
    5158  }
    5159 
    5160  // Check next suballocations for BufferImageGranularity conflicts.
    5161  // If conflict exists, allocation cannot be made here.
    5162  if(bufferImageGranularity > 1)
    5163  {
    5164  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    5165  ++nextSuballocItem;
    5166  while(nextSuballocItem != m_Suballocations.cend())
    5167  {
    5168  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    5169  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    5170  {
    5171  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    5172  {
    5173  return false;
    5174  }
    5175  }
    5176  else
    5177  {
    5178  // Already on next page.
    5179  break;
    5180  }
    5181  ++nextSuballocItem;
    5182  }
    5183  }
    5184  }
    5185 
    5186  // All tests passed: Success. pOffset is already filled.
    5187  return true;
    5188 }
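    // [Illustrative note, not part of the source] Numeric example of the
    // BufferImageGranularity handling above, assuming granularity = 4096:
    // a linear buffer at offset 4000 with size 1000 ends inside page
    // [4096, 8192), so an optimal-tiling image candidate at offset 6000
    // conflicts and must be pushed to the next page:
    //     VmaBlocksOnSamePage(4000, 1000, 6000, 4096) -> true
    //     *pOffset = VmaAlignUp(6000, 4096);           // -> 8192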
    5189 
    5190 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    5191 {
    5192  VMA_ASSERT(item != m_Suballocations.end());
    5193  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5194 
    5195  VmaSuballocationList::iterator nextItem = item;
    5196  ++nextItem;
    5197  VMA_ASSERT(nextItem != m_Suballocations.end());
    5198  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    5199 
    5200  item->size += nextItem->size;
    5201  --m_FreeCount;
    5202  m_Suballocations.erase(nextItem);
    5203 }
    5204 
    5205 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    5206 {
    5207  // Change this suballocation to be marked as free.
    5208  VmaSuballocation& suballoc = *suballocItem;
    5209  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5210  suballoc.hAllocation = VK_NULL_HANDLE;
    5211 
    5212  // Update totals.
    5213  ++m_FreeCount;
    5214  m_SumFreeSize += suballoc.size;
    5215 
    5216  // Merge with previous and/or next suballocation if it's also free.
    5217  bool mergeWithNext = false;
    5218  bool mergeWithPrev = false;
    5219 
    5220  VmaSuballocationList::iterator nextItem = suballocItem;
    5221  ++nextItem;
    5222  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    5223  {
    5224  mergeWithNext = true;
    5225  }
    5226 
    5227  VmaSuballocationList::iterator prevItem = suballocItem;
    5228  if(suballocItem != m_Suballocations.begin())
    5229  {
    5230  --prevItem;
    5231  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5232  {
    5233  mergeWithPrev = true;
    5234  }
    5235  }
    5236 
    5237  if(mergeWithNext)
    5238  {
    5239  UnregisterFreeSuballocation(nextItem);
    5240  MergeFreeWithNext(suballocItem);
    5241  }
    5242 
    5243  if(mergeWithPrev)
    5244  {
    5245  UnregisterFreeSuballocation(prevItem);
    5246  MergeFreeWithNext(prevItem);
    5247  RegisterFreeSuballocation(prevItem);
    5248  return prevItem;
    5249  }
    5250  else
    5251  {
    5252  RegisterFreeSuballocation(suballocItem);
    5253  return suballocItem;
    5254  }
    5255 }
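    // [Illustrative note] Merge behavior of FreeSuballocation above, with
    // F = free range, A = the allocation being freed (sizes in bytes):
    //     [F 64][A 32][F 128]
    //     1. A is marked free                  -> [F 64][F 32][F 128]
    //     2. merge with next                   -> [F 64][F 160]
    //     3. merge into prev, register, return -> [F 224]
    // Both free neighbors are unregistered from m_FreeSuballocationsBySize
    // before merging; only the final coalesced range is re-registered.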
    5256 
    5257 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    5258 {
    5259  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5260  VMA_ASSERT(item->size > 0);
    5261 
    5262  // You may want to enable this validation at the beginning or at the end of
    5263  // this function, depending on what you want to check.
    5264  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5265 
    5266  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5267  {
    5268  if(m_FreeSuballocationsBySize.empty())
    5269  {
    5270  m_FreeSuballocationsBySize.push_back(item);
    5271  }
    5272  else
    5273  {
    5274  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    5275  }
    5276  }
    5277 
    5278  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5279 }
    5280 
    5281 
    5282 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    5283 {
    5284  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5285  VMA_ASSERT(item->size > 0);
    5286 
    5287  // You may want to enable this validation at the beginning or at the end of
    5288  // this function, depending on what you want to check.
    5289  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5290 
    5291  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5292  {
    5293  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5294  m_FreeSuballocationsBySize.data(),
    5295  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    5296  item,
    5297  VmaSuballocationItemSizeLess());
    5298  for(size_t index = it - m_FreeSuballocationsBySize.data();
    5299  index < m_FreeSuballocationsBySize.size();
    5300  ++index)
    5301  {
    5302  if(m_FreeSuballocationsBySize[index] == item)
    5303  {
    5304  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    5305  return;
    5306  }
    5307  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    5308  }
    5309  VMA_ASSERT(0 && "Not found.");
    5310  }
    5311 
    5312  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5313 }
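    // [Illustrative note] The removal above is a binary search followed by a
    // linear scan, because multiple free ranges can share one size:
    //     sizes in m_FreeSuballocationsBySize: [32, 64, 64, 64, 128]
    //     item->size == 64 -> VmaBinaryFindFirstNotLess lands on index 1,
    //     then indices 1..3 are compared by iterator identity until the
    //     exact entry is found and removed.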
    5314 
    5315 ////////////////////////////////////////////////////////////////////////////////
    5316 // class VmaDeviceMemoryMapping
    5317 
    5318 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
    5319  m_MapCount(0),
    5320  m_pMappedData(VMA_NULL)
    5321 {
    5322 }
    5323 
    5324 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
    5325 {
    5326  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5327 }
    5328 
    5329 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData)
    5330 {
    5331  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    5332  if(m_MapCount != 0)
    5333  {
    5334  ++m_MapCount;
    5335  VMA_ASSERT(m_pMappedData != VMA_NULL);
    5336  if(ppData != VMA_NULL)
    5337  {
    5338  *ppData = m_pMappedData;
    5339  }
    5340  return VK_SUCCESS;
    5341  }
    5342  else
    5343  {
    5344  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    5345  hAllocator->m_hDevice,
    5346  hMemory,
    5347  0, // offset
    5348  VK_WHOLE_SIZE,
    5349  0, // flags
    5350  &m_pMappedData);
    5351  if(result == VK_SUCCESS)
    5352  {
    5353  if(ppData != VMA_NULL)
    5354  {
    5355  *ppData = m_pMappedData;
    5356  }
    5357  m_MapCount = 1;
    5358  }
    5359  return result;
    5360  }
    5361 }
    5362 
    5363 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory)
    5364 {
    5365  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    5366  if(m_MapCount != 0)
    5367  {
    5368  if(--m_MapCount == 0)
    5369  {
    5370  m_pMappedData = VMA_NULL;
    5371  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
    5372  }
    5373  }
    5374  else
    5375  {
    5376  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    5377  }
    5378 }
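    // [Illustrative sketch, using the public API declared earlier in this
    // header; allocator, alloc, srcData, dataSize are placeholders] The
    // reference counting above is what allows map calls to nest:
    //     void* pData = VMA_NULL;
    //     VkResult res = vmaMapMemory(allocator, alloc, &pData);
    //     // m_MapCount 0 -> 1: the only real vkMapMemory call.
    //     // A second vmaMapMemory on the same block: 1 -> 2, no Vulkan call.
    //     memcpy(pData, srcData, (size_t)dataSize);
    //     vmaUnmapMemory(allocator, alloc);
    //     // m_MapCount 2 -> 1: the block stays mapped until the count hits 0.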
    5379 
    5380 ////////////////////////////////////////////////////////////////////////////////
    5381 // class VmaDeviceMemoryBlock
    5382 
    5383 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    5384  m_MemoryTypeIndex(UINT32_MAX),
    5385  m_hMemory(VK_NULL_HANDLE),
    5386  m_Metadata(hAllocator)
    5387 {
    5388 }
    5389 
    5390 void VmaDeviceMemoryBlock::Init(
    5391  uint32_t newMemoryTypeIndex,
    5392  VkDeviceMemory newMemory,
    5393  VkDeviceSize newSize)
    5394 {
    5395  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5396 
    5397  m_MemoryTypeIndex = newMemoryTypeIndex;
    5398  m_hMemory = newMemory;
    5399 
    5400  m_Metadata.Init(newSize);
    5401 }
    5402 
    5403 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    5404 {
    5405  // This is the most important assert in the entire library.
    5406  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    5407  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    5408 
    5409  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    5410  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    5411  m_hMemory = VK_NULL_HANDLE;
    5412 }
    5413 
    5414 bool VmaDeviceMemoryBlock::Validate() const
    5415 {
    5416  if((m_hMemory == VK_NULL_HANDLE) ||
    5417  (m_Metadata.GetSize() == 0))
    5418  {
    5419  return false;
    5420  }
    5421 
    5422  return m_Metadata.Validate();
    5423 }
    5424 
    5425 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, void** ppData)
    5426 {
    5427  return m_Mapping.Map(hAllocator, m_hMemory, ppData);
    5428 }
    5429 
    5430 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator)
    5431 {
    5432  m_Mapping.Unmap(hAllocator, m_hMemory);
    5433 }
    5434 
    5435 static void InitStatInfo(VmaStatInfo& outInfo)
    5436 {
    5437  memset(&outInfo, 0, sizeof(outInfo));
    5438  outInfo.allocationSizeMin = UINT64_MAX;
    5439  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5440 }
    5441 
    5442 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    5443 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    5444 {
    5445  inoutInfo.blockCount += srcInfo.blockCount;
    5446  inoutInfo.allocationCount += srcInfo.allocationCount;
    5447  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    5448  inoutInfo.usedBytes += srcInfo.usedBytes;
    5449  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    5450  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    5451  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    5452  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    5453  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    5454 }
    5455 
    5456 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    5457 {
    5458  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    5459  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    5460  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    5461  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    5462 }
    5463 
    5464 VmaPool_T::VmaPool_T(
    5465  VmaAllocator hAllocator,
    5466  const VmaPoolCreateInfo& createInfo) :
    5467  m_BlockVector(
    5468  hAllocator,
    5469  createInfo.memoryTypeIndex,
    5470  createInfo.blockSize,
    5471  createInfo.minBlockCount,
    5472  createInfo.maxBlockCount,
    5473  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    5474  createInfo.frameInUseCount,
    5475  true) // isCustomPool
    5476 {
    5477 }
    5478 
    5479 VmaPool_T::~VmaPool_T()
    5480 {
    5481 }
    5482 
    5483 #if VMA_STATS_STRING_ENABLED
    5484 
    5485 #endif // #if VMA_STATS_STRING_ENABLED
    5486 
    5487 VmaBlockVector::VmaBlockVector(
    5488  VmaAllocator hAllocator,
    5489  uint32_t memoryTypeIndex,
    5490  VkDeviceSize preferredBlockSize,
    5491  size_t minBlockCount,
    5492  size_t maxBlockCount,
    5493  VkDeviceSize bufferImageGranularity,
    5494  uint32_t frameInUseCount,
    5495  bool isCustomPool) :
    5496  m_hAllocator(hAllocator),
    5497  m_MemoryTypeIndex(memoryTypeIndex),
    5498  m_PreferredBlockSize(preferredBlockSize),
    5499  m_MinBlockCount(minBlockCount),
    5500  m_MaxBlockCount(maxBlockCount),
    5501  m_BufferImageGranularity(bufferImageGranularity),
    5502  m_FrameInUseCount(frameInUseCount),
    5503  m_IsCustomPool(isCustomPool),
    5504  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    5505  m_HasEmptyBlock(false),
    5506  m_pDefragmentator(VMA_NULL)
    5507 {
    5508 }
    5509 
    5510 VmaBlockVector::~VmaBlockVector()
    5511 {
    5512  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    5513 
    5514  for(size_t i = m_Blocks.size(); i--; )
    5515  {
    5516  m_Blocks[i]->Destroy(m_hAllocator);
    5517  vma_delete(m_hAllocator, m_Blocks[i]);
    5518  }
    5519 }
    5520 
    5521 VkResult VmaBlockVector::CreateMinBlocks()
    5522 {
    5523  for(size_t i = 0; i < m_MinBlockCount; ++i)
    5524  {
    5525  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    5526  if(res != VK_SUCCESS)
    5527  {
    5528  return res;
    5529  }
    5530  }
    5531  return VK_SUCCESS;
    5532 }
    5533 
    5534 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    5535 {
    5536  pStats->size = 0;
    5537  pStats->unusedSize = 0;
    5538  pStats->allocationCount = 0;
    5539  pStats->unusedRangeCount = 0;
    5540  pStats->unusedRangeSizeMax = 0;
    5541 
    5542  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5543 
    5544  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5545  {
    5546  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5547  VMA_ASSERT(pBlock);
    5548  VMA_HEAVY_ASSERT(pBlock->Validate());
    5549  pBlock->m_Metadata.AddPoolStats(*pStats);
    5550  }
    5551 }
    5552 
    5553 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    5554 
    5555 VkResult VmaBlockVector::Allocate(
    5556  VmaPool hCurrentPool,
    5557  uint32_t currentFrameIndex,
    5558  const VkMemoryRequirements& vkMemReq,
    5559  const VmaAllocationCreateInfo& createInfo,
    5560  VmaSuballocationType suballocType,
    5561  VmaAllocation* pAllocation)
    5562 {
    5563  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    5564 
    5565  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5566 
    5567  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    5568  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5569  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5570  {
    5571  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5572  VMA_ASSERT(pCurrBlock);
    5573  VmaAllocationRequest currRequest = {};
    5574  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5575  currentFrameIndex,
    5576  m_FrameInUseCount,
    5577  m_BufferImageGranularity,
    5578  vkMemReq.size,
    5579  vkMemReq.alignment,
    5580  suballocType,
    5581  false, // canMakeOtherLost
    5582  &currRequest))
    5583  {
    5584  // Allocate from pCurrBlock.
    5585  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    5586 
    5587  if(mapped)
    5588  {
    5589  VkResult res = pCurrBlock->Map(m_hAllocator, nullptr);
    5590  if(res != VK_SUCCESS)
    5591  {
    5592  return res;
    5593  }
    5594  }
    5595 
    5596  // We no longer have an empty block.
    5597  if(pCurrBlock->m_Metadata.IsEmpty())
    5598  {
    5599  m_HasEmptyBlock = false;
    5600  }
    5601 
    5602  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5603  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
    5604  (*pAllocation)->InitBlockAllocation(
    5605  hCurrentPool,
    5606  pCurrBlock,
    5607  currRequest.offset,
    5608  vkMemReq.alignment,
    5609  vkMemReq.size,
    5610  suballocType,
    5611  mapped,
    5612  createInfo.pUserData,
    5613  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5614  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
    5615  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    5616  return VK_SUCCESS;
    5617  }
    5618  }
    5619 
    5620  const bool canCreateNewBlock =
    5621  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    5622  (m_Blocks.size() < m_MaxBlockCount);
    5623 
    5624  // 2. Try to create new block.
    5625  if(canCreateNewBlock)
    5626  {
    5627  // 2.1. Start with full preferredBlockSize.
    5628  VkDeviceSize blockSize = m_PreferredBlockSize;
    5629  size_t newBlockIndex = 0;
    5630  VkResult res = CreateBlock(blockSize, &newBlockIndex);
    5631  // Allocating blocks of other sizes is allowed only in default pools.
    5632  // In custom pools block size is fixed.
    5633  if(res < 0 && m_IsCustomPool == false)
    5634  {
    5635  // 2.2. Try half the size.
    5636  blockSize /= 2;
    5637  if(blockSize >= vkMemReq.size)
    5638  {
    5639  res = CreateBlock(blockSize, &newBlockIndex);
    5640  if(res < 0)
    5641  {
    5642  // 2.3. Try quarter the size.
    5643  blockSize /= 2;
    5644  if(blockSize >= vkMemReq.size)
    5645  {
    5646  res = CreateBlock(blockSize, &newBlockIndex);
    5647  }
    5648  }
    5649  }
    5650  }
    5651  if(res == VK_SUCCESS)
    5652  {
    5653  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    5654  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
    5655 
    5656  if(mapped)
    5657  {
    5658  res = pBlock->Map(m_hAllocator, nullptr);
    5659  if(res != VK_SUCCESS)
    5660  {
    5661  return res;
    5662  }
    5663  }
    5664 
    5665  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
    5666  VmaAllocationRequest allocRequest;
    5667  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    5668  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5669  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
    5670  (*pAllocation)->InitBlockAllocation(
    5671  hCurrentPool,
    5672  pBlock,
    5673  allocRequest.offset,
    5674  vkMemReq.alignment,
    5675  vkMemReq.size,
    5676  suballocType,
    5677  mapped,
    5678  createInfo.pUserData,
    5679  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5680  VMA_HEAVY_ASSERT(pBlock->Validate());
    5681  VMA_DEBUG_LOG(" Created new allocation Size=%llu", blockSize);
    5682 
    5683  return VK_SUCCESS;
    5684  }
    5685  }
    5686 
    5687  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    5688 
    5689  // 3. Try to allocate from existing blocks with making other allocations lost.
    5690  if(canMakeOtherLost)
    5691  {
    5692  uint32_t tryIndex = 0;
    5693  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    5694  {
    5695  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    5696  VmaAllocationRequest bestRequest = {};
    5697  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    5698 
    5699  // 1. Search existing allocations.
    5700  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5701  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5702  {
    5703  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5704  VMA_ASSERT(pCurrBlock);
    5705  VmaAllocationRequest currRequest = {};
    5706  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5707  currentFrameIndex,
    5708  m_FrameInUseCount,
    5709  m_BufferImageGranularity,
    5710  vkMemReq.size,
    5711  vkMemReq.alignment,
    5712  suballocType,
    5713  canMakeOtherLost,
    5714  &currRequest))
    5715  {
    5716  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    5717  if(pBestRequestBlock == VMA_NULL ||
    5718  currRequestCost < bestRequestCost)
    5719  {
    5720  pBestRequestBlock = pCurrBlock;
    5721  bestRequest = currRequest;
    5722  bestRequestCost = currRequestCost;
    5723 
    5724  if(bestRequestCost == 0)
    5725  {
    5726  break;
    5727  }
    5728  }
    5729  }
    5730  }
    5731 
    5732  if(pBestRequestBlock != VMA_NULL)
    5733  {
    5734  if(mapped)
    5735  {
    5736  VkResult res = pBestRequestBlock->Map(m_hAllocator, nullptr);
    5737  if(res != VK_SUCCESS)
    5738  {
    5739  return res;
    5740  }
    5741  }
    5742 
    5743  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    5744  currentFrameIndex,
    5745  m_FrameInUseCount,
    5746  &bestRequest))
    5747  {
    5748  // We no longer have an empty block.
    5749  if(pBestRequestBlock->m_Metadata.IsEmpty())
    5750  {
    5751  m_HasEmptyBlock = false;
    5752  }
    5753  // Allocate from this pBlock.
    5754  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5755  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
    5756  (*pAllocation)->InitBlockAllocation(
    5757  hCurrentPool,
    5758  pBestRequestBlock,
    5759  bestRequest.offset,
    5760  vkMemReq.alignment,
    5761  vkMemReq.size,
    5762  suballocType,
    5763  mapped,
    5764  createInfo.pUserData,
    5765  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5766  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    5767  VMA_DEBUG_LOG(" Returned from existing allocation");
    5768  return VK_SUCCESS;
    5769  }
    5770  // else: Some allocations must have been touched while we are here. Next try.
    5771  }
    5772  else
    5773  {
    5774  // Could not find place in any of the blocks - break outer loop.
    5775  break;
    5776  }
    5777  }
    5778  /* Maximum number of tries exceeded - a very unlikely event when many other
    5779  threads are simultaneously touching allocations, making it impossible to mark
    5780  them as lost while we try to allocate. */
    5781  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    5782  {
    5783  return VK_ERROR_TOO_MANY_OBJECTS;
    5784  }
    5785  }
    5786 
    5787  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5788 }
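    // [Illustrative summary] Order of attempts in Allocate above:
    //     1. Fit into an existing block without disturbing anything; forward
    //        iteration prefers fuller blocks, keeping free space consolidated.
    //     2. Create a new block of m_PreferredBlockSize, falling back to 1/2
    //        and then 1/4 of that size (default pools only - custom pools
    //        have a fixed block size).
    //     3. If CAN_MAKE_OTHER_LOST is set, choose the block whose request
    //        costs the fewest bytes of allocations marked as lost, retrying
    //        up to VMA_ALLOCATION_TRY_COUNT (32) times before giving up with
    //        VK_ERROR_TOO_MANY_OBJECTS.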
    5789 
    5790 void VmaBlockVector::Free(
    5791  VmaAllocation hAllocation)
    5792 {
    5793  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    5794 
    5795  // Scope for lock.
    5796  {
    5797  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5798 
    5799  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    5800 
    5801  if(hAllocation->IsPersistentMap())
    5802  {
    5803  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory);
    5804  }
    5805 
    5806  pBlock->m_Metadata.Free(hAllocation);
    5807  VMA_HEAVY_ASSERT(pBlock->Validate());
    5808 
    5809  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    5810 
    5811  // pBlock became empty after this deallocation.
    5812  if(pBlock->m_Metadata.IsEmpty())
    5813  {
    5814  // Already has an empty block. We don't want two, so delete this one.
    5815  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    5816  {
    5817  pBlockToDelete = pBlock;
    5818  Remove(pBlock);
    5819  }
    5820  // We now have our first empty block.
    5821  else
    5822  {
    5823  m_HasEmptyBlock = true;
    5824  }
    5825  }
    5826  // pBlock didn't become empty, but we have another empty block - find and free that one.
    5827  // (This is optional; just a heuristic.)
    5828  else if(m_HasEmptyBlock)
    5829  {
    5830  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    5831  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    5832  {
    5833  pBlockToDelete = pLastBlock;
    5834  m_Blocks.pop_back();
    5835  m_HasEmptyBlock = false;
    5836  }
    5837  }
    5838 
    5839  IncrementallySortBlocks();
    5840  }
    5841 
    5842  // Destruction of a free block. Deferred until this point, outside of the
    5843  // mutex lock, for performance reasons.
    5844  if(pBlockToDelete != VMA_NULL)
    5845  {
    5846  VMA_DEBUG_LOG(" Deleted empty block");
    5847  pBlockToDelete->Destroy(m_hAllocator);
    5848  vma_delete(m_hAllocator, pBlockToDelete);
    5849  }
    5850 }
    5851 
    5852 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    5853 {
    5854  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5855  {
    5856  if(m_Blocks[blockIndex] == pBlock)
    5857  {
    5858  VmaVectorRemove(m_Blocks, blockIndex);
    5859  return;
    5860  }
    5861  }
    5862  VMA_ASSERT(0);
    5863 }
    5864 
    5865 void VmaBlockVector::IncrementallySortBlocks()
    5866 {
    5867  // Bubble sort only until first swap.
    5868  for(size_t i = 1; i < m_Blocks.size(); ++i)
    5869  {
    5870  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    5871  {
    5872  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    5873  return;
    5874  }
    5875  }
    5876 }
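    // [Illustrative note] A single call performs at most one swap:
    //     free sizes [10, 50, 20, 40] -> first out-of-order pair at i == 2
    //     -> [10, 20, 50, 40]
    // Calling this once per Free() amortizes sorting cost instead of paying
    // for a full sort on every deallocation.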
    5877 
    5878 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    5879 {
    5880  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    5881  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    5882  allocInfo.allocationSize = blockSize;
    5883  VkDeviceMemory mem = VK_NULL_HANDLE;
    5884  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    5885  if(res < 0)
    5886  {
    5887  return res;
    5888  }
    5889 
    5890  // New VkDeviceMemory successfully created.
    5891 
    5892  // Create a new block object for it.
    5893  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    5894  pBlock->Init(
    5895  m_MemoryTypeIndex,
    5896  mem,
    5897  allocInfo.allocationSize);
    5898 
    5899  m_Blocks.push_back(pBlock);
    5900  if(pNewBlockIndex != VMA_NULL)
    5901  {
    5902  *pNewBlockIndex = m_Blocks.size() - 1;
    5903  }
    5904 
    5905  return VK_SUCCESS;
    5906 }
    5907 
    5908 #if VMA_STATS_STRING_ENABLED
    5909 
    5910 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    5911 {
    5912  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5913 
    5914  json.BeginObject();
    5915 
    5916  if(m_IsCustomPool)
    5917  {
    5918  json.WriteString("MemoryTypeIndex");
    5919  json.WriteNumber(m_MemoryTypeIndex);
    5920 
    5921  json.WriteString("BlockSize");
    5922  json.WriteNumber(m_PreferredBlockSize);
    5923 
    5924  json.WriteString("BlockCount");
    5925  json.BeginObject(true);
    5926  if(m_MinBlockCount > 0)
    5927  {
    5928  json.WriteString("Min");
    5929  json.WriteNumber(m_MinBlockCount);
    5930  }
    5931  if(m_MaxBlockCount < SIZE_MAX)
    5932  {
    5933  json.WriteString("Max");
    5934  json.WriteNumber(m_MaxBlockCount);
    5935  }
    5936  json.WriteString("Cur");
    5937  json.WriteNumber(m_Blocks.size());
    5938  json.EndObject();
    5939 
    5940  if(m_FrameInUseCount > 0)
    5941  {
    5942  json.WriteString("FrameInUseCount");
    5943  json.WriteNumber(m_FrameInUseCount);
    5944  }
    5945  }
    5946  else
    5947  {
    5948  json.WriteString("PreferredBlockSize");
    5949  json.WriteNumber(m_PreferredBlockSize);
    5950  }
    5951 
    5952  json.WriteString("Blocks");
    5953  json.BeginArray();
    5954  for(size_t i = 0; i < m_Blocks.size(); ++i)
    5955  {
    5956  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    5957  }
    5958  json.EndArray();
    5959 
    5960  json.EndObject();
    5961 }
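    // [Illustrative note] Approximate shape of the JSON produced above for a
    // custom pool (values invented for the example):
    //     { "MemoryTypeIndex": 2, "BlockSize": 268435456,
    //       "BlockCount": { "Min": 1, "Cur": 3 }, "Blocks": [ ... ] }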
    5962 
    5963 #endif // #if VMA_STATS_STRING_ENABLED
    5964 
    5965 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    5966  VmaAllocator hAllocator,
    5967  uint32_t currentFrameIndex)
    5968 {
    5969  if(m_pDefragmentator == VMA_NULL)
    5970  {
    5971  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    5972  hAllocator,
    5973  this,
    5974  currentFrameIndex);
    5975  }
    5976 
    5977  return m_pDefragmentator;
    5978 }
    5979 
    5980 VkResult VmaBlockVector::Defragment(
    5981  VmaDefragmentationStats* pDefragmentationStats,
    5982  VkDeviceSize& maxBytesToMove,
    5983  uint32_t& maxAllocationsToMove)
    5984 {
    5985  if(m_pDefragmentator == VMA_NULL)
    5986  {
    5987  return VK_SUCCESS;
    5988  }
    5989 
    5990  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5991 
    5992  // Defragment.
    5993  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    5994 
    5995  // Accumulate statistics.
    5996  if(pDefragmentationStats != VMA_NULL)
    5997  {
    5998  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    5999  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    6000  pDefragmentationStats->bytesMoved += bytesMoved;
    6001  pDefragmentationStats->allocationsMoved += allocationsMoved;
    6002  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    6003  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    6004  maxBytesToMove -= bytesMoved;
    6005  maxAllocationsToMove -= allocationsMoved;
    6006  }
    6007 
    6008  // Free empty blocks.
    6009  m_HasEmptyBlock = false;
    6010  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    6011  {
    6012  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    6013  if(pBlock->m_Metadata.IsEmpty())
    6014  {
    6015  if(m_Blocks.size() > m_MinBlockCount)
    6016  {
    6017  if(pDefragmentationStats != VMA_NULL)
    6018  {
    6019  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    6020  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
    6021  }
    6022 
    6023  VmaVectorRemove(m_Blocks, blockIndex);
    6024  pBlock->Destroy(m_hAllocator);
    6025  vma_delete(m_hAllocator, pBlock);
    6026  }
    6027  else
    6028  {
    6029  m_HasEmptyBlock = true;
    6030  }
    6031  }
    6032  }
    6033 
    6034  return result;
    6035 }
    6036 
    6037 void VmaBlockVector::DestroyDefragmentator()
    6038 {
    6039  if(m_pDefragmentator != VMA_NULL)
    6040  {
    6041  vma_delete(m_hAllocator, m_pDefragmentator);
    6042  m_pDefragmentator = VMA_NULL;
    6043  }
    6044 }
    6045 
    6046 void VmaBlockVector::MakePoolAllocationsLost(
    6047  uint32_t currentFrameIndex,
    6048  size_t* pLostAllocationCount)
    6049 {
    6050  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6051 
    6052  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6053  {
    6054  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6055  VMA_ASSERT(pBlock);
    6056  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    6057  }
    6058 }
    6059 
    6060 void VmaBlockVector::AddStats(VmaStats* pStats)
    6061 {
    6062  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    6063  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    6064 
    6065  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6066 
    6067  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6068  {
    6069  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6070  VMA_ASSERT(pBlock);
    6071  VMA_HEAVY_ASSERT(pBlock->Validate());
    6072  VmaStatInfo allocationStatInfo;
    6073  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    6074  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6075  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6076  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6077  }
    6078 }
    6079 
    6080 ////////////////////////////////////////////////////////////////////////////////
    6081 // VmaDefragmentator members definition
    6082 
    6083 VmaDefragmentator::VmaDefragmentator(
    6084  VmaAllocator hAllocator,
    6085  VmaBlockVector* pBlockVector,
    6086  uint32_t currentFrameIndex) :
    6087  m_hAllocator(hAllocator),
    6088  m_pBlockVector(pBlockVector),
    6089  m_CurrentFrameIndex(currentFrameIndex),
    6090  m_BytesMoved(0),
    6091  m_AllocationsMoved(0),
    6092  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    6093  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    6094 {
    6095 }
    6096 
    6097 VmaDefragmentator::~VmaDefragmentator()
    6098 {
    6099  for(size_t i = m_Blocks.size(); i--; )
    6100  {
    6101  vma_delete(m_hAllocator, m_Blocks[i]);
    6102  }
    6103 }
    6104 
    6105 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    6106 {
    6107  AllocationInfo allocInfo;
    6108  allocInfo.m_hAllocation = hAlloc;
    6109  allocInfo.m_pChanged = pChanged;
    6110  m_Allocations.push_back(allocInfo);
    6111 }
    6112 
    6113 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    6114 {
    6115  // It has already been mapped for defragmentation.
    6116  if(m_pMappedDataForDefragmentation)
    6117  {
    6118  *ppMappedData = m_pMappedDataForDefragmentation;
    6119  return VK_SUCCESS;
    6120  }
    6121 
    6122  // The block is already mapped, e.g. persistently.
    6123  if(m_pBlock->m_Mapping.GetMappedData())
    6124  {
    6125  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
    6126  return VK_SUCCESS;
    6127  }
    6128 
    6129  // Map on first usage.
    6130  VkResult res = m_pBlock->Map(hAllocator, &m_pMappedDataForDefragmentation);
    6131  *ppMappedData = m_pMappedDataForDefragmentation;
    6132  return res;
    6133 }
    6134 
    6135 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    6136 {
    6137  if(m_pMappedDataForDefragmentation != VMA_NULL)
    6138  {
    6139  m_pBlock->Unmap(hAllocator);
    6140  }
    6141 }
    6142 
    6143 VkResult VmaDefragmentator::DefragmentRound(
    6144  VkDeviceSize maxBytesToMove,
    6145  uint32_t maxAllocationsToMove)
    6146 {
    6147  if(m_Blocks.empty())
    6148  {
    6149  return VK_SUCCESS;
    6150  }
    6151 
    6152  size_t srcBlockIndex = m_Blocks.size() - 1;
    6153  size_t srcAllocIndex = SIZE_MAX;
    6154  for(;;)
    6155  {
    6156  // 1. Find next allocation to move.
    6157  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    6158  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    6159  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    6160  {
    6161  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    6162  {
    6163  // Finished: no more allocations to process.
    6164  if(srcBlockIndex == 0)
    6165  {
    6166  return VK_SUCCESS;
    6167  }
    6168  else
    6169  {
    6170  --srcBlockIndex;
    6171  srcAllocIndex = SIZE_MAX;
    6172  }
    6173  }
    6174  else
    6175  {
    6176  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    6177  }
    6178  }
    6179 
    6180  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    6181  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    6182 
    6183  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    6184  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    6185  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    6186  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    6187 
    6188  // 2. Try to find new place for this allocation in preceding or current block.
    6189  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    6190  {
    6191  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    6192  VmaAllocationRequest dstAllocRequest;
    6193  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
    6194  m_CurrentFrameIndex,
    6195  m_pBlockVector->GetFrameInUseCount(),
    6196  m_pBlockVector->GetBufferImageGranularity(),
    6197  size,
    6198  alignment,
    6199  suballocType,
    6200  false, // canMakeOtherLost
    6201  &dstAllocRequest) &&
    6202  MoveMakesSense(
    6203  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    6204  {
    6205  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    6206 
    6207  // Reached limit on number of allocations or bytes to move.
    6208  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    6209  (m_BytesMoved + size > maxBytesToMove))
    6210  {
    6211  return VK_INCOMPLETE;
    6212  }
    6213 
    6214  void* pDstMappedData = VMA_NULL;
    6215  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    6216  if(res != VK_SUCCESS)
    6217  {
    6218  return res;
    6219  }
    6220 
    6221  void* pSrcMappedData = VMA_NULL;
    6222  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    6223  if(res != VK_SUCCESS)
    6224  {
    6225  return res;
    6226  }
    6227 
    6228  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    6229  memcpy(
    6230  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    6231  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    6232  static_cast<size_t>(size));
    6233 
    6234  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
    6235  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
    6236 
    6237  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    6238 
    6239  if(allocInfo.m_pChanged != VMA_NULL)
    6240  {
    6241  *allocInfo.m_pChanged = VK_TRUE;
    6242  }
    6243 
    6244  ++m_AllocationsMoved;
    6245  m_BytesMoved += size;
    6246 
    6247  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    6248 
    6249  break;
    6250  }
    6251  }
    6252 
    6253  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
    6254 
    6255  if(srcAllocIndex > 0)
    6256  {
    6257  --srcAllocIndex;
    6258  }
    6259  else
    6260  {
    6261  if(srcBlockIndex > 0)
    6262  {
    6263  --srcBlockIndex;
    6264  srcAllocIndex = SIZE_MAX;
    6265  }
    6266  else
    6267  {
    6268  return VK_SUCCESS;
    6269  }
    6270  }
    6271  }
    6272 }
    6273 
    6274 VkResult VmaDefragmentator::Defragment(
    6275  VkDeviceSize maxBytesToMove,
    6276  uint32_t maxAllocationsToMove)
    6277 {
    6278  if(m_Allocations.empty())
    6279  {
    6280  return VK_SUCCESS;
    6281  }
    6282 
    6283  // Create block info for each block.
    6284  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    6285  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6286  {
    6287  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    6288  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    6289  m_Blocks.push_back(pBlockInfo);
    6290  }
    6291 
    6292  // Sort them by m_pBlock pointer value.
    6293  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    6294 
    6295  // Move allocation infos from m_Allocations to the m_Allocations list of the m_Blocks entry that owns each of them.
    6296  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
    6297  {
    6298  AllocationInfo& allocInfo = m_Allocations[allocIndex];
    6299  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was lost.
    6300  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6301  {
    6302  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    6303  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    6304  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    6305  {
    6306  (*it)->m_Allocations.push_back(allocInfo);
    6307  }
    6308  else
    6309  {
    6310  VMA_ASSERT(0);
    6311  }
    6312  }
    6313  }
    6314  m_Allocations.clear();
    6315 
    6316  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6317  {
    6318  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    6319  pBlockInfo->CalcHasNonMovableAllocations();
    6320  pBlockInfo->SortAllocationsBySizeDescecnding();
    6321  }
    6322 
    6323  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    6324  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    6325 
    6326  // Execute defragmentation rounds (the main part).
    6327  VkResult result = VK_SUCCESS;
    6328  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    6329  {
    6330  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    6331  }
    6332 
    6333  // Unmap blocks that were mapped for defragmentation.
    6334  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6335  {
    6336  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    6337  }
    6338 
    6339  return result;
    6340 }
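    // [Illustrative summary] Pipeline of Defragment above: build one BlockInfo
    // per block, bucket the registered allocations into their owning blocks,
    // sort blocks from most "destination" to most "source", run up to two
    // DefragmentRound passes bounded by maxBytesToMove/maxAllocationsToMove,
    // then unmap every block that was mapped only for defragmentation.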
    6341 
    6342 bool VmaDefragmentator::MoveMakesSense(
    6343  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6344  size_t srcBlockIndex, VkDeviceSize srcOffset)
    6345 {
    6346  if(dstBlockIndex < srcBlockIndex)
    6347  {
    6348  return true;
    6349  }
    6350  if(dstBlockIndex > srcBlockIndex)
    6351  {
    6352  return false;
    6353  }
    6354  if(dstOffset < srcOffset)
    6355  {
    6356  return true;
    6357  }
    6358  return false;
    6359 }
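    // [Illustrative note] MoveMakesSense above compares (blockIndex, offset)
    // pairs lexicographically, so data only ever migrates "left":
    //     dst (0, 4096) vs src (2, 0)    -> true  (earlier block)
    //     dst (1, 0)    vs src (1, 8192) -> true  (same block, lower offset)
    //     dst (1, 8192) vs src (1, 0)    -> false (would move right)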
    6360 
    6361 ////////////////////////////////////////////////////////////////////////////////
    6362 // VmaAllocator_T
    6363 
    6364 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    6365  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    6366  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    6367  m_PhysicalDevice(pCreateInfo->physicalDevice),
    6368  m_hDevice(pCreateInfo->device),
    6369  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    6370  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    6371  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    6372  m_PreferredLargeHeapBlockSize(0),
    6373  m_PreferredSmallHeapBlockSize(0),
    6374  m_CurrentFrameIndex(0),
    6375  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
    6376 {
    6377  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    6378 
    6379  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    6380  memset(&m_MemProps, 0, sizeof(m_MemProps));
    6381  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    6382 
    6383  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    6384  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    6385 
    6386  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6387  {
    6388  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    6389  }
    6390 
    6391  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    6392  {
    6393  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    6394  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    6395  }
    6396 
    6397  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    6398 
    6399  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    6400  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    6401 
    6402  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    6403  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    6404  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
    6405  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
    6406 
    6407  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    6408  {
    6409  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    6410  {
    6411  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    6412  if(limit != VK_WHOLE_SIZE)
    6413  {
    6414  m_HeapSizeLimit[heapIndex] = limit;
    6415  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    6416  {
    6417  m_MemProps.memoryHeaps[heapIndex].size = limit;
    6418  }
    6419  }
    6420  }
    6421  }
    6422 
    6423  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6424  {
    6425  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    6426 
    6427  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    6428  this,
    6429  memTypeIndex,
    6430  preferredBlockSize,
    6431  0,
    6432  SIZE_MAX,
    6433  GetBufferImageGranularity(),
    6434  pCreateInfo->frameInUseCount,
    6435  false); // isCustomPool
    6436  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
    6437  // because minBlockCount is 0.
    6438  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    6439  }
    6440 }
    6441 
    6442 VmaAllocator_T::~VmaAllocator_T()
    6443 {
    6444  VMA_ASSERT(m_Pools.empty());
    6445 
    6446  for(size_t i = GetMemoryTypeCount(); i--; )
    6447  {
    6448  vma_delete(this, m_pDedicatedAllocations[i]);
    6449  vma_delete(this, m_pBlockVectors[i]);
    6450  }
    6451 }
    6452 
    6453 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    6454 {
    6455 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6456  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    6457  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    6458  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    6459  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    6460  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    6461  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    6462  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    6463  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    6464  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    6465  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    6466  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    6467  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    6468  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    6469  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    6470  // Ignoring vkGetBufferMemoryRequirements2KHR.
    6471  // Ignoring vkGetImageMemoryRequirements2KHR.
    6472 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6473 
    6474 #define VMA_COPY_IF_NOT_NULL(funcName) \
    6475  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    6476 
    6477  if(pVulkanFunctions != VMA_NULL)
    6478  {
    6479  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    6480  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    6481  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    6482  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    6483  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    6484  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    6485  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    6486  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    6487  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    6488  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    6489  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    6490  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    6491  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    6492  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    6493  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    6494  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    6495  }
    6496 
    6497 #undef VMA_COPY_IF_NOT_NULL
    6498 
    6499  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    6500  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    6501  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    6502  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    6503  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    6504  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    6505  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    6506  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    6507  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    6508  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    6509  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    6510  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    6511  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    6512  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    6513  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    6514  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    6515  if(m_UseKhrDedicatedAllocation)
    6516  {
    6517  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    6518  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    6519  }
    6520 }
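    // [Illustrative sketch, assuming VMA_STATIC_VULKAN_FUNCTIONS is defined
    // to 0; "device" is a placeholder VkDevice] One way a user can satisfy
    // the asserts above by fetching entry points at runtime:
    //     VmaVulkanFunctions funcs = {};
    //     funcs.vkMapMemory =
    //         (PFN_vkMapMemory)vkGetDeviceProcAddr(device, "vkMapMemory");
    //     // ...device-level members likewise; the vkGetPhysicalDevice*
    //     // members come from vkGetInstanceProcAddr instead...
    //     VmaAllocatorCreateInfo allocatorInfo = {};
    //     allocatorInfo.pVulkanFunctions = &funcs;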
    6521 
    6522 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    6523 {
    6524  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6525  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    6526  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
    6527  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
    6528 }
    6529 
    6530 VkResult VmaAllocator_T::AllocateMemoryOfType(
    6531  const VkMemoryRequirements& vkMemReq,
    6532  bool dedicatedAllocation,
    6533  VkBuffer dedicatedBuffer,
    6534  VkImage dedicatedImage,
    6535  const VmaAllocationCreateInfo& createInfo,
    6536  uint32_t memTypeIndex,
    6537  VmaSuballocationType suballocType,
    6538  VmaAllocation* pAllocation)
    6539 {
    6540  VMA_ASSERT(pAllocation != VMA_NULL);
    6541  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    6542 
    6543  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    6544 
    6545  // If memory type is not HOST_VISIBLE, disable MAPPED.
    6546  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    6547  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    6548  {
    6549  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    6550  }
    6551 
    6552  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    6553  VMA_ASSERT(blockVector);
    6554 
    6555  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    6556  bool preferDedicatedMemory =
    6557  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    6558  dedicatedAllocation ||
    6559  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    6560  vkMemReq.size > preferredBlockSize / 2;
    6561 
    6562  if(preferDedicatedMemory &&
    6563  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    6564  finalCreateInfo.pool == VK_NULL_HANDLE)
    6565  {
    6566  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    6567  }
    6568 
    6569  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    6570  {
    6571  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6572  {
    6573  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6574  }
    6575  else
    6576  {
    6577  return AllocateDedicatedMemory(
    6578  vkMemReq.size,
    6579  suballocType,
    6580  memTypeIndex,
    6581  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    6582  finalCreateInfo.pUserData,
    6583  dedicatedBuffer,
    6584  dedicatedImage,
    6585  pAllocation);
    6586  }
    6587  }
    6588  else
    6589  {
    6590  VkResult res = blockVector->Allocate(
    6591  VK_NULL_HANDLE, // hCurrentPool
    6592  m_CurrentFrameIndex.load(),
    6593  vkMemReq,
    6594  finalCreateInfo,
    6595  suballocType,
    6596  pAllocation);
    6597  if(res == VK_SUCCESS)
    6598  {
    6599  return res;
    6600  }
    6601 
    6602  // Block allocation failed. Try dedicated memory.
    6603  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6604  {
    6605  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6606  }
    6607  else
    6608  {
    6609  res = AllocateDedicatedMemory(
    6610  vkMemReq.size,
    6611  suballocType,
    6612  memTypeIndex,
    6613  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    6614  finalCreateInfo.pUserData,
    6615  dedicatedBuffer,
    6616  dedicatedImage,
    6617  pAllocation);
    6618  if(res == VK_SUCCESS)
    6619  {
    6620  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
    6621  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    6622  return VK_SUCCESS;
    6623  }
    6624  else
    6625  {
    6626  // Everything failed: Return error code.
    6627  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6628  return res;
    6629  }
    6630  }
    6631  }
    6632 }
    6633 
    6634 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    6635  VkDeviceSize size,
    6636  VmaSuballocationType suballocType,
    6637  uint32_t memTypeIndex,
    6638  bool map,
    6639  void* pUserData,
    6640  VkBuffer dedicatedBuffer,
    6641  VkImage dedicatedImage,
    6642  VmaAllocation* pAllocation)
    6643 {
    6644  VMA_ASSERT(pAllocation);
    6645 
    6646  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    6647  allocInfo.memoryTypeIndex = memTypeIndex;
    6648  allocInfo.allocationSize = size;
    6649 
    6650  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    6651  if(m_UseKhrDedicatedAllocation)
    6652  {
    6653  if(dedicatedBuffer != VK_NULL_HANDLE)
    6654  {
    6655  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    6656  dedicatedAllocInfo.buffer = dedicatedBuffer;
    6657  allocInfo.pNext = &dedicatedAllocInfo;
    6658  }
    6659  else if(dedicatedImage != VK_NULL_HANDLE)
    6660  {
    6661  dedicatedAllocInfo.image = dedicatedImage;
    6662  allocInfo.pNext = &dedicatedAllocInfo;
    6663  }
    6664  }
    6665 
    6666  // Allocate VkDeviceMemory.
    6667  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    6668  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    6669  if(res < 0)
    6670  {
    6671  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6672  return res;
    6673  }
    6674 
    6675  void* pMappedData = nullptr;
    6676  if(map)
    6677  {
    6678  res = (*m_VulkanFunctions.vkMapMemory)(
    6679  m_hDevice,
    6680  hMemory,
    6681  0,
    6682  VK_WHOLE_SIZE,
    6683  0,
    6684  &pMappedData);
    6685  if(res < 0)
    6686  {
    6687  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    6688  FreeVulkanMemory(memTypeIndex, size, hMemory);
    6689  return res;
    6690  }
    6691  }
    6692 
    6693  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
    6694  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size, pUserData);
    6695 
    6696  // Register it in m_pDedicatedAllocations.
    6697  {
    6698  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6699  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    6700  VMA_ASSERT(pDedicatedAllocations);
    6701  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    6702  }
    6703 
    6704  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    6705 
    6706  return VK_SUCCESS;
    6707 }
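// Example (caller-side sketch): requesting a dedicated, persistently mapped
// allocation through the public API. `allocator` and `bufCreateInfo` are
// assumed to be set up by the caller.
//
//    VmaAllocationCreateInfo allocCreateInfo = {};
//    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT |
//        VMA_ALLOCATION_CREATE_MAPPED_BIT;
//    VkBuffer buf;
//    VmaAllocation alloc;
//    VmaAllocationInfo allocInfo;
//    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//        &buf, &alloc, &allocInfo);
//    // On success, allocInfo.pMappedData points to the mapped memory.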
    6708 
    6709 void VmaAllocator_T::GetBufferMemoryRequirements(
    6710  VkBuffer hBuffer,
    6711  VkMemoryRequirements& memReq,
    6712  bool& requiresDedicatedAllocation,
    6713  bool& prefersDedicatedAllocation) const
    6714 {
    6715  if(m_UseKhrDedicatedAllocation)
    6716  {
    6717  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6718  memReqInfo.buffer = hBuffer;
    6719 
    6720  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6721 
    6722  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6723  memReq2.pNext = &memDedicatedReq;
    6724 
    6725  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6726 
    6727  memReq = memReq2.memoryRequirements;
    6728  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6729  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6730  }
    6731  else
    6732  {
    6733  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    6734  requiresDedicatedAllocation = false;
    6735  prefersDedicatedAllocation = false;
    6736  }
    6737 }
    6738 
    6739 void VmaAllocator_T::GetImageMemoryRequirements(
    6740  VkImage hImage,
    6741  VkMemoryRequirements& memReq,
    6742  bool& requiresDedicatedAllocation,
    6743  bool& prefersDedicatedAllocation) const
    6744 {
    6745  if(m_UseKhrDedicatedAllocation)
    6746  {
    6747  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6748  memReqInfo.image = hImage;
    6749 
    6750  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6751 
    6752  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6753  memReq2.pNext = &memDedicatedReq;
    6754 
    6755  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6756 
    6757  memReq = memReq2.memoryRequirements;
    6758  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6759  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6760  }
    6761  else
    6762  {
    6763  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    6764  requiresDedicatedAllocation = false;
    6765  prefersDedicatedAllocation = false;
    6766  }
    6767 }
    6768 
    6769 VkResult VmaAllocator_T::AllocateMemory(
    6770  const VkMemoryRequirements& vkMemReq,
    6771  bool requiresDedicatedAllocation,
    6772  bool prefersDedicatedAllocation,
    6773  VkBuffer dedicatedBuffer,
    6774  VkImage dedicatedImage,
    6775  const VmaAllocationCreateInfo& createInfo,
    6776  VmaSuballocationType suballocType,
    6777  VmaAllocation* pAllocation)
    6778 {
    6779  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    6780  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6781  {
    6782  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    6783  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6784  }
    6785  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    6786  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    6787  {
    6788  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    6789  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6790  }
    6791  if(requiresDedicatedAllocation)
    6792  {
    6793  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6794  {
    6795  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    6796  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6797  }
    6798  if(createInfo.pool != VK_NULL_HANDLE)
    6799  {
    6800  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    6801  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6802  }
    6803  }
    6804  if((createInfo.pool != VK_NULL_HANDLE) &&
    6805  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    6806  {
    6807  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    6808  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6809  }
    6810 
    6811  if(createInfo.pool != VK_NULL_HANDLE)
    6812  {
    6813  return createInfo.pool->m_BlockVector.Allocate(
    6814  createInfo.pool,
    6815  m_CurrentFrameIndex.load(),
    6816  vkMemReq,
    6817  createInfo,
    6818  suballocType,
    6819  pAllocation);
    6820  }
    6821  else
    6822  {
    6823  // Bit mask of Vulkan memory types acceptable for this allocation.
    6824  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    6825  uint32_t memTypeIndex = UINT32_MAX;
    6826  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6827  if(res == VK_SUCCESS)
    6828  {
    6829  res = AllocateMemoryOfType(
    6830  vkMemReq,
    6831  requiresDedicatedAllocation || prefersDedicatedAllocation,
    6832  dedicatedBuffer,
    6833  dedicatedImage,
    6834  createInfo,
    6835  memTypeIndex,
    6836  suballocType,
    6837  pAllocation);
    6838  // Succeeded on first try.
    6839  if(res == VK_SUCCESS)
    6840  {
    6841  return res;
    6842  }
    6843  // Allocation from this memory type failed. Try other compatible memory types.
    6844  else
    6845  {
    6846  for(;;)
    6847  {
    6848  // Remove old memTypeIndex from list of possibilities.
    6849  memoryTypeBits &= ~(1u << memTypeIndex);
    6850  // Find alternative memTypeIndex.
    6851  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6852  if(res == VK_SUCCESS)
    6853  {
    6854  res = AllocateMemoryOfType(
    6855  vkMemReq,
    6856  requiresDedicatedAllocation || prefersDedicatedAllocation,
    6857  dedicatedBuffer,
    6858  dedicatedImage,
    6859  createInfo,
    6860  memTypeIndex,
    6861  suballocType,
    6862  pAllocation);
    6863  // Allocation from this alternative memory type succeeded.
    6864  if(res == VK_SUCCESS)
    6865  {
    6866  return res;
    6867  }
    6868  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    6869  }
    6870  // No other matching memory type index could be found.
    6871  else
    6872  {
    6873  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    6874  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6875  }
    6876  }
    6877  }
    6878  }
    6879  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    6880  else
    6881  return res;
    6882  }
    6883 }
    6884 
    6885 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    6886 {
    6887  VMA_ASSERT(allocation);
    6888 
    6889  if(allocation->CanBecomeLost() == false ||
    6890  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6891  {
    6892  switch(allocation->GetType())
    6893  {
    6894  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    6895  {
    6896  VmaBlockVector* pBlockVector = VMA_NULL;
    6897  VmaPool hPool = allocation->GetPool();
    6898  if(hPool != VK_NULL_HANDLE)
    6899  {
    6900  pBlockVector = &hPool->m_BlockVector;
    6901  }
    6902  else
    6903  {
    6904  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    6905  pBlockVector = m_pBlockVectors[memTypeIndex];
    6906  }
    6907  pBlockVector->Free(allocation);
    6908  }
    6909  break;
    6910  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    6911  FreeDedicatedMemory(allocation);
    6912  break;
    6913  default:
    6914  VMA_ASSERT(0);
    6915  }
    6916  }
    6917 
    6918  vma_delete(this, allocation);
    6919 }
    6920 
    6921 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    6922 {
    6923  // Initialize.
    6924  InitStatInfo(pStats->total);
    6925  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    6926  InitStatInfo(pStats->memoryType[i]);
    6927  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6928  InitStatInfo(pStats->memoryHeap[i]);
    6929 
    6930  // Process default pools.
    6931  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6932  {
    6933  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6934  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    6935  VMA_ASSERT(pBlockVector);
    6936  pBlockVector->AddStats(pStats);
    6937  }
    6938 
    6939  // Process custom pools.
    6940  {
    6941  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6942  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6943  {
    6944  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    6945  }
    6946  }
    6947 
    6948  // Process dedicated allocations.
    6949  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6950  {
    6951  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6952  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6953  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    6954  VMA_ASSERT(pDedicatedAllocVector);
    6955  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    6956  {
    6957  VmaStatInfo allocationStatInfo;
    6958  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    6959  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6960  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6961  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6962  }
    6963  }
    6964 
    6965  // Postprocess.
    6966  VmaPostprocessCalcStatInfo(pStats->total);
    6967  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    6968  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    6969  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    6970  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    6971 }
    6972 
    6973 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    6974 
    6975 VkResult VmaAllocator_T::Defragment(
    6976  VmaAllocation* pAllocations,
    6977  size_t allocationCount,
    6978  VkBool32* pAllocationsChanged,
    6979  const VmaDefragmentationInfo* pDefragmentationInfo,
    6980  VmaDefragmentationStats* pDefragmentationStats)
    6981 {
    6982  if(pAllocationsChanged != VMA_NULL)
    6983  {
    6984  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
    6985  }
    6986  if(pDefragmentationStats != VMA_NULL)
    6987  {
    6988  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    6989  }
    6990 
    6991  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    6992 
    6993  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    6994 
    6995  const size_t poolCount = m_Pools.size();
    6996 
    6997  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    6998  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    6999  {
    7000  VmaAllocation hAlloc = pAllocations[allocIndex];
    7001  VMA_ASSERT(hAlloc);
    7002  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    7003  // DedicatedAlloc cannot be defragmented.
    7004  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    7005  // Only HOST_VISIBLE memory types can be defragmented.
    7006  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    7007  // Lost allocation cannot be defragmented.
    7008  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    7009  {
    7010  VmaBlockVector* pAllocBlockVector = nullptr;
    7011 
    7012  const VmaPool hAllocPool = hAlloc->GetPool();
    7013  // This allocation belongs to custom pool.
    7014  if(hAllocPool != VK_NULL_HANDLE)
    7015  {
    7016  pAllocBlockVector = &hAllocPool->GetBlockVector();
    7017  }
    7018  // This allocation belongs to general pool.
    7019  else
    7020  {
    7021  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    7022  }
    7023 
    7024  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    7025 
    7026  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    7027  &pAllocationsChanged[allocIndex] : VMA_NULL;
    7028  pDefragmentator->AddAllocation(hAlloc, pChanged);
    7029  }
    7030  }
    7031 
    7032  VkResult result = VK_SUCCESS;
    7033 
    7034  // ======== Main processing.
    7035 
    7036  VkDeviceSize maxBytesToMove = UINT64_MAX; // SIZE_MAX would truncate on 32-bit targets.
    7037  uint32_t maxAllocationsToMove = UINT32_MAX;
    7038  if(pDefragmentationInfo != VMA_NULL)
    7039  {
    7040  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    7041  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    7042  }
    7043 
    7044  // Process standard memory.
    7045  for(uint32_t memTypeIndex = 0;
    7046  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    7047  ++memTypeIndex)
    7048  {
    7049  // Only HOST_VISIBLE memory types can be defragmented.
    7050  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7051  {
    7052  result = m_pBlockVectors[memTypeIndex]->Defragment(
    7053  pDefragmentationStats,
    7054  maxBytesToMove,
    7055  maxAllocationsToMove);
    7056  }
    7057  }
    7058 
    7059  // Process custom pools.
    7060  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    7061  {
    7062  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    7063  pDefragmentationStats,
    7064  maxBytesToMove,
    7065  maxAllocationsToMove);
    7066  }
    7067 
    7068  // ======== Destroy defragmentators.
    7069 
    7070  // Process custom pools.
    7071  for(size_t poolIndex = poolCount; poolIndex--; )
    7072  {
    7073  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    7074  }
    7075 
    7076  // Process standard memory.
    7077  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    7078  {
    7079  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7080  {
    7081  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    7082  }
    7083  }
    7084 
    7085  return result;
    7086 }
    7087 
    7088 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    7089 {
    7090  if(hAllocation->CanBecomeLost())
    7091  {
    7092  /*
    7093  Warning: This is a carefully designed algorithm.
    7094  Do not modify unless you really know what you're doing :)
    7095  */
    7096  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    7097  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    7098  for(;;)
    7099  {
    7100  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7101  {
    7102  pAllocationInfo->memoryType = UINT32_MAX;
    7103  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    7104  pAllocationInfo->offset = 0;
    7105  pAllocationInfo->size = hAllocation->GetSize();
    7106  pAllocationInfo->pMappedData = VMA_NULL;
    7107  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7108  return;
    7109  }
    7110  else if(localLastUseFrameIndex == localCurrFrameIndex)
    7111  {
    7112  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7113  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7114  pAllocationInfo->offset = hAllocation->GetOffset();
    7115  pAllocationInfo->size = hAllocation->GetSize();
    7116  pAllocationInfo->pMappedData = VMA_NULL;
    7117  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7118  return;
    7119  }
    7120  else // Last use time earlier than current time.
    7121  {
    7122  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    7123  {
    7124  localLastUseFrameIndex = localCurrFrameIndex;
    7125  }
    7126  }
    7127  }
    7128  }
    7129  else
    7130  {
    7131  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7132  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7133  pAllocationInfo->offset = hAllocation->GetOffset();
    7134  pAllocationInfo->size = hAllocation->GetSize();
    7135  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    7136  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7137  }
    7138 }
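// Example (caller-side sketch): detecting a lost allocation from the values
// filled above. `allocator` and `alloc` are assumed; the allocation was
// created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
//
//    VmaAllocationInfo info;
//    vmaGetAllocationInfo(allocator, alloc, &info);
//    if(info.deviceMemory == VK_NULL_HANDLE)
//    {
//        // Allocation is lost - recreate the resource before next use.
//    }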
    7139 
    7140 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    7141 {
    7142  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    7143 
    7144  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    7145 
    7146  if(newCreateInfo.maxBlockCount == 0)
    7147  {
    7148  newCreateInfo.maxBlockCount = SIZE_MAX;
    7149  }
    7150  if(newCreateInfo.blockSize == 0)
    7151  {
    7152  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    7153  }
    7154 
    7155  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    7156 
    7157  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    7158  if(res != VK_SUCCESS)
    7159  {
    7160  vma_delete(this, *pPool);
    7161  *pPool = VMA_NULL;
    7162  return res;
    7163  }
    7164 
    7165  // Add to m_Pools.
    7166  {
    7167  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7168  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    7169  }
    7170 
    7171  return VK_SUCCESS;
    7172 }
    7173 
    7174 void VmaAllocator_T::DestroyPool(VmaPool pool)
    7175 {
    7176  // Remove from m_Pools.
    7177  {
    7178  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7179  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    7180  VMA_ASSERT(success && "Pool not found in Allocator.");
    7181  }
    7182 
    7183  vma_delete(this, pool);
    7184 }
    7185 
    7186 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    7187 {
    7188  pool->m_BlockVector.GetPoolStats(pPoolStats);
    7189 }
    7190 
    7191 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    7192 {
    7193  m_CurrentFrameIndex.store(frameIndex);
    7194 }
    7195 
    7196 void VmaAllocator_T::MakePoolAllocationsLost(
    7197  VmaPool hPool,
    7198  size_t* pLostAllocationCount)
    7199 {
    7200  hPool->m_BlockVector.MakePoolAllocationsLost(
    7201  m_CurrentFrameIndex.load(),
    7202  pLostAllocationCount);
    7203 }
    7204 
    7205 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    7206 {
    7207  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
    7208  (*pAllocation)->InitLost();
    7209 }
    7210 
    7211 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    7212 {
    7213  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    7214 
    7215  VkResult res;
    7216  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7217  {
    7218  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7219  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    7220  {
    7221  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7222  if(res == VK_SUCCESS)
    7223  {
    7224  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    7225  }
    7226  }
    7227  else
    7228  {
    7229  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7230  }
    7231  }
    7232  else
    7233  {
    7234  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7235  }
    7236 
    7237  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    7238  {
    7239  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    7240  }
    7241 
    7242  return res;
    7243 }
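// Example (caller-side sketch, assuming the create-info member
// VmaAllocatorCreateInfo::pHeapSizeLimit, which fills m_HeapSizeLimit):
// VK_WHOLE_SIZE means "no limit". Capping heap 0 at 1 GB could look like this:
//
//    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
//    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
//        heapSizeLimit[i] = VK_WHOLE_SIZE;
//    heapSizeLimit[0] = 1024ull * 1024 * 1024;
//    allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;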
    7244 
    7245 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    7246 {
    7247  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    7248  {
    7249  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    7250  }
    7251 
    7252  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    7253 
    7254  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    7255  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7256  {
    7257  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7258  m_HeapSizeLimit[heapIndex] += size;
    7259  }
    7260 }
    7261 
    7262 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    7263 {
    7264  if(hAllocation->CanBecomeLost())
    7265  {
    7266  return VK_ERROR_MEMORY_MAP_FAILED;
    7267  }
    7268 
    7269  switch(hAllocation->GetType())
    7270  {
    7271  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    7272  {
    7273  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    7274  char *pBytes = nullptr;
    7275  VkResult res = pBlock->Map(this, (void**)&pBytes);
    7276  if(res == VK_SUCCESS)
    7277  {
    7278  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    7279  }
    7280  return res;
    7281  }
    7282  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    7283  return hAllocation->DedicatedAllocMap(this, ppData);
    7284  default:
    7285  VMA_ASSERT(0);
    7286  return VK_ERROR_MEMORY_MAP_FAILED;
    7287  }
    7288 }
    7289 
    7290 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    7291 {
    7292  switch(hAllocation->GetType())
    7293  {
    7294  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    7295  {
    7296  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    7297  pBlock->Unmap(this);
    7298  }
    7299  break;
    7300  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    7301  hAllocation->DedicatedAllocUnmap(this);
    7302  break;
    7303  default:
    7304  VMA_ASSERT(0);
    7305  }
    7306 }
    7307 
    7308 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    7309 {
    7310  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    7311 
    7312  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    7313  {
    7314  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7315  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    7316  VMA_ASSERT(pDedicatedAllocations);
    7317  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    7318  VMA_ASSERT(success);
    7319  }
    7320 
    7321  VkDeviceMemory hMemory = allocation->GetMemory();
    7322 
    7323  if(allocation->GetMappedData() != VMA_NULL)
    7324  {
    7325  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    7326  }
    7327 
    7328  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    7329 
    7330  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    7331 }
    7332 
    7333 #if VMA_STATS_STRING_ENABLED
    7334 
    7335 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    7336 {
    7337  bool dedicatedAllocationsStarted = false;
    7338  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7339  {
    7340  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7341  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    7342  VMA_ASSERT(pDedicatedAllocVector);
    7343  if(pDedicatedAllocVector->empty() == false)
    7344  {
    7345  if(dedicatedAllocationsStarted == false)
    7346  {
    7347  dedicatedAllocationsStarted = true;
    7348  json.WriteString("DedicatedAllocations");
    7349  json.BeginObject();
    7350  }
    7351 
    7352  json.BeginString("Type ");
    7353  json.ContinueString(memTypeIndex);
    7354  json.EndString();
    7355 
    7356  json.BeginArray();
    7357 
    7358  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    7359  {
    7360  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    7361  json.BeginObject(true);
    7362 
    7363  json.WriteString("Size");
    7364  json.WriteNumber(hAlloc->GetSize());
    7365 
    7366  json.WriteString("Type");
    7367  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
    7368 
    7369  json.EndObject();
    7370  }
    7371 
    7372  json.EndArray();
    7373  }
    7374  }
    7375  if(dedicatedAllocationsStarted)
    7376  {
    7377  json.EndObject();
    7378  }
    7379 
    7380  {
    7381  bool allocationsStarted = false;
    7382  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7383  {
    7384  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    7385  {
    7386  if(allocationsStarted == false)
    7387  {
    7388  allocationsStarted = true;
    7389  json.WriteString("DefaultPools");
    7390  json.BeginObject();
    7391  }
    7392 
    7393  json.BeginString("Type ");
    7394  json.ContinueString(memTypeIndex);
    7395  json.EndString();
    7396 
    7397  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    7398  }
    7399  }
    7400  if(allocationsStarted)
    7401  {
    7402  json.EndObject();
    7403  }
    7404  }
    7405 
    7406  {
    7407  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7408  const size_t poolCount = m_Pools.size();
    7409  if(poolCount > 0)
    7410  {
    7411  json.WriteString("Pools");
    7412  json.BeginArray();
    7413  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    7414  {
    7415  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    7416  }
    7417  json.EndArray();
    7418  }
    7419  }
    7420 }
    7421 
    7422 #endif // #if VMA_STATS_STRING_ENABLED
    7423 
    7424 static VkResult AllocateMemoryForImage(
    7425  VmaAllocator allocator,
    7426  VkImage image,
    7427  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7428  VmaSuballocationType suballocType,
    7429  VmaAllocation* pAllocation)
    7430 {
    7431  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    7432 
    7433  VkMemoryRequirements vkMemReq = {};
    7434  bool requiresDedicatedAllocation = false;
    7435  bool prefersDedicatedAllocation = false;
    7436  allocator->GetImageMemoryRequirements(image, vkMemReq,
    7437  requiresDedicatedAllocation, prefersDedicatedAllocation);
    7438 
    7439  return allocator->AllocateMemory(
    7440  vkMemReq,
    7441  requiresDedicatedAllocation,
    7442  prefersDedicatedAllocation,
    7443  VK_NULL_HANDLE, // dedicatedBuffer
    7444  image, // dedicatedImage
    7445  *pAllocationCreateInfo,
    7446  suballocType,
    7447  pAllocation);
    7448 }
    7449 
    7450 ////////////////////////////////////////////////////////////////////////////////
    7451 // Public interface
    7452 
    7453 VkResult vmaCreateAllocator(
    7454  const VmaAllocatorCreateInfo* pCreateInfo,
    7455  VmaAllocator* pAllocator)
    7456 {
    7457  VMA_ASSERT(pCreateInfo && pAllocator);
    7458  VMA_DEBUG_LOG("vmaCreateAllocator");
    7459  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    7460  return VK_SUCCESS;
    7461 }
    7462 
    7463 void vmaDestroyAllocator(
    7464  VmaAllocator allocator)
    7465 {
    7466  if(allocator != VK_NULL_HANDLE)
    7467  {
    7468  VMA_DEBUG_LOG("vmaDestroyAllocator");
    7469  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    7470  vma_delete(&allocationCallbacks, allocator);
    7471  }
    7472 }
    7473 
    7474 void vmaGetPhysicalDeviceProperties(
    7475  VmaAllocator allocator,
    7476  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    7477 {
    7478  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    7479  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    7480 }
    7481 
    7482 void vmaGetMemoryProperties(
    7483  VmaAllocator allocator,
    7484  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    7485 {
    7486  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    7487  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    7488 }
    7489 
    7490 void vmaGetMemoryTypeProperties(
    7491  VmaAllocator allocator,
    7492  uint32_t memoryTypeIndex,
    7493  VkMemoryPropertyFlags* pFlags)
    7494 {
    7495  VMA_ASSERT(allocator && pFlags);
    7496  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    7497  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    7498 }
    7499 
    7500 void vmaSetCurrentFrameIndex(
    7501  VmaAllocator allocator,
    7502  uint32_t frameIndex)
    7503 {
    7504  VMA_ASSERT(allocator);
    7505  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    7506 
    7507  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7508 
    7509  allocator->SetCurrentFrameIndex(frameIndex);
    7510 }
    7511 
    7512 void vmaCalculateStats(
    7513  VmaAllocator allocator,
    7514  VmaStats* pStats)
    7515 {
    7516  VMA_ASSERT(allocator && pStats);
    7517  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7518  allocator->CalculateStats(pStats);
    7519 }
    7520 
    7521 #if VMA_STATS_STRING_ENABLED
    7522 
    7523 void vmaBuildStatsString(
    7524  VmaAllocator allocator,
    7525  char** ppStatsString,
    7526  VkBool32 detailedMap)
    7527 {
    7528  VMA_ASSERT(allocator && ppStatsString);
    7529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7530 
    7531  VmaStringBuilder sb(allocator);
    7532  {
    7533  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    7534  json.BeginObject();
    7535 
    7536  VmaStats stats;
    7537  allocator->CalculateStats(&stats);
    7538 
    7539  json.WriteString("Total");
    7540  VmaPrintStatInfo(json, stats.total);
    7541 
    7542  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    7543  {
    7544  json.BeginString("Heap ");
    7545  json.ContinueString(heapIndex);
    7546  json.EndString();
    7547  json.BeginObject();
    7548 
    7549  json.WriteString("Size");
    7550  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    7551 
    7552  json.WriteString("Flags");
    7553  json.BeginArray(true);
    7554  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    7555  {
    7556  json.WriteString("DEVICE_LOCAL");
    7557  }
    7558  json.EndArray();
    7559 
    7560  if(stats.memoryHeap[heapIndex].blockCount > 0)
    7561  {
    7562  json.WriteString("Stats");
    7563  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    7564  }
    7565 
    7566  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    7567  {
    7568  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    7569  {
    7570  json.BeginString("Type ");
    7571  json.ContinueString(typeIndex);
    7572  json.EndString();
    7573 
    7574  json.BeginObject();
    7575 
    7576  json.WriteString("Flags");
    7577  json.BeginArray(true);
    7578  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    7579  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    7580  {
    7581  json.WriteString("DEVICE_LOCAL");
    7582  }
    7583  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7584  {
    7585  json.WriteString("HOST_VISIBLE");
    7586  }
    7587  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    7588  {
    7589  json.WriteString("HOST_COHERENT");
    7590  }
    7591  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    7592  {
    7593  json.WriteString("HOST_CACHED");
    7594  }
    7595  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    7596  {
    7597  json.WriteString("LAZILY_ALLOCATED");
    7598  }
    7599  json.EndArray();
    7600 
    7601  if(stats.memoryType[typeIndex].blockCount > 0)
    7602  {
    7603  json.WriteString("Stats");
    7604  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    7605  }
    7606 
    7607  json.EndObject();
    7608  }
    7609  }
    7610 
    7611  json.EndObject();
    7612  }
    7613  if(detailedMap == VK_TRUE)
    7614  {
    7615  allocator->PrintDetailedMap(json);
    7616  }
    7617 
    7618  json.EndObject();
    7619  }
    7620 
    7621  const size_t len = sb.GetLength();
    7622  char* const pChars = vma_new_array(allocator, char, len + 1);
    7623  if(len > 0)
    7624  {
    7625  memcpy(pChars, sb.GetData(), len);
    7626  }
    7627  pChars[len] = '\0';
    7628  *ppStatsString = pChars;
    7629 }
    7630 
    7631 void vmaFreeStatsString(
    7632  VmaAllocator allocator,
    7633  char* pStatsString)
    7634 {
    7635  if(pStatsString != VMA_NULL)
    7636  {
    7637  VMA_ASSERT(allocator);
    7638  size_t len = strlen(pStatsString);
    7639  vma_delete_array(allocator, pStatsString, len + 1);
    7640  }
    7641 }
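// Example (caller-side sketch): building and releasing a stats string.
// `allocator` is assumed to exist.
//
//    char* statsString = nullptr;
//    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // detailed map
//    printf("%s\n", statsString);
//    vmaFreeStatsString(allocator, statsString);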
    7642 
    7643 #endif // #if VMA_STATS_STRING_ENABLED
    7644 
    7647 VkResult vmaFindMemoryTypeIndex(
    7648  VmaAllocator allocator,
    7649  uint32_t memoryTypeBits,
    7650  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7651  uint32_t* pMemoryTypeIndex)
    7652 {
    7653  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    7654  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    7655  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    7656 
    7657  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    7658  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    7659  if(preferredFlags == 0)
    7660  {
    7661  preferredFlags = requiredFlags;
    7662  }
    7663  // preferredFlags, if not 0, must be a superset of requiredFlags.
    7664  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
    7665 
    7666  // Convert usage to requiredFlags and preferredFlags.
    7667  switch(pAllocationCreateInfo->usage)
    7668  {
    7669  case VMA_MEMORY_USAGE_UNKNOWN:
    7670  break;
    7671  case VMA_MEMORY_USAGE_GPU_ONLY:
    7672  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7673  break;
    7674  case VMA_MEMORY_USAGE_CPU_ONLY:
    7675  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    7676  break;
    7677  case VMA_MEMORY_USAGE_CPU_TO_GPU:
    7678  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7679  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7680  break;
    7681  case VMA_MEMORY_USAGE_GPU_TO_CPU:
    7682  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7683  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    7684  break;
    7685  default:
    7686  break;
    7687  }
    7688 
    7689  *pMemoryTypeIndex = UINT32_MAX;
    7690  uint32_t minCost = UINT32_MAX;
    7691  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    7692  memTypeIndex < allocator->GetMemoryTypeCount();
    7693  ++memTypeIndex, memTypeBit <<= 1)
    7694  {
    7695  // This memory type is acceptable according to memoryTypeBits bitmask.
    7696  if((memTypeBit & memoryTypeBits) != 0)
    7697  {
    7698  const VkMemoryPropertyFlags currFlags =
    7699  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    7700  // This memory type contains requiredFlags.
    7701  if((requiredFlags & ~currFlags) == 0)
    7702  {
    7703  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    7704  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
    7705  // Remember memory type with lowest cost.
    7706  if(currCost < minCost)
    7707  {
    7708  *pMemoryTypeIndex = memTypeIndex;
    7709  if(currCost == 0)
    7710  {
    7711  return VK_SUCCESS;
    7712  }
    7713  minCost = currCost;
    7714  }
    7715  }
    7716  }
    7717  }
    7718  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    7719 }
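// Example (caller-side sketch): finding the memory type for a CPU-side
// staging buffer. Passing UINT32_MAX as memoryTypeBits considers all types;
// normally the mask comes from vkGetBufferMemoryRequirements.
//
//    VmaAllocationCreateInfo allocCreateInfo = {};
//    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//    uint32_t memTypeIndex;
//    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX,
//        &allocCreateInfo, &memTypeIndex);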
    7720 
    7721 VkResult vmaCreatePool(
    7722  VmaAllocator allocator,
    7723  const VmaPoolCreateInfo* pCreateInfo,
    7724  VmaPool* pPool)
    7725 {
    7726  VMA_ASSERT(allocator && pCreateInfo && pPool);
    7727 
    7728  VMA_DEBUG_LOG("vmaCreatePool");
    7729 
    7730  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7731 
    7732  return allocator->CreatePool(pCreateInfo, pPool);
    7733 }
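// Example (caller-side sketch): creating a custom pool with fixed block size
// and limited total size. `memTypeIndex` would typically come from
// vmaFindMemoryTypeIndex().
//
//    VmaPoolCreateInfo poolCreateInfo = {};
//    poolCreateInfo.memoryTypeIndex = memTypeIndex;
//    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MB per block
//    poolCreateInfo.minBlockCount = 1;
//    poolCreateInfo.maxBlockCount = 4; // caps the pool at 64 MB total
//    VmaPool pool;
//    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);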
    7734 
    7735 void vmaDestroyPool(
    7736  VmaAllocator allocator,
    7737  VmaPool pool)
    7738 {
    7739  VMA_ASSERT(allocator);
    7740 
    7741  if(pool == VK_NULL_HANDLE)
    7742  {
    7743  return;
    7744  }
    7745 
    7746  VMA_DEBUG_LOG("vmaDestroyPool");
    7747 
    7748  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7749 
    7750  allocator->DestroyPool(pool);
    7751 }
    7752 
    7753 void vmaGetPoolStats(
    7754  VmaAllocator allocator,
    7755  VmaPool pool,
    7756  VmaPoolStats* pPoolStats)
    7757 {
    7758  VMA_ASSERT(allocator && pool && pPoolStats);
    7759 
    7760  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7761 
    7762  allocator->GetPoolStats(pool, pPoolStats);
    7763 }
    7764 
    7765 void vmaMakePoolAllocationsLost(
    7766  VmaAllocator allocator,
    7767  VmaPool pool,
    7768  size_t* pLostAllocationCount)
    7769 {
    7770  VMA_ASSERT(allocator && pool);
    7771 
    7772  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7773 
    7774  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    7775 }
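// Example (caller-side sketch): the per-frame flow for lost allocations.
// `allocator`, `pool` and `frameIndex` are assumed; allocations in the pool
// were created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
//
//    vmaSetCurrentFrameIndex(allocator, frameIndex); // once per frame
//    size_t lostCount = 0;
//    vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
//    // Allocations unused for more than VmaPoolCreateInfo::frameInUseCount
//    // frames are now lost and their memory can be reused.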
    7776 
    7777 VkResult vmaAllocateMemory(
    7778  VmaAllocator allocator,
    7779  const VkMemoryRequirements* pVkMemoryRequirements,
    7780  const VmaAllocationCreateInfo* pCreateInfo,
    7781  VmaAllocation* pAllocation,
    7782  VmaAllocationInfo* pAllocationInfo)
    7783 {
    7784  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    7785 
    7786  VMA_DEBUG_LOG("vmaAllocateMemory");
    7787 
    7788  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7789 
    7790  VkResult result = allocator->AllocateMemory(
    7791  *pVkMemoryRequirements,
    7792  false, // requiresDedicatedAllocation
    7793  false, // prefersDedicatedAllocation
    7794  VK_NULL_HANDLE, // dedicatedBuffer
    7795  VK_NULL_HANDLE, // dedicatedImage
    7796  *pCreateInfo,
    7797  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    7798  pAllocation);
    7799 
    7800  if(pAllocationInfo && result == VK_SUCCESS)
    7801  {
    7802  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7803  }
    7804 
    7805  return result;
    7806 }
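// Example (caller-side sketch): allocating raw memory for externally obtained
// requirements and binding it manually. `allocator`, `device` and `buffer`
// are assumed.
//
//    VkMemoryRequirements memReq;
//    vkGetBufferMemoryRequirements(device, buffer, &memReq);
//    VmaAllocationCreateInfo allocCreateInfo = {};
//    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//    VmaAllocation alloc;
//    VmaAllocationInfo allocInfo;
//    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo,
//        &alloc, &allocInfo);
//    if(res == VK_SUCCESS)
//        vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);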
    7807 
    7808 VkResult vmaAllocateMemoryForBuffer(
    7809  VmaAllocator allocator,
    7810  VkBuffer buffer,
    7811  const VmaAllocationCreateInfo* pCreateInfo,
    7812  VmaAllocation* pAllocation,
    7813  VmaAllocationInfo* pAllocationInfo)
    7814 {
    7815  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7816 
    7817  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    7818 
    7819  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7820 
    7821  VkMemoryRequirements vkMemReq = {};
    7822  bool requiresDedicatedAllocation = false;
    7823  bool prefersDedicatedAllocation = false;
    7824  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    7825  requiresDedicatedAllocation,
    7826  prefersDedicatedAllocation);
    7827 
    7828  VkResult result = allocator->AllocateMemory(
    7829  vkMemReq,
    7830  requiresDedicatedAllocation,
    7831  prefersDedicatedAllocation,
    7832  buffer, // dedicatedBuffer
    7833  VK_NULL_HANDLE, // dedicatedImage
    7834  *pCreateInfo,
    7835  VMA_SUBALLOCATION_TYPE_BUFFER,
    7836  pAllocation);
    7837 
    7838  if(pAllocationInfo && result == VK_SUCCESS)
    7839  {
    7840  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7841  }
    7842 
    7843  return result;
    7844 }
    7845 
    7846 VkResult vmaAllocateMemoryForImage(
    7847  VmaAllocator allocator,
    7848  VkImage image,
    7849  const VmaAllocationCreateInfo* pCreateInfo,
    7850  VmaAllocation* pAllocation,
    7851  VmaAllocationInfo* pAllocationInfo)
    7852 {
    7853  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7854 
    7855  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    7856 
    7857  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7858 
    7859  VkResult result = AllocateMemoryForImage(
    7860  allocator,
    7861  image,
    7862  pCreateInfo,
    7863  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    7864  pAllocation);
    7865 
    7866  if(pAllocationInfo && result == VK_SUCCESS)
    7867  {
    7868  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7869  }
    7870 
    7871  return result;
    7872 }
    7873 
    7874 void vmaFreeMemory(
    7875  VmaAllocator allocator,
    7876  VmaAllocation allocation)
    7877 {
    7878  VMA_ASSERT(allocator && allocation);
    7879 
    7880  VMA_DEBUG_LOG("vmaFreeMemory");
    7881 
    7882  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7883 
    7884  allocator->FreeMemory(allocation);
    7885 }
    7886 
    7887 void vmaGetAllocationInfo(
    7888  VmaAllocator allocator,
    7889  VmaAllocation allocation,
    7890  VmaAllocationInfo* pAllocationInfo)
    7891 {
    7892  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    7893 
    7894  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7895 
    7896  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    7897 }
    7898 
    7899 void vmaSetAllocationUserData(
    7900  VmaAllocator allocator,
    7901  VmaAllocation allocation,
    7902  void* pUserData)
    7903 {
    7904  VMA_ASSERT(allocator && allocation);
    7905 
    7906  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7907 
    7908  allocation->SetUserData(pUserData);
    7909 }
    7910 
    7911 void vmaCreateLostAllocation(
    7912  VmaAllocator allocator,
    7913  VmaAllocation* pAllocation)
    7914 {
    7915  VMA_ASSERT(allocator && pAllocation);
    7916 
    7917  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7918 
    7919  allocator->CreateLostAllocation(pAllocation);
    7920 }
    7921 
    7922 VkResult vmaMapMemory(
    7923  VmaAllocator allocator,
    7924  VmaAllocation allocation,
    7925  void** ppData)
    7926 {
    7927  VMA_ASSERT(allocator && allocation && ppData);
    7928 
    7929  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7930 
    7931  return allocator->Map(allocation, ppData);
    7932 }
    7933 
    7934 void vmaUnmapMemory(
    7935  VmaAllocator allocator,
    7936  VmaAllocation allocation)
    7937 {
    7938  VMA_ASSERT(allocator && allocation);
    7939 
    7940  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7941 
    7942  allocator->Unmap(allocation);
    7943 }
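// Example (caller-side sketch): mapping is reference-counted, so vmaMapMemory()
// may be called several times, even on an allocation created with
// VMA_ALLOCATION_CREATE_MAPPED_BIT, as long as each successful call is paired
// with vmaUnmapMemory(). `allocator`, `alloc`, `srcData` and `srcSize` are
// assumed.
//
//    void* pData = nullptr;
//    VkResult res = vmaMapMemory(allocator, alloc, &pData);
//    if(res == VK_SUCCESS)
//    {
//        memcpy(pData, srcData, srcSize);
//        vmaUnmapMemory(allocator, alloc); // decrements the internal map count
//    }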
    7944 
    7945 VkResult vmaDefragment(
    7946  VmaAllocator allocator,
    7947  VmaAllocation* pAllocations,
    7948  size_t allocationCount,
    7949  VkBool32* pAllocationsChanged,
    7950  const VmaDefragmentationInfo *pDefragmentationInfo,
    7951  VmaDefragmentationStats* pDefragmentationStats)
    7952 {
    7953  VMA_ASSERT(allocator && pAllocations);
    7954 
    7955  VMA_DEBUG_LOG("vmaDefragment");
    7956 
    7957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7958 
    7959  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    7960 }
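// Example (caller-side sketch): defragmenting a set of block allocations in
// HOST_VISIBLE memory. `allocator` and an `allocations` array of `allocCount`
// elements are assumed. Resources bound to moved allocations must be
// recreated and re-bound by the caller.
//
//    std::vector<VkBool32> changed(allocCount, VK_FALSE);
//    VmaDefragmentationInfo defragInfo = {};
//    defragInfo.maxBytesToMove = UINT64_MAX;       // no byte limit
//    defragInfo.maxAllocationsToMove = UINT32_MAX; // no count limit
//    VmaDefragmentationStats stats = {};
//    VkResult res = vmaDefragment(allocator, allocations, allocCount,
//        changed.data(), &defragInfo, &stats);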
    7961 
    7962 VkResult vmaCreateBuffer(
    7963  VmaAllocator allocator,
    7964  const VkBufferCreateInfo* pBufferCreateInfo,
    7965  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7966  VkBuffer* pBuffer,
    7967  VmaAllocation* pAllocation,
    7968  VmaAllocationInfo* pAllocationInfo)
    7969 {
    7970  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    7971 
    7972  VMA_DEBUG_LOG("vmaCreateBuffer");
    7973 
    7974  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7975 
    7976  *pBuffer = VK_NULL_HANDLE;
    7977  *pAllocation = VK_NULL_HANDLE;
    7978 
    7979  // 1. Create VkBuffer.
    7980  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    7981  allocator->m_hDevice,
    7982  pBufferCreateInfo,
    7983  allocator->GetAllocationCallbacks(),
    7984  pBuffer);
    7985  if(res >= 0)
    7986  {
    7987  // 2. vkGetBufferMemoryRequirements.
    7988  VkMemoryRequirements vkMemReq = {};
    7989  bool requiresDedicatedAllocation = false;
    7990  bool prefersDedicatedAllocation = false;
    7991  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    7992  requiresDedicatedAllocation, prefersDedicatedAllocation);
    7993 
    7994  // 3. Allocate memory using allocator.
    7995  res = allocator->AllocateMemory(
    7996  vkMemReq,
    7997  requiresDedicatedAllocation,
    7998  prefersDedicatedAllocation,
    7999  *pBuffer, // dedicatedBuffer
    8000  VK_NULL_HANDLE, // dedicatedImage
    8001  *pAllocationCreateInfo,
    8002  VMA_SUBALLOCATION_TYPE_BUFFER,
    8003  pAllocation);
    8004  if(res >= 0)
    8005  {
    8006  // 4. Bind buffer with memory.
    8007  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
    8008  allocator->m_hDevice,
    8009  *pBuffer,
    8010  (*pAllocation)->GetMemory(),
    8011  (*pAllocation)->GetOffset());
    8012  if(res >= 0)
    8013  {
    8014  // All steps succeeded.
    8015  if(pAllocationInfo != VMA_NULL)
    8016  {
    8017  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8018  }
    8019  return VK_SUCCESS;
    8020  }
    8021  allocator->FreeMemory(*pAllocation);
    8022  *pAllocation = VK_NULL_HANDLE;
    8023  return res;
    8024  }
    8025  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    8026  *pBuffer = VK_NULL_HANDLE;
    8027  return res;
    8028  }
    8029  return res;
    8030 }
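// Example (caller-side sketch): creating a persistently mapped staging buffer
// in one call. `allocator` is assumed to exist.
//
//    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//    bufCreateInfo.size = 65536;
//    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
//    VmaAllocationCreateInfo allocCreateInfo = {};
//    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
//    VkBuffer buf;
//    VmaAllocation alloc;
//    VmaAllocationInfo allocInfo;
//    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//        &buf, &alloc, &allocInfo);
//    // Write through allocInfo.pMappedData; later call
//    // vmaDestroyBuffer(allocator, buf, alloc);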
    8031 
    8032 void vmaDestroyBuffer(
    8033  VmaAllocator allocator,
    8034  VkBuffer buffer,
    8035  VmaAllocation allocation)
    8036 {
    8037  if(buffer != VK_NULL_HANDLE)
    8038  {
    8039  VMA_ASSERT(allocator);
    8040 
    8041  VMA_DEBUG_LOG("vmaDestroyBuffer");
    8042 
    8043  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8044 
    8045  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    8046 
    8047  allocator->FreeMemory(allocation);
    8048  }
    8049 }
    8050 
    8051 VkResult vmaCreateImage(
    8052  VmaAllocator allocator,
    8053  const VkImageCreateInfo* pImageCreateInfo,
    8054  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8055  VkImage* pImage,
    8056  VmaAllocation* pAllocation,
    8057  VmaAllocationInfo* pAllocationInfo)
    8058 {
    8059  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    8060 
    8061  VMA_DEBUG_LOG("vmaCreateImage");
    8062 
    8063  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8064 
    8065  *pImage = VK_NULL_HANDLE;
    8066  *pAllocation = VK_NULL_HANDLE;
    8067 
    8068  // 1. Create VkImage.
    8069  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    8070  allocator->m_hDevice,
    8071  pImageCreateInfo,
    8072  allocator->GetAllocationCallbacks(),
    8073  pImage);
    8074  if(res >= 0)
    8075  {
    8076  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    8077  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    8078  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    8079 
    8080  // 2. Allocate memory using allocator.
    8081  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    8082  if(res >= 0)
    8083  {
    8084  // 3. Bind image with memory.
    8085  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
    8086  allocator->m_hDevice,
    8087  *pImage,
    8088  (*pAllocation)->GetMemory(),
    8089  (*pAllocation)->GetOffset());
    8090  if(res >= 0)
    8091  {
    8092  // All steps succeeded.
    8093  if(pAllocationInfo != VMA_NULL)
    8094  {
    8095  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8096  }
    8097  return VK_SUCCESS;
    8098  }
    8099  allocator->FreeMemory(*pAllocation);
    8100  *pAllocation = VK_NULL_HANDLE;
    8101  return res;
    8102  }
    8103  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    8104  *pImage = VK_NULL_HANDLE;
    8105  return res;
    8106  }
    8107  return res;
    8108 }
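// Example (caller-side sketch): creating a GPU-only image. The tiling chosen
// in the image create info determines the suballocation type used above.
// `allocator` and a filled `imgCreateInfo` with tiling = VK_IMAGE_TILING_OPTIMAL
// are assumed.
//
//    VmaAllocationCreateInfo allocCreateInfo = {};
//    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//    VkImage image;
//    VmaAllocation alloc;
//    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
//        &image, &alloc, nullptr);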
    8109 
    8110 void vmaDestroyImage(
    8111  VmaAllocator allocator,
    8112  VkImage image,
    8113  VmaAllocation allocation)
    8114 {
    8115  if(image != VK_NULL_HANDLE)
    8116  {
    8117  VMA_ASSERT(allocator);
    8118 
    8119  VMA_DEBUG_LOG("vmaDestroyImage");
    8120 
    8121  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8122 
    8123  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    8124 
    8125  allocator->FreeMemory(allocation);
    8126  }
    8127 }
    8128 
    8129 #endif // #ifdef VMA_IMPLEMENTATION
    Definition: vk_mem_alloc.h:717
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:575
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:716
    -
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
    Definition: vk_mem_alloc.h:583
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:1202
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:741
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:599
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:740
    +
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
    Definition: vk_mem_alloc.h:607
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:1216
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:622
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:726
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:1210
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:839
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1193
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:576
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:497
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:596
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:922
    -
    Definition: vk_mem_alloc.h:916
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:1054
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:646
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:750
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:1224
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:862
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1207
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:600
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:521
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:620
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:936
    +
    Definition: vk_mem_alloc.h:930
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:1068
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:573
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:858
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:938
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:974
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:597
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:881
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:952
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:988
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:559
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:925
    +
    Definition: vk_mem_alloc.h:583
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:939
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:754
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:778
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:1188
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:1202
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:1206
    -
    Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
    Definition: vk_mem_alloc.h:765
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:574
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:1220
    +
    Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
    Definition: vk_mem_alloc.h:789
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:598
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:722
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:503
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:746
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:527
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:524
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:548
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:529
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:1208
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:553
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:1222
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:850
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region.
    Definition: vk_mem_alloc.h:984
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:873
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region.
    Definition: vk_mem_alloc.h:998
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:569
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:705
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    Definition: vk_mem_alloc.h:933
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:516
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:593
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:729
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    Definition: vk_mem_alloc.h:947
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:540
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:830
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:718
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:520
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:928
    -
    Memory will be used for frequent writing on device and readback on host (download).
    Definition: vk_mem_alloc.h:768
    +
    Definition: vk_mem_alloc.h:853
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:742
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:544
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:942
    +
    Memory will be used for frequent writing on device and readback on host (download).
    Definition: vk_mem_alloc.h:792
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:845
    -
    Definition: vk_mem_alloc.h:836
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:708
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:571
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool.
    Definition: vk_mem_alloc.h:946
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory.
    Definition: vk_mem_alloc.h:608
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:977
    -
    VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
    Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:834
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:863
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:868
    +
    Definition: vk_mem_alloc.h:859
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:732
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:595
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool.
    Definition: vk_mem_alloc.h:960
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory.
    Definition: vk_mem_alloc.h:632
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:991
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:857
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:886
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:640
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:724
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:717
    -
    Definition: vk_mem_alloc.h:896
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:580
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:518
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:579
    -
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:960
    +
    const VkDeviceSize * pHeapSizeLimit
    Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:664
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:748
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:833
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:741
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:604
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:542
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:603
    +
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:974
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of a Allocator to be created.
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:1065
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps. ...
    Definition: vk_mem_alloc.h:599
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:717
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:714
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:1082
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps. ...
    Definition: vk_mem_alloc.h:623
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:741
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:738
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:965
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:1049
    -
    Definition: vk_mem_alloc.h:832
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1204
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:567
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:979
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:1063
    +
    Definition: vk_mem_alloc.h:855
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1218
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:591
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    -
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
    Definition: vk_mem_alloc.h:582
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:712
    -
    No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requ...
    Definition: vk_mem_alloc.h:757
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:918
    +
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
    Definition: vk_mem_alloc.h:606
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:736
    +
    No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requ...
    Definition: vk_mem_alloc.h:781
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:932
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:710
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:577
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:581
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:796
    -
    Memory will be mapped on host. Could be used for transfer to/from device.
    Definition: vk_mem_alloc.h:762
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data. Null if this alloaction is not persistent...
    Definition: vk_mem_alloc.h:1060
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:734
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:601
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:605
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:820
    +
    Memory will be mapped on host. Could be used for transfer to/from device.
    Definition: vk_mem_alloc.h:786
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:1077
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:557
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:581
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:570
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:1030
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:594
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:1044
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:812
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:887
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:718
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:725
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:910
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:742
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:749
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:971
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:718
    -
    void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
    Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:1035
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:985
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:742
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:1049
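To tie the tooltip changes together, here is a minimal sketch of the mapping API as documented above. The buffer parameters and payload are hypothetical, the VMA_MEMORY_USAGE_CPU_ONLY enumerator name is assumed from the "Memory will be mapped on host" usage value, and the map/unmap pairing follows this version's reference-counted semantics:

#include <cstring> // memcpy

// Hypothetical sketch; assumes 'allocator' is a valid VmaAllocator.
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // host-mappable memory

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo = {};
VkResult result = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, &allocInfo);
if(result == VK_SUCCESS)
{
    // Mapping is reference-counted: every successful vmaMapMemory() must be
    // balanced by one vmaUnmapMemory() on the same allocation.
    void* pData = nullptr;
    result = vmaMapMemory(allocator, allocation, &pData);
    if(result == VK_SUCCESS)
    {
        const char payload[] = "example payload";
        memcpy(pData, payload, sizeof(payload));
        vmaUnmapMemory(allocator, allocation);
    }
    vmaDestroyBuffer(allocator, buffer, allocation);
}

Alternatively, requesting the flag described in the tooltip at vk_mem_alloc.h:833 keeps the allocation persistently mapped for its whole lifetime, and the pointer can then be read from VmaAllocationInfo::pMappedData without explicit map/unmap calls.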