// monocypher.cpp
// Monocypher version 3.1.2
//
// This file is dual-licensed.  Choose whichever licence you want from
// the two licences listed below.
//
// The first licence is a regular 2-clause BSD licence.  The second licence
// is the CC-0 from Creative Commons. It is intended to release Monocypher
// to the public domain.  The BSD licence serves as a fallback option.
//
// SPDX-License-Identifier: BSD-2-Clause OR CC0-1.0
//
// ------------------------------------------------------------------------
//
// Copyright (c) 2017-2020, Loup Vaillant
// All rights reserved.
//
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the
//    distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ------------------------------------------------------------------------
//
// Written in 2017-2020 by Loup Vaillant
//
// To the extent possible under law, the author(s) have dedicated all copyright
// and related neighboring rights to this software to the public domain
// worldwide.  This software is distributed without any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication along
// with this software.  If not, see
// <https://creativecommons.org/publicdomain/zero/1.0/>
#include "monocypher.h"

// we don't need Argon2
#define MONOCYPHER_ARGON2_ENABLE 0
// we want the bootloader to be as small as possible
#define BLAKE2_NO_UNROLLING 1

/////////////////
/// Utilities ///
/////////////////
#define FOR_T(type, i0, start, end) for (type i0 = (start); i0 < (end); i0++)
#define FOR(i1, start, end)         FOR_T(size_t, i1, start, end)
#define COPY(dst, src, size)        FOR(i2, 0, size) (dst)[i2] = (src)[i2]
#define ZERO(buf, size)             FOR(i3, 0, size) (buf)[i3] = 0
#define WIPE_CTX(ctx)               crypto_wipe(ctx   , sizeof(*(ctx)))
#define WIPE_BUFFER(buffer)         crypto_wipe(buffer, sizeof(buffer))
#define MIN(a, b)                   ((a) <= (b) ? (a) : (b))
#define MAX(a, b)                   ((a) >= (b) ? (a) : (b))

typedef int8_t   i8;
typedef uint8_t  u8;
typedef int16_t  i16;
typedef uint32_t u32;
typedef int32_t  i32;
typedef int64_t  i64;
typedef uint64_t u64;

static const u8 zero[128] = {0};

// returns the smallest positive integer y such that
// (x + y) % pow_2 == 0
// Basically, it's how many bytes we need to add to "align" x.
// Only works when pow_2 is a power of 2.
// Note: we use ~x+1 instead of -x to avoid compiler warnings
static size_t align(size_t x, size_t pow_2)
{
    return (~x + 1) & (pow_2 - 1);
}
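
// Worked example (not from the original source):
//   align(13, 16) == 3, because (13 + 3) % 16 == 0
//   align(16, 16) == 0, because 16 is already aligned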

static u32 load24_le(const u8 s[3])
{
    return (u32)s[0]
        | ((u32)s[1] <<  8)
        | ((u32)s[2] << 16);
}

static u32 load32_le(const u8 s[4])
{
    return (u32)s[0]
        | ((u32)s[1] <<  8)
        | ((u32)s[2] << 16)
        | ((u32)s[3] << 24);
}

static u64 load64_le(const u8 s[8])
{
    return load32_le(s) | ((u64)load32_le(s+4) << 32);
}

static void store32_le(u8 out[4], u32 in)
{
    out[0] =  in        & 0xff;
    out[1] = (in >>  8) & 0xff;
    out[2] = (in >> 16) & 0xff;
    out[3] = (in >> 24) & 0xff;
}

static void store64_le(u8 out[8], u64 in)
{
    store32_le(out    , (u32)in );
    store32_le(out + 4, in >> 32);
}

static void load32_le_buf (u32 *dst, const u8 *src, size_t size) {
    FOR(i, 0, size) { dst[i] = load32_le(src + i*4); }
}
static void load64_le_buf (u64 *dst, const u8 *src, size_t size) {
    FOR(i, 0, size) { dst[i] = load64_le(src + i*8); }
}
static void store32_le_buf(u8 *dst, const u32 *src, size_t size) {
    FOR(i, 0, size) { store32_le(dst + i*4, src[i]); }
}
static void store64_le_buf(u8 *dst, const u64 *src, size_t size) {
    FOR(i, 0, size) { store64_le(dst + i*8, src[i]); }
}

static u64 rotr64(u64 x, u64 n) { return (x >> n) ^ (x << (64 - n)); }
static u32 rotl32(u32 x, u32 n) { return (x << n) ^ (x >> (32 - n)); }

static int neq0(u64 diff)
{   // constant time comparison to zero
    // return diff != 0 ? -1 : 0
    u64 half = (diff >> 32) | ((u32)diff);
    return (1 & ((half - 1) >> 32)) - 1;
}

static u64 x16(const u8 a[16], const u8 b[16])
{
    return (load64_le(a + 0) ^ load64_le(b + 0))
        |  (load64_le(a + 8) ^ load64_le(b + 8));
}
static u64 x32(const u8 a[32],const u8 b[32]){return x16(a,b)| x16(a+16, b+16);}
static u64 x64(const u8 a[64],const u8 b[64]){return x32(a,b)| x32(a+32, b+32);}
int crypto_verify16(const u8 a[16], const u8 b[16]){ return neq0(x16(a, b)); }
int crypto_verify32(const u8 a[32], const u8 b[32]){ return neq0(x32(a, b)); }
int crypto_verify64(const u8 a[64], const u8 b[64]){ return neq0(x64(a, b)); }

void crypto_wipe(void *secret, size_t size)
{
    volatile u8 *v_secret = (u8*)secret;
    ZERO(v_secret, size);
}
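
// Usage sketch (illustrative only, not part of the original file).
// crypto_verify16 compares in constant time and returns 0 on match,
// -1 on mismatch; crypto_wipe erases secrets once they are no longer
// needed.  The names expected_mac/received_mac/secret_key are
// hypothetical.
#if 0
static int check_mac_example(const u8 expected_mac[16],
                             const u8 received_mac[16],
                             u8 secret_key[32])
{
    int ok = crypto_verify16(expected_mac, received_mac) == 0;
    crypto_wipe(secret_key, 32); // erase the key regardless of the outcome
    return ok;
}
#endif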

/////////////////
/// Chacha 20 ///
/////////////////
#define QUARTERROUND(a, b, c, d)    \
    a += b;  d = rotl32(d ^ a, 16); \
    c += d;  b = rotl32(b ^ c, 12); \
    a += b;  d = rotl32(d ^ a,  8); \
    c += d;  b = rotl32(b ^ c,  7)

static void chacha20_rounds(u32 out[16], const u32 in[16])
{
    // The temporary variables make Chacha20 10% faster.
    u32 t0  = in[ 0];  u32 t1  = in[ 1];  u32 t2  = in[ 2];  u32 t3  = in[ 3];
    u32 t4  = in[ 4];  u32 t5  = in[ 5];  u32 t6  = in[ 6];  u32 t7  = in[ 7];
    u32 t8  = in[ 8];  u32 t9  = in[ 9];  u32 t10 = in[10];  u32 t11 = in[11];
    u32 t12 = in[12];  u32 t13 = in[13];  u32 t14 = in[14];  u32 t15 = in[15];

    FOR (i, 0, 10) { // 20 rounds, 2 rounds per loop.
        QUARTERROUND(t0, t4, t8 , t12); // column 0
        QUARTERROUND(t1, t5, t9 , t13); // column 1
        QUARTERROUND(t2, t6, t10, t14); // column 2
        QUARTERROUND(t3, t7, t11, t15); // column 3
        QUARTERROUND(t0, t5, t10, t15); // diagonal 0
        QUARTERROUND(t1, t6, t11, t12); // diagonal 1
        QUARTERROUND(t2, t7, t8 , t13); // diagonal 2
        QUARTERROUND(t3, t4, t9 , t14); // diagonal 3
    }
    out[ 0] = t0;   out[ 1] = t1;   out[ 2] = t2;   out[ 3] = t3;
    out[ 4] = t4;   out[ 5] = t5;   out[ 6] = t6;   out[ 7] = t7;
    out[ 8] = t8;   out[ 9] = t9;   out[10] = t10;  out[11] = t11;
    out[12] = t12;  out[13] = t13;  out[14] = t14;  out[15] = t15;
}

static void chacha20_init_key(u32 block[16], const u8 key[32])
{
    load32_le_buf(block  , (const u8*)"expand 32-byte k", 4); // constant
    load32_le_buf(block+4, key                          , 8); // key
}

void crypto_hchacha20(u8 out[32], const u8 key[32], const u8 in [16])
{
    u32 block[16];
    chacha20_init_key(block, key);
    // input
    load32_le_buf(block + 12, in, 4);
    chacha20_rounds(block, block);
    // prevent reversal of the rounds by revealing only half of the buffer.
    store32_le_buf(out   , block   , 4); // constant
    store32_le_buf(out+16, block+12, 4); // counter and nonce
    WIPE_BUFFER(block);
}

u64 crypto_chacha20_ctr(u8 *cipher_text, const u8 *plain_text,
                        size_t text_size, const u8 key[32], const u8 nonce[8],
                        u64 ctr)
{
    u32 input[16];
    chacha20_init_key(input, key);
    input[12] = (u32) ctr;
    input[13] = (u32)(ctr >> 32);
    load32_le_buf(input+14, nonce, 2);

    // Whole blocks
    u32    pool[16];
    size_t nb_blocks = text_size >> 6;
    FOR (i, 0, nb_blocks) {
        chacha20_rounds(pool, input);
        if (plain_text != 0) {
            FOR (j, 0, 16) {
                u32 p = pool[j] + input[j];
                store32_le(cipher_text, p ^ load32_le(plain_text));
                cipher_text += 4;
                plain_text  += 4;
            }
        } else {
            FOR (j, 0, 16) {
                u32 p = pool[j] + input[j];
                store32_le(cipher_text, p);
                cipher_text += 4;
            }
        }
        input[12]++;
        if (input[12] == 0) {
            input[13]++;
        }
    }
    text_size &= 63;

    // Last (incomplete) block
    if (text_size > 0) {
        if (plain_text == 0) {
            plain_text = zero;
        }
        chacha20_rounds(pool, input);
        u8 tmp[64];
        FOR (i, 0, 16) {
            store32_le(tmp + i*4, pool[i] + input[i]);
        }
        FOR (i, 0, text_size) {
            cipher_text[i] = tmp[i] ^ plain_text[i];
        }
        WIPE_BUFFER(tmp);
    }
    ctr = input[12] + ((u64)input[13] << 32) + (text_size > 0);

    WIPE_BUFFER(pool);
    WIPE_BUFFER(input);
    return ctr;
}
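
// Usage sketch (illustrative only): encrypting a message in two chunks
// by threading the returned counter through successive calls.  Chunk
// sizes must be multiples of 64 bytes for the keystream to line up.
// The buffer names are hypothetical.
#if 0
static void encrypt_in_chunks_example(u8 *out, const u8 *in, size_t size,
                                      const u8 key[32], const u8 nonce[8])
{
    size_t half = (size / 2) & ~(size_t)63; // round down to 64-byte multiple
    u64 ctr = crypto_chacha20_ctr(out, in, half, key, nonce, 0);
    crypto_chacha20_ctr(out + half, in + half, size - half, key, nonce, ctr);
}
#endif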

u32 crypto_ietf_chacha20_ctr(u8 *cipher_text, const u8 *plain_text,
                             size_t text_size,
                             const u8 key[32], const u8 nonce[12], u32 ctr)
{
    u64 big_ctr = ctr + ((u64)load32_le(nonce) << 32);
    return (u32)crypto_chacha20_ctr(cipher_text, plain_text, text_size,
                                    key, nonce + 4, big_ctr);
}

u64 crypto_xchacha20_ctr(u8 *cipher_text, const u8 *plain_text,
                         size_t text_size,
                         const u8 key[32], const u8 nonce[24], u64 ctr)
{
    u8 sub_key[32];
    crypto_hchacha20(sub_key, key, nonce);
    ctr = crypto_chacha20_ctr(cipher_text, plain_text, text_size,
                              sub_key, nonce+16, ctr);
    WIPE_BUFFER(sub_key);
    return ctr;
}

void crypto_chacha20(u8 *cipher_text, const u8 *plain_text, size_t text_size,
                     const u8 key[32], const u8 nonce[8])
{
    crypto_chacha20_ctr(cipher_text, plain_text, text_size, key, nonce, 0);
}

void crypto_ietf_chacha20(u8 *cipher_text, const u8 *plain_text,
                          size_t text_size,
                          const u8 key[32], const u8 nonce[12])
{
    crypto_ietf_chacha20_ctr(cipher_text, plain_text, text_size, key, nonce, 0);
}

void crypto_xchacha20(u8 *cipher_text, const u8 *plain_text, size_t text_size,
                      const u8 key[32], const u8 nonce[24])
{
    crypto_xchacha20_ctr(cipher_text, plain_text, text_size, key, nonce, 0);
}
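
// Usage sketch (illustrative only): one-shot XChaCha20.  The 24-byte
// nonce is large enough to be picked at random, and decryption is the
// same call with the buffers swapped.  All names are hypothetical.
#if 0
static void xchacha_example(void)
{
    u8 key   [32] = {0}; // fill with a secret, uniformly random key
    u8 nonce [24] = {0}; // fill with a fresh random nonce per message
    u8 plain [64] = {0}; // message to encrypt
    u8 cipher[64];
    crypto_xchacha20(cipher, plain, sizeof(plain), key, nonce);  // encrypt
    crypto_xchacha20(plain, cipher, sizeof(cipher), key, nonce); // decrypt
}
#endif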

/////////////////
/// Poly 1305 ///
/////////////////

// h = (h + c) * r
// preconditions:
//   ctx->h <= 4_ffffffff_ffffffff_ffffffff_ffffffff
//   ctx->c <= 1_ffffffff_ffffffff_ffffffff_ffffffff
//   ctx->r <=   0ffffffc_0ffffffc_0ffffffc_0fffffff
// Postcondition:
//   ctx->h <= 4_ffffffff_ffffffff_ffffffff_ffffffff
static void poly_block(crypto_poly1305_ctx *ctx)
{
    // s = h + c, without carry propagation
    const u64 s0 = ctx->h[0] + (u64)ctx->c[0]; // s0 <= 1_fffffffe
    const u64 s1 = ctx->h[1] + (u64)ctx->c[1]; // s1 <= 1_fffffffe
    const u64 s2 = ctx->h[2] + (u64)ctx->c[2]; // s2 <= 1_fffffffe
    const u64 s3 = ctx->h[3] + (u64)ctx->c[3]; // s3 <= 1_fffffffe
    const u32 s4 = ctx->h[4] +      ctx->c[4]; // s4 <=          5

    // Local all the things!
    const u32 r0 = ctx->r[0]; // r0 <= 0fffffff
    const u32 r1 = ctx->r[1]; // r1 <= 0ffffffc
    const u32 r2 = ctx->r[2]; // r2 <= 0ffffffc
    const u32 r3 = ctx->r[3]; // r3 <= 0ffffffc
    const u32 rr0 = (r0 >> 2) * 5;  // rr0 <= 13fffffb // lose 2 bits...
    const u32 rr1 = (r1 >> 2) + r1; // rr1 <= 13fffffb // rr1 == (r1 >> 2) * 5
    const u32 rr2 = (r2 >> 2) + r2; // rr2 <= 13fffffb // rr2 == (r2 >> 2) * 5
    const u32 rr3 = (r3 >> 2) + r3; // rr3 <= 13fffffb // rr3 == (r3 >> 2) * 5
    // (h + c) * r, without carry propagation
    const u64 x0 = s0*r0 + s1*rr3 + s2*rr2 + s3*rr1 + s4*rr0; // <= 97ffffe007fffff8
    const u64 x1 = s0*r1 + s1*r0  + s2*rr3 + s3*rr2 + s4*rr1; // <= 8fffffe20ffffff6
    const u64 x2 = s0*r2 + s1*r1  + s2*r0  + s3*rr3 + s4*rr2; // <= 87ffffe417fffff4
    const u64 x3 = s0*r3 + s1*r2  + s2*r1  + s3*r0  + s4*rr3; // <= 7fffffe61ffffff2
    const u32 x4 = s4 * (r0 & 3); // ...recover 2 bits        // <= f

    // partial reduction modulo 2^130 - 5
    const u32 u5 = x4 + (x3 >> 32); // u5 <= 7ffffff5
    const u64 u0 = (u5 >>  2) * 5 + (x0 & 0xffffffff);
    const u64 u1 = (u0 >> 32)     + (x1 & 0xffffffff) + (x0 >> 32);
    const u64 u2 = (u1 >> 32)     + (x2 & 0xffffffff) + (x1 >> 32);
    const u64 u3 = (u2 >> 32)     + (x3 & 0xffffffff) + (x2 >> 32);
    const u64 u4 = (u3 >> 32)     + (u5 & 3);

    // Update the hash
    ctx->h[0] = (u32)u0; // u0 <= 1_9ffffff0
    ctx->h[1] = (u32)u1; // u1 <= 1_97ffffe0
    ctx->h[2] = (u32)u2; // u2 <= 1_8fffffe2
    ctx->h[3] = (u32)u3; // u3 <= 1_87ffffe4
    ctx->h[4] = (u32)u4; // u4 <=          4
}

// (re-)initialises the input counter and input buffer
static void poly_clear_c(crypto_poly1305_ctx *ctx)
{
    ZERO(ctx->c, 4);
    ctx->c_idx = 0;
}

static void poly_take_input(crypto_poly1305_ctx *ctx, u8 input)
{
    size_t word = ctx->c_idx >> 2;
    size_t byte = ctx->c_idx & 3;
    ctx->c[word] |= (u32)input << (byte * 8);
    ctx->c_idx++;
}

static void poly_update(crypto_poly1305_ctx *ctx,
                        const u8 *message, size_t message_size)
{
    FOR (i, 0, message_size) {
        poly_take_input(ctx, message[i]);
        if (ctx->c_idx == 16) {
            poly_block(ctx);
            poly_clear_c(ctx);
        }
    }
}

void crypto_poly1305_init(crypto_poly1305_ctx *ctx, const u8 key[32])
{
    // Initial hash is zero
    ZERO(ctx->h, 5);
    // add 2^130 to every input block
    ctx->c[4] = 1;
    poly_clear_c(ctx);
    // load r and pad (r has some of its bits cleared)
    load32_le_buf(ctx->r  , key   , 4);
    load32_le_buf(ctx->pad, key+16, 4);
    FOR (i, 0, 1) { ctx->r[i] &= 0x0fffffff; }
    FOR (i, 1, 4) { ctx->r[i] &= 0x0ffffffc; }
}

void crypto_poly1305_update(crypto_poly1305_ctx *ctx,
                            const u8 *message, size_t message_size)
{
    if (message_size == 0) {
        return;
    }
    // Align ourselves with block boundaries
    size_t aligned = MIN(align(ctx->c_idx, 16), message_size);
    poly_update(ctx, message, aligned);
    message      += aligned;
    message_size -= aligned;

    // Process the message block by block
    size_t nb_blocks = message_size >> 4;
    FOR (i, 0, nb_blocks) {
        load32_le_buf(ctx->c, message, 4);
        poly_block(ctx);
        message += 16;
    }
    if (nb_blocks > 0) {
        poly_clear_c(ctx);
    }
    message_size &= 15;

    // remaining bytes
    poly_update(ctx, message, message_size);
}

void crypto_poly1305_final(crypto_poly1305_ctx *ctx, u8 mac[16])
{
    // Process the last block (if any)
    if (ctx->c_idx != 0) {
        // move the final 1 according to remaining input length
        // (We may add less than 2^130 to the last input block)
        ctx->c[4] = 0;
        poly_take_input(ctx, 1);
        // one last hash update
        poly_block(ctx);
    }

    // check if we should subtract 2^130-5 by performing the
    // corresponding carry propagation.
    u64 c = 5;
    FOR (i, 0, 4) {
        c  += ctx->h[i];
        c >>= 32;
    }
    c += ctx->h[4];
    c  = (c >> 2) * 5; // shift the carry back to the beginning
    // c now indicates how many times we should subtract 2^130-5 (0 or 1)
    FOR (i, 0, 4) {
        c += (u64)ctx->h[i] + ctx->pad[i];
        store32_le(mac + i*4, (u32)c);
        c = c >> 32;
    }
    WIPE_CTX(ctx);
}

void crypto_poly1305(u8 mac[16], const u8 *message,
                     size_t message_size, const u8 key[32])
{
    crypto_poly1305_ctx ctx;
    crypto_poly1305_init  (&ctx, key);
    crypto_poly1305_update(&ctx, message, message_size);
    crypto_poly1305_final (&ctx, mac);
}
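
// Usage sketch (illustrative only): Poly1305 is a one-time
// authenticator, so each key must authenticate a single message (for
// instance a key derived from one ChaCha20 block, as in RFC 8439).
// The names are hypothetical.
#if 0
static int poly1305_example(const u8 one_time_key[32],
                            const u8 *msg, size_t msg_size,
                            const u8 received_mac[16])
{
    u8 mac[16];
    crypto_poly1305(mac, msg, msg_size, one_time_key);
    return crypto_verify16(mac, received_mac) == 0; // constant-time check
}
#endif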

////////////////
/// Blake2 b ///
////////////////
static const u64 iv[8] = {
    0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
    0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
    0x510e527fade682d1, 0x9b05688c2b3e6c1f,
    0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
};

// increment the input offset
static void blake2b_incr(crypto_blake2b_ctx *ctx)
{
    u64   *x = ctx->input_offset;
    size_t y = ctx->input_idx;
    x[0] += y;
    if (x[0] < y) {
        x[1]++;
    }
}

static void blake2b_compress(crypto_blake2b_ctx *ctx, int is_last_block)
{
    static const u8 sigma[12][16] = {
        {  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 },
        { 14, 10,  4,  8,  9, 15, 13,  6,  1, 12,  0,  2, 11,  7,  5,  3 },
        { 11,  8, 12,  0,  5,  2, 15, 13, 10, 14,  3,  6,  7,  1,  9,  4 },
        {  7,  9,  3,  1, 13, 12, 11, 14,  2,  6,  5, 10,  4,  0, 15,  8 },
        {  9,  0,  5,  7,  2,  4, 10, 15, 14,  1, 11, 12,  6,  8,  3, 13 },
        {  2, 12,  6, 10,  0, 11,  8,  3,  4, 13,  7,  5, 15, 14,  1,  9 },
        { 12,  5,  1, 15, 14, 13,  4, 10,  0,  7,  6,  3,  9,  2,  8, 11 },
        { 13, 11,  7, 14, 12,  1,  3,  9,  5,  0, 15,  4,  8,  6,  2, 10 },
        {  6, 15, 14,  9, 11,  3,  0,  8, 12,  2, 13,  7,  1,  4, 10,  5 },
        { 10,  2,  8,  4,  7,  6,  1,  5, 15, 11,  9, 14,  3, 12, 13,  0 },
        {  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 },
        { 14, 10,  4,  8,  9, 15, 13,  6,  1, 12,  0,  2, 11,  7,  5,  3 },
    };

    // init work vector
    u64 v0 = ctx->hash[0];  u64 v8  = iv[0];
    u64 v1 = ctx->hash[1];  u64 v9  = iv[1];
    u64 v2 = ctx->hash[2];  u64 v10 = iv[2];
    u64 v3 = ctx->hash[3];  u64 v11 = iv[3];
    u64 v4 = ctx->hash[4];  u64 v12 = iv[4] ^ ctx->input_offset[0];
    u64 v5 = ctx->hash[5];  u64 v13 = iv[5] ^ ctx->input_offset[1];
    u64 v6 = ctx->hash[6];  u64 v14 = iv[6] ^ (u64)~(is_last_block - 1);
    u64 v7 = ctx->hash[7];  u64 v15 = iv[7];

    // mangle work vector
    u64 *input = ctx->input;
#define BLAKE2_G(a, b, c, d, x, y)      \
    a += b + x;  d = rotr64(d ^ a, 32); \
    c += d;      b = rotr64(b ^ c, 24); \
    a += b + y;  d = rotr64(d ^ a, 16); \
    c += d;      b = rotr64(b ^ c, 63)
#define BLAKE2_ROUND(i)                                                   \
    BLAKE2_G(v0, v4, v8 , v12, input[sigma[i][ 0]], input[sigma[i][ 1]]); \
    BLAKE2_G(v1, v5, v9 , v13, input[sigma[i][ 2]], input[sigma[i][ 3]]); \
    BLAKE2_G(v2, v6, v10, v14, input[sigma[i][ 4]], input[sigma[i][ 5]]); \
    BLAKE2_G(v3, v7, v11, v15, input[sigma[i][ 6]], input[sigma[i][ 7]]); \
    BLAKE2_G(v0, v5, v10, v15, input[sigma[i][ 8]], input[sigma[i][ 9]]); \
    BLAKE2_G(v1, v6, v11, v12, input[sigma[i][10]], input[sigma[i][11]]); \
    BLAKE2_G(v2, v7, v8 , v13, input[sigma[i][12]], input[sigma[i][13]]); \
    BLAKE2_G(v3, v4, v9 , v14, input[sigma[i][14]], input[sigma[i][15]])

#ifdef BLAKE2_NO_UNROLLING
    FOR (i, 0, 12) {
        BLAKE2_ROUND(i);
    }
#else
    BLAKE2_ROUND(0);  BLAKE2_ROUND(1);  BLAKE2_ROUND(2);  BLAKE2_ROUND(3);
    BLAKE2_ROUND(4);  BLAKE2_ROUND(5);  BLAKE2_ROUND(6);  BLAKE2_ROUND(7);
    BLAKE2_ROUND(8);  BLAKE2_ROUND(9);  BLAKE2_ROUND(10); BLAKE2_ROUND(11);
#endif

    // update hash
    ctx->hash[0] ^= v0 ^ v8;   ctx->hash[1] ^= v1 ^ v9;
    ctx->hash[2] ^= v2 ^ v10;  ctx->hash[3] ^= v3 ^ v11;
    ctx->hash[4] ^= v4 ^ v12;  ctx->hash[5] ^= v5 ^ v13;
    ctx->hash[6] ^= v6 ^ v14;  ctx->hash[7] ^= v7 ^ v15;
}

static void blake2b_set_input(crypto_blake2b_ctx *ctx, u8 input, size_t index)
{
    if (index == 0) {
        ZERO(ctx->input, 16);
    }
    size_t word = index >> 3;
    size_t byte = index & 7;
    ctx->input[word] |= (u64)input << (byte << 3);
}

static void blake2b_end_block(crypto_blake2b_ctx *ctx)
{
    if (ctx->input_idx == 128) {  // If buffer is full,
        blake2b_incr(ctx);        // update the input offset
        blake2b_compress(ctx, 0); // and compress the (not last) block
        ctx->input_idx = 0;
    }
}

static void blake2b_update(crypto_blake2b_ctx *ctx,
                           const u8 *message, size_t message_size)
{
    FOR (i, 0, message_size) {
        blake2b_end_block(ctx);
        blake2b_set_input(ctx, message[i], ctx->input_idx);
        ctx->input_idx++;
    }
}

void crypto_blake2b_general_init(crypto_blake2b_ctx *ctx, size_t hash_size,
                                 const u8 *key, size_t key_size)
{
    // initial hash
    COPY(ctx->hash, iv, 8);
    ctx->hash[0] ^= 0x01010000 ^ (key_size << 8) ^ hash_size;

    ctx->input_offset[0] = 0;         // beginning of the input, no offset
    ctx->input_offset[1] = 0;         // beginning of the input, no offset
    ctx->hash_size       = hash_size; // remember the hash size we want
    ctx->input_idx       = 0;

    // if there is a key, the first block is that key (padded with zeroes)
    if (key_size > 0) {
        u8 key_block[128] = {0};
        COPY(key_block, key, key_size);
        // same as calling crypto_blake2b_update(ctx, key_block, 128)
        load64_le_buf(ctx->input, key_block, 16);
        ctx->input_idx = 128;
    }
}

void crypto_blake2b_init(crypto_blake2b_ctx *ctx)
{
    crypto_blake2b_general_init(ctx, 64, 0, 0);
}

void crypto_blake2b_update(crypto_blake2b_ctx *ctx,
                           const u8 *message, size_t message_size)
{
    if (message_size == 0) {
        return;
    }
    // Align ourselves with block boundaries
    size_t aligned = MIN(align(ctx->input_idx, 128), message_size);
    blake2b_update(ctx, message, aligned);
    message      += aligned;
    message_size -= aligned;

    // Process the message block by block
    FOR (i, 0, message_size >> 7) { // number of blocks
        blake2b_end_block(ctx);
        load64_le_buf(ctx->input, message, 16);
        message += 128;
        ctx->input_idx = 128;
    }
    message_size &= 127;

    // remaining bytes
    blake2b_update(ctx, message, message_size);
}

void crypto_blake2b_final(crypto_blake2b_ctx *ctx, u8 *hash)
{
    // Pad the end of the block with zeroes
    FOR (i, ctx->input_idx, 128) {
        blake2b_set_input(ctx, 0, i);
    }
    blake2b_incr(ctx);        // update the input offset
    blake2b_compress(ctx, 1); // compress the last block
    size_t nb_words = ctx->hash_size >> 3;
    store64_le_buf(hash, ctx->hash, nb_words);
    FOR (i, nb_words << 3, ctx->hash_size) {
        hash[i] = (ctx->hash[i >> 3] >> (8 * (i & 7))) & 0xff;
    }
    WIPE_CTX(ctx);
}

void crypto_blake2b_general(u8       *hash   , size_t hash_size,
                            const u8 *key    , size_t key_size,
                            const u8 *message, size_t message_size)
{
    crypto_blake2b_ctx ctx;
    crypto_blake2b_general_init(&ctx, hash_size, key, key_size);
    crypto_blake2b_update(&ctx, message, message_size);
    crypto_blake2b_final(&ctx, hash);
}

void crypto_blake2b(u8 hash[64], const u8 *message, size_t message_size)
{
    crypto_blake2b_general(hash, 64, 0, 0, message, message_size);
}
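
// Usage sketch (illustrative only): incremental hashing, which yields
// the same 64-byte digest as the one-shot crypto_blake2b over the
// concatenated input.  The buffer names are hypothetical.
#if 0
static void blake2b_example(const u8 *part1, size_t size1,
                            const u8 *part2, size_t size2,
                            u8 digest[64])
{
    crypto_blake2b_ctx ctx;
    crypto_blake2b_init  (&ctx);
    crypto_blake2b_update(&ctx, part1, size1);
    crypto_blake2b_update(&ctx, part2, size2);
    crypto_blake2b_final (&ctx, digest); // also wipes the context
}
#endif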

static void blake2b_vtable_init(void *ctx) {
    crypto_blake2b_init(&((crypto_sign_ctx*)ctx)->hash);
}
static void blake2b_vtable_update(void *ctx, const u8 *m, size_t s) {
    crypto_blake2b_update(&((crypto_sign_ctx*)ctx)->hash, m, s);
}
static void blake2b_vtable_final(void *ctx, u8 *h) {
    crypto_blake2b_final(&((crypto_sign_ctx*)ctx)->hash, h);
}
const crypto_sign_vtable crypto_blake2b_vtable = {
    crypto_blake2b,
    blake2b_vtable_init,
    blake2b_vtable_update,
    blake2b_vtable_final,
    sizeof(crypto_sign_ctx),
};

#if MONOCYPHER_ARGON2_ENABLE
////////////////
/// Argon2 i ///
////////////////
// references to R, Z, Q etc. come from the spec

// Argon2 operates on 1024 byte blocks.
typedef struct { u64 a[128]; } block;

static void wipe_block(block *b)
{
    volatile u64* a = b->a;
    ZERO(a, 128);
}

// updates a Blake2 hash with a 32 bit word, little endian.
static void blake_update_32(crypto_blake2b_ctx *ctx, u32 input)
{
    u8 buf[4];
    store32_le(buf, input);
    crypto_blake2b_update(ctx, buf, 4);
    WIPE_BUFFER(buf);
}

static void load_block(block *b, const u8 bytes[1024])
{
    load64_le_buf(b->a, bytes, 128);
}

static void store_block(u8 bytes[1024], const block *b)
{
    store64_le_buf(bytes, b->a, 128);
}

static void copy_block(block *o,const block*in){FOR(i,0,128)o->a[i] = in->a[i];}
static void  xor_block(block *o,const block*in){FOR(i,0,128)o->a[i]^= in->a[i];}

// Hash with a virtually unlimited digest size.
// Doesn't extract more entropy than the base hash function.
// Mainly used for filling a whole kilobyte block with pseudo-random bytes.
// (One could use a stream cipher with a seed hash as the key, but
// this would introduce another dependency —and point of failure.)
static void extended_hash(u8       *digest, u32 digest_size,
                          const u8 *input , u32 input_size)
{
    crypto_blake2b_ctx ctx;
    crypto_blake2b_general_init(&ctx, MIN(digest_size, 64), 0, 0);
    blake_update_32            (&ctx, digest_size);
    crypto_blake2b_update      (&ctx, input, input_size);
    crypto_blake2b_final       (&ctx, digest);

    if (digest_size > 64) {
        // the conversion to u64 avoids integer overflow on
        // ludicrously big hash sizes.
        u32 r   = (u32)(((u64)digest_size + 31) >> 5) - 2;
        u32 i   =  1;
        u32 in  =  0;
        u32 out = 32;
        while (i < r) {
            // Input and output overlap. This is intentional
            crypto_blake2b(digest + out, digest + in, 64);
            i   +=  1;
            in  += 32;
            out += 32;
        }
        crypto_blake2b_general(digest + out, digest_size - (32 * r),
                               0, 0, // no key
                               digest + in , 64);
    }
}

#define LSB(x) ((x) & 0xffffffff)
#define G(a, b, c, d)                                          \
    a += b + 2 * LSB(a) * LSB(b);  d ^= a;  d = rotr64(d, 32); \
    c += d + 2 * LSB(c) * LSB(d);  b ^= c;  b = rotr64(b, 24); \
    a += b + 2 * LSB(a) * LSB(b);  d ^= a;  d = rotr64(d, 16); \
    c += d + 2 * LSB(c) * LSB(d);  b ^= c;  b = rotr64(b, 63)
#define ROUND(v0,  v1,  v2,  v3,  v4,  v5,  v6,  v7, \
              v8,  v9, v10, v11, v12, v13, v14, v15) \
    G(v0, v4, v8 , v12);  G(v1, v5, v9 , v13);       \
    G(v2, v6, v10, v14);  G(v3, v7, v11, v15);       \
    G(v0, v5, v10, v15);  G(v1, v6, v11, v12);       \
    G(v2, v7, v8 , v13);  G(v3, v4, v9 , v14)

// Core of the compression function G. Computes Z from R in place.
static void g_rounds(block *work_block)
{
    // column rounds (work_block = Q)
    for (int i = 0; i < 128; i += 16) {
        ROUND(work_block->a[i     ], work_block->a[i +  1],
              work_block->a[i +  2], work_block->a[i +  3],
              work_block->a[i +  4], work_block->a[i +  5],
              work_block->a[i +  6], work_block->a[i +  7],
              work_block->a[i +  8], work_block->a[i +  9],
              work_block->a[i + 10], work_block->a[i + 11],
              work_block->a[i + 12], work_block->a[i + 13],
              work_block->a[i + 14], work_block->a[i + 15]);
    }
    // row rounds (work_block = Z)
    for (int i = 0; i < 16; i += 2) {
        ROUND(work_block->a[i      ], work_block->a[i +   1],
              work_block->a[i +  16], work_block->a[i +  17],
              work_block->a[i +  32], work_block->a[i +  33],
              work_block->a[i +  48], work_block->a[i +  49],
              work_block->a[i +  64], work_block->a[i +  65],
              work_block->a[i +  80], work_block->a[i +  81],
              work_block->a[i +  96], work_block->a[i +  97],
              work_block->a[i + 112], work_block->a[i + 113]);
    }
}

// The compression function G (copy version for the first pass)
static void g_copy(block *result, const block *x, const block *y, block* tmp)
{
    copy_block(tmp   , x  ); // tmp    = X
    xor_block (tmp   , y  ); // tmp    = X ^ Y = R
    copy_block(result, tmp); // result = R       (only difference with g_xor)
    g_rounds  (tmp);         // tmp    = Z
    xor_block (result, tmp); // result = R ^ Z
}

// The compression function G (xor version for subsequent passes)
static void g_xor(block *result, const block *x, const block *y, block *tmp)
{
    copy_block(tmp   , x  ); // tmp    = X
    xor_block (tmp   , y  ); // tmp    = X ^ Y = R
    xor_block (result, tmp); // result = R ^ old (only difference with g_copy)
    g_rounds  (tmp);         // tmp    = Z
    xor_block (result, tmp); // result = R ^ old ^ Z
}

// Unary version of the compression function.
// The missing argument is implied zero.
// Does the transformation in place.
static void unary_g(block *work_block, block *tmp)
{
    // work_block == R
    copy_block(tmp, work_block); // tmp        = R
    g_rounds  (work_block);      // work_block = Z
    xor_block (work_block, tmp); // work_block = Z ^ R
}

// Argon2i uses a kind of stream cipher to determine which reference
// block it will take to synthesise the next block.  This context holds
// that stream's state.  (It's very similar to Chacha20.  The block b
// is analogous to Chacha's own pool)
typedef struct {
    block b;
    u32   pass_number;
    u32   slice_number;
    u32   nb_blocks;
    u32   nb_iterations;
    u32   ctr;
    u32   offset;
} gidx_ctx;

// The block in the context will determine array indices. To avoid
// timing attacks, it only depends on public information.  No looking
// at a previous block to seed the next.  This makes offline attacks
// easier, but timing attacks are the bigger threat in many settings.
static void gidx_refresh(gidx_ctx *ctx)
{
    // seed the beginning of the block...
    ctx->b.a[0] = ctx->pass_number;
    ctx->b.a[1] = 0;  // lane number (we have only one)
    ctx->b.a[2] = ctx->slice_number;
    ctx->b.a[3] = ctx->nb_blocks;
    ctx->b.a[4] = ctx->nb_iterations;
    ctx->b.a[5] = 1;  // type: Argon2i
    ctx->b.a[6] = ctx->ctr;
    ZERO(ctx->b.a + 7, 121); // ...then zero the rest out

    // Shuffle the block thus: ctx->b = G((G(ctx->b, zero)), zero)
    // (G "square" function), to get cheap pseudo-random numbers.
    block tmp;
    unary_g(&ctx->b, &tmp);
    unary_g(&ctx->b, &tmp);
    wipe_block(&tmp);
}

static void gidx_init(gidx_ctx *ctx,
                      u32 pass_number, u32 slice_number,
                      u32 nb_blocks,   u32 nb_iterations)
{
    ctx->pass_number   = pass_number;
    ctx->slice_number  = slice_number;
    ctx->nb_blocks     = nb_blocks;
    ctx->nb_iterations = nb_iterations;
    ctx->ctr           = 0;

    // Offset from the beginning of the segment.  For the first slice
    // of the first pass, we start at the *third* block, so the offset
    // starts at 2, not 0.
    if (pass_number != 0 || slice_number != 0) {
        ctx->offset = 0;
    } else {
        ctx->offset = 2;
        ctx->ctr++;         // Compensates for missed lazy creation
        gidx_refresh(ctx);  // at the start of gidx_next()
    }
}

static u32 gidx_next(gidx_ctx *ctx)
{
    // lazily creates the offset block we need
    if ((ctx->offset & 127) == 0) {
        ctx->ctr++;
        gidx_refresh(ctx);
    }
    u32 index  = ctx->offset & 127; // save index  for current call
    u32 offset = ctx->offset;       // save offset for current call
    ctx->offset++;                  // update offset for next call

    // Computes the area size.
    // Pass 0 : all already finished segments plus already constructed
    //          blocks in this segment
    // Pass 1+: 3 last segments plus already constructed
    //          blocks in this segment.  THE SPEC SUGGESTS OTHERWISE.
    //          I CONFORM TO THE REFERENCE IMPLEMENTATION.
    int first_pass  = ctx->pass_number == 0;
    u32 slice_size  = ctx->nb_blocks >> 2;
    u32 nb_segments = first_pass ? ctx->slice_number : 3;
    u32 area_size   = nb_segments * slice_size + offset - 1;

    // Computes the starting position of the reference area.
    // CONTRARY TO WHAT THE SPEC SUGGESTS, IT STARTS AT THE
    // NEXT SEGMENT, NOT THE NEXT BLOCK.
    u32 next_slice = ((ctx->slice_number + 1) & 3) * slice_size;
    u32 start_pos  = first_pass ? 0 : next_slice;

    // Generate offset from J1 (no need for J2, there's only one lane)
    u64 j1  = ctx->b.a[index] & 0xffffffff; // pseudo-random number
    u64 x   = (j1 * j1)       >> 32;
    u64 y   = (area_size * x) >> 32;
    u64 z   = (area_size - 1) - y;
    u64 ref = start_pos + z;                // ref < 2 * nb_blocks
    return (u32)(ref < ctx->nb_blocks ? ref : ref - ctx->nb_blocks);
}

// Main algorithm
void crypto_argon2i_general(u8       *hash,      u32 hash_size,
                            void     *work_area, u32 nb_blocks,
                            u32 nb_iterations,
                            const u8 *password,  u32 password_size,
                            const u8 *salt,      u32 salt_size,
                            const u8 *key,       u32 key_size,
                            const u8 *ad,        u32 ad_size)
{
    // work area seen as blocks (must be suitably aligned)
    block *blocks = (block*)work_area;
    {
        crypto_blake2b_ctx ctx;
        crypto_blake2b_init(&ctx);

        blake_update_32      (&ctx, 1            ); // p: number of threads
        blake_update_32      (&ctx, hash_size    );
        blake_update_32      (&ctx, nb_blocks    );
        blake_update_32      (&ctx, nb_iterations);
        blake_update_32      (&ctx, 0x13         ); // v: version number
        blake_update_32      (&ctx, 1            ); // y: Argon2i
        blake_update_32      (&ctx, password_size);
        crypto_blake2b_update(&ctx, password, password_size);
        blake_update_32      (&ctx, salt_size);
        crypto_blake2b_update(&ctx, salt, salt_size);
        blake_update_32      (&ctx, key_size);
        crypto_blake2b_update(&ctx, key, key_size);
        blake_update_32      (&ctx, ad_size);
        crypto_blake2b_update(&ctx, ad, ad_size);

        u8 initial_hash[72]; // 64 bytes plus 2 words for future hashes
        crypto_blake2b_final(&ctx, initial_hash);

        // fill first 2 blocks
        block tmp_block;
        u8    hash_area[1024];
        store32_le(initial_hash + 64, 0); // first  additional word
        store32_le(initial_hash + 68, 0); // second additional word
        extended_hash(hash_area, 1024, initial_hash, 72);
        load_block(&tmp_block, hash_area);
        copy_block(blocks, &tmp_block);

        store32_le(initial_hash + 64, 1); // slight modification
        extended_hash(hash_area, 1024, initial_hash, 72);
        load_block(&tmp_block, hash_area);
        copy_block(blocks + 1, &tmp_block);

        WIPE_BUFFER(initial_hash);
        WIPE_BUFFER(hash_area);
        wipe_block(&tmp_block);
    }
    // Actual number of blocks
    nb_blocks -= nb_blocks & 3; // round down to a multiple of 4*p (p == 1 thread)
    const u32 segment_size = nb_blocks >> 2;

    // fill (then re-fill) the rest of the blocks
    block    tmp;
    gidx_ctx ctx; // public information, no need to wipe
    FOR_T (u32, pass_number, 0, nb_iterations) {
        int first_pass = pass_number == 0;

        FOR_T (u32, segment, 0, 4) {
            gidx_init(&ctx, pass_number, segment, nb_blocks, nb_iterations);

            // On the first segment of the first pass,
            // blocks 0 and 1 are already filled.
            // We use the offset to skip them.
            u32 start_offset  = first_pass && segment == 0 ? 2 : 0;
            u32 segment_start = segment * segment_size + start_offset;
            u32 segment_end   = (segment + 1) * segment_size;
            FOR_T (u32, current_block, segment_start, segment_end) {
                u32 reference_block = gidx_next(&ctx);
                u32 previous_block  = current_block == 0
                                    ? nb_blocks - 1
                                    : current_block - 1;
                block *c = blocks + current_block;
                block *p = blocks + previous_block;
                block *r = blocks + reference_block;
                if (first_pass) { g_copy(c, p, r, &tmp); }
                else            { g_xor (c, p, r, &tmp); }
            }
        }
    }
    wipe_block(&tmp);

    u8 final_block[1024];
    store_block(final_block, blocks + (nb_blocks - 1));

    // wipe work area
    volatile u64 *p = (u64*)work_area;
    ZERO(p, 128 * nb_blocks);

    // hash the very last block with H' into the output hash
    extended_hash(hash, hash_size, final_block, 1024);
    WIPE_BUFFER(final_block);
}

void crypto_argon2i(u8 *hash, u32 hash_size,
                    void *work_area, u32 nb_blocks, u32 nb_iterations,
                    const u8 *password, u32 password_size,
                    const u8 *salt,     u32 salt_size)
{
    crypto_argon2i_general(hash, hash_size, work_area, nb_blocks, nb_iterations,
                           password, password_size, salt, salt_size, 0,0,0,0);
}
#endif // MONOCYPHER_ARGON2_ENABLE
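
// Usage sketch (illustrative only): deriving a key from a password.
// Note that this build defines MONOCYPHER_ARGON2_ENABLE as 0, so
// crypto_argon2i is compiled out here; the sketch only shows the call
// shape.  All names and parameter choices are hypothetical.
#if 0
static void argon2i_example(u8 derived_key[32],
                            const u8 *password, u32 password_size,
                            const u8 salt[16])
{
    // 10240 blocks of 1024 bytes (10 MiB), u64-aligned as required
    static u64 work_area[10 * 1024 * 128];
    crypto_argon2i(derived_key, 32,
                   work_area, 10 * 1024, // nb_blocks (1 KiB each)
                   3,                    // nb_iterations
                   password, password_size,
                   salt, 16);
}
#endif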

////////////////////////////////////
/// Arithmetic modulo 2^255 - 19 ///
////////////////////////////////////
// Originally taken from SUPERCOP's ref10 implementation.
// A bit bigger than TweetNaCl, over 4 times faster.

// field element
typedef i32 fe[10];

// field constants
//
// fe_one      : 1
// sqrtm1      : sqrt(-1)
// d           : -121665 / 121666
// D2          : 2 * -121665 / 121666
// lop_x, lop_y: low order point in Edwards coordinates
// ufactor     : -sqrt(-1) * 2
// A2          : 486662^2  (A squared)
static const fe fe_one  = {1};
static const fe sqrtm1  = {-32595792, -7943725, 9377950, 3500415, 12389472,
                           -272473, -25146209, -2005654, 326686, 11406482,};
static const fe d       = {-10913610, 13857413, -15372611, 6949391, 114729,
                           -8787816, -6275908, -3247719, -18696448, -12055116,};
static const fe D2      = {-21827239, -5839606, -30745221, 13898782, 229458,
                           15978800, -12551817, -6495438, 29715968, 9444199,};
static const fe lop_x   = {21352778, 5345713, 4660180, -8347857, 24143090,
                           14568123, 30185756, -12247770, -33528939, 8345319,};
static const fe lop_y   = {-6952922, -1265500, 6862341, -7057498, -4037696,
                           -5447722, 31680899, -15325402, -19365852, 1569102,};
static const fe ufactor = {-1917299, 15887451, -18755900, -7000830, -24778944,
                           544946, -16816446, 4011309, -653372, 10741468,};
static const fe A2      = {12721188, 3529, 0, 0, 0, 0, 0, 0, 0, 0,};

static void fe_0(fe h) {           ZERO(h  , 10); }
static void fe_1(fe h) { h[0] = 1; ZERO(h+1,  9); }

static void fe_copy(fe h,const fe f           ){FOR(i,0,10) h[i] =  f[i];      }
static void fe_neg (fe h,const fe f           ){FOR(i,0,10) h[i] = -f[i];      }
static void fe_add (fe h,const fe f,const fe g){FOR(i,0,10) h[i] = f[i] + g[i];}
static void fe_sub (fe h,const fe f,const fe g){FOR(i,0,10) h[i] = f[i] - g[i];}

static void fe_cswap(fe f, fe g, int b)
{
    i32 mask = -b; // -1 = 0xffffffff
    FOR (i, 0, 10) {
        i32 x = (f[i] ^ g[i]) & mask;
        f[i] = f[i] ^ x;
        g[i] = g[i] ^ x;
    }
}

static void fe_ccopy(fe f, const fe g, int b)
{
    i32 mask = -b; // -1 = 0xffffffff
    FOR (i, 0, 10) {
        i32 x = (f[i] ^ g[i]) & mask;
        f[i] = f[i] ^ x;
    }
}


// Signed carry propagation
// ------------------------
//
// Let t be a number.  It can be uniquely decomposed thus:
//
//   t = h*2^26 + l
//   such that -2^25 <= l < 2^25
//
// Let c = (t + 2^25) / 2^26            (rounded down)
//     c = (h*2^26 + l + 2^25) / 2^26   (rounded down)
//     c = h + (l + 2^25) / 2^26        (rounded down)
//     c = h                            (exactly)
// Because 0 <= l + 2^25 < 2^26
//
// Let u = t - c*2^26
//     u = h*2^26 + l - h*2^26
//     u = l
// Therefore, -2^25 <= u < 2^25
//
// Additionally, if |t| < x, then |h| < x/2^26 (rounded down)
//
// Notations:
// - In C, 1<<25 means 2^25.
// - In C, x>>25 means floor(x / (2^25)).
// - All of the above applies with 25 & 24 as well as 26 & 25.
//
//
// Note on negative right shifts
// -----------------------------
//
// In C, x >> n, where x is a negative integer, is implementation
// defined.  In practice, all platforms do arithmetic shift, which is
// equivalent to division by 2^26, rounded down.  Some compilers, like
// GCC, even guarantee it.
//
// If we ever stumble upon a platform that does not propagate the sign
// bit (we won't), visible failures will show at the slightest test, and
// the signed shifts can be replaced by the following:
//
//     typedef struct { i64 x:39; } s25;
//     typedef struct { i64 x:38; } s26;
//     i64 shift25(i64 x) { s25 s; s.x = ((u64)x)>>25; return s.x; }
//     i64 shift26(i64 x) { s26 s; s.x = ((u64)x)>>26; return s.x; }
//
// Current compilers cannot optimise this, causing a 30% drop in
// performance.  Fairly expensive for something that never happens.
//
//
// Precondition
// ------------
//
//   |t0|       < 2^63
//   |t1|..|t9| < 2^62
//
// Algorithm
// ---------
//   c   = (t0 + 2^25) / 2^26   -- |c|  <= 2^36
//   t0 -= c * 2^26             -- |t0| <= 2^25
//   t1 += c                    -- |t1| <= 2^63
//
//   c   = (t4 + 2^25) / 2^26   -- |c|  <= 2^36
//   t4 -= c * 2^26             -- |t4| <= 2^25
//   t5 += c                    -- |t5| <= 2^63
//
//   c   = (t1 + 2^24) / 2^25   -- |c|  <= 2^38
//   t1 -= c * 2^25             -- |t1| <= 2^24
//   t2 += c                    -- |t2| <= 2^63
//
//   c   = (t5 + 2^24) / 2^25   -- |c|  <= 2^38
//   t5 -= c * 2^25             -- |t5| <= 2^24
//   t6 += c                    -- |t6| <= 2^63
//
//   c   = (t2 + 2^25) / 2^26   -- |c|  <= 2^37
//   t2 -= c * 2^26             -- |t2| <= 2^25 < 1.1 * 2^25  (final t2)
//   t3 += c                    -- |t3| <= 2^63
//
//   c   = (t6 + 2^25) / 2^26   -- |c|  <= 2^37
//   t6 -= c * 2^26             -- |t6| <= 2^25 < 1.1 * 2^25  (final t6)
//   t7 += c                    -- |t7| <= 2^63
//
//   c   = (t3 + 2^24) / 2^25   -- |c|  <= 2^38
//   t3 -= c * 2^25             -- |t3| <= 2^24 < 1.1 * 2^24  (final t3)
//   t4 += c                    -- |t4| <= 2^25 + 2^38 < 2^39
//
//   c   = (t7 + 2^24) / 2^25   -- |c|  <= 2^38
//   t7 -= c * 2^25             -- |t7| <= 2^24 < 1.1 * 2^24  (final t7)
//   t8 += c                    -- |t8| <= 2^63
//
//   c   = (t4 + 2^25) / 2^26   -- |c|  <= 2^13
//   t4 -= c * 2^26             -- |t4| <= 2^25 < 1.1 * 2^25  (final t4)
//   t5 += c                    -- |t5| <= 2^24 + 2^13 < 1.1 * 2^24  (final t5)
//
//   c   = (t8 + 2^25) / 2^26   -- |c|  <= 2^37
//   t8 -= c * 2^26             -- |t8| <= 2^25 < 1.1 * 2^25  (final t8)
//   t9 += c                    -- |t9| <= 2^63
//
//   c   = (t9 + 2^24) / 2^25   -- |c|  <= 2^38
//   t9 -= c * 2^25             -- |t9| <= 2^24 < 1.1 * 2^24  (final t9)
//   t0 += c * 19               -- |t0| <= 2^25 + 2^38*19 < 2^44
//
//   c   = (t0 + 2^25) / 2^26   -- |c|  <= 2^18
//   t0 -= c * 2^26             -- |t0| <= 2^25 < 1.1 * 2^25  (final t0)
//   t1 += c                    -- |t1| <= 2^24 + 2^18 < 1.1 * 2^24  (final t1)
//
// Postcondition
// -------------
//   |t0|, |t2|, |t4|, |t6|, |t8| < 1.1 * 2^25
//   |t1|, |t3|, |t5|, |t7|, |t9| < 1.1 * 2^24
#define FE_CARRY                                                             \
    i64 c;                                                                   \
    c = (t0 + ((i64)1<<25)) >> 26;  t0 -= c * ((i64)1 << 26);  t1 += c;      \
    c = (t4 + ((i64)1<<25)) >> 26;  t4 -= c * ((i64)1 << 26);  t5 += c;      \
    c = (t1 + ((i64)1<<24)) >> 25;  t1 -= c * ((i64)1 << 25);  t2 += c;      \
    c = (t5 + ((i64)1<<24)) >> 25;  t5 -= c * ((i64)1 << 25);  t6 += c;      \
    c = (t2 + ((i64)1<<25)) >> 26;  t2 -= c * ((i64)1 << 26);  t3 += c;      \
    c = (t6 + ((i64)1<<25)) >> 26;  t6 -= c * ((i64)1 << 26);  t7 += c;      \
    c = (t3 + ((i64)1<<24)) >> 25;  t3 -= c * ((i64)1 << 25);  t4 += c;      \
    c = (t7 + ((i64)1<<24)) >> 25;  t7 -= c * ((i64)1 << 25);  t8 += c;      \
    c = (t4 + ((i64)1<<25)) >> 26;  t4 -= c * ((i64)1 << 26);  t5 += c;      \
    c = (t8 + ((i64)1<<25)) >> 26;  t8 -= c * ((i64)1 << 26);  t9 += c;      \
    c = (t9 + ((i64)1<<24)) >> 25;  t9 -= c * ((i64)1 << 25);  t0 += c * 19; \
    c = (t0 + ((i64)1<<25)) >> 26;  t0 -= c * ((i64)1 << 26);  t1 += c;      \
    h[0]=(i32)t0;  h[1]=(i32)t1;  h[2]=(i32)t2;  h[3]=(i32)t3;  h[4]=(i32)t4; \
    h[5]=(i32)t5;  h[6]=(i32)t6;  h[7]=(i32)t7;  h[8]=(i32)t8;  h[9]=(i32)t9
  1108. static void fe_frombytes(fe h, const u8 s[32])
  1109. {
  1110. i64 t0 = load32_le(s); // t0 < 2^32
  1111. i64 t1 = load24_le(s + 4) << 6; // t1 < 2^30
  1112. i64 t2 = load24_le(s + 7) << 5; // t2 < 2^29
  1113. i64 t3 = load24_le(s + 10) << 3; // t3 < 2^27
  1114. i64 t4 = load24_le(s + 13) << 2; // t4 < 2^26
  1115. i64 t5 = load32_le(s + 16); // t5 < 2^32
  1116. i64 t6 = load24_le(s + 20) << 7; // t6 < 2^31
  1117. i64 t7 = load24_le(s + 23) << 5; // t7 < 2^29
  1118. i64 t8 = load24_le(s + 26) << 4; // t8 < 2^28
  1119. i64 t9 = (load24_le(s + 29) & 0x7fffff) << 2; // t9 < 2^25
1120. FE_CARRY; // Carry precondition OK
  1121. }
  1122. // Precondition
  1123. // |h[0]|, |h[2]|, |h[4]|, |h[6]|, |h[8]| < 1.1 * 2^25
  1124. // |h[1]|, |h[3]|, |h[5]|, |h[7]|, |h[9]| < 1.1 * 2^24
  1125. //
  1126. // Therefore, |h| < 2^255-19
  1127. // There are two possibilities:
  1128. //
  1129. // - If h is positive, all we need to do is reduce its individual
  1130. // limbs down to their tight positive range.
  1131. // - If h is negative, we also need to add 2^255-19 to it.
  1132. // Or just remove 19 and chop off any excess bit.
  1133. static void fe_tobytes(u8 s[32], const fe h)
  1134. {
  1135. i32 t[10];
  1136. COPY(t, h, 10);
  1137. i32 q = (19 * t[9] + (((i32) 1) << 24)) >> 25;
  1138. // |t9| < 1.1 * 2^24
  1139. // -1.1 * 2^24 < t9 < 1.1 * 2^24
  1140. // -21 * 2^24 < 19 * t9 < 21 * 2^24
  1141. // -2^29 < 19 * t9 + 2^24 < 2^29
  1142. // -2^29 / 2^25 < (19 * t9 + 2^24) / 2^25 < 2^29 / 2^25
  1143. // -16 < (19 * t9 + 2^24) / 2^25 < 16
  1144. FOR (i, 0, 5) {
  1145. q += t[2*i ]; q >>= 26; // q = 0 or -1
  1146. q += t[2*i+1]; q >>= 25; // q = 0 or -1
  1147. }
  1148. // q = 0 iff h >= 0
  1149. // q = -1 iff h < 0
  1150. // Adding q * 19 to h reduces h to its proper range.
  1151. q *= 19; // Shift carry back to the beginning
  1152. FOR (i, 0, 5) {
  1153. t[i*2 ] += q; q = t[i*2 ] >> 26; t[i*2 ] -= q * ((i32)1 << 26);
  1154. t[i*2+1] += q; q = t[i*2+1] >> 25; t[i*2+1] -= q * ((i32)1 << 25);
  1155. }
  1156. // h is now fully reduced, and q represents the excess bit.
  1157. store32_le(s + 0, ((u32)t[0] >> 0) | ((u32)t[1] << 26));
  1158. store32_le(s + 4, ((u32)t[1] >> 6) | ((u32)t[2] << 19));
  1159. store32_le(s + 8, ((u32)t[2] >> 13) | ((u32)t[3] << 13));
  1160. store32_le(s + 12, ((u32)t[3] >> 19) | ((u32)t[4] << 6));
  1161. store32_le(s + 16, ((u32)t[5] >> 0) | ((u32)t[6] << 25));
  1162. store32_le(s + 20, ((u32)t[6] >> 7) | ((u32)t[7] << 19));
  1163. store32_le(s + 24, ((u32)t[7] >> 13) | ((u32)t[8] << 12));
  1164. store32_le(s + 28, ((u32)t[8] >> 20) | ((u32)t[9] << 6));
  1165. WIPE_BUFFER(t);
  1166. }
  1167. // Precondition
  1168. // -------------
  1169. // |f0|, |f2|, |f4|, |f6|, |f8| < 1.65 * 2^26
  1170. // |f1|, |f3|, |f5|, |f7|, |f9| < 1.65 * 2^25
1171. //
1172. // g is a plain i32, so |g| < 2^31
  1174. static void fe_mul_small(fe h, const fe f, i32 g)
  1175. {
  1176. i64 t0 = f[0] * (i64) g; i64 t1 = f[1] * (i64) g;
  1177. i64 t2 = f[2] * (i64) g; i64 t3 = f[3] * (i64) g;
  1178. i64 t4 = f[4] * (i64) g; i64 t5 = f[5] * (i64) g;
  1179. i64 t6 = f[6] * (i64) g; i64 t7 = f[7] * (i64) g;
  1180. i64 t8 = f[8] * (i64) g; i64 t9 = f[9] * (i64) g;
  1181. // |t0|, |t2|, |t4|, |t6|, |t8| < 1.65 * 2^26 * 2^31 < 2^58
  1182. // |t1|, |t3|, |t5|, |t7|, |t9| < 1.65 * 2^25 * 2^31 < 2^57
  1183. FE_CARRY; // Carry precondition OK
  1184. }
  1185. // Precondition
  1186. // -------------
  1187. // |f0|, |f2|, |f4|, |f6|, |f8| < 1.65 * 2^26
  1188. // |f1|, |f3|, |f5|, |f7|, |f9| < 1.65 * 2^25
  1189. //
  1190. // |g0|, |g2|, |g4|, |g6|, |g8| < 1.65 * 2^26
  1191. // |g1|, |g3|, |g5|, |g7|, |g9| < 1.65 * 2^25
  1192. static void fe_mul(fe h, const fe f, const fe g)
  1193. {
  1194. // Everything is unrolled and put in temporary variables.
  1195. // We could roll the loop, but that would make curve25519 twice as slow.
  1196. i32 f0 = f[0]; i32 f1 = f[1]; i32 f2 = f[2]; i32 f3 = f[3]; i32 f4 = f[4];
  1197. i32 f5 = f[5]; i32 f6 = f[6]; i32 f7 = f[7]; i32 f8 = f[8]; i32 f9 = f[9];
  1198. i32 g0 = g[0]; i32 g1 = g[1]; i32 g2 = g[2]; i32 g3 = g[3]; i32 g4 = g[4];
  1199. i32 g5 = g[5]; i32 g6 = g[6]; i32 g7 = g[7]; i32 g8 = g[8]; i32 g9 = g[9];
  1200. i32 F1 = f1*2; i32 F3 = f3*2; i32 F5 = f5*2; i32 F7 = f7*2; i32 F9 = f9*2;
  1201. i32 G1 = g1*19; i32 G2 = g2*19; i32 G3 = g3*19;
  1202. i32 G4 = g4*19; i32 G5 = g5*19; i32 G6 = g6*19;
  1203. i32 G7 = g7*19; i32 G8 = g8*19; i32 G9 = g9*19;
  1204. // |F1|, |F3|, |F5|, |F7|, |F9| < 1.65 * 2^26
1205. // |G2|, |G4|, |G6|, |G8| < 2^31
  1206. // |G1|, |G3|, |G5|, |G7|, |G9| < 2^30
  1207. i64 t0 = f0*(i64)g0 + F1*(i64)G9 + f2*(i64)G8 + F3*(i64)G7 + f4*(i64)G6
  1208. + F5*(i64)G5 + f6*(i64)G4 + F7*(i64)G3 + f8*(i64)G2 + F9*(i64)G1;
  1209. i64 t1 = f0*(i64)g1 + f1*(i64)g0 + f2*(i64)G9 + f3*(i64)G8 + f4*(i64)G7
  1210. + f5*(i64)G6 + f6*(i64)G5 + f7*(i64)G4 + f8*(i64)G3 + f9*(i64)G2;
  1211. i64 t2 = f0*(i64)g2 + F1*(i64)g1 + f2*(i64)g0 + F3*(i64)G9 + f4*(i64)G8
  1212. + F5*(i64)G7 + f6*(i64)G6 + F7*(i64)G5 + f8*(i64)G4 + F9*(i64)G3;
  1213. i64 t3 = f0*(i64)g3 + f1*(i64)g2 + f2*(i64)g1 + f3*(i64)g0 + f4*(i64)G9
  1214. + f5*(i64)G8 + f6*(i64)G7 + f7*(i64)G6 + f8*(i64)G5 + f9*(i64)G4;
  1215. i64 t4 = f0*(i64)g4 + F1*(i64)g3 + f2*(i64)g2 + F3*(i64)g1 + f4*(i64)g0
  1216. + F5*(i64)G9 + f6*(i64)G8 + F7*(i64)G7 + f8*(i64)G6 + F9*(i64)G5;
  1217. i64 t5 = f0*(i64)g5 + f1*(i64)g4 + f2*(i64)g3 + f3*(i64)g2 + f4*(i64)g1
  1218. + f5*(i64)g0 + f6*(i64)G9 + f7*(i64)G8 + f8*(i64)G7 + f9*(i64)G6;
  1219. i64 t6 = f0*(i64)g6 + F1*(i64)g5 + f2*(i64)g4 + F3*(i64)g3 + f4*(i64)g2
  1220. + F5*(i64)g1 + f6*(i64)g0 + F7*(i64)G9 + f8*(i64)G8 + F9*(i64)G7;
  1221. i64 t7 = f0*(i64)g7 + f1*(i64)g6 + f2*(i64)g5 + f3*(i64)g4 + f4*(i64)g3
  1222. + f5*(i64)g2 + f6*(i64)g1 + f7*(i64)g0 + f8*(i64)G9 + f9*(i64)G8;
  1223. i64 t8 = f0*(i64)g8 + F1*(i64)g7 + f2*(i64)g6 + F3*(i64)g5 + f4*(i64)g4
  1224. + F5*(i64)g3 + f6*(i64)g2 + F7*(i64)g1 + f8*(i64)g0 + F9*(i64)G9;
  1225. i64 t9 = f0*(i64)g9 + f1*(i64)g8 + f2*(i64)g7 + f3*(i64)g6 + f4*(i64)g5
  1226. + f5*(i64)g4 + f6*(i64)g3 + f7*(i64)g2 + f8*(i64)g1 + f9*(i64)g0;
  1227. // t0 < 0.67 * 2^61
  1228. // t1 < 0.41 * 2^61
  1229. // t2 < 0.52 * 2^61
  1230. // t3 < 0.32 * 2^61
  1231. // t4 < 0.38 * 2^61
  1232. // t5 < 0.22 * 2^61
  1233. // t6 < 0.23 * 2^61
  1234. // t7 < 0.13 * 2^61
  1235. // t8 < 0.09 * 2^61
  1236. // t9 < 0.03 * 2^61
  1237. FE_CARRY; // Everything below 2^62, Carry precondition OK
  1238. }
  1239. // Precondition
  1240. // -------------
  1241. // |f0|, |f2|, |f4|, |f6|, |f8| < 1.65 * 2^26
  1242. // |f1|, |f3|, |f5|, |f7|, |f9| < 1.65 * 2^25
  1243. //
  1244. // Note: we could use fe_mul() for this, but this is significantly faster
  1245. static void fe_sq(fe h, const fe f)
  1246. {
  1247. i32 f0 = f[0]; i32 f1 = f[1]; i32 f2 = f[2]; i32 f3 = f[3]; i32 f4 = f[4];
  1248. i32 f5 = f[5]; i32 f6 = f[6]; i32 f7 = f[7]; i32 f8 = f[8]; i32 f9 = f[9];
  1249. i32 f0_2 = f0*2; i32 f1_2 = f1*2; i32 f2_2 = f2*2; i32 f3_2 = f3*2;
  1250. i32 f4_2 = f4*2; i32 f5_2 = f5*2; i32 f6_2 = f6*2; i32 f7_2 = f7*2;
  1251. i32 f5_38 = f5*38; i32 f6_19 = f6*19; i32 f7_38 = f7*38;
  1252. i32 f8_19 = f8*19; i32 f9_38 = f9*38;
1253. // |f0_2| , |f2_2| , |f4_2| , |f6_2| < 1.65 * 2^27
1254. // |f1_2| , |f3_2| , |f5_2| , |f7_2| < 1.65 * 2^26
  1255. // |f5_38|, |f6_19|, |f7_38|, |f8_19|, |f9_38| < 2^31
  1256. i64 t0 = f0 *(i64)f0 + f1_2*(i64)f9_38 + f2_2*(i64)f8_19
  1257. + f3_2*(i64)f7_38 + f4_2*(i64)f6_19 + f5 *(i64)f5_38;
  1258. i64 t1 = f0_2*(i64)f1 + f2 *(i64)f9_38 + f3_2*(i64)f8_19
  1259. + f4 *(i64)f7_38 + f5_2*(i64)f6_19;
  1260. i64 t2 = f0_2*(i64)f2 + f1_2*(i64)f1 + f3_2*(i64)f9_38
  1261. + f4_2*(i64)f8_19 + f5_2*(i64)f7_38 + f6 *(i64)f6_19;
  1262. i64 t3 = f0_2*(i64)f3 + f1_2*(i64)f2 + f4 *(i64)f9_38
  1263. + f5_2*(i64)f8_19 + f6 *(i64)f7_38;
  1264. i64 t4 = f0_2*(i64)f4 + f1_2*(i64)f3_2 + f2 *(i64)f2
  1265. + f5_2*(i64)f9_38 + f6_2*(i64)f8_19 + f7 *(i64)f7_38;
  1266. i64 t5 = f0_2*(i64)f5 + f1_2*(i64)f4 + f2_2*(i64)f3
  1267. + f6 *(i64)f9_38 + f7_2*(i64)f8_19;
  1268. i64 t6 = f0_2*(i64)f6 + f1_2*(i64)f5_2 + f2_2*(i64)f4
  1269. + f3_2*(i64)f3 + f7_2*(i64)f9_38 + f8 *(i64)f8_19;
  1270. i64 t7 = f0_2*(i64)f7 + f1_2*(i64)f6 + f2_2*(i64)f5
  1271. + f3_2*(i64)f4 + f8 *(i64)f9_38;
  1272. i64 t8 = f0_2*(i64)f8 + f1_2*(i64)f7_2 + f2_2*(i64)f6
  1273. + f3_2*(i64)f5_2 + f4 *(i64)f4 + f9 *(i64)f9_38;
  1274. i64 t9 = f0_2*(i64)f9 + f1_2*(i64)f8 + f2_2*(i64)f7
  1275. + f3_2*(i64)f6 + f4 *(i64)f5_2;
  1276. // t0 < 0.67 * 2^61
  1277. // t1 < 0.41 * 2^61
  1278. // t2 < 0.52 * 2^61
  1279. // t3 < 0.32 * 2^61
  1280. // t4 < 0.38 * 2^61
  1281. // t5 < 0.22 * 2^61
  1282. // t6 < 0.23 * 2^61
  1283. // t7 < 0.13 * 2^61
  1284. // t8 < 0.09 * 2^61
  1285. // t9 < 0.03 * 2^61
  1286. FE_CARRY;
  1287. }
  1288. // h = 2 * (f^2)
  1289. //
  1290. // Precondition
  1291. // -------------
  1292. // |f0|, |f2|, |f4|, |f6|, |f8| < 1.65 * 2^26
  1293. // |f1|, |f3|, |f5|, |f7|, |f9| < 1.65 * 2^25
  1294. //
  1295. // Note: we could implement fe_sq2() by copying fe_sq(), multiplying
1296. // each limb by 2, *then* performing the carry. This saves one carry.
  1297. // However, doing so with the stated preconditions does not work (t2
  1298. // would overflow). There are 3 ways to solve this:
  1299. //
  1300. // 1. Show that t2 actually never overflows (it really does not).
1301. // 2. Accept an additional carry, at a small loss of performance.
  1302. // 3. Make sure the input of fe_sq2() is freshly carried.
  1303. //
  1304. // SUPERCOP ref10 relies on (1).
  1305. // Monocypher chose (2) and (3), mostly to save code.
  1306. static void fe_sq2(fe h, const fe f)
  1307. {
  1308. fe_sq(h, f);
  1309. fe_mul_small(h, h, 2);
  1310. }
1311. // Computes z^((p-5)/8) = z^(2^252 - 3). This could be simplified, but it would be slower
  1312. static void fe_pow22523(fe out, const fe z)
  1313. {
  1314. fe t0, t1, t2;
  1315. fe_sq(t0, z);
  1316. fe_sq(t1,t0); fe_sq(t1, t1); fe_mul(t1, z, t1);
  1317. fe_mul(t0, t0, t1);
  1318. fe_sq(t0, t0); fe_mul(t0, t1, t0);
  1319. fe_sq(t1, t0); FOR (i, 1, 5) fe_sq(t1, t1); fe_mul(t0, t1, t0);
  1320. fe_sq(t1, t0); FOR (i, 1, 10) fe_sq(t1, t1); fe_mul(t1, t1, t0);
  1321. fe_sq(t2, t1); FOR (i, 1, 20) fe_sq(t2, t2); fe_mul(t1, t2, t1);
  1322. fe_sq(t1, t1); FOR (i, 1, 10) fe_sq(t1, t1); fe_mul(t0, t1, t0);
  1323. fe_sq(t1, t0); FOR (i, 1, 50) fe_sq(t1, t1); fe_mul(t1, t1, t0);
  1324. fe_sq(t2, t1); FOR (i, 1, 100) fe_sq(t2, t2); fe_mul(t1, t2, t1);
  1325. fe_sq(t1, t1); FOR (i, 1, 50) fe_sq(t1, t1); fe_mul(t0, t1, t0);
  1326. fe_sq(t0, t0); FOR (i, 1, 2) fe_sq(t0, t0); fe_mul(out, t0, z);
  1327. WIPE_BUFFER(t0);
  1328. WIPE_BUFFER(t1);
  1329. WIPE_BUFFER(t2);
  1330. }
1331. // Inverting means raising to the power p - 2 = 2^255 - 21
  1332. // 2^255 - 21 = (2^252 - 3) * 8 + 3
  1333. // So we reuse the multiplication chain of fe_pow22523
  1334. static void fe_invert(fe out, const fe z)
  1335. {
  1336. fe tmp;
  1337. fe_pow22523(tmp, z);
1338. // out = tmp^8 * z^3 = z^((2^252 - 3) * 8 + 3) = z^(2^255 - 21)
  1339. fe_sq(tmp, tmp); // 0
  1340. fe_sq(tmp, tmp); fe_mul(tmp, tmp, z); // 1
  1341. fe_sq(tmp, tmp); fe_mul(out, tmp, z); // 1
  1342. WIPE_BUFFER(tmp);
  1343. }
  1344. // Parity check. Returns 0 if even, 1 if odd
  1345. static int fe_isodd(const fe f)
  1346. {
  1347. u8 s[32];
  1348. fe_tobytes(s, f);
  1349. u8 isodd = s[0] & 1;
  1350. WIPE_BUFFER(s);
  1351. return isodd;
  1352. }
  1353. // Returns 1 if equal, 0 if not equal
  1354. static int fe_isequal(const fe f, const fe g)
  1355. {
  1356. u8 fs[32];
  1357. u8 gs[32];
  1358. fe_tobytes(fs, f);
  1359. fe_tobytes(gs, g);
  1360. int isdifferent = crypto_verify32(fs, gs);
  1361. WIPE_BUFFER(fs);
  1362. WIPE_BUFFER(gs);
  1363. return 1 + isdifferent;
  1364. }
  1365. // Inverse square root.
1366. // Returns true if x is a non-zero square, false otherwise.
  1367. // After the call:
  1368. // isr = sqrt(1/x) if x is non-zero square.
  1369. // isr = sqrt(sqrt(-1)/x) if x is not a square.
  1370. // isr = 0 if x is zero.
  1371. // We do not guarantee the sign of the square root.
  1372. //
  1373. // Notes:
  1374. // Let quartic = x^((p-1)/4)
  1375. //
  1376. // x^((p-1)/2) = chi(x)
  1377. // quartic^2 = chi(x)
  1378. // quartic = sqrt(chi(x))
  1379. // quartic = 1 or -1 or sqrt(-1) or -sqrt(-1)
  1380. //
  1381. // Note that x is a square if quartic is 1 or -1
  1382. // There are 4 cases to consider:
  1383. //
  1384. // if quartic = 1 (x is a square)
  1385. // then x^((p-1)/4) = 1
  1386. // x^((p-5)/4) * x = 1
  1387. // x^((p-5)/4) = 1/x
  1388. // x^((p-5)/8) = sqrt(1/x) or -sqrt(1/x)
  1389. //
  1390. // if quartic = -1 (x is a square)
  1391. // then x^((p-1)/4) = -1
  1392. // x^((p-5)/4) * x = -1
  1393. // x^((p-5)/4) = -1/x
  1394. // x^((p-5)/8) = sqrt(-1) / sqrt(x)
  1395. // x^((p-5)/8) * sqrt(-1) = sqrt(-1)^2 / sqrt(x)
  1396. // x^((p-5)/8) * sqrt(-1) = -1/sqrt(x)
  1397. // x^((p-5)/8) * sqrt(-1) = -sqrt(1/x) or sqrt(1/x)
  1398. //
  1399. // if quartic = sqrt(-1) (x is not a square)
  1400. // then x^((p-1)/4) = sqrt(-1)
  1401. // x^((p-5)/4) * x = sqrt(-1)
  1402. // x^((p-5)/4) = sqrt(-1)/x
  1403. // x^((p-5)/8) = sqrt(sqrt(-1)/x) or -sqrt(sqrt(-1)/x)
  1404. //
  1405. // Note that the product of two non-squares is always a square:
  1406. // For any non-squares a and b, chi(a) = -1 and chi(b) = -1.
  1407. // Since chi(x) = x^((p-1)/2), chi(a)*chi(b) = chi(a*b) = 1.
  1408. // Therefore a*b is a square.
  1409. //
  1410. // Since sqrt(-1) and x are both non-squares, their product is a
  1411. // square, and we can compute their square root.
  1412. //
  1413. // if quartic = -sqrt(-1) (x is not a square)
  1414. // then x^((p-1)/4) = -sqrt(-1)
  1415. // x^((p-5)/4) * x = -sqrt(-1)
  1416. // x^((p-5)/4) = -sqrt(-1)/x
  1417. // x^((p-5)/8) = sqrt(-sqrt(-1)/x)
  1418. // x^((p-5)/8) = sqrt( sqrt(-1)/x) * sqrt(-1)
  1419. // x^((p-5)/8) * sqrt(-1) = sqrt( sqrt(-1)/x) * sqrt(-1)^2
  1420. // x^((p-5)/8) * sqrt(-1) = sqrt( sqrt(-1)/x) * -1
  1421. // x^((p-5)/8) * sqrt(-1) = -sqrt(sqrt(-1)/x) or sqrt(sqrt(-1)/x)
  1422. static int invsqrt(fe isr, const fe x)
  1423. {
  1424. fe check, quartic;
  1425. fe_copy(check, x);
  1426. fe_pow22523(isr, check);
  1427. fe_sq (quartic, isr);
  1428. fe_mul(quartic, quartic, check);
  1429. fe_1 (check); int p1 = fe_isequal(quartic, check);
  1430. fe_neg(check, check ); int m1 = fe_isequal(quartic, check);
  1431. fe_neg(check, sqrtm1); int ms = fe_isequal(quartic, check);
  1432. fe_mul(check, isr, sqrtm1);
  1433. fe_ccopy(isr, check, m1 | ms);
  1434. WIPE_BUFFER(quartic);
  1435. WIPE_BUFFER(check);
  1436. return p1 | m1;
  1437. }
  1438. // trim a scalar for scalar multiplication
  1439. static void trim_scalar(u8 scalar[32])
  1440. {
  1441. scalar[ 0] &= 248;
  1442. scalar[31] &= 127;
  1443. scalar[31] |= 64;
  1444. }
  1445. // get bit from scalar at position i
  1446. static int scalar_bit(const u8 s[32], int i)
  1447. {
  1448. if (i < 0) { return 0; } // handle -1 for sliding windows
  1449. return (s[i>>3] >> (i&7)) & 1;
  1450. }
  1451. ///////////////
  1452. /// X-25519 /// Taken from SUPERCOP's ref10 implementation.
  1453. ///////////////
  1454. static void scalarmult(u8 q[32], const u8 scalar[32], const u8 p[32],
  1455. int nb_bits)
  1456. {
1457. // decode the input point
  1458. fe x1;
  1459. fe_frombytes(x1, p);
  1460. // computes the actual scalar product (the result is in x2 and z2)
  1461. fe x2, z2, x3, z3, t0, t1;
  1462. // Montgomery ladder
  1463. // In projective coordinates, to avoid divisions: x = X / Z
  1464. // We don't care about the y coordinate, it's only 1 bit of information
  1465. fe_1(x2); fe_0(z2); // "zero" point
  1466. fe_copy(x3, x1); fe_1(z3); // "one" point
  1467. int swap = 0;
  1468. for (int pos = nb_bits-1; pos >= 0; --pos) {
  1469. // constant time conditional swap before ladder step
  1470. int b = scalar_bit(scalar, pos);
  1471. swap ^= b; // xor trick avoids swapping at the end of the loop
  1472. fe_cswap(x2, x3, swap);
  1473. fe_cswap(z2, z3, swap);
  1474. swap = b; // anticipates one last swap after the loop
  1475. // Montgomery ladder step: replaces (P2, P3) by (P2*2, P2+P3)
  1476. // with differential addition
  1477. fe_sub(t0, x3, z3);
  1478. fe_sub(t1, x2, z2);
  1479. fe_add(x2, x2, z2);
  1480. fe_add(z2, x3, z3);
  1481. fe_mul(z3, t0, x2);
  1482. fe_mul(z2, z2, t1);
  1483. fe_sq (t0, t1 );
  1484. fe_sq (t1, x2 );
  1485. fe_add(x3, z3, z2);
  1486. fe_sub(z2, z3, z2);
  1487. fe_mul(x2, t1, t0);
  1488. fe_sub(t1, t1, t0);
  1489. fe_sq (z2, z2 );
  1490. fe_mul_small(z3, t1, 121666);
  1491. fe_sq (x3, x3 );
  1492. fe_add(t0, t0, z3);
  1493. fe_mul(z3, x1, z2);
  1494. fe_mul(z2, t1, t0);
  1495. }
  1496. // last swap is necessary to compensate for the xor trick
  1497. // Note: after this swap, P3 == P2 + P1.
  1498. fe_cswap(x2, x3, swap);
  1499. fe_cswap(z2, z3, swap);
  1500. // normalises the coordinates: x == X / Z
  1501. fe_invert(z2, z2);
  1502. fe_mul(x2, x2, z2);
  1503. fe_tobytes(q, x2);
  1504. WIPE_BUFFER(x1);
  1505. WIPE_BUFFER(x2); WIPE_BUFFER(z2); WIPE_BUFFER(t0);
  1506. WIPE_BUFFER(x3); WIPE_BUFFER(z3); WIPE_BUFFER(t1);
  1507. }
  1508. void crypto_x25519(u8 raw_shared_secret[32],
  1509. const u8 your_secret_key [32],
  1510. const u8 their_public_key [32])
  1511. {
  1512. // restrict the possible scalar values
  1513. u8 e[32];
  1514. COPY(e, your_secret_key, 32);
  1515. trim_scalar(e);
  1516. scalarmult(raw_shared_secret, e, their_public_key, 255);
  1517. WIPE_BUFFER(e);
  1518. }
  1519. void crypto_x25519_public_key(u8 public_key[32],
  1520. const u8 secret_key[32])
  1521. {
  1522. static const u8 base_point[32] = {9};
  1523. crypto_x25519(public_key, secret_key, base_point);
  1524. }
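// Usage sketch (illustrative only, not part of the library): a basic
// key exchange. alice_sk and bob_sk are hypothetical buffers, assumed
// to be filled with random bytes from a CSPRNG.
//
// u8 alice_sk[32], alice_pk[32], alice_shared[32];
// u8 bob_sk  [32], bob_pk  [32], bob_shared  [32];
// crypto_x25519_public_key(alice_pk, alice_sk);
// crypto_x25519_public_key(bob_pk  , bob_sk  );
// // after exchanging public keys over the network:
// crypto_x25519(alice_shared, alice_sk, bob_pk  );
// crypto_x25519(bob_shared  , bob_sk  , alice_pk);
// // alice_shared == bob_shared; hash this raw secret before using it
// // as a session key.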
  1525. ///////////////////////////
  1526. /// Arithmetic modulo L ///
  1527. ///////////////////////////
  1528. static const u32 L[8] = {0x5cf5d3ed, 0x5812631a, 0xa2f79cd6, 0x14def9de,
  1529. 0x00000000, 0x00000000, 0x00000000, 0x10000000,};
  1530. // p = a*b + p
  1531. static void multiply(u32 p[16], const u32 a[8], const u32 b[8])
  1532. {
  1533. FOR (i, 0, 8) {
  1534. u64 carry = 0;
  1535. FOR (j, 0, 8) {
  1536. carry += p[i+j] + (u64)a[i] * b[j];
  1537. p[i+j] = (u32)carry;
  1538. carry >>= 32;
  1539. }
  1540. p[i+8] = (u32)carry;
  1541. }
  1542. }
  1543. static int is_above_l(const u32 x[8])
  1544. {
  1545. // We work with L directly, in a 2's complement encoding
  1546. // (-L == ~L + 1)
  1547. u64 carry = 1;
  1548. FOR (i, 0, 8) {
  1549. carry += (u64)x[i] + ~L[i];
  1550. carry >>= 32;
  1551. }
  1552. return carry;
  1553. }
  1554. // Final reduction modulo L, by conditionally removing L.
  1555. // if x < l , then r = x
1556. // if l <= x < 2*l, then r = x-l
  1557. // otherwise the result will be wrong
  1558. static void remove_l(u32 r[8], const u32 x[8])
  1559. {
  1560. u64 carry = is_above_l(x);
  1561. u32 mask = ~(u32)carry + 1; // carry == 0 or 1
  1562. FOR (i, 0, 8) {
  1563. carry += (u64)x[i] + (~L[i] & mask);
  1564. r[i] = (u32)carry;
  1565. carry >>= 32;
  1566. }
  1567. }
  1568. // Full reduction modulo L (Barrett reduction)
  1569. static void mod_l(u8 reduced[32], const u32 x[16])
  1570. {
  1571. static const u32 r[9] = {0x0a2c131b,0xed9ce5a3,0x086329a7,0x2106215d,
  1572. 0xffffffeb,0xffffffff,0xffffffff,0xffffffff,0xf,};
  1573. // xr = x * r
  1574. u32 xr[25] = {0};
  1575. FOR (i, 0, 9) {
  1576. u64 carry = 0;
  1577. FOR (j, 0, 16) {
  1578. carry += xr[i+j] + (u64)r[i] * x[j];
  1579. xr[i+j] = (u32)carry;
  1580. carry >>= 32;
  1581. }
  1582. xr[i+16] = (u32)carry;
  1583. }
  1584. // xr = floor(xr / 2^512) * L
  1585. // Since the result is guaranteed to be below 2*L,
  1586. // it is enough to only compute the first 256 bits.
1587. // The division amounts to reading from xr[i+16]. (16 * 32 = 512)
  1588. ZERO(xr, 8);
  1589. FOR (i, 0, 8) {
  1590. u64 carry = 0;
  1591. FOR (j, 0, 8-i) {
  1592. carry += xr[i+j] + (u64)xr[i+16] * L[j];
  1593. xr[i+j] = (u32)carry;
  1594. carry >>= 32;
  1595. }
  1596. }
  1597. // xr = x - xr
  1598. u64 carry = 1;
  1599. FOR (i, 0, 8) {
  1600. carry += (u64)x[i] + ~xr[i];
  1601. xr[i] = (u32)carry;
  1602. carry >>= 32;
  1603. }
  1604. // Final reduction modulo L (conditional subtraction)
  1605. remove_l(xr, xr);
  1606. store32_le_buf(reduced, xr, 8);
  1607. WIPE_BUFFER(xr);
  1608. }
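// Barrett reduction in miniature (illustrative only, not part of the
// library): the same idea as mod_l() above, with a 16-bit modulus m
// and 32-bit inputs. Here r = floor(2^32 / m) plays the role of the
// r[] constant, and the final branch plays the role of remove_l()
// (which does the same thing in constant time).
//
// #include <stdint.h>
// static uint16_t barrett_toy(uint32_t x, uint16_t m) // m >= 2
// {
//     uint32_t r = (uint32_t)(0x100000000ULL / m);      // precomputed
//     uint32_t q = (uint32_t)(((uint64_t)x * r) >> 32); // ~ x / m
//     uint32_t t = x - q * m;                           // t < 2 * m
//     return (uint16_t)(t >= m ? t - m : t);
// }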
  1609. static void reduce(u8 r[64])
  1610. {
  1611. u32 x[16];
  1612. load32_le_buf(x, r, 16);
  1613. mod_l(r, x);
  1614. WIPE_BUFFER(x);
  1615. }
  1616. // r = (a * b) + c
  1617. static void mul_add(u8 r[32], const u8 a[32], const u8 b[32], const u8 c[32])
  1618. {
  1619. u32 A[8]; load32_le_buf(A, a, 8);
  1620. u32 B[8]; load32_le_buf(B, b, 8);
  1621. u32 p[16];
  1622. load32_le_buf(p, c, 8);
  1623. ZERO(p + 8, 8);
  1624. multiply(p, A, B);
  1625. mod_l(r, p);
  1626. WIPE_BUFFER(p);
  1627. WIPE_BUFFER(A);
  1628. WIPE_BUFFER(B);
  1629. }
  1630. ///////////////
  1631. /// Ed25519 ///
  1632. ///////////////
  1633. // Point (group element, ge) in a twisted Edwards curve,
  1634. // in extended projective coordinates.
  1635. // ge : x = X/Z, y = Y/Z, T = XY/Z
  1636. // ge_cached : Yp = X+Y, Ym = X-Y, T2 = T*D2
  1637. // ge_precomp: Z = 1
  1638. typedef struct { fe X; fe Y; fe Z; fe T; } ge;
  1639. typedef struct { fe Yp; fe Ym; fe Z; fe T2; } ge_cached;
  1640. typedef struct { fe Yp; fe Ym; fe T2; } ge_precomp;
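// The T coordinate caches the product x*y, which saves multiplications
// in point additions. For instance, the neutral element (affine (0, 1))
// is X = 0, Y = 1, Z = 1, T = 0: exactly what ge_zero() constructs below.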
  1641. static void ge_zero(ge *p)
  1642. {
  1643. fe_0(p->X);
  1644. fe_1(p->Y);
  1645. fe_1(p->Z);
  1646. fe_0(p->T);
  1647. }
  1648. static void ge_tobytes(u8 s[32], const ge *h)
  1649. {
  1650. fe recip, x, y;
  1651. fe_invert(recip, h->Z);
  1652. fe_mul(x, h->X, recip);
  1653. fe_mul(y, h->Y, recip);
  1654. fe_tobytes(s, y);
  1655. s[31] ^= fe_isodd(x) << 7;
  1656. WIPE_BUFFER(recip);
  1657. WIPE_BUFFER(x);
  1658. WIPE_BUFFER(y);
  1659. }
  1660. // h = s, where s is a point encoded in 32 bytes
  1661. //
  1662. // Variable time! Inputs must not be secret!
  1663. // => Use only to *check* signatures.
  1664. //
  1665. // From the specifications:
  1666. // The encoding of s contains y and the sign of x
  1667. // x = sqrt((y^2 - 1) / (d*y^2 + 1))
  1668. // In extended coordinates:
  1669. // X = x, Y = y, Z = 1, T = x*y
  1670. //
  1671. // Note that num * den is a square iff num / den is a square
  1672. // If num * den is not a square, the point was not on the curve.
  1673. // From the above:
  1674. // Let num = y^2 - 1
  1675. // Let den = d*y^2 + 1
  1676. // x = sqrt((y^2 - 1) / (d*y^2 + 1))
  1677. // x = sqrt(num / den)
  1678. // x = sqrt(num^2 / (num * den))
  1679. // x = num * sqrt(1 / (num * den))
  1680. //
  1681. // Therefore, we can just compute:
  1682. // num = y^2 - 1
  1683. // den = d*y^2 + 1
  1684. // isr = invsqrt(num * den) // abort if not square
  1685. // x = num * isr
  1686. // Finally, negate x if its sign is not as specified.
  1687. static int ge_frombytes_vartime(ge *h, const u8 s[32])
  1688. {
  1689. fe_frombytes(h->Y, s);
  1690. fe_1(h->Z);
  1691. fe_sq (h->T, h->Y); // t = y^2
  1692. fe_mul(h->X, h->T, d ); // x = d*y^2
  1693. fe_sub(h->T, h->T, h->Z); // t = y^2 - 1
  1694. fe_add(h->X, h->X, h->Z); // x = d*y^2 + 1
  1695. fe_mul(h->X, h->T, h->X); // x = (y^2 - 1) * (d*y^2 + 1)
  1696. int is_square = invsqrt(h->X, h->X);
  1697. if (!is_square) {
  1698. return -1; // Not on the curve, abort
  1699. }
  1700. fe_mul(h->X, h->T, h->X); // x = sqrt((y^2 - 1) / (d*y^2 + 1))
  1701. if (fe_isodd(h->X) != (s[31] >> 7)) {
  1702. fe_neg(h->X, h->X);
  1703. }
  1704. fe_mul(h->T, h->X, h->Y);
  1705. return 0;
  1706. }
  1707. static void ge_cache(ge_cached *c, const ge *p)
  1708. {
  1709. fe_add (c->Yp, p->Y, p->X);
  1710. fe_sub (c->Ym, p->Y, p->X);
  1711. fe_copy(c->Z , p->Z );
  1712. fe_mul (c->T2, p->T, D2 );
  1713. }
  1714. // Internal buffers are not wiped! Inputs must not be secret!
  1715. // => Use only to *check* signatures.
  1716. static void ge_add(ge *s, const ge *p, const ge_cached *q)
  1717. {
  1718. fe a, b;
  1719. fe_add(a , p->Y, p->X );
  1720. fe_sub(b , p->Y, p->X );
  1721. fe_mul(a , a , q->Yp);
  1722. fe_mul(b , b , q->Ym);
  1723. fe_add(s->Y, a , b );
  1724. fe_sub(s->X, a , b );
  1725. fe_add(s->Z, p->Z, p->Z );
  1726. fe_mul(s->Z, s->Z, q->Z );
  1727. fe_mul(s->T, p->T, q->T2);
  1728. fe_add(a , s->Z, s->T );
  1729. fe_sub(b , s->Z, s->T );
  1730. fe_mul(s->T, s->X, s->Y);
  1731. fe_mul(s->X, s->X, b );
  1732. fe_mul(s->Y, s->Y, a );
  1733. fe_mul(s->Z, a , b );
  1734. }
  1735. // Internal buffers are not wiped! Inputs must not be secret!
  1736. // => Use only to *check* signatures.
  1737. static void ge_sub(ge *s, const ge *p, const ge_cached *q)
  1738. {
  1739. ge_cached neg;
  1740. fe_copy(neg.Ym, q->Yp);
  1741. fe_copy(neg.Yp, q->Ym);
  1742. fe_copy(neg.Z , q->Z );
  1743. fe_neg (neg.T2, q->T2);
  1744. ge_add(s, p, &neg);
  1745. }
  1746. static void ge_madd(ge *s, const ge *p, const ge_precomp *q, fe a, fe b)
  1747. {
  1748. fe_add(a , p->Y, p->X );
  1749. fe_sub(b , p->Y, p->X );
  1750. fe_mul(a , a , q->Yp);
  1751. fe_mul(b , b , q->Ym);
  1752. fe_add(s->Y, a , b );
  1753. fe_sub(s->X, a , b );
  1754. fe_add(s->Z, p->Z, p->Z );
  1755. fe_mul(s->T, p->T, q->T2);
  1756. fe_add(a , s->Z, s->T );
  1757. fe_sub(b , s->Z, s->T );
  1758. fe_mul(s->T, s->X, s->Y);
  1759. fe_mul(s->X, s->X, b );
  1760. fe_mul(s->Y, s->Y, a );
  1761. fe_mul(s->Z, a , b );
  1762. }
  1763. static void ge_msub(ge *s, const ge *p, const ge_precomp *q, fe a, fe b)
  1764. {
  1765. fe_add(a , p->Y, p->X );
  1766. fe_sub(b , p->Y, p->X );
  1767. fe_mul(a , a , q->Ym);
  1768. fe_mul(b , b , q->Yp);
  1769. fe_add(s->Y, a , b );
  1770. fe_sub(s->X, a , b );
  1771. fe_add(s->Z, p->Z, p->Z );
  1772. fe_mul(s->T, p->T, q->T2);
  1773. fe_sub(a , s->Z, s->T );
  1774. fe_add(b , s->Z, s->T );
  1775. fe_mul(s->T, s->X, s->Y);
  1776. fe_mul(s->X, s->X, b );
  1777. fe_mul(s->Y, s->Y, a );
  1778. fe_mul(s->Z, a , b );
  1779. }
  1780. static void ge_double(ge *s, const ge *p, ge *q)
  1781. {
  1782. fe_sq (q->X, p->X);
  1783. fe_sq (q->Y, p->Y);
  1784. fe_sq2(q->Z, p->Z);
  1785. fe_add(q->T, p->X, p->Y);
  1786. fe_sq (s->T, q->T);
  1787. fe_add(q->T, q->Y, q->X);
  1788. fe_sub(q->Y, q->Y, q->X);
  1789. fe_sub(q->X, s->T, q->T);
  1790. fe_sub(q->Z, q->Z, q->Y);
  1791. fe_mul(s->X, q->X , q->Z);
  1792. fe_mul(s->Y, q->T , q->Y);
  1793. fe_mul(s->Z, q->Y , q->Z);
  1794. fe_mul(s->T, q->X , q->T);
  1795. }
  1796. // 5-bit signed window in cached format (Niels coordinates, Z=1)
  1797. static const ge_precomp b_window[8] = {
  1798. {{25967493,-14356035,29566456,3660896,-12694345,
  1799. 4014787,27544626,-11754271,-6079156,2047605,},
  1800. {-12545711,934262,-2722910,3049990,-727428,
  1801. 9406986,12720692,5043384,19500929,-15469378,},
  1802. {-8738181,4489570,9688441,-14785194,10184609,
  1803. -12363380,29287919,11864899,-24514362,-4438546,},},
  1804. {{15636291,-9688557,24204773,-7912398,616977,
  1805. -16685262,27787600,-14772189,28944400,-1550024,},
  1806. {16568933,4717097,-11556148,-1102322,15682896,
  1807. -11807043,16354577,-11775962,7689662,11199574,},
  1808. {30464156,-5976125,-11779434,-15670865,23220365,
  1809. 15915852,7512774,10017326,-17749093,-9920357,},},
  1810. {{10861363,11473154,27284546,1981175,-30064349,
  1811. 12577861,32867885,14515107,-15438304,10819380,},
  1812. {4708026,6336745,20377586,9066809,-11272109,
  1813. 6594696,-25653668,12483688,-12668491,5581306,},
  1814. {19563160,16186464,-29386857,4097519,10237984,
  1815. -4348115,28542350,13850243,-23678021,-15815942,},},
  1816. {{5153746,9909285,1723747,-2777874,30523605,
  1817. 5516873,19480852,5230134,-23952439,-15175766,},
  1818. {-30269007,-3463509,7665486,10083793,28475525,
  1819. 1649722,20654025,16520125,30598449,7715701,},
  1820. {28881845,14381568,9657904,3680757,-20181635,
  1821. 7843316,-31400660,1370708,29794553,-1409300,},},
  1822. {{-22518993,-6692182,14201702,-8745502,-23510406,
  1823. 8844726,18474211,-1361450,-13062696,13821877,},
  1824. {-6455177,-7839871,3374702,-4740862,-27098617,
  1825. -10571707,31655028,-7212327,18853322,-14220951,},
  1826. {4566830,-12963868,-28974889,-12240689,-7602672,
  1827. -2830569,-8514358,-10431137,2207753,-3209784,},},
  1828. {{-25154831,-4185821,29681144,7868801,-6854661,
  1829. -9423865,-12437364,-663000,-31111463,-16132436,},
  1830. {25576264,-2703214,7349804,-11814844,16472782,
  1831. 9300885,3844789,15725684,171356,6466918,},
  1832. {23103977,13316479,9739013,-16149481,817875,
  1833. -15038942,8965339,-14088058,-30714912,16193877,},},
  1834. {{-33521811,3180713,-2394130,14003687,-16903474,
  1835. -16270840,17238398,4729455,-18074513,9256800,},
  1836. {-25182317,-4174131,32336398,5036987,-21236817,
  1837. 11360617,22616405,9761698,-19827198,630305,},
  1838. {-13720693,2639453,-24237460,-7406481,9494427,
  1839. -5774029,-6554551,-15960994,-2449256,-14291300,},},
  1840. {{-3151181,-5046075,9282714,6866145,-31907062,
  1841. -863023,-18940575,15033784,25105118,-7894876,},
  1842. {-24326370,15950226,-31801215,-14592823,-11662737,
  1843. -5090925,1573892,-2625887,2198790,-15804619,},
  1844. {-3099351,10324967,-2241613,7453183,-5446979,
  1845. -2735503,-13812022,-16236442,-32461234,-12290683,},},
  1846. };
  1847. // Incremental sliding windows (left to right)
1848. // Based on Roberto Maria Avanzi [2005]
  1849. typedef struct {
  1850. i16 next_index; // position of the next signed digit
  1851. i8 next_digit; // next signed digit (odd number below 2^window_width)
  1852. u8 next_check; // point at which we must check for a new window
  1853. } slide_ctx;
  1854. static void slide_init(slide_ctx *ctx, const u8 scalar[32])
  1855. {
  1856. // scalar is guaranteed to be below L, either because we checked (s),
  1857. // or because we reduced it modulo L (h_ram). L is under 2^253, so
1858. // bits 253 to 255 are guaranteed to be zero. No need to test them.
  1859. //
  1860. // Note however that L is very close to 2^252, so bit 252 is almost
  1861. // always zero. If we were to start at bit 251, the tests wouldn't
  1862. // catch the off-by-one error (constructing one that does would be
  1863. // prohibitively expensive).
  1864. //
  1865. // We should still check bit 252, though.
  1866. int i = 252;
  1867. while (i > 0 && scalar_bit(scalar, i) == 0) {
  1868. i--;
  1869. }
  1870. ctx->next_check = (u8)(i + 1);
  1871. ctx->next_index = -1;
  1872. ctx->next_digit = -1;
  1873. }
  1874. static int slide_step(slide_ctx *ctx, int width, int i, const u8 scalar[32])
  1875. {
  1876. if (i == ctx->next_check) {
  1877. if (scalar_bit(scalar, i) == scalar_bit(scalar, i - 1)) {
  1878. ctx->next_check--;
  1879. } else {
  1880. // compute digit of next window
  1881. int w = MIN(width, i + 1);
  1882. int v = -(scalar_bit(scalar, i) << (w-1));
  1883. FOR_T (int, j, 0, w-1) {
  1884. v += scalar_bit(scalar, i-(w-1)+j) << j;
  1885. }
  1886. v += scalar_bit(scalar, i-w);
1887. int lsb = v & (~v + 1); // lowest set bit of v
  1888. int s = ( ((lsb & 0xAA) != 0) // log2(lsb)
  1889. | (((lsb & 0xCC) != 0) << 1)
  1890. | (((lsb & 0xF0) != 0) << 2));
  1891. ctx->next_index = (i16)(i-(w-1)+s);
  1892. ctx->next_digit = (i8) (v >> s );
  1893. ctx->next_check -= (u8) w;
  1894. }
  1895. }
  1896. return i == ctx->next_index ? ctx->next_digit: 0;
  1897. }
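// For example (illustrative only), with width 3 the digits produced by
// slide_step() are 0, +-1, or +-3, and the scalar 90 = 0b1011010 can
// be recoded as
//     90 = 3 * 2^5 - 3 * 2^1
// so the ladder below performs one table addition per window instead
// of one per set bit. Since every non-zero digit is odd, the tables
// (cP[] below, b_window[] above) only need to hold the odd multiples
// 1, 3, ..., 2^(width-1) - 1 of the point.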
  1898. #define P_W_WIDTH 3 // Affects the size of the stack
  1899. #define B_W_WIDTH 5 // Affects the size of the binary
  1900. #define P_W_SIZE (1<<(P_W_WIDTH-2))
  1901. // P = [b]B + [p]P, where B is the base point
  1902. //
  1903. // Variable time! Internal buffers are not wiped! Inputs must not be secret!
  1904. // => Use only to *check* signatures.
  1905. static void ge_double_scalarmult_vartime(ge *P, const u8 p[32], const u8 b[32])
  1906. {
  1907. // cache P window for addition
  1908. ge_cached cP[P_W_SIZE];
  1909. {
  1910. ge P2, tmp;
  1911. ge_double(&P2, P, &tmp);
  1912. ge_cache(&cP[0], P);
  1913. FOR (i, 1, P_W_SIZE) {
  1914. ge_add(&tmp, &P2, &cP[i-1]);
  1915. ge_cache(&cP[i], &tmp);
  1916. }
  1917. }
  1918. // Merged double and add ladder, fused with sliding
  1919. slide_ctx p_slide; slide_init(&p_slide, p);
  1920. slide_ctx b_slide; slide_init(&b_slide, b);
  1921. int i = MAX(p_slide.next_check, b_slide.next_check);
  1922. ge *sum = P;
  1923. ge_zero(sum);
  1924. while (i >= 0) {
  1925. ge tmp;
  1926. ge_double(sum, sum, &tmp);
  1927. int p_digit = slide_step(&p_slide, P_W_WIDTH, i, p);
  1928. int b_digit = slide_step(&b_slide, B_W_WIDTH, i, b);
  1929. if (p_digit > 0) { ge_add(sum, sum, &cP[ p_digit / 2]); }
  1930. if (p_digit < 0) { ge_sub(sum, sum, &cP[-p_digit / 2]); }
  1931. fe t1, t2;
  1932. if (b_digit > 0) { ge_madd(sum, sum, b_window + b_digit/2, t1, t2); }
  1933. if (b_digit < 0) { ge_msub(sum, sum, b_window + -b_digit/2, t1, t2); }
  1934. i--;
  1935. }
  1936. }
  1937. // R_check = s[B] - h_ram[pk], where B is the base point
  1938. //
  1939. // Variable time! Internal buffers are not wiped! Inputs must not be secret!
  1940. // => Use only to *check* signatures.
  1941. static int ge_r_check(u8 R_check[32], u8 s[32], u8 h_ram[32], u8 pk[32])
  1942. {
  1943. ge A; // not secret, not wiped
  1944. u32 s32[8]; // not secret, not wiped
  1945. load32_le_buf(s32, s, 8);
  1946. if (ge_frombytes_vartime(&A, pk) || // A = pk
  1947. is_above_l(s32)) { // prevent s malleability
  1948. return -1;
  1949. }
  1950. fe_neg(A.X, A.X);
  1951. fe_neg(A.T, A.T); // A = -pk
  1952. ge_double_scalarmult_vartime(&A, h_ram, s); // A = [s]B - [h_ram]pk
  1953. ge_tobytes(R_check, &A); // R_check = A
  1954. return 0;
  1955. }
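// Why ge_r_check() works: a valid signature satisfies
//     s = r + h_ram * a (modulo L)
// with R = [r]B and pk = [a]B. Therefore:
//     [s]B - [h_ram]pk = [r]B + [h_ram * a]B - [h_ram * a]B = R
// so R_check must equal the R half of a genuine signature.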
1956. // 4-bit signed comb in cached format (Niels coordinates, Z=1)
  1957. static const ge_precomp b_comb_low[8] = {
  1958. {{-6816601,-2324159,-22559413,124364,18015490,
  1959. 8373481,19993724,1979872,-18549925,9085059,},
  1960. {10306321,403248,14839893,9633706,8463310,
  1961. -8354981,-14305673,14668847,26301366,2818560,},
  1962. {-22701500,-3210264,-13831292,-2927732,-16326337,
  1963. -14016360,12940910,177905,12165515,-2397893,},},
  1964. {{-12282262,-7022066,9920413,-3064358,-32147467,
  1965. 2927790,22392436,-14852487,2719975,16402117,},
  1966. {-7236961,-4729776,2685954,-6525055,-24242706,
  1967. -15940211,-6238521,14082855,10047669,12228189,},
  1968. {-30495588,-12893761,-11161261,3539405,-11502464,
  1969. 16491580,-27286798,-15030530,-7272871,-15934455,},},
  1970. {{17650926,582297,-860412,-187745,-12072900,
  1971. -10683391,-20352381,15557840,-31072141,-5019061,},
  1972. {-6283632,-2259834,-4674247,-4598977,-4089240,
  1973. 12435688,-31278303,1060251,6256175,10480726,},
  1974. {-13871026,2026300,-21928428,-2741605,-2406664,
  1975. -8034988,7355518,15733500,-23379862,7489131,},},
  1976. {{6883359,695140,23196907,9644202,-33430614,
  1977. 11354760,-20134606,6388313,-8263585,-8491918,},
  1978. {-7716174,-13605463,-13646110,14757414,-19430591,
  1979. -14967316,10359532,-11059670,-21935259,12082603,},
  1980. {-11253345,-15943946,10046784,5414629,24840771,
  1981. 8086951,-6694742,9868723,15842692,-16224787,},},
  1982. {{9639399,11810955,-24007778,-9320054,3912937,
  1983. -9856959,996125,-8727907,-8919186,-14097242,},
  1984. {7248867,14468564,25228636,-8795035,14346339,
  1985. 8224790,6388427,-7181107,6468218,-8720783,},
  1986. {15513115,15439095,7342322,-10157390,18005294,
  1987. -7265713,2186239,4884640,10826567,7135781,},},
  1988. {{-14204238,5297536,-5862318,-6004934,28095835,
  1989. 4236101,-14203318,1958636,-16816875,3837147,},
  1990. {-5511166,-13176782,-29588215,12339465,15325758,
  1991. -15945770,-8813185,11075932,-19608050,-3776283,},
  1992. {11728032,9603156,-4637821,-5304487,-7827751,
  1993. 2724948,31236191,-16760175,-7268616,14799772,},},
  1994. {{-28842672,4840636,-12047946,-9101456,-1445464,
  1995. 381905,-30977094,-16523389,1290540,12798615,},
  1996. {27246947,-10320914,14792098,-14518944,5302070,
  1997. -8746152,-3403974,-4149637,-27061213,10749585,},
  1998. {25572375,-6270368,-15353037,16037944,1146292,
  1999. 32198,23487090,9585613,24714571,-1418265,},},
  2000. {{19844825,282124,-17583147,11004019,-32004269,
  2001. -2716035,6105106,-1711007,-21010044,14338445,},
  2002. {8027505,8191102,-18504907,-12335737,25173494,
  2003. -5923905,15446145,7483684,-30440441,10009108,},
  2004. {-14134701,-4174411,10246585,-14677495,33553567,
  2005. -14012935,23366126,15080531,-7969992,7663473,},},
  2006. };
  2007. static const ge_precomp b_comb_high[8] = {
  2008. {{33055887,-4431773,-521787,6654165,951411,
  2009. -6266464,-5158124,6995613,-5397442,-6985227,},
  2010. {4014062,6967095,-11977872,3960002,8001989,
  2011. 5130302,-2154812,-1899602,-31954493,-16173976,},
  2012. {16271757,-9212948,23792794,731486,-25808309,
  2013. -3546396,6964344,-4767590,10976593,10050757,},},
  2014. {{2533007,-4288439,-24467768,-12387405,-13450051,
  2015. 14542280,12876301,13893535,15067764,8594792,},
  2016. {20073501,-11623621,3165391,-13119866,13188608,
  2017. -11540496,-10751437,-13482671,29588810,2197295,},
  2018. {-1084082,11831693,6031797,14062724,14748428,
  2019. -8159962,-20721760,11742548,31368706,13161200,},},
  2020. {{2050412,-6457589,15321215,5273360,25484180,
  2021. 124590,-18187548,-7097255,-6691621,-14604792,},
  2022. {9938196,2162889,-6158074,-1711248,4278932,
  2023. -2598531,-22865792,-7168500,-24323168,11746309,},
  2024. {-22691768,-14268164,5965485,9383325,20443693,
  2025. 5854192,28250679,-1381811,-10837134,13717818,},},
  2026. {{-8495530,16382250,9548884,-4971523,-4491811,
  2027. -3902147,6182256,-12832479,26628081,10395408,},
  2028. {27329048,-15853735,7715764,8717446,-9215518,
  2029. -14633480,28982250,-5668414,4227628,242148,},
  2030. {-13279943,-7986904,-7100016,8764468,-27276630,
  2031. 3096719,29678419,-9141299,3906709,11265498,},},
  2032. {{11918285,15686328,-17757323,-11217300,-27548967,
  2033. 4853165,-27168827,6807359,6871949,-1075745,},
  2034. {-29002610,13984323,-27111812,-2713442,28107359,
  2035. -13266203,6155126,15104658,3538727,-7513788,},
  2036. {14103158,11233913,-33165269,9279850,31014152,
  2037. 4335090,-1827936,4590951,13960841,12787712,},},
  2038. {{1469134,-16738009,33411928,13942824,8092558,
  2039. -8778224,-11165065,1437842,22521552,-2792954,},
  2040. {31352705,-4807352,-25327300,3962447,12541566,
  2041. -9399651,-27425693,7964818,-23829869,5541287,},
  2042. {-25732021,-6864887,23848984,3039395,-9147354,
  2043. 6022816,-27421653,10590137,25309915,-1584678,},},
  2044. {{-22951376,5048948,31139401,-190316,-19542447,
  2045. -626310,-17486305,-16511925,-18851313,-12985140,},
  2046. {-9684890,14681754,30487568,7717771,-10829709,
  2047. 9630497,30290549,-10531496,-27798994,-13812825,},
  2048. {5827835,16097107,-24501327,12094619,7413972,
  2049. 11447087,28057551,-1793987,-14056981,4359312,},},
  2050. {{26323183,2342588,-21887793,-1623758,-6062284,
  2051. 2107090,-28724907,9036464,-19618351,-13055189,},
  2052. {-29697200,14829398,-4596333,14220089,-30022969,
  2053. 2955645,12094100,-13693652,-5941445,7047569,},
  2054. {-3201977,14413268,-12058324,-16417589,-9035655,
  2055. -7224648,9258160,1399236,30397584,-5684634,},},
  2056. };
  2057. static void lookup_add(ge *p, ge_precomp *tmp_c, fe tmp_a, fe tmp_b,
  2058. const ge_precomp comb[8], const u8 scalar[32], int i)
  2059. {
  2060. u8 teeth = (u8)((scalar_bit(scalar, i) ) +
  2061. (scalar_bit(scalar, i + 32) << 1) +
  2062. (scalar_bit(scalar, i + 64) << 2) +
  2063. (scalar_bit(scalar, i + 96) << 3));
  2064. u8 high = teeth >> 3;
  2065. u8 index = (teeth ^ (high - 1)) & 7;
  2066. FOR (j, 0, 8) {
  2067. i32 select = 1 & (((j ^ index) - 1) >> 8);
  2068. fe_ccopy(tmp_c->Yp, comb[j].Yp, select);
  2069. fe_ccopy(tmp_c->Ym, comb[j].Ym, select);
  2070. fe_ccopy(tmp_c->T2, comb[j].T2, select);
  2071. }
  2072. fe_neg(tmp_a, tmp_c->T2);
  2073. fe_cswap(tmp_c->T2, tmp_a , high ^ 1);
  2074. fe_cswap(tmp_c->Yp, tmp_c->Ym, high ^ 1);
  2075. ge_madd(p, p, tmp_c, tmp_a, tmp_b);
  2076. }
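// Example (illustrative only): in lookup_add() above, teeth = 0b0010.
// The top tooth is 0, so high = 0 and the selected point must be
// negated. Negating flips all four signed bits, which is the same as
// complementing teeth: index = (~0b0010) & 7 = 5. The loop then
// selects comb[5] in constant time, and the two fe_cswap() calls
// perform the negation (swapping Yp with Ym, and T2 with -T2).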
  2077. // p = [scalar]B, where B is the base point
  2078. static void ge_scalarmult_base(ge *p, const u8 scalar[32])
  2079. {
2080. // twin 4-bit signed combs, from Mike Hamburg's
  2081. // Fast and compact elliptic-curve cryptography (2012)
  2082. // 1 / 2 modulo L
  2083. static const u8 half_mod_L[32] = {
  2084. 247,233,122,46,141,49,9,44,107,206,123,81,239,124,111,10,
  2085. 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8, };
  2086. // (2^256 - 1) / 2 modulo L
  2087. static const u8 half_ones[32] = {
  2088. 142,74,204,70,186,24,118,107,184,231,190,57,250,173,119,99,
  2089. 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,7, };
  2090. // All bits set form: 1 means 1, 0 means -1
  2091. u8 s_scalar[32];
  2092. mul_add(s_scalar, scalar, half_mod_L, half_ones);
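// Why this works: the combs read each bit b of s_scalar as the signed
// digit 2*b - 1 (1 means +1, 0 means -1), so they actually compute
// [2 * s_scalar - (2^256 - 1)]B. Setting
//     s_scalar = scalar/2 + (2^256 - 1)/2 (modulo L)
// makes that quantity congruent to the original scalar modulo L.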
  2093. // Double and add ladder
  2094. fe tmp_a, tmp_b; // temporaries for addition
  2095. ge_precomp tmp_c; // temporary for comb lookup
  2096. ge tmp_d; // temporary for doubling
  2097. fe_1(tmp_c.Yp);
  2098. fe_1(tmp_c.Ym);
  2099. fe_0(tmp_c.T2);
  2100. // Save a double on the first iteration
  2101. ge_zero(p);
  2102. lookup_add(p, &tmp_c, tmp_a, tmp_b, b_comb_low , s_scalar, 31);
  2103. lookup_add(p, &tmp_c, tmp_a, tmp_b, b_comb_high, s_scalar, 31+128);
  2104. // Regular double & add for the rest
  2105. for (int i = 30; i >= 0; i--) {
  2106. ge_double(p, p, &tmp_d);
  2107. lookup_add(p, &tmp_c, tmp_a, tmp_b, b_comb_low , s_scalar, i);
  2108. lookup_add(p, &tmp_c, tmp_a, tmp_b, b_comb_high, s_scalar, i+128);
  2109. }
2110. // Note: we could save one addition at the end if we assumed the
2111. // scalar fits in 252 bits, which it does in practice if it is
2112. // selected at random. However, non-random, non-hashed scalars
2113. // *can* overflow 252 bits in practice. Better to account for that
2114. // than to leave that kind of subtle corner case unhandled.
  2115. WIPE_BUFFER(tmp_a); WIPE_CTX(&tmp_d);
  2116. WIPE_BUFFER(tmp_b); WIPE_CTX(&tmp_c);
  2117. WIPE_BUFFER(s_scalar);
  2118. }
  2119. void crypto_sign_public_key_custom_hash(u8 public_key[32],
  2120. const u8 secret_key[32],
  2121. const crypto_sign_vtable *hash)
  2122. {
  2123. u8 a[64];
  2124. hash->hash(a, secret_key, 32);
  2125. trim_scalar(a);
  2126. ge A;
  2127. ge_scalarmult_base(&A, a);
  2128. ge_tobytes(public_key, &A);
  2129. WIPE_BUFFER(a);
  2130. WIPE_CTX(&A);
  2131. }
  2132. void crypto_sign_public_key(u8 public_key[32], const u8 secret_key[32])
  2133. {
  2134. crypto_sign_public_key_custom_hash(public_key, secret_key,
  2135. &crypto_blake2b_vtable);
  2136. }
  2137. void crypto_sign_init_first_pass_custom_hash(crypto_sign_ctx_abstract *ctx,
  2138. const u8 secret_key[32],
  2139. const u8 public_key[32],
  2140. const crypto_sign_vtable *hash)
  2141. {
  2142. ctx->hash = hash; // set vtable
  2143. u8 *a = ctx->buf;
  2144. u8 *prefix = ctx->buf + 32;
  2145. ctx->hash->hash(a, secret_key, 32);
  2146. trim_scalar(a);
  2147. if (public_key == 0) {
  2148. crypto_sign_public_key_custom_hash(ctx->pk, secret_key, ctx->hash);
  2149. } else {
  2150. COPY(ctx->pk, public_key, 32);
  2151. }
  2152. // Deterministic part of EdDSA: Construct a nonce by hashing the message
  2153. // instead of generating a random number.
  2154. // An actual random number would work just fine, and would save us
2155. // the trouble of hashing the message twice. If we did that,
2156. // however, the user could fuck it up and reuse the nonce.
  2157. ctx->hash->init (ctx);
  2158. ctx->hash->update(ctx, prefix , 32);
  2159. }
  2160. void crypto_sign_init_first_pass(crypto_sign_ctx_abstract *ctx,
  2161. const u8 secret_key[32],
  2162. const u8 public_key[32])
  2163. {
  2164. crypto_sign_init_first_pass_custom_hash(ctx, secret_key, public_key,
  2165. &crypto_blake2b_vtable);
  2166. }
  2167. void crypto_sign_update(crypto_sign_ctx_abstract *ctx,
  2168. const u8 *msg, size_t msg_size)
  2169. {
  2170. ctx->hash->update(ctx, msg, msg_size);
  2171. }
  2172. void crypto_sign_init_second_pass(crypto_sign_ctx_abstract *ctx)
  2173. {
  2174. u8 *r = ctx->buf + 32;
  2175. u8 *half_sig = ctx->buf + 64;
  2176. ctx->hash->final(ctx, r);
  2177. reduce(r);
  2178. // first half of the signature = "random" nonce times the base point
  2179. ge R;
  2180. ge_scalarmult_base(&R, r);
  2181. ge_tobytes(half_sig, &R);
  2182. WIPE_CTX(&R);
  2183. // Hash R, the public key, and the message together.
  2184. // It cannot be done in parallel with the first hash.
  2185. ctx->hash->init (ctx);
  2186. ctx->hash->update(ctx, half_sig, 32);
  2187. ctx->hash->update(ctx, ctx->pk , 32);
  2188. }
  2189. void crypto_sign_final(crypto_sign_ctx_abstract *ctx, u8 signature[64])
  2190. {
  2191. u8 *a = ctx->buf;
  2192. u8 *r = ctx->buf + 32;
  2193. u8 *half_sig = ctx->buf + 64;
  2194. u8 h_ram[64];
  2195. ctx->hash->final(ctx, h_ram);
  2196. reduce(h_ram);
  2197. COPY(signature, half_sig, 32);
  2198. mul_add(signature + 32, h_ram, a, r); // s = h_ram * a + r
  2199. WIPE_BUFFER(h_ram);
  2200. crypto_wipe(ctx, ctx->hash->ctx_size);
  2201. }
  2202. void crypto_sign(u8 signature[64],
  2203. const u8 secret_key[32],
  2204. const u8 public_key[32],
  2205. const u8 *message, size_t message_size)
  2206. {
  2207. crypto_sign_ctx ctx;
  2208. crypto_sign_ctx_abstract *actx = (crypto_sign_ctx_abstract*)&ctx;
  2209. crypto_sign_init_first_pass (actx, secret_key, public_key);
  2210. crypto_sign_update (actx, message, message_size);
  2211. crypto_sign_init_second_pass(actx);
  2212. crypto_sign_update (actx, message, message_size);
  2213. crypto_sign_final (actx, signature);
  2214. }
  2215. void crypto_check_init_custom_hash(crypto_check_ctx_abstract *ctx,
  2216. const u8 signature[64],
  2217. const u8 public_key[32],
  2218. const crypto_sign_vtable *hash)
  2219. {
  2220. ctx->hash = hash; // set vtable
  2221. COPY(ctx->buf, signature , 64);
  2222. COPY(ctx->pk , public_key, 32);
  2223. ctx->hash->init (ctx);
  2224. ctx->hash->update(ctx, signature , 32);
  2225. ctx->hash->update(ctx, public_key, 32);
  2226. }
  2227. void crypto_check_init(crypto_check_ctx_abstract *ctx, const u8 signature[64],
  2228. const u8 public_key[32])
  2229. {
  2230. crypto_check_init_custom_hash(ctx, signature, public_key,
  2231. &crypto_blake2b_vtable);
  2232. }
  2233. void crypto_check_update(crypto_check_ctx_abstract *ctx,
  2234. const u8 *msg, size_t msg_size)
  2235. {
  2236. ctx->hash->update(ctx, msg, msg_size);
  2237. }
  2238. int crypto_check_final(crypto_check_ctx_abstract *ctx)
  2239. {
  2240. u8 h_ram[64];
  2241. ctx->hash->final(ctx, h_ram);
  2242. reduce(h_ram);
  2243. u8 *R = ctx->buf; // R
  2244. u8 *s = ctx->buf + 32; // s
  2245. u8 *R_check = ctx->pk; // overwrite ctx->pk to save stack space
  2246. if (ge_r_check(R_check, s, h_ram, ctx->pk)) {
  2247. return -1;
  2248. }
  2249. return crypto_verify32(R, R_check); // R == R_check ? OK : fail
  2250. }
  2251. int crypto_check(const u8 signature[64], const u8 public_key[32],
  2252. const u8 *message, size_t message_size)
  2253. {
  2254. crypto_check_ctx ctx;
  2255. crypto_check_ctx_abstract *actx = (crypto_check_ctx_abstract*)&ctx;
  2256. crypto_check_init (actx, signature, public_key);
  2257. crypto_check_update(actx, message, message_size);
  2258. return crypto_check_final(actx);
  2259. }
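// Usage sketch (illustrative only, not part of the library): signing a
// message and checking the signature. sk is a hypothetical buffer,
// assumed to be filled with random bytes from a CSPRNG.
//
// u8 sk[32], pk[32], sig[64];
// const u8 msg[5] = { 'h', 'e', 'l', 'l', 'o' };
// crypto_sign_public_key(pk, sk);
// crypto_sign(sig, sk, pk, msg, 5);
// if (crypto_check(sig, pk, msg, 5) == 0) {
//     // the signature is valid
// }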
  2260. ///////////////////////
  2261. /// EdDSA to X25519 ///
  2262. ///////////////////////
  2263. void crypto_from_eddsa_private(u8 x25519[32], const u8 eddsa[32])
  2264. {
  2265. u8 a[64];
  2266. crypto_blake2b(a, eddsa, 32);
  2267. COPY(x25519, a, 32);
  2268. WIPE_BUFFER(a);
  2269. }
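// The conversion below uses the birational map between edwards25519
// and Curve25519: a point with Edwards coordinate y corresponds to the
// Montgomery coordinate
//     u = (1 + y) / (1 - y)
// which is exactly what the fe_add/fe_sub/fe_invert/fe_mul sequence
// computes.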
  2270. void crypto_from_eddsa_public(u8 x25519[32], const u8 eddsa[32])
  2271. {
  2272. fe t1, t2;
  2273. fe_frombytes(t2, eddsa);
  2274. fe_add(t1, fe_one, t2);
  2275. fe_sub(t2, fe_one, t2);
  2276. fe_invert(t2, t2);
  2277. fe_mul(t1, t1, t2);
  2278. fe_tobytes(x25519, t1);
  2279. WIPE_BUFFER(t1);
  2280. WIPE_BUFFER(t2);
  2281. }
  2282. /////////////////////////////////////////////
  2283. /// Dirty ephemeral public key generation ///
  2284. /////////////////////////////////////////////
2285. // These functions generate a public key, *without* clearing the
  2286. // cofactor. Sending that key over the network leaks 3 bits of the
  2287. // private key. Use only to generate ephemeral keys that will be hidden
  2288. // with crypto_curve_to_hidden().
  2289. //
  2290. // The public key is otherwise compatible with crypto_x25519() and
  2291. // crypto_key_exchange() (those properly clear the cofactor).
  2292. //
  2293. // Note that the distribution of the resulting public keys is almost
  2294. // uniform. Flipping the sign of the v coordinate (not provided by this
2295. // function) covers the entire key space almost perfectly, where
  2296. // "almost" means a 2^-128 bias (undetectable). This uniformity is
  2297. // needed to ensure the proper randomness of the resulting
  2298. // representatives (once we apply crypto_curve_to_hidden()).
  2299. //
  2300. // Recall that Curve25519 has order C = 2^255 + e, with e < 2^128 (not
  2301. // to be confused with the prime order of the main subgroup, L, which is
  2302. // 8 times less than that).
  2303. //
  2304. // Generating all points would require us to multiply a point of order C
  2305. // (the base point plus any point of order 8) by all scalars from 0 to
  2306. // C-1. Clamping limits us to scalars between 2^254 and 2^255 - 1. But
  2307. // by negating the resulting point at random, we also cover scalars from
  2308. // -2^255 + 1 to -2^254 (which modulo C is congruent to e+1 to 2^254 + e).
  2309. //
  2310. // In practice:
  2311. // - Scalars from 0 to e + 1 are never generated
  2312. // - Scalars from 2^255 to 2^255 + e are never generated
  2313. // - Scalars from 2^254 + 1 to 2^254 + e are generated twice
  2314. //
  2315. // Since e < 2^128, detecting this bias requires observing over 2^100
  2316. // representatives from a given source (this will never happen), *and*
  2317. // recovering enough of the private key to determine that they do, or do
  2318. // not, belong to the biased set (this practically requires solving
  2319. // discrete logarithm, which is conjecturally intractable).
  2320. //
  2321. // In practice, this means the bias is impossible to detect.
2322. // s += (x % 8) * L
  2323. // Guaranteed to fit in 256 bits iff s fits in 255 bits.
  2324. // L < 2^253
  2325. // x%8 < 2^3
  2326. // L * (x%8) < 2^255
  2327. // s < 2^255
  2328. // s + L * (x%8) < 2^256
  2329. static void add_xl(u8 s[32], u8 x)
  2330. {
  2331. u64 mod8 = x & 7;
  2332. u64 carry = 0;
  2333. FOR (i , 0, 8) {
  2334. carry = carry + load32_le(s + 4*i) + L[i] * mod8;
  2335. store32_le(s + 4*i, (u32)carry);
  2336. carry >>= 32;
  2337. }
  2338. }
  2339. // "Small" dirty ephemeral key.
  2340. // Use if you need to shrink the size of the binary, and can afford to
  2341. // slow down by a factor of two (compared to the fast version)
  2342. //
  2343. // This version works by decoupling the cofactor from the main factor.
  2344. //
  2345. // - The trimmed scalar determines the main factor
  2346. // - The clamped bits of the scalar determine the cofactor.
  2347. //
  2348. // Cofactor and main factor are combined into a single scalar, which is
  2349. // then multiplied by a point of order 8*L (unlike the base point, which
  2350. // has prime order). That "dirty" base point is the addition of the
  2351. // regular base point (9), and a point of order 8.
  2352. void crypto_x25519_dirty_small(u8 public_key[32], const u8 secret_key[32])
  2353. {
  2354. // Base point of order 8*L
  2355. // Raw scalar multiplication with it does not clear the cofactor,
  2356. // and the resulting public key will reveal 3 bits of the scalar.
  2357. static const u8 dirty_base_point[32] = {
  2358. 0x34, 0xfc, 0x6c, 0xb7, 0xc8, 0xde, 0x58, 0x97, 0x77, 0x70, 0xd9, 0x52,
  2359. 0x16, 0xcc, 0xdc, 0x6c, 0x85, 0x90, 0xbe, 0xcd, 0x91, 0x9c, 0x07, 0x59,
  2360. 0x94, 0x14, 0x56, 0x3b, 0x4b, 0xa4, 0x47, 0x0f, };
2361. // clamp the scalar (this also clears its cofactor)
  2362. u8 scalar[32];
  2363. COPY(scalar, secret_key, 32);
  2364. trim_scalar(scalar);
  2365. // Separate the main factor and the cofactor
  2366. //
  2367. // The scalar is trimmed, so its cofactor is cleared. The three
  2368. // least significant bits however still have a main factor. We must
  2369. // remove it for X25519 compatibility.
  2370. //
  2371. // We exploit the fact that 5*L = 1 (modulo 8)
  2372. // cofactor = lsb * 5 * L (modulo 8*L)
  2373. // combined = scalar + cofactor (modulo 8*L)
  2374. // combined = scalar + (lsb * 5 * L) (modulo 8*L)
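// (5 is the right multiplier because L = 5 (mod 8), hence
// 5*L = 25 = 1 (mod 8), and so lsb * 5 * L = lsb (mod 8).)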
  2375. add_xl(scalar, secret_key[0] * 5);
  2376. scalarmult(public_key, scalar, dirty_base_point, 256);
  2377. WIPE_BUFFER(scalar);
  2378. }
  2379. // "Fast" dirty ephemeral key
  2380. // We use this one by default.
  2381. //
  2382. // This version works by performing a regular scalar multiplication,
  2383. // then add a low order point. The scalar multiplication is done in
  2384. // Edwards space for more speed (*2 compared to the "small" version).
  2385. // The cost is a bigger binary for programs that don't also sign messages.
  2386. void crypto_x25519_dirty_fast(u8 public_key[32], const u8 secret_key[32])
  2387. {
  2388. u8 scalar[32];
  2389. ge pk;
  2390. COPY(scalar, secret_key, 32);
  2391. trim_scalar(scalar);
  2392. ge_scalarmult_base(&pk, scalar);
  2393. // Select low order point
  2394. // We're computing the [cofactor]lop scalar multiplication, where:
  2395. // cofactor = tweak & 7.
  2396. // lop = (lop_x, lop_y)
  2397. // lop_x = sqrt((sqrt(d + 1) + 1) / d)
  2398. // lop_y = -lop_x * sqrtm1
  2399. // Notes:
  2400. // - A (single) Montgomery ladder would be twice as slow.
  2401. // - An actual scalar multiplication would hurt performance.
  2402. // - A full table lookup would take more code.
  2403. u8 cofactor = secret_key[0] & 7;
  2404. int a = (cofactor >> 2) & 1;
  2405. int b = (cofactor >> 1) & 1;
  2406. int c = (cofactor >> 0) & 1;
  2407. fe t1, t2, t3;
  2408. fe_0(t1);
  2409. fe_ccopy(t1, sqrtm1, b);
  2410. fe_ccopy(t1, lop_x , c);
  2411. fe_neg (t3, t1);
  2412. fe_ccopy(t1, t3, a);
  2413. fe_1(t2);
  2414. fe_0(t3);
  2415. fe_ccopy(t2, t3 , b);
  2416. fe_ccopy(t2, lop_y, c);
  2417. fe_neg (t3, t2);
  2418. fe_ccopy(t2, t3, a^b);
  2419. ge_precomp low_order_point;
  2420. fe_add(low_order_point.Yp, t2, t1);
  2421. fe_sub(low_order_point.Ym, t2, t1);
  2422. fe_mul(low_order_point.T2, t2, t1);
  2423. fe_mul(low_order_point.T2, low_order_point.T2, D2);
  2424. // Add low order point to the public key
  2425. ge_madd(&pk, &pk, &low_order_point, t1, t2);
  2426. // Convert to Montgomery u coordinate (we ignore the sign)
  2427. fe_add(t1, pk.Z, pk.Y);
  2428. fe_sub(t2, pk.Z, pk.Y);
  2429. fe_invert(t2, t2);
  2430. fe_mul(t1, t1, t2);
  2431. fe_tobytes(public_key, t1);
  2432. WIPE_BUFFER(t1); WIPE_BUFFER(scalar);
  2433. WIPE_BUFFER(t2); WIPE_CTX(&pk);
  2434. WIPE_BUFFER(t3); WIPE_CTX(&low_order_point);
  2435. }
  2436. ///////////////////
  2437. /// Elligator 2 ///
  2438. ///////////////////
  2439. static const fe A = {486662};
  2440. // Elligator direct map
  2441. //
  2442. // Computes the point corresponding to a representative, encoded in 32
2443. // bytes (little-endian). Since positive representatives fit in 254
2444. // bits, the two most significant bits are ignored.
  2445. //
  2446. // From the paper:
  2447. // w = -A / (fe(1) + non_square * r^2)
  2448. // e = chi(w^3 + A*w^2 + w)
  2449. // u = e*w - (fe(1)-e)*(A//2)
  2450. // v = -e * sqrt(u^3 + A*u^2 + u)
  2451. //
  2452. // We ignore v because we don't need it for X25519 (the Montgomery
  2453. // ladder only uses u).
  2454. //
  2455. // Note that e is either 0, 1 or -1
  2456. // if e = 0 u = 0 and v = 0
  2457. // if e = 1 u = w
  2458. // if e = -1 u = -w - A = w * non_square * r^2
  2459. //
  2460. // Let r1 = non_square * r^2
  2461. // Let r2 = 1 + r1
2462. // Note that r2 cannot be zero: -1/non_square is not a square.
  2463. // We can (tediously) verify that:
  2464. // w^3 + A*w^2 + w = (A^2*r1 - r2^2) * A / r2^3
  2465. // Therefore:
  2466. // chi(w^3 + A*w^2 + w) = chi((A^2*r1 - r2^2) * (A / r2^3))
  2467. // chi(w^3 + A*w^2 + w) = chi((A^2*r1 - r2^2) * (A / r2^3)) * 1
  2468. // chi(w^3 + A*w^2 + w) = chi((A^2*r1 - r2^2) * (A / r2^3)) * chi(r2^6)
  2469. // chi(w^3 + A*w^2 + w) = chi((A^2*r1 - r2^2) * (A / r2^3) * r2^6)
  2470. // chi(w^3 + A*w^2 + w) = chi((A^2*r1 - r2^2) * A * r2^3)
  2471. // Corollary:
  2472. // e = 1 if (A^2*r1 - r2^2) * A * r2^3) is a non-zero square
  2473. // e = -1 if (A^2*r1 - r2^2) * A * r2^3) is not a square
  2474. // Note that w^3 + A*w^2 + w (and therefore e) can never be zero:
  2475. // w^3 + A*w^2 + w = w * (w^2 + A*w + 1)
  2476. // w^3 + A*w^2 + w = w * (w^2 + A*w + A^2/4 - A^2/4 + 1)
  2477. // w^3 + A*w^2 + w = w * (w + A/2)^2 - A^2/4 + 1)
  2478. // which is zero only if:
  2479. // w = 0 (impossible)
  2480. // (w + A/2)^2 = A^2/4 - 1 (impossible, because A^2/4-1 is not a square)
  2481. //
  2482. // Let isr = invsqrt((A^2*r1 - r2^2) * A * r2^3)
  2483. // isr = sqrt(1 / ((A^2*r1 - r2^2) * A * r2^3)) if e = 1
  2484. // isr = sqrt(sqrt(-1) / ((A^2*r1 - r2^2) * A * r2^3)) if e = -1
  2485. //
  2486. // if e = 1
  2487. // let u1 = -A * (A^2*r1 - r2^2) * A * r2^2 * isr^2
  2488. // u1 = w
  2489. // u1 = u
  2490. //
  2491. // if e = -1
  2492. // let ufactor = -non_square * sqrt(-1) * r^2
  2493. // let vfactor = sqrt(ufactor)
  2494. // let u2 = -A * (A^2*r1 - r2^2) * A * r2^2 * isr^2 * ufactor
  2495. // u2 = w * -1 * -non_square * r^2
  2496. // u2 = w * non_square * r^2
  2497. // u2 = u
void crypto_hidden_to_curve(uint8_t curve[32], const uint8_t hidden[32])
{
    // Representatives are encoded in 254 bits.
    // The two most significant ones are random padding that must be ignored.
    u8 clamped[32];
    COPY(clamped, hidden, 32);
    clamped[31] &= 0x3f;

    fe r, u, t1, t2, t3;
    fe_frombytes(r, clamped);
    fe_sq2(t1, r);
    fe_add(u, t1, fe_one);
    fe_sq (t2, u);
    fe_mul(t3, A2, t1);
    fe_sub(t3, t3, t2);
    fe_mul(t3, t3, A);
    fe_mul(t1, t2, u);
    fe_mul(t1, t3, t1);
    int is_square = invsqrt(t1, t1);
    fe_sq(u, r);
    fe_mul(u, u, ufactor);
    fe_ccopy(u, fe_one, is_square);
    fe_sq (t1, t1);
    fe_mul(u, u, A);
    fe_mul(u, u, t3);
    fe_mul(u, u, t2);
    fe_mul(u, u, t1);
    fe_neg(u, u);
    fe_tobytes(curve, u);

    WIPE_BUFFER(t1);  WIPE_BUFFER(r);
    WIPE_BUFFER(t2);  WIPE_BUFFER(u);
    WIPE_BUFFER(t3);  WIPE_BUFFER(clamped);
}
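// Example usage (illustrative sketch): recover the public key hidden in a
// representative received over the wire, then use it normally.  The
// surrounding variables (hidden, my_sk) are hypothetical.
//
//     u8 their_pk[32], shared[32];
//     crypto_hidden_to_curve(their_pk, hidden);  // never fails
//     crypto_x25519(shared, my_sk, their_pk);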
// Elligator inverse map
//
// Computes the representative of a point, if possible.  If not, it does
// nothing and returns -1.  Note that the success of the operation
// depends only on the point (more precisely its u coordinate).  The
// tweak parameter is used only upon success.
//
// The tweak should be a random byte.  Beyond that, its contents are an
// implementation detail.  Currently, the tweak comprises:
// - Bit  0  : sign of the v coordinate (0 if positive, 1 if negative)
// - Bits 1-5: not used
// - Bits 6-7: random padding
//
// From the paper:
//   Let sq = -non_square * u * (u+A)
//   if sq is not a square, or u = -A, there is no mapping
//   Assuming there is a mapping:
//     if v is positive: r = sqrt(-(u+A) / (non_square * u))
//     if v is negative: r = sqrt(-u / (non_square * (u+A)))
//
// We compute isr = invsqrt(-non_square * u * (u+A))
// If it wasn't a non-zero square, abort.
// Else, isr = sqrt(-1 / (non_square * u * (u+A)))
//
// This causes us to abort if u is zero, even though we shouldn't.  This
// never happens in practice, because (i) a random point on the curve has
// a negligible chance of being zero, and (ii) scalar multiplication with
// a trimmed scalar *never* yields zero.
//
// Since:
//   isr * (u+A) = sqrt(-1 / (non_square * u * (u+A))) * (u+A)
//   isr * (u+A) = sqrt(-(u+A) / (non_square * u))
// and:
//   isr * u = sqrt(-1 / (non_square * u * (u+A))) * u
//   isr * u = sqrt(-u / (non_square * (u+A)))
// Therefore:
//   if v is positive: r = isr * (u+A)
//   if v is negative: r = isr * u
int crypto_curve_to_hidden(u8 hidden[32], const u8 public_key[32], u8 tweak)
{
    fe t1, t2, t3;
    fe_frombytes(t1, public_key);

    fe_add(t2, t1, A);
    fe_mul(t3, t1, t2);
    fe_mul_small(t3, t3, -2);
    int is_square = invsqrt(t3, t3);
    if (!is_square) {
        // The only variable time bit.  This ultimately reveals how many
        // tries it took us to find a representable key.
        // This does not affect security as long as we try keys at random.
        WIPE_BUFFER(t1);
        WIPE_BUFFER(t2);
        WIPE_BUFFER(t3);
        return -1;
    }
    fe_ccopy    (t1, t2, tweak & 1);
    fe_mul      (t3, t1, t3);
    fe_mul_small(t1, t3, 2);
    fe_neg      (t2, t3);
    fe_ccopy    (t3, t2, fe_isodd(t1));
    fe_tobytes(hidden, t3);

    // Pad with two random bits
    hidden[31] |= tweak & 0xc0;

    WIPE_BUFFER(t1);
    WIPE_BUFFER(t2);
    WIPE_BUFFER(t3);
    return 0;
}
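// Example usage (illustrative sketch): hide a freshly generated public
// key.  Roughly half of all points have no representative, so we retry
// with new keys until the mapping succeeds.  fill_random() is a
// hypothetical CSPRNG wrapper; crypto_hidden_key_pair() below packages
// this very loop.
//
//     u8 sk[32], pk[32], hidden[32], tweak;
//     do {
//         fill_random(sk, 32);
//         fill_random(&tweak, 1);
//         crypto_x25519_dirty_fast(pk, sk);
//     } while (crypto_curve_to_hidden(hidden, pk, tweak) != 0);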
void crypto_hidden_key_pair(u8 hidden[32], u8 secret_key[32], u8 seed[32])
{
    u8 pk [32]; // public key
    u8 buf[64]; // seed + representative
    COPY(buf + 32, seed, 32);
    do {
        crypto_chacha20(buf, 0, 64, buf+32, zero);
        crypto_x25519_dirty_fast(pk, buf); // or the "small" version
    } while(crypto_curve_to_hidden(buf+32, pk, buf[32]));
    // Note that the return value of crypto_curve_to_hidden() is
    // independent of its tweak parameter.
    // Therefore, buf[32] is not actually reused.  Either we loop one
    // more time and buf[32] is used for the new seed, or we succeeded,
    // and buf[32] becomes the tweak parameter.
    crypto_wipe(seed, 32);
    COPY(hidden    , buf + 32, 32);
    COPY(secret_key, buf     , 32);
    WIPE_BUFFER(buf);
    WIPE_BUFFER(pk);
}
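// Example usage (illustrative sketch): the sender's side of a hidden
// handshake.  fill_random() and send() are hypothetical.
//
//     u8 seed[32], sk[32], hidden[32];
//     fill_random(seed, 32);                    // hypothetical CSPRNG
//     crypto_hidden_key_pair(hidden, sk, seed); // seed is wiped for us
//     send(hidden, 32);                         // indistinguishable from noise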
////////////////////
/// Key exchange ///
////////////////////
void crypto_key_exchange(u8 shared_key[32],
                         const u8 your_secret_key [32],
                         const u8 their_public_key[32])
{
    crypto_x25519(shared_key, your_secret_key, their_public_key);
    crypto_hchacha20(shared_key, shared_key, zero);
}
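// Example usage (illustrative sketch): both parties end up with the same
// shared key.  The key pairs are assumed to have been generated with
// crypto_x25519_public_key().
//
//     u8 alice_shared[32], bob_shared[32];
//     crypto_key_exchange(alice_shared, alice_sk, bob_pk  );
//     crypto_key_exchange(bob_shared  , bob_sk  , alice_pk);
//     // alice_shared and bob_shared are identical, and uniform enough
//     // (thanks to HChacha20) to use directly as a symmetric key.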
///////////////////////
/// Scalar division ///
///////////////////////

// Montgomery reduction.
// Divides x by (2^256), and reduces the result modulo L
//
// Precondition:
//   x < L * 2^256
// Constants:
//   r = 2^256                 (makes division by r trivial)
//   k = (r * (1/r) - 1) // L  (1/r is computed modulo L)
// Algorithm:
//   s = (x * k) % r
//   t = x + s*L    (t is always a multiple of r)
//   u = (t/r) % L  (u is always below 2*L, conditional subtraction is enough)
static void redc(u32 u[8], u32 x[16])
{
    static const u32 k[8] = { 0x12547e1b, 0xd2b51da3, 0xfdba84ff, 0xb1a206f2,
                              0xffa36bea, 0x14e75438, 0x6fe91836, 0x9db6c6f2, };
    static const u32 l[8] = { 0x5cf5d3ed, 0x5812631a, 0xa2f79cd6, 0x14def9de,
                              0x00000000, 0x00000000, 0x00000000, 0x10000000, };
    // s = x * k (modulo 2^256)
    // This is cheaper than the full multiplication.
    u32 s[8] = {0};
    FOR (i, 0, 8) {
        u64 carry = 0;
        FOR (j, 0, 8-i) {
            carry  += s[i+j] + (u64)x[i] * k[j];
            s[i+j]  = (u32)carry;
            carry >>= 32;
        }
    }
    u32 t[16] = {0};
    multiply(t, s, l);

    // t = t + x
    u64 carry = 0;
    FOR (i, 0, 16) {
        carry += (u64)t[i] + x[i];
        t[i]   = (u32)carry;
        carry >>= 32;
    }

    // u = (t / 2^256) % L
    // Note that t / 2^256 is always below 2*L,
    // so a constant time conditional subtraction is enough.
    // We work with L directly, in a 2's complement encoding
    // (-L == ~L + 1).
    remove_l(u, t+8);

    WIPE_BUFFER(s);
    WIPE_BUFFER(t);
}
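// Worked example of the reduction above, with deliberately tiny numbers
// (an illustration only, not what the code computes): let r = 16 and
// L = 13.  Then 1/r = 9 (modulo 13), and k = (16*9 - 1) / 13 = 11.
// Reducing x = 100 (which satisfies x < L * r = 208):
//     s = (100 * 11) % 16 = 12
//     t = 100 + 12*13     = 256  (a multiple of r, as promised)
//     u = (256 / 16) % 13 = 3
// Check: x / r = 100 * 9 = 900 = 3 (modulo 13).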
void crypto_x25519_inverse(u8 blind_salt [32], const u8 private_key[32],
                           const u8 curve_point[32])
{
    static const u8 Lm2[32] = { // L - 2
        0xeb, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2,
        0xde, 0xf9, 0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, };
    // 1 in Montgomery form
    u32 m_inv [8] = { 0x8d98951d, 0xd6ec3174, 0x737dcf70, 0xc6ef5bf4,
                      0xfffffffe, 0xffffffff, 0xffffffff, 0x0fffffff, };

    u8 scalar[32];
    COPY(scalar, private_key, 32);
    trim_scalar(scalar);

    // Convert the scalar to Montgomery form
    // m_scl = scalar * 2^256 (modulo L)
    u32 m_scl[8];
    {
        u32 tmp[16];
        ZERO(tmp, 8);
        load32_le_buf(tmp+8, scalar, 8);
        mod_l(scalar, tmp);
        load32_le_buf(m_scl, scalar, 8);
        WIPE_BUFFER(tmp); // Wipe ASAP to save stack space
    }
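    // Compute the modular inverse of the scalar with Fermat's little
    // theorem: m_inv = m_scl^(L-2) (modulo L), using left-to-right
    // square-and-multiply over the bits of L-2.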
    u32 product[16];
    for (int i = 252; i >= 0; i--) {
        ZERO(product, 16);
        multiply(product, m_inv, m_inv);
        redc(m_inv, product);
        if (scalar_bit(Lm2, i)) {
            ZERO(product, 16);
            multiply(product, m_inv, m_scl);
            redc(m_inv, product);
        }
    }
    // Convert the inverse *out* of Montgomery form
    // scalar = m_inv / 2^256 (modulo L)
    COPY(product, m_inv, 8);
    ZERO(product + 8, 8);
    redc(m_inv, product);
    store32_le_buf(scalar, m_inv, 8); // the *inverse* of the scalar

    // Clear the cofactor of scalar:
    //   cleared = scalar * (3*L + 1)      (modulo 8*L)
    //   cleared = scalar + scalar * 3 * L (modulo 8*L)
    // Note that (scalar * 3) is reduced modulo 8, so we only need the
    // first byte.
    add_xl(scalar, scalar[0] * 3);

    // Recall that 8*L < 2^256.  However, it is also very close to
    // 2^255.  If we spanned the ladder over 255 bits, random tests
    // wouldn't catch the off-by-one error.
    scalarmult(blind_salt, scalar, curve_point, 256);

    WIPE_BUFFER(scalar);   WIPE_BUFFER(m_scl);
    WIPE_BUFFER(product);  WIPE_BUFFER(m_inv);
}
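// Example usage (illustrative sketch): unblinding.  Since the inverse
// cancels a multiplication by the same trimmed scalar, a blinded point
// round-trips (sk and point are hypothetical; point is assumed to lie in
// the prime order subgroup, any low order component would be cleared):
//
//     u8 blinded[32], unblinded[32];
//     crypto_x25519(blinded, sk, point);             // blinded = sk * point
//     crypto_x25519_inverse(unblinded, sk, blinded); // recovers point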
////////////////////////////////
/// Authenticated encryption ///
////////////////////////////////
static void lock_auth(u8 mac[16], const u8  auth_key[32],
                      const u8 *ad         , size_t ad_size,
                      const u8 *cipher_text, size_t text_size)
{
    u8 sizes[16]; // Not secret, not wiped
    store64_le(sizes + 0, ad_size);
    store64_le(sizes + 8, text_size);
    crypto_poly1305_ctx poly_ctx;           // auto wiped...
    crypto_poly1305_init  (&poly_ctx, auth_key);
    crypto_poly1305_update(&poly_ctx, ad         , ad_size);
    crypto_poly1305_update(&poly_ctx, zero       , align(ad_size, 16));
    crypto_poly1305_update(&poly_ctx, cipher_text, text_size);
    crypto_poly1305_update(&poly_ctx, zero       , align(text_size, 16));
    crypto_poly1305_update(&poly_ctx, sizes      , 16);
    crypto_poly1305_final (&poly_ctx, mac); // ...here
}
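// The authenticated transcript therefore has the familiar RFC 8439
// layout (|| denotes concatenation):
//
//     ad          || zero padding to the next 16 byte boundary
//     cipher_text || zero padding to the next 16 byte boundary
//     ad_size (8 bytes, little endian) || text_size (8 bytes, little endian)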
void crypto_lock_aead(u8 mac[16], u8 *cipher_text,
                      const u8  key[32], const u8 nonce[24],
                      const u8 *ad        , size_t ad_size,
                      const u8 *plain_text, size_t text_size)
{
    u8 sub_key [32];
    u8 auth_key[64]; // "Wasting" the whole Chacha block is faster
    crypto_hchacha20(sub_key, key, nonce);
    crypto_chacha20(auth_key, 0, 64, sub_key, nonce + 16);
    crypto_chacha20_ctr(cipher_text, plain_text, text_size,
                        sub_key, nonce + 16, 1);
    lock_auth(mac, auth_key, ad, ad_size, cipher_text, text_size);
    WIPE_BUFFER(sub_key);
    WIPE_BUFFER(auth_key);
}
int crypto_unlock_aead(u8 *plain_text, const u8 key[32], const u8 nonce[24],
                       const u8  mac[16],
                       const u8 *ad         , size_t ad_size,
                       const u8 *cipher_text, size_t text_size)
{
    u8 sub_key [32];
    u8 auth_key[64]; // "Wasting" the whole Chacha block is faster
    crypto_hchacha20(sub_key, key, nonce);
    crypto_chacha20(auth_key, 0, 64, sub_key, nonce + 16);
    u8 real_mac[16];
    lock_auth(real_mac, auth_key, ad, ad_size, cipher_text, text_size);
    WIPE_BUFFER(auth_key);
    if (crypto_verify16(mac, real_mac)) {
        WIPE_BUFFER(sub_key);
        WIPE_BUFFER(real_mac);
        return -1;
    }
    crypto_chacha20_ctr(plain_text, cipher_text, text_size,
                        sub_key, nonce + 16, 1);
    WIPE_BUFFER(sub_key);
    WIPE_BUFFER(real_mac);
    return 0;
}
void crypto_lock(u8 mac[16], u8 *cipher_text,
                 const u8 key[32], const u8 nonce[24],
                 const u8 *plain_text, size_t text_size)
{
    crypto_lock_aead(mac, cipher_text, key, nonce, 0, 0, plain_text, text_size);
}

int crypto_unlock(u8 *plain_text,
                  const u8 key[32], const u8 nonce[24], const u8 mac[16],
                  const u8 *cipher_text, size_t text_size)
{
    return crypto_unlock_aead(plain_text, key, nonce, mac, 0, 0,
                              cipher_text, text_size);
}
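// Example usage (illustrative sketch): authenticated encryption round
// trip with the high level interface.  fill_random() is a hypothetical
// CSPRNG wrapper; with a random key, a counter nonce works just as well,
// as long as it is never reused.
//
//     u8 key[32], nonce[24], mac[16], ct[5], pt[5];
//     fill_random(key  , 32);
//     fill_random(nonce, 24); // never reuse a nonce with the same key
//     crypto_lock(mac, ct, key, nonce, (const u8 *)"hello", 5);
//     if (crypto_unlock(pt, key, nonce, mac, ct, 5) != 0) {
//         // corrupted or forged message; pt was never written
//     }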