Coverage Report

Created: 2022-01-18 06:27

/Users/buildslave/jenkins/workspace/coverage/llvm-project/libcxx/src/ryu/d2fixed.cpp
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

// Copyright 2018 Ulf Adams
// Copyright (c) Microsoft Corporation. All rights reserved.

// Boost Software License - Version 1.0 - August 17th, 2003

// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:

// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

// Avoid formatting to keep the changes with the original code minimal.
// clang-format off

#include "__config"
#include "charconv"
#include "cstring"
#include "system_error"

#include "include/ryu/common.h"
#include "include/ryu/d2fixed.h"
#include "include/ryu/d2fixed_full_table.h"
#include "include/ryu/d2s.h"
#include "include/ryu/d2s_intrinsics.h"
#include "include/ryu/digit_table.h"

_LIBCPP_BEGIN_NAMESPACE_STD

inline constexpr int __POW10_ADDITIONAL_BITS = 120;

#ifdef _LIBCPP_INTRINSIC128
// Returns the low 64 bits of the high 128 bits of the 256-bit product of a and b.
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint64_t __umul256_hi128_lo64(
  const uint64_t __aHi, const uint64_t __aLo, const uint64_t __bHi, const uint64_t __bLo) {
  uint64_t __b00Hi;
  const uint64_t __b00Lo = __ryu_umul128(__aLo, __bLo, &__b00Hi);
  uint64_t __b01Hi;
  const uint64_t __b01Lo = __ryu_umul128(__aLo, __bHi, &__b01Hi);
  uint64_t __b10Hi;
  const uint64_t __b10Lo = __ryu_umul128(__aHi, __bLo, &__b10Hi);
  uint64_t __b11Hi;
  const uint64_t __b11Lo = __ryu_umul128(__aHi, __bHi, &__b11Hi);
  (void) __b00Lo; // unused
  (void) __b11Hi; // unused
  const uint64_t __temp1Lo = __b10Lo + __b00Hi;
  const uint64_t __temp1Hi = __b10Hi + (__temp1Lo < __b10Lo);
  const uint64_t __temp2Lo = __b01Lo + __temp1Lo;
  const uint64_t __temp2Hi = __b01Hi + (__temp2Lo < __b01Lo);
  return __b11Lo + __temp1Hi + __temp2Hi;
}
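// Illustrative note, not part of the original source: the four partial products above
// tile the 256-bit result of (__aHi*2^64 + __aLo) * (__bHi*2^64 + __bLo). __b00 covers
// bits [0, 128), __b01 and __b10 cover bits [64, 192), and __b11 covers bits [128, 256);
// the return statement adds __b11's low half to the carried-up high halves of the middle
// terms, giving bits [128, 192) of the full product. A hypothetical reference version
// using unsigned __int128 (where available) would be:
//
//   uint64_t umul256_hi128_lo64_ref(uint64_t aHi, uint64_t aLo, uint64_t bHi, uint64_t bLo) {
//     using u128 = unsigned __int128;
//     const u128 b00 = (u128)aLo * bLo;
//     const u128 b01 = (u128)aLo * bHi;
//     const u128 b10 = (u128)aHi * bLo;
//     const u128 b11 = (u128)aHi * bHi;
//     const u128 t1 = b10 + (uint64_t)(b00 >> 64);  // carries from the low 128 bits
//     const u128 t2 = b01 + (uint64_t)t1;           // add the low half of t1
//     return (uint64_t)b11 + (uint64_t)(t1 >> 64) + (uint64_t)(t2 >> 64);
//   }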

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __uint128_mod1e9(const uint64_t __vHi, const uint64_t __vLo) {
  // After multiplying, we're going to shift right by 29, then truncate to uint32_t.
  // This means that we need only 29 + 32 = 61 bits, so we can truncate to uint64_t before shifting.
  const uint64_t __multiplied = __umul256_hi128_lo64(__vHi, __vLo, 0x89705F4136B4A597u, 0x31680A88F8953031u);

  // For uint32_t truncation, see the __mod1e9() comment in d2s_intrinsics.h.
  const uint32_t __shifted = static_cast<uint32_t>(__multiplied >> 29);

  return static_cast<uint32_t>(__vLo) - 1000000000 * __shifted;
}
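// Illustrative note, not part of the original source: this is the usual
// "divide by multiplying by a fixed-point reciprocal" trick. The 128-bit constant
// 0x89705F4136B4A597'31680A88F8953031 is approximately 2^157 / 10^9, so taking the high
// 128 bits of the 256-bit product and shifting right by another 29 bits yields
// __v / 10^9 for the value range used here. The last line then recovers
// __v % 10^9 as __v - 10^9 * (__v / 10^9); computing it in 32 bits is fine because the
// true remainder fits in 32 bits. A direct (slower) equivalent for comparison:
//
//   uint32_t uint128_mod1e9_ref(uint64_t vHi, uint64_t vLo) {
//     const unsigned __int128 v = ((unsigned __int128)vHi << 64) | vLo;
//     return (uint32_t)(v % 1000000000u);
//   }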
#endif // ^^^ intrinsics available ^^^

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __mulShift_mod1e9(const uint64_t __m, const uint64_t* const __mul, const int32_t __j) {
  uint64_t __high0;                                               // 64
  const uint64_t __low0 = __ryu_umul128(__m, __mul[0], &__high0); // 0
  uint64_t __high1;                                               // 128
  const uint64_t __low1 = __ryu_umul128(__m, __mul[1], &__high1); // 64
  uint64_t __high2;                                               // 192
  const uint64_t __low2 = __ryu_umul128(__m, __mul[2], &__high2); // 128
  const uint64_t __s0low = __low0;                  // 0
  (void) __s0low; // unused
  const uint64_t __s0high = __low1 + __high0;       // 64
  const uint32_t __c1 = __s0high < __low1;
  const uint64_t __s1low = __low2 + __high1 + __c1; // 128
  const uint32_t __c2 = __s1low < __low2; // __high1 + __c1 can't overflow, so compare against __low2
  const uint64_t __s1high = __high2 + __c2;         // 192
  _LIBCPP_ASSERT(__j >= 128, "");
  _LIBCPP_ASSERT(__j <= 180, "");
#ifdef _LIBCPP_INTRINSIC128
  const uint32_t __dist = static_cast<uint32_t>(__j - 128); // __dist: [0, 52]
  const uint64_t __shiftedhigh = __s1high >> __dist;
  const uint64_t __shiftedlow = __ryu_shiftright128(__s1low, __s1high, __dist);
  return __uint128_mod1e9(__shiftedhigh, __shiftedlow);
#else // ^^^ intrinsics available ^^^ / vvv intrinsics unavailable vvv
  if (__j < 160) { // __j: [128, 160)
    const uint64_t __r0 = __mod1e9(__s1high);
    const uint64_t __r1 = __mod1e9((__r0 << 32) | (__s1low >> 32));
    const uint64_t __r2 = ((__r1 << 32) | (__s1low & 0xffffffff));
    return __mod1e9(__r2 >> (__j - 128));
  } else { // __j: [160, 192)
    const uint64_t __r0 = __mod1e9(__s1high);
    const uint64_t __r1 = ((__r0 << 32) | (__s1low >> 32));
    return __mod1e9(__r1 >> (__j - 160));
  }
#endif // ^^^ intrinsics unavailable ^^^
}
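// Illustrative note, not part of the original source: conceptually this computes
// ((__m * M) >> __j) % 10^9, where M is a 192-bit table multiplier stored as three
// 64-bit limbs __mul[0..2] (low to high). The trailing "// 0", "// 64", "// 128",
// "// 192" comments mark each term's bit position within the 256-bit product. Because
// __j >= 128, only bits at and above position 128 survive the shift, so the low limbs
// are summed merely to propagate their carries (__c1, __c2). The callers below use the
// result as one 9-digit block of the decimal expansion of a double.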

void __append_n_digits(const uint32_t __olength, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  while (__digits >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    _VSTD::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c0, 2);
    _VSTD::memcpy(__result + __olength - __i - 4, __DIGIT_TABLE + __c1, 2);
    __i += 4;
  }
  if (__digits >= 100) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    _VSTD::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c, 2);
    __i += 2;
  }
  if (__digits >= 10) {
    const uint32_t __c = __digits << 1;
    _VSTD::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c, 2);
  } else {
    __result[0] = static_cast<char>('0' + __digits);
  }
}

_LIBCPP_HIDE_FROM_ABI inline void __append_d_digits(const uint32_t __olength, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  while (__digits >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    _VSTD::memcpy(__result + __olength + 1 - __i - 2, __DIGIT_TABLE + __c0, 2);
    _VSTD::memcpy(__result + __olength + 1 - __i - 4, __DIGIT_TABLE + __c1, 2);
    __i += 4;
  }
  if (__digits >= 100) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    _VSTD::memcpy(__result + __olength + 1 - __i - 2, __DIGIT_TABLE + __c, 2);
    __i += 2;
  }
  if (__digits >= 10) {
    const uint32_t __c = __digits << 1;
    __result[2] = __DIGIT_TABLE[__c + 1];
    __result[1] = '.';
    __result[0] = __DIGIT_TABLE[__c];
  } else {
    __result[1] = '.';
    __result[0] = static_cast<char>('0' + __digits);
  }
}

_LIBCPP_HIDE_FROM_ABI inline void __append_c_digits(const uint32_t __count, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  for (; __i < __count - 1; __i += 2) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    _VSTD::memcpy(__result + __count - __i - 2, __DIGIT_TABLE + __c, 2);
  }
  if (__i < __count) {
    const char __c = static_cast<char>('0' + (__digits % 10));
    __result[__count - __i - 1] = __c;
  }
}

void __append_nine_digits(uint32_t __digits, char* const __result) {
  if (__digits == 0) {
    _VSTD::memset(__result, '0', 9);
    return;
  }

  for (uint32_t __i = 0; __i < 5; __i += 4) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    _VSTD::memcpy(__result + 7 - __i, __DIGIT_TABLE + __c0, 2);
    _VSTD::memcpy(__result + 5 - __i, __DIGIT_TABLE + __c1, 2);
  }
  __result[0] = static_cast<char>('0' + __digits);
}
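// Illustrative note, not part of the original source: the __append_* helpers above all
// emit digits two at a time out of __DIGIT_TABLE, which (as in ryu's digit_table.h) is
// the 200-character string "000102...979899"; index (n % 100) * 2 points at the two
// ASCII digits of n % 100. __append_n_digits writes exactly __olength digits,
// __append_d_digits additionally places a '.' after the first digit, __append_c_digits
// left-pads with zeros to a fixed __count, and __append_nine_digits always writes a
// full 9-digit block. A minimal sketch of the same trick:
//
//   // needs <cstring> for memcpy; assumes a table laid out like __DIGIT_TABLE
//   void write_two_digits(unsigned n /* 0..99 */, char* out) {
//     static const char table[] =
//       "00010203040506070809101112131415161718192021222324"
//       "25262728293031323334353637383940414243444546474849"
//       "50515253545556575859606162636465666768697071727374"
//       "75767778798081828384858687888990919293949596979899";
//     memcpy(out, table + n * 2, 2);
//   }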

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __indexForExponent(const uint32_t __e) {
  return (__e + 15) / 16;
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __pow10BitsForIndex(const uint32_t __idx) {
  return 16 * __idx + __POW10_ADDITIONAL_BITS;
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __lengthForIndex(const uint32_t __idx) {
  // +1 for ceil, +16 for mantissa, +8 to round up when dividing by 9
  return (__log10Pow2(16 * static_cast<int32_t>(__idx)) + 1 + 16 + 8) / 9;
}
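// Illustrative note, not part of the original source: these helpers map a binary
// exponent onto the precomputed power-of-ten tables, which are chunked in steps of
// 16 bits. __indexForExponent rounds a nonnegative exponent __e up to a multiple of 16,
// __pow10BitsForIndex is the fixed-point precision used for that chunk (16 * idx plus
// the 120 extra bits from __POW10_ADDITIONAL_BITS), and __lengthForIndex estimates how
// many 9-digit blocks the integer part needs, using __log10Pow2 to convert a bit count
// into a decimal digit count. For example, __e = 100 gives idx = (100 + 15) / 16 = 7,
// i.e. the chunk covering values with up to 16 * 7 = 112 integer bits.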

[[nodiscard]] to_chars_result __d2fixed_buffered_n(char* _First, char* const _Last, const double __d,
  const uint32_t __precision) {
  char* const _Original_first = _First;

  const uint64_t __bits = __double_to_bits(__d);

  // Case distinction; exit early for the easy cases.
  if (__bits == 0) {
    const int32_t _Total_zero_length = 1 // leading zero
      + static_cast<int32_t>(__precision != 0) // possible decimal point
      + static_cast<int32_t>(__precision); // zeroes after decimal point

    if (_Last - _First < _Total_zero_length) {
      return { _Last, errc::value_too_large };
    }

    *_First++ = '0';
    if (__precision > 0) {
      *_First++ = '.';
      _VSTD::memset(_First, '0', __precision);
      _First += __precision;
    }
    return { _First, errc{} };
  }

  // Decode __bits into mantissa and exponent.
  const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
  const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

  int32_t __e2;
  uint64_t __m2;
  if (__ieeeExponent == 0) {
    __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = __ieeeMantissa;
  } else {
    __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  }

  bool __nonzero = false;
  if (__e2 >= -52) {
    const uint32_t __idx = __e2 < 0 ? 0 : __indexForExponent(static_cast<uint32_t>(__e2));
    const uint32_t __p10bits = __pow10BitsForIndex(__idx);
    const int32_t __len = static_cast<int32_t>(__lengthForIndex(__idx));
    for (int32_t __i = __len - 1; __i >= 0; --__i) {
      const uint32_t __j = __p10bits - __e2;
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      const uint32_t __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT[__POW10_OFFSET[__idx] + __i],
        static_cast<int32_t>(__j + 8));
      if (__nonzero) {
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
      } else if (__digits != 0) {
        const uint32_t __olength = __decimalLength9(__digits);
        if (_Last - _First < static_cast<ptrdiff_t>(__olength)) {
          return { _Last, errc::value_too_large };
        }
        __append_n_digits(__olength, __digits, _First);
        _First += __olength;
        __nonzero = true;
      }
    }
  }
  if (!__nonzero) {
    if (_First == _Last) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '0';
  }
  if (__precision > 0) {
    if (_First == _Last) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '.';
  }
  if (__e2 < 0) {
    const int32_t __idx = -__e2 / 16;
    const uint32_t __blocks = __precision / 9 + 1;
    // 0 = don't round up; 1 = round up unconditionally; 2 = round up if odd.
    int __roundUp = 0;
    uint32_t __i = 0;
    if (__blocks <= __MIN_BLOCK_2[__idx]) {
      __i = __blocks;
      if (_Last - _First < static_cast<ptrdiff_t>(__precision)) {
        return { _Last, errc::value_too_large };
      }
      _VSTD::memset(_First, '0', __precision);
      _First += __precision;
    } else if (__i < __MIN_BLOCK_2[__idx]) {
      __i = __MIN_BLOCK_2[__idx];
      if (_Last - _First < static_cast<ptrdiff_t>(9 * __i)) {
        return { _Last, errc::value_too_large };
      }
      _VSTD::memset(_First, '0', 9 * __i);
      _First += 9 * __i;
    }
    for (; __i < __blocks; ++__i) {
      const int32_t __j = __ADDITIONAL_BITS_2 + (-__e2 - 16 * __idx);
      const uint32_t __p = __POW10_OFFSET_2[__idx] + __i - __MIN_BLOCK_2[__idx];
      if (__p >= __POW10_OFFSET_2[__idx + 1]) {
        // If the remaining digits are all 0, then we might as well use memset.
        // No rounding required in this case.
        const uint32_t __fill = __precision - 9 * __i;
        if (_Last - _First < static_cast<ptrdiff_t>(__fill)) {
          return { _Last, errc::value_too_large };
        }
        _VSTD::memset(_First, '0', __fill);
        _First += __fill;
        break;
      }
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      uint32_t __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT_2[__p], __j + 8);
      if (__i < __blocks - 1) {
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
      } else {
        const uint32_t __maximum = __precision - 9 * __i;
        uint32_t __lastDigit = 0;
        for (uint32_t __k = 0; __k < 9 - __maximum; ++__k) {
          __lastDigit = __digits % 10;
          __digits /= 10;
        }
        if (__lastDigit != 5) {
          __roundUp = __lastDigit > 5;
        } else {
          // Is m * 10^(additionalDigits + 1) / 2^(-__e2) integer?
          const int32_t __requiredTwos = -__e2 - static_cast<int32_t>(__precision) - 1;
          const bool __trailingZeros = __requiredTwos <= 0
            || (__requiredTwos < 60 && __multipleOfPowerOf2(__m2, static_cast<uint32_t>(__requiredTwos)));
          __roundUp = __trailingZeros ? 2 : 1;
        }
        if (__maximum > 0) {
          if (_Last - _First < static_cast<ptrdiff_t>(__maximum)) {
            return { _Last, errc::value_too_large };
          }
          __append_c_digits(__maximum, __digits, _First);
          _First += __maximum;
        }
        break;
      }
    }
    if (__roundUp != 0) {
      char* _Round = _First;
      char* _Dot = _Last;
      while (true) {
        if (_Round == _Original_first) {
          _Round[0] = '1';
          if (_Dot != _Last) {
            _Dot[0] = '0';
            _Dot[1] = '.';
          }
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = '0';
          break;
        }
        --_Round;
        const char __c = _Round[0];
        if (__c == '.') {
          _Dot = _Round;
        } else if (__c == '9') {
          _Round[0] = '0';
          __roundUp = 1;
        } else {
          if (__roundUp == 1 || __c % 2 != 0) {
            _Round[0] = __c + 1;
          }
          break;
        }
      }
    }
  } else {
    if (_Last - _First < static_cast<ptrdiff_t>(__precision)) {
      return { _Last, errc::value_too_large };
    }
    _VSTD::memset(_First, '0', __precision);
    _First += __precision;
  }
  return { _First, errc{} };
}
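// Illustrative note, not part of the original source: __d2fixed_buffered_n implements
// the fixed-notation, explicit-precision conversion used by std::to_chars with
// chars_format::fixed. It walks the value in 9-digit blocks (integer part first, then
// the fraction), applies round-to-nearest-even via the __roundUp carry loop, and
// reports value_too_large when the [_First, _Last) buffer is too small. A caller-side
// sketch of the behavior:
//
//   #include <charconv>
//   #include <cstdio>
//   int main() {
//     char buf[64];
//     auto [ptr, ec] = std::to_chars(buf, buf + sizeof(buf), 0.125, std::chars_format::fixed, 2);
//     if (ec == std::errc{})
//       std::printf("%.*s\n", static_cast<int>(ptr - buf), buf);  // "0.12": the exact tie rounds to even
//   }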

[[nodiscard]] to_chars_result __d2exp_buffered_n(char* _First, char* const _Last, const double __d,
  uint32_t __precision) {
  char* const _Original_first = _First;

  const uint64_t __bits = __double_to_bits(__d);

  // Case distinction; exit early for the easy cases.
  if (__bits == 0) {
    const int32_t _Total_zero_length = 1 // leading zero
      + static_cast<int32_t>(__precision != 0) // possible decimal point
      + static_cast<int32_t>(__precision) // zeroes after decimal point
      + 4; // "e+00"
    if (_Last - _First < _Total_zero_length) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '0';
    if (__precision > 0) {
      *_First++ = '.';
      _VSTD::memset(_First, '0', __precision);
      _First += __precision;
    }
    _VSTD::memcpy(_First, "e+00", 4);
    _First += 4;
    return { _First, errc{} };
  }

  // Decode __bits into mantissa and exponent.
  const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
  const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

  int32_t __e2;
  uint64_t __m2;
  if (__ieeeExponent == 0) {
    __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = __ieeeMantissa;
  } else {
    __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  }

  const bool __printDecimalPoint = __precision > 0;
  ++__precision;
  uint32_t __digits = 0;
  uint32_t __printedDigits = 0;
  uint32_t __availableDigits = 0;
  int32_t __exp = 0;
  if (__e2 >= -52) {
    const uint32_t __idx = __e2 < 0 ? 0 : __indexForExponent(static_cast<uint32_t>(__e2));
    const uint32_t __p10bits = __pow10BitsForIndex(__idx);
    const int32_t __len = static_cast<int32_t>(__lengthForIndex(__idx));
    for (int32_t __i = __len - 1; __i >= 0; --__i) {
      const uint32_t __j = __p10bits - __e2;
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT[__POW10_OFFSET[__idx] + __i],
        static_cast<int32_t>(__j + 8));
      if (__printedDigits != 0) {
        if (__printedDigits + 9 > __precision) {
          __availableDigits = 9;
          break;
        }
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
        __printedDigits += 9;
      } else if (__digits != 0) {
        __availableDigits = __decimalLength9(__digits);
        __exp = __i * 9 + static_cast<int32_t>(__availableDigits) - 1;
        if (__availableDigits > __precision) {
          break;
        }
        if (__printDecimalPoint) {
          if (_Last - _First < static_cast<ptrdiff_t>(__availableDigits + 1)) {
            return { _Last, errc::value_too_large };
          }
          __append_d_digits(__availableDigits, __digits, _First);
          _First += __availableDigits + 1; // +1 for decimal point
        } else {
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = static_cast<char>('0' + __digits);
        }
        __printedDigits = __availableDigits;
        __availableDigits = 0;
      }
    }
  }

  if (__e2 < 0 && __availableDigits == 0) {
    const int32_t __idx = -__e2 / 16;
    for (int32_t __i = __MIN_BLOCK_2[__idx]; __i < 200; ++__i) {
      const int32_t __j = __ADDITIONAL_BITS_2 + (-__e2 - 16 * __idx);
      const uint32_t __p = __POW10_OFFSET_2[__idx] + static_cast<uint32_t>(__i) - __MIN_BLOCK_2[__idx];
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      __digits = (__p >= __POW10_OFFSET_2[__idx + 1]) ? 0 : __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT_2[__p], __j + 8);
      if (__printedDigits != 0) {
        if (__printedDigits + 9 > __precision) {
          __availableDigits = 9;
          break;
        }
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
        __printedDigits += 9;
      } else if (__digits != 0) {
        __availableDigits = __decimalLength9(__digits);
        __exp = -(__i + 1) * 9 + static_cast<int32_t>(__availableDigits) - 1;
        if (__availableDigits > __precision) {
          break;
        }
        if (__printDecimalPoint) {
          if (_Last - _First < static_cast<ptrdiff_t>(__availableDigits + 1)) {
            return { _Last, errc::value_too_large };
          }
          __append_d_digits(__availableDigits, __digits, _First);
          _First += __availableDigits + 1; // +1 for decimal point
        } else {
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = static_cast<char>('0' + __digits);
        }
        __printedDigits = __availableDigits;
        __availableDigits = 0;
      }
    }
  }

  const uint32_t __maximum = __precision - __printedDigits;
  if (__availableDigits == 0) {
    __digits = 0;
  }
  uint32_t __lastDigit = 0;
  if (__availableDigits > __maximum) {
    for (uint32_t __k = 0; __k < __availableDigits - __maximum; ++__k) {
      __lastDigit = __digits % 10;
      __digits /= 10;
    }
  }
  // 0 = don't round up; 1 = round up unconditionally; 2 = round up if odd.
  int __roundUp = 0;
  if (__lastDigit != 5) {
    __roundUp = __lastDigit > 5;
  } else {
    // Is m * 2^__e2 * 10^(__precision + 1 - __exp) integer?
    // __precision was already increased by 1, so we don't need to write + 1 here.
    const int32_t __rexp = static_cast<int32_t>(__precision) - __exp;
    const int32_t __requiredTwos = -__e2 - __rexp;
    bool __trailingZeros = __requiredTwos <= 0
      || (__requiredTwos < 60 && __multipleOfPowerOf2(__m2, static_cast<uint32_t>(__requiredTwos)));
    if (__rexp < 0) {
      const int32_t __requiredFives = -__rexp;
      __trailingZeros = __trailingZeros && __multipleOfPowerOf5(__m2, static_cast<uint32_t>(__requiredFives));
    }
    __roundUp = __trailingZeros ? 2 : 1;
  }
  if (__printedDigits != 0) {
    if (_Last - _First < static_cast<ptrdiff_t>(__maximum)) {
      return { _Last, errc::value_too_large };
    }
    if (__digits == 0) {
      _VSTD::memset(_First, '0', __maximum);
    } else {
      __append_c_digits(__maximum, __digits, _First);
    }
    _First += __maximum;
  } else {
    if (__printDecimalPoint) {
      if (_Last - _First < static_cast<ptrdiff_t>(__maximum + 1)) {
        return { _Last, errc::value_too_large };
      }
      __append_d_digits(__maximum, __digits, _First);
      _First += __maximum + 1; // +1 for decimal point
    } else {
      if (_First == _Last) {
        return { _Last, errc::value_too_large };
      }
      *_First++ = static_cast<char>('0' + __digits);
    }
  }
  if (__roundUp != 0) {
    char* _Round = _First;
    while (true) {
      if (_Round == _Original_first) {
        _Round[0] = '1';
        ++__exp;
        break;
      }
      --_Round;
      const char __c = _Round[0];
      if (__c == '.') {
        // Keep going.
      } else if (__c == '9') {
        _Round[0] = '0';
        __roundUp = 1;
      } else {
        if (__roundUp == 1 || __c % 2 != 0) {
          _Round[0] = __c + 1;
        }
        break;
      }
    }
  }

  char _Sign_character;

  if (__exp < 0) {
    _Sign_character = '-';
    __exp = -__exp;
  } else {
    _Sign_character = '+';
  }

  const int _Exponent_part_length = __exp >= 100
    ? 5 // "e+NNN"
    : 4; // "e+NN"

  if (_Last - _First < _Exponent_part_length) {
    return { _Last, errc::value_too_large };
  }

  *_First++ = 'e';
  *_First++ = _Sign_character;

  if (__exp >= 100) {
    const int32_t __c = __exp % 10;
    _VSTD::memcpy(_First, __DIGIT_TABLE + 2 * (__exp / 10), 2);
    _First[2] = static_cast<char>('0' + __c);
    _First += 3;
  } else {
    _VSTD::memcpy(_First, __DIGIT_TABLE + 2 * __exp, 2);
    _First += 2;
  }

  return { _First, errc{} };
}
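// Illustrative note, not part of the original source: this is the scientific-notation
// counterpart of __d2fixed_buffered_n (one leading digit, __precision fractional digits,
// then an "e+NN" / "e+NNN" suffix written from __DIGIT_TABLE). The carry loop above
// walks back over '9's when rounding up; if the carry reaches the first character, the
// leading digit becomes '1' and the decimal exponent __exp is bumped, so for example
// 9.999999 at precision 2 comes out as "1.00e+01". Caller-side sketch:
//
//   #include <charconv>
//   #include <cstdio>
//   int main() {
//     char buf[32];
//     auto [ptr, ec] = std::to_chars(buf, buf + sizeof(buf), 9.999999, std::chars_format::scientific, 2);
//     if (ec == std::errc{})
//       std::printf("%.*s\n", static_cast<int>(ptr - buf), buf);  // 1.00e+01
//   }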

_LIBCPP_END_NAMESPACE_STD

// clang-format on