Coverage Report

Created: 2022-05-17 06:19

/Users/buildslave/jenkins/workspace/coverage/llvm-project/libcxx/src/ryu/d2fixed.cpp
Count summary: every execution count recorded for this file is 0 (no line was covered). The source listing follows.
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

// Copyright 2018 Ulf Adams
// Copyright (c) Microsoft Corporation. All rights reserved.

// Boost Software License - Version 1.0 - August 17th, 2003

// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:

// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

// Avoid formatting to keep the changes with the original code minimal.
// clang-format off

#include <__assert>
#include <__config>
#include <charconv>
#include <cstring>
#include <system_error>

#include "include/ryu/common.h"
#include "include/ryu/d2fixed.h"
#include "include/ryu/d2fixed_full_table.h"
#include "include/ryu/d2s.h"
#include "include/ryu/d2s_intrinsics.h"
#include "include/ryu/digit_table.h"

_LIBCPP_BEGIN_NAMESPACE_STD

inline constexpr int __POW10_ADDITIONAL_BITS = 120;

#ifdef _LIBCPP_INTRINSIC128
// Returns the low 64 bits of the high 128 bits of the 256-bit product of a and b.
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint64_t __umul256_hi128_lo64(
  const uint64_t __aHi, const uint64_t __aLo, const uint64_t __bHi, const uint64_t __bLo) {
  uint64_t __b00Hi;
  const uint64_t __b00Lo = __ryu_umul128(__aLo, __bLo, &__b00Hi);
  uint64_t __b01Hi;
  const uint64_t __b01Lo = __ryu_umul128(__aLo, __bHi, &__b01Hi);
  uint64_t __b10Hi;
  const uint64_t __b10Lo = __ryu_umul128(__aHi, __bLo, &__b10Hi);
  uint64_t __b11Hi;
  const uint64_t __b11Lo = __ryu_umul128(__aHi, __bHi, &__b11Hi);
  (void) __b00Lo; // unused
  (void) __b11Hi; // unused
  const uint64_t __temp1Lo = __b10Lo + __b00Hi;
  const uint64_t __temp1Hi = __b10Hi + (__temp1Lo < __b10Lo);
  const uint64_t __temp2Lo = __b01Lo + __temp1Lo;
  const uint64_t __temp2Hi = __b01Hi + (__temp2Lo < __b01Lo);
  return __b11Lo + __temp1Hi + __temp2Hi;
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __uint128_mod1e9(const uint64_t __vHi, const uint64_t __vLo) {
  // After multiplying, we're going to shift right by 29, then truncate to uint32_t.
  // This means that we need only 29 + 32 = 61 bits, so we can truncate to uint64_t before shifting.
  const uint64_t __multiplied = __umul256_hi128_lo64(__vHi, __vLo, 0x89705F4136B4A597u, 0x31680A88F8953031u);

  // For uint32_t truncation, see the __mod1e9() comment in d2s_intrinsics.h.
  const uint32_t __shifted = static_cast<uint32_t>(__multiplied >> 29);

  return static_cast<uint32_t>(__vLo) - 1000000000 * __shifted;
}
#endif // ^^^ intrinsics available ^^^

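// A sketch of the reduction in __uint128_mod1e9 above, assuming the usual Ryu derivation: the
// 128-bit constant 0x89705F4136B4A597'31680A88F8953031 is approximately 2^157 / 10^9, so
//   __multiplied       ~ (__v * 2^157 / 10^9) >> 128   (bits [128, 192) of the 256-bit product)
//   __multiplied >> 29 ~  __v / 10^9                   (truncated; fits the 61-bit bound noted above)
// and __vLo - 1000000000 * __shifted is then __v mod 10^9, computed without any wide division.
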
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __mulShift_mod1e9(const uint64_t __m, const uint64_t* const __mul, const int32_t __j) {
  uint64_t __high0;                                               // 64
  const uint64_t __low0 = __ryu_umul128(__m, __mul[0], &__high0); // 0
  uint64_t __high1;                                               // 128
  const uint64_t __low1 = __ryu_umul128(__m, __mul[1], &__high1); // 64
  uint64_t __high2;                                               // 192
  const uint64_t __low2 = __ryu_umul128(__m, __mul[2], &__high2); // 128
  const uint64_t __s0low = __low0;                  // 0
  (void) __s0low; // unused
  const uint64_t __s0high = __low1 + __high0;       // 64
  const uint32_t __c1 = __s0high < __low1;
  const uint64_t __s1low = __low2 + __high1 + __c1; // 128
  const uint32_t __c2 = __s1low < __low2; // __high1 + __c1 can't overflow, so compare against __low2
  const uint64_t __s1high = __high2 + __c2;         // 192
  _LIBCPP_ASSERT(__j >= 128, "");
  _LIBCPP_ASSERT(__j <= 180, "");
#ifdef _LIBCPP_INTRINSIC128
  const uint32_t __dist = static_cast<uint32_t>(__j - 128); // __dist: [0, 52]
  const uint64_t __shiftedhigh = __s1high >> __dist;
  const uint64_t __shiftedlow = __ryu_shiftright128(__s1low, __s1high, __dist);
  return __uint128_mod1e9(__shiftedhigh, __shiftedlow);
#else // ^^^ intrinsics available ^^^ / vvv intrinsics unavailable vvv
  if (__j < 160) { // __j: [128, 160)
    const uint64_t __r0 = __mod1e9(__s1high);
    const uint64_t __r1 = __mod1e9((__r0 << 32) | (__s1low >> 32));
    const uint64_t __r2 = ((__r1 << 32) | (__s1low & 0xffffffff));
    return __mod1e9(__r2 >> (__j - 128));
  } else { // __j: [160, 192)
    const uint64_t __r0 = __mod1e9(__s1high);
    const uint64_t __r1 = ((__r0 << 32) | (__s1low >> 32));
    return __mod1e9(__r1 >> (__j - 160));
  }
#endif // ^^^ intrinsics unavailable ^^^
}

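// In effect, __mulShift_mod1e9 above computes ((__m * M) >> __j) % 1000000000, where M is the
// 192-bit value stored little-endian in __mul[0..2] and __j is in [128, 180]. The numeric comments
// (// 0, // 64, // 128, // 192) mark each limb's bit offset within the 256-bit product; because
// __j >= 128, only __s1low (bits 128..191) and __s1high (bits 192..255) survive the right shift.
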
void __append_n_digits(const uint32_t __olength, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  while (__digits >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    _VSTD::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c0, 2);
    _VSTD::memcpy(__result + __olength - __i - 4, __DIGIT_TABLE + __c1, 2);
    __i += 4;
  }
  if (__digits >= 100) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    _VSTD::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c, 2);
    __i += 2;
  }
  if (__digits >= 10) {
    const uint32_t __c = __digits << 1;
    _VSTD::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c, 2);
  } else {
    __result[0] = static_cast<char>('0' + __digits);
  }
}

_LIBCPP_HIDE_FROM_ABI inline void __append_d_digits(const uint32_t __olength, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  while (__digits >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    _VSTD::memcpy(__result + __olength + 1 - __i - 2, __DIGIT_TABLE + __c0, 2);
    _VSTD::memcpy(__result + __olength + 1 - __i - 4, __DIGIT_TABLE + __c1, 2);
    __i += 4;
  }
  if (__digits >= 100) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    _VSTD::memcpy(__result + __olength + 1 - __i - 2, __DIGIT_TABLE + __c, 2);
    __i += 2;
  }
  if (__digits >= 10) {
    const uint32_t __c = __digits << 1;
    __result[2] = __DIGIT_TABLE[__c + 1];
    __result[1] = '.';
    __result[0] = __DIGIT_TABLE[__c];
  } else {
    __result[1] = '.';
    __result[0] = static_cast<char>('0' + __digits);
  }
}

_LIBCPP_HIDE_FROM_ABI inline void __append_c_digits(const uint32_t __count, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  for (; __i < __count - 1; __i += 2) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    _VSTD::memcpy(__result + __count - __i - 2, __DIGIT_TABLE + __c, 2);
  }
  if (__i < __count) {
    const char __c = static_cast<char>('0' + (__digits % 10));
    __result[__count - __i - 1] = __c;
  }
}

void __append_nine_digits(uint32_t __digits, char* const __result) {
  if (__digits == 0) {
    _VSTD::memset(__result, '0', 9);
    return;
  }

  for (uint32_t __i = 0; __i < 5; __i += 4) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    _VSTD::memcpy(__result + 7 - __i, __DIGIT_TABLE + __c0, 2);
    _VSTD::memcpy(__result + 5 - __i, __DIGIT_TABLE + __c1, 2);
  }
  __result[0] = static_cast<char>('0' + __digits);
}

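// The four __append_* helpers above share one idea: __DIGIT_TABLE (digit_table.h) is the
// 200-character sequence "000102...9899", so __DIGIT_TABLE + 2 * __n points at the two ASCII digits
// of __n for __n in [0, 99], and each memcpy emits two digits at a time. A small worked example,
// assuming that table:
//   __append_nine_digits(123456789, __p) writes "123456789" into __p[0..8];
//   __append_d_digits(3, 142, __p) writes "1.42" (the 'd' variant inserts the decimal point after
//   the leading digit, which is why it writes through __result + __olength + 1).
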
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __indexForExponent(const uint32_t __e) {
  return (__e + 15) / 16;
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __pow10BitsForIndex(const uint32_t __idx) {
  return 16 * __idx + __POW10_ADDITIONAL_BITS;
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __lengthForIndex(const uint32_t __idx) {
  // +1 for ceil, +16 for mantissa, +8 to round up when dividing by 9
  return (__log10Pow2(16 * static_cast<int32_t>(__idx)) + 1 + 16 + 8) / 9;
}

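// Rough reading of the three helpers above: the power-of-10 table is chunked by binary exponent in
// steps of 16, so __indexForExponent(__e) is ceil(__e / 16); __pow10BitsForIndex(__idx) feeds the
// shift amount __j used with the __POW10_SPLIT entries for that chunk; and __lengthForIndex(__idx)
// bounds how many 9-digit decimal blocks the integer part can span. Worked example (assuming
// __log10Pow2(__e) == floor(__e * log10(2))): for __e2 == 100, __idx == 7 and
// __lengthForIndex(7) == (33 + 1 + 16 + 8) / 9 == 6 blocks, i.e. at most 54 digits, which covers
// the at-most-47-digit integer part of a 53-bit mantissa scaled by 2^100.
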
[[nodiscard]] to_chars_result __d2fixed_buffered_n(char* _First, char* const _Last, const double __d,
  const uint32_t __precision) {
  char* const _Original_first = _First;

  const uint64_t __bits = __double_to_bits(__d);

  // Case distinction; exit early for the easy cases.
  if (__bits == 0) {
    const int32_t _Total_zero_length = 1 // leading zero
      + static_cast<int32_t>(__precision != 0) // possible decimal point
      + static_cast<int32_t>(__precision); // zeroes after decimal point

    if (_Last - _First < _Total_zero_length) {
      return { _Last, errc::value_too_large };
    }

    *_First++ = '0';
    if (__precision > 0) {
      *_First++ = '.';
      _VSTD::memset(_First, '0', __precision);
      _First += __precision;
    }
    return { _First, errc{} };
  }

  // Decode __bits into mantissa and exponent.
  const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
  const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

  int32_t __e2;
  uint64_t __m2;
  if (__ieeeExponent == 0) {
    __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = __ieeeMantissa;
  } else {
    __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  }

  bool __nonzero = false;
  if (__e2 >= -52) {
    const uint32_t __idx = __e2 < 0 ? 0 : __indexForExponent(static_cast<uint32_t>(__e2));
    const uint32_t __p10bits = __pow10BitsForIndex(__idx);
    const int32_t __len = static_cast<int32_t>(__lengthForIndex(__idx));
    for (int32_t __i = __len - 1; __i >= 0; --__i) {
      const uint32_t __j = __p10bits - __e2;
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      const uint32_t __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT[__POW10_OFFSET[__idx] + __i],
        static_cast<int32_t>(__j + 8));
      if (__nonzero) {
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
      } else if (__digits != 0) {
        const uint32_t __olength = __decimalLength9(__digits);
        if (_Last - _First < static_cast<ptrdiff_t>(__olength)) {
          return { _Last, errc::value_too_large };
        }
        __append_n_digits(__olength, __digits, _First);
        _First += __olength;
        __nonzero = true;
      }
    }
  }
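  // The loop above walks the integer part of __m2 * 2^__e2 from the most significant 9-digit block
  // down to the block just above the decimal point: in effect, block __i is
  // floor(__m2 * 2^__e2 / 10^(9 * __i)) mod 10^9, obtained from the precomputed __POW10_SPLIT table
  // via __mulShift_mod1e9 instead of wide division. Leading all-zero blocks are skipped (__nonzero),
  // so the first printed block uses __append_n_digits and later ones use the zero-padded
  // __append_nine_digits.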
  if (!__nonzero) {
    if (_First == _Last) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '0';
  }
  if (__precision > 0) {
    if (_First == _Last) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '.';
  }
  if (__e2 < 0) {
    const int32_t __idx = -__e2 / 16;
    const uint32_t __blocks = __precision / 9 + 1;
    // 0 = don't round up; 1 = round up unconditionally; 2 = round up if odd.
    int __roundUp = 0;
    uint32_t __i = 0;
    if (__blocks <= __MIN_BLOCK_2[__idx]) {
      __i = __blocks;
      if (_Last - _First < static_cast<ptrdiff_t>(__precision)) {
        return { _Last, errc::value_too_large };
      }
      _VSTD::memset(_First, '0', __precision);
      _First += __precision;
    } else if (__i < __MIN_BLOCK_2[__idx]) {
      __i = __MIN_BLOCK_2[__idx];
      if (_Last - _First < static_cast<ptrdiff_t>(9 * __i)) {
        return { _Last, errc::value_too_large };
      }
      _VSTD::memset(_First, '0', 9 * __i);
      _First += 9 * __i;
    }
    for (; __i < __blocks; ++__i) {
      const int32_t __j = __ADDITIONAL_BITS_2 + (-__e2 - 16 * __idx);
      const uint32_t __p = __POW10_OFFSET_2[__idx] + __i - __MIN_BLOCK_2[__idx];
      if (__p >= __POW10_OFFSET_2[__idx + 1]) {
        // If the remaining digits are all 0, then we might as well use memset.
        // No rounding required in this case.
        const uint32_t __fill = __precision - 9 * __i;
        if (_Last - _First < static_cast<ptrdiff_t>(__fill)) {
          return { _Last, errc::value_too_large };
        }
        _VSTD::memset(_First, '0', __fill);
        _First += __fill;
        break;
      }
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      uint32_t __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT_2[__p], __j + 8);
      if (__i < __blocks - 1) {
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
      } else {
        const uint32_t __maximum = __precision - 9 * __i;
        uint32_t __lastDigit = 0;
        for (uint32_t __k = 0; __k < 9 - __maximum; ++__k) {
          __lastDigit = __digits % 10;
          __digits /= 10;
        }
        if (__lastDigit != 5) {
          __roundUp = __lastDigit > 5;
        } else {
          // Is m * 10^(additionalDigits + 1) / 2^(-__e2) integer?
          const int32_t __requiredTwos = -__e2 - static_cast<int32_t>(__precision) - 1;
          const bool __trailingZeros = __requiredTwos <= 0
            || (__requiredTwos < 60 && __multipleOfPowerOf2(__m2, static_cast<uint32_t>(__requiredTwos)));
          __roundUp = __trailingZeros ? 2 : 1;
        }
        if (__maximum > 0) {
          if (_Last - _First < static_cast<ptrdiff_t>(__maximum)) {
            return { _Last, errc::value_too_large };
          }
          __append_c_digits(__maximum, __digits, _First);
          _First += __maximum;
        }
        break;
      }
    }
    if (__roundUp != 0) {
      char* _Round = _First;
      char* _Dot = _Last;
      while (true) {
        if (_Round == _Original_first) {
          _Round[0] = '1';
          if (_Dot != _Last) {
            _Dot[0] = '0';
            _Dot[1] = '.';
          }
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = '0';
          break;
        }
        --_Round;
        const char __c = _Round[0];
        if (__c == '.') {
          _Dot = _Round;
        } else if (__c == '9') {
          _Round[0] = '0';
          __roundUp = 1;
        } else {
          if (__roundUp == 1 || __c % 2 != 0) {
            _Round[0] = __c + 1;
          }
          break;
        }
      }
    }
  } else {
    if (_Last - _First < static_cast<ptrdiff_t>(__precision)) {
      return { _Last, errc::value_too_large };
    }
    _VSTD::memset(_First, '0', __precision);
    _First += __precision;
  }
  return { _First, errc{} };
}

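// Rounding in __d2fixed_buffered_n above follows the usual round-half-to-even convention: __roundUp
// is 0 (keep), 1 (bump unconditionally), or 2 (bump only if the preceding digit is odd, the
// exact-half case), and the carry walk propagates '9' -> '0' backwards, handling the '.', and grows
// the number by one leading digit if the carry falls off the front. For example, with a precision of
// two digits, 0.125 prints as "0.12" (the discarded tail is exactly one half and the kept digit is
// even) while 0.375 prints as "0.38".
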
[[nodiscard]] to_chars_result __d2exp_buffered_n(char* _First, char* const _Last, const double __d,
  uint32_t __precision) {
  char* const _Original_first = _First;

  const uint64_t __bits = __double_to_bits(__d);

  // Case distinction; exit early for the easy cases.
  if (__bits == 0) {
    const int32_t _Total_zero_length = 1 // leading zero
      + static_cast<int32_t>(__precision != 0) // possible decimal point
      + static_cast<int32_t>(__precision) // zeroes after decimal point
      + 4; // "e+00"
    if (_Last - _First < _Total_zero_length) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '0';
    if (__precision > 0) {
      *_First++ = '.';
      _VSTD::memset(_First, '0', __precision);
      _First += __precision;
    }
    _VSTD::memcpy(_First, "e+00", 4);
    _First += 4;
    return { _First, errc{} };
  }

  // Decode __bits into mantissa and exponent.
  const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
  const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

  int32_t __e2;
  uint64_t __m2;
  if (__ieeeExponent == 0) {
    __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = __ieeeMantissa;
  } else {
    __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  }

  const bool __printDecimalPoint = __precision > 0;
  ++__precision;
  uint32_t __digits = 0;
  uint32_t __printedDigits = 0;
  uint32_t __availableDigits = 0;
  int32_t __exp = 0;
  if (__e2 >= -52) {
    const uint32_t __idx = __e2 < 0 ? 0 : __indexForExponent(static_cast<uint32_t>(__e2));
    const uint32_t __p10bits = __pow10BitsForIndex(__idx);
    const int32_t __len = static_cast<int32_t>(__lengthForIndex(__idx));
    for (int32_t __i = __len - 1; __i >= 0; --__i) {
      const uint32_t __j = __p10bits - __e2;
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT[__POW10_OFFSET[__idx] + __i],
        static_cast<int32_t>(__j + 8));
      if (__printedDigits != 0) {
        if (__printedDigits + 9 > __precision) {
          __availableDigits = 9;
          break;
        }
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
        __printedDigits += 9;
      } else if (__digits != 0) {
        __availableDigits = __decimalLength9(__digits);
        __exp = __i * 9 + static_cast<int32_t>(__availableDigits) - 1;
        if (__availableDigits > __precision) {
          break;
        }
        if (__printDecimalPoint) {
          if (_Last - _First < static_cast<ptrdiff_t>(__availableDigits + 1)) {
            return { _Last, errc::value_too_large };
          }
          __append_d_digits(__availableDigits, __digits, _First);
          _First += __availableDigits + 1; // +1 for decimal point
        } else {
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = static_cast<char>('0' + __digits);
        }
        __printedDigits = __availableDigits;
        __availableDigits = 0;
      }
    }
  }

  if (__e2 < 0 && __availableDigits == 0) {
    const int32_t __idx = -__e2 / 16;
    for (int32_t __i = __MIN_BLOCK_2[__idx]; __i < 200; ++__i) {
      const int32_t __j = __ADDITIONAL_BITS_2 + (-__e2 - 16 * __idx);
      const uint32_t __p = __POW10_OFFSET_2[__idx] + static_cast<uint32_t>(__i) - __MIN_BLOCK_2[__idx];
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      __digits = (__p >= __POW10_OFFSET_2[__idx + 1]) ? 0 : __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT_2[__p], __j + 8);
      if (__printedDigits != 0) {
        if (__printedDigits + 9 > __precision) {
          __availableDigits = 9;
          break;
        }
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
        __printedDigits += 9;
      } else if (__digits != 0) {
        __availableDigits = __decimalLength9(__digits);
        __exp = -(__i + 1) * 9 + static_cast<int32_t>(__availableDigits) - 1;
        if (__availableDigits > __precision) {
          break;
        }
        if (__printDecimalPoint) {
          if (_Last - _First < static_cast<ptrdiff_t>(__availableDigits + 1)) {
            return { _Last, errc::value_too_large };
          }
          __append_d_digits(__availableDigits, __digits, _First);
          _First += __availableDigits + 1; // +1 for decimal point
        } else {
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = static_cast<char>('0' + __digits);
        }
        __printedDigits = __availableDigits;
        __availableDigits = 0;
      }
    }
  }

  const uint32_t __maximum = __precision - __printedDigits;
  if (__availableDigits == 0) {
    __digits = 0;
  }
  uint32_t __lastDigit = 0;
  if (__availableDigits > __maximum) {
    for (uint32_t __k = 0; __k < __availableDigits - __maximum; ++__k) {
      __lastDigit = __digits % 10;
      __digits /= 10;
    }
  }
  // 0 = don't round up; 1 = round up unconditionally; 2 = round up if odd.
  int __roundUp = 0;
  if (__lastDigit != 5) {
    __roundUp = __lastDigit > 5;
  } else {
    // Is m * 2^__e2 * 10^(__precision + 1 - __exp) integer?
    // __precision was already increased by 1, so we don't need to write + 1 here.
    const int32_t __rexp = static_cast<int32_t>(__precision) - __exp;
    const int32_t __requiredTwos = -__e2 - __rexp;
    bool __trailingZeros = __requiredTwos <= 0
      || (__requiredTwos < 60 && __multipleOfPowerOf2(__m2, static_cast<uint32_t>(__requiredTwos)));
    if (__rexp < 0) {
      const int32_t __requiredFives = -__rexp;
      __trailingZeros = __trailingZeros && __multipleOfPowerOf5(__m2, static_cast<uint32_t>(__requiredFives));
    }
    __roundUp = __trailingZeros ? 2 : 1;
  }
  if (__printedDigits != 0) {
    if (_Last - _First < static_cast<ptrdiff_t>(__maximum)) {
      return { _Last, errc::value_too_large };
    }
    if (__digits == 0) {
      _VSTD::memset(_First, '0', __maximum);
    } else {
      __append_c_digits(__maximum, __digits, _First);
    }
    _First += __maximum;
  } else {
    if (__printDecimalPoint) {
      if (_Last - _First < static_cast<ptrdiff_t>(__maximum + 1)) {
        return { _Last, errc::value_too_large };
      }
      __append_d_digits(__maximum, __digits, _First);
      _First += __maximum + 1; // +1 for decimal point
    } else {
      if (_First == _Last) {
        return { _Last, errc::value_too_large };
      }
      *_First++ = static_cast<char>('0' + __digits);
    }
  }
  if (__roundUp != 0) {
    char* _Round = _First;
    while (true) {
      if (_Round == _Original_first) {
        _Round[0] = '1';
        ++__exp;
        break;
      }
      --_Round;
      const char __c = _Round[0];
      if (__c == '.') {
        // Keep going.
      } else if (__c == '9') {
        _Round[0] = '0';
        __roundUp = 1;
      } else {
        if (__roundUp == 1 || __c % 2 != 0) {
          _Round[0] = __c + 1;
        }
        break;
      }
    }
  }

  char _Sign_character;

  if (__exp < 0) {
    _Sign_character = '-';
    __exp = -__exp;
  } else {
    _Sign_character = '+';
  }

  const int _Exponent_part_length = __exp >= 100
    ? 5 // "e+NNN"
    : 4; // "e+NN"

  if (_Last - _First < _Exponent_part_length) {
    return { _Last, errc::value_too_large };
  }

  *_First++ = 'e';
  *_First++ = _Sign_character;

  if (__exp >= 100) {
    const int32_t __c = __exp % 10;
    _VSTD::memcpy(_First, __DIGIT_TABLE + 2 * (__exp / 10), 2);
    _First[2] = static_cast<char>('0' + __c);
    _First += 3;
  } else {
    _VSTD::memcpy(_First, __DIGIT_TABLE + 2 * __exp, 2);
    _First += 2;
  }

  return { _First, errc{} };
}

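// Usage note, as a sketch rather than a guarantee: within libc++'s <charconv> implementation these
// two entry points appear to back the precision overloads of std::to_chars for double,
// __d2fixed_buffered_n for chars_format::fixed and __d2exp_buffered_n for chars_format::scientific.
// Both write into [_First, _Last) and report overflow as errc::value_too_large, e.g.:
//   char __buf[64];
//   const to_chars_result __res = __d2fixed_buffered_n(__buf, __buf + sizeof(__buf), 3.14, 2);
//   // on success, __res.ec == errc{} and the characters [__buf, __res.ptr) hold "3.14"
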
_LIBCPP_END_NAMESPACE_STD

// clang-format on