The type of “i” is uint32_t.
/* Excerpt of the RFC 3492 Punycode decoder main loop.
   NOTE(review): the inner base-36 digit-consumption loop that advances
   `in` (and updates w/k) appears to be omitted from this quote, so the
   loop looks non-terminating as shown — assumed present in the full code. */
for (in = b; in < input_length; ++out)
{
/* Re-bias for the next delta (RFC 3492 "adapt"). */
bias = adapt(i - oldi, out + 1, oldi == 0);
/* Overflow guard before advancing the code point n. */
if (i / (out + 1) > maxint - n) return punycode_overflow;
n += i / (out + 1);
/* NOTE(review): this rejects only high surrogates (0xD800-0xDBFF);
   low surrogates (0xDC00-0xDFFF) are not excluded — confirm intent. */
if (n > 0x10FFFF || (n >= 0xD800 && n <= 0xDBFF)) return punycode_bad_input;
/* i becomes the insertion index within the output built so far. */
i %= (out + 1);
if (out >= max_out) return punycode_big_output;
/* Shift the tail right by one and INSERT n at position i (not append). */
memmove(output + i + 1, output + i, (out - i) * sizeof *output);
output[i++] = n;
}
Specifically, I am referring to the “i++” in “output[i++] = n”.
My Xojo code is:
' Excerpt of a Xojo port of the RFC 3492 Punycode decode loop.
' NOTE(review): the inner base-36 digit-consumption loop that advances
' inCount (using w and k) is not shown in this quote — assumed to exist
' in the full code, otherwise the While condition never changes.
Var inCount As UInt32 = basicCount
While (inCount < realLength)
oldi = i
k = BASE
w = 1
Var op1 As UInt32 = outputCount + 1
' Re-bias for the next delta (RFC 3492 "adapt").
bias = AdaptForBias(i - oldi, op1, oldi = 0)
' Overflow guard before advancing the code point n.
if ((i \ op1) > (MaxUInt32 - n)) Then
Raise New OverflowException
End If
n = n + (i \ op1)
' NOTE(review): rejects only &hD800–&hDBFF (high surrogates); low
' surrogates &hDC00–&hDFFF are not excluded — confirm intent.
If n > &h10FFFF Or (n > &hD7FF And n < &hDC00) Then
Raise New IllegalInputException
End If
' i is the INSERTION index within the output built so far.
i = i Mod op1
' NOTE(review): appending here is NOT equivalent to the C
' "output[i++] = n", which inserts at position i and shifts the
' tail right — verify; an insert-at-i is likely needed.
result = result + Encodings.UTF8.Chr(n).ToText
i = i + 1
outputCount = outputCount + 1
Wend
Specifically, is “result = result + Encodings.UTF8.Chr(n).ToText” a correct translation of “output[i++] = n”, given that “result” is equivalent to “output”?
If not, do I need to use a MemoryBlock, store “n” at each position, and then use “Chr(n)” at the end of the loop?