0

I want to prove that the basic multiplication algorithm is correct when applied to binary numbers. I tried to follow the steps described here and here, but I didn't succeed.

The basic algorithm is the multiply function defined below; the other functions are included for completeness (this is F#, but it reads as pseudo code):

(* Converts a 32-bit integer into a 32-element bool array of its bits.
   The byte order follows the machine's endianness (BitConverter.GetBytes);
   within each byte, BitArray enumerates bits least-significant-first. *)
let toBitsArray (x : int) =
    let rawBytes = System.BitConverter.GetBytes x
    let asBitArray = System.Collections.BitArray rawBytes
    // BitArray has no direct conversion; copy into a fresh bool array.
    let result : bool[] = Array.zeroCreate asBitArray.Count
    asBitArray.CopyTo(result, 0)
    result

(* Gives the bits of [x] in little-endian order (least significant bit first),
   regardless of the machine's endianness.
   For example 5 = 00000000000000000000000000000101 (32 bits, MSB first)
   yields the array:
   [|true; false; true; false; false; false; false; false; false; false;
     false; false; false; false; false; false; false; false; false;
     false; false; false; false; false; false; false; false; false;
     false; false; false; false|]
   i.e. reading left to right: bit 0, bit 1, bit 2, ... *)
let toLittleEndianBitsArray (x : int) =
    let bits = toBitsArray x
    if System.BitConverter.IsLittleEndian
    then bits
    else
        // On a big-endian machine GetBytes returns bytes MSB-first, but
        // BitArray already enumerates bits within each byte LSB-first.
        // So only the *byte* order must be reversed; a plain Array.rev
        // would also reverse the bit order inside each byte, which is wrong.
        bits |> Array.chunkBySize 8 |> Array.rev |> Array.concat

(* Shift-and-add multiplication: for every set bit i of [y], add (x <<< i)
   to the running total. Works in int64 so the 32 left-shifts of a 32-bit
   operand do not overflow the intermediate sums. *)
let multiply (x : int) (y : int) =
    let yBits = toLittleEndianBitsArray y
    // Tail-recursive walk over the bits, carrying both the accumulated
    // product and the progressively shifted multiplicand.
    let rec loop (acc : int64) (shifted : int64) (bits : bool list) =
        match bits with
        | [] -> acc
        | bit :: rest ->
            let acc' = if bit then acc + shifted else acc
            loop acc' (shifted <<< 1) rest
    loop 0L (int64 x) (List.ofArray yBits)
sabotero
  • 101
  • 2

0 Answers