[SCM] Samba Shared Repository - branch master updated - 6ba693b5de0ecf53638468b56ce3b93d2e33c919

Jelmer Vernooij jelmer at samba.org
Sun Oct 19 12:54:30 GMT 2008


The branch, master has been updated
       via  6ba693b5de0ecf53638468b56ce3b93d2e33c919 (commit)
       via  1ba9867e7ae2f517e47edd67d7d41e48b6f55e5c (commit)
       via  0b3cf400db35018000ceb84cdc20b55bc6c77aa3 (commit)
       via  7e088ec35424fc2d6c64b8699d9d22b3215275e2 (commit)
       via  e09c0c1185b4a4ad1d28023549074875028f3e55 (commit)
       via  33032d591f8e39edae0ce4b35ca1b6e25f04a04b (commit)
      from  085e7b0b6cc882b31c11b6d6ea7834235588e1ed (commit)

http://gitweb.samba.org/?p=samba.git;a=shortlog;h=master


- Log -----------------------------------------------------------------
commit 6ba693b5de0ecf53638468b56ce3b93d2e33c919
Author: Jelmer Vernooij <jelmer at samba.org>
Date:   Sun Oct 19 14:54:16 2008 +0200

    Make sure crypt libs get included.

commit 1ba9867e7ae2f517e47edd67d7d41e48b6f55e5c
Author: Jelmer Vernooij <jelmer at samba.org>
Date:   Sun Oct 19 14:53:55 2008 +0200

    Fix typo.

commit 0b3cf400db35018000ceb84cdc20b55bc6c77aa3
Author: Jelmer Vernooij <jelmer at samba.org>
Date:   Sun Oct 19 14:50:25 2008 +0200

    Fix formatting in selftest help output.

commit 7e088ec35424fc2d6c64b8699d9d22b3215275e2
Author: Jelmer Vernooij <jelmer at samba.org>
Date:   Sun Oct 19 14:41:10 2008 +0200

    Fix comment after paths have changed.

commit e09c0c1185b4a4ad1d28023549074875028f3e55
Merge: 33032d591f8e39edae0ce4b35ca1b6e25f04a04b 085e7b0b6cc882b31c11b6d6ea7834235588e1ed
Author: Jelmer Vernooij <jelmer at samba.org>
Date:   Sun Oct 19 14:27:08 2008 +0200

    Merge branch 'master' of ssh://git.samba.org/data/git/samba into crypt

commit 33032d591f8e39edae0ce4b35ca1b6e25f04a04b
Author: Jelmer Vernooij <jelmer at samba.org>
Date:   Sat Oct 18 17:49:41 2008 +0200

    Move ufc to libreplace.

-----------------------------------------------------------------------

Summary of changes:
 lib/replace/crypt.c       |  770 +++++++++++++++++++++++++++++++++++++++++++++
 lib/replace/crypt.m4      |    6 +
 lib/replace/libreplace.m4 |    1 +
 lib/replace/replace.h     |    7 +
 lib/replace/samba.m4      |    2 +-
 selftest/output/plain.pm  |    2 +-
 selftest/selftest.pl      |    8 +-
 source3/Makefile.in       |    2 +-
 source3/configure.in      |    8 +-
 source3/include/proto.h   |    4 -
 source3/lib/ufc.c         |  770 ---------------------------------------------
 source4/selftest/tests.sh |    4 +-
 12 files changed, 795 insertions(+), 789 deletions(-)
 create mode 100644 lib/replace/crypt.c
 create mode 100644 lib/replace/crypt.m4
 delete mode 100644 source3/lib/ufc.c
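
The practical effect of the "Move ufc to libreplace" commit is that the UFC-based crypt(3) fallback now lives in lib/replace/crypt.c, guarded by #ifndef HAVE_CRYPT, instead of source3/lib/ufc.c. Below is a minimal sketch of the calling pattern this enables; check_password() is illustrative only (not part of the changeset), and it assumes replace.h supplies a crypt() prototype when the system lacks one.

#include "replace.h"
#include <string.h>

/* Illustrative only: verify a plaintext password against a crypt()-style
 * hash.  crypt() resolves to the platform implementation when configure
 * defines HAVE_CRYPT, and to the UFC fallback in lib/replace/crypt.c
 * otherwise. */
static int check_password(const char *plain, const char *stored)
{
	const char *computed = crypt(plain, stored);
	return computed != NULL && strcmp(computed, stored) == 0;
}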


Changeset truncated at 500 lines:

diff --git a/lib/replace/crypt.c b/lib/replace/crypt.c
new file mode 100644
index 0000000..22341ce
--- /dev/null
+++ b/lib/replace/crypt.c
@@ -0,0 +1,770 @@
+/*
+   This bit of code was derived from the UFC-crypt package which
+   carries the following copyright 
+   
+   Modified for use by Samba by Andrew Tridgell, October 1994
+
+   Note that this routine is only faster on some machines. Under Linux 1.1.51 
+   libc 4.5.26 I actually found this routine to be slightly slower.
+
+   Under SunOS I found a huge speedup by using these routines 
+   (a factor of 20 or so)
+
+   Warning: I've had a report from Steve Kennedy <steve at gbnet.org>
+   that this crypt routine may sometimes get the wrong answer. Only
+   use UFC_CRYPT if you really need it.
+
+*/
+
+#include "replace.h"
+
+#ifndef HAVE_CRYPT
+
+/*
+ * UFC-crypt: ultra fast crypt(3) implementation
+ *
+ * Copyright (C) 1991-1998, Free Software Foundation, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * @(#)crypt_util.c	2.31 02/08/92
+ *
+ * Support routines
+ *
+ */
+
+
+#ifndef long32
+#define long32 int32
+#endif
+
+#ifndef long64
+#define long64 int64
+#endif
+
+#ifndef ufc_long
+#define ufc_long unsigned
+#endif
+
+#ifndef _UFC_64_
+#define _UFC_32_
+#endif
+
+/* 
+ * Permutation done once on the 56 bit 
+ *  key derived from the original 8 byte ASCII key.
+ */
+static int pc1[56] = { 
+  57, 49, 41, 33, 25, 17,  9,  1, 58, 50, 42, 34, 26, 18,
+  10,  2, 59, 51, 43, 35, 27, 19, 11,  3, 60, 52, 44, 36,
+  63, 55, 47, 39, 31, 23, 15,  7, 62, 54, 46, 38, 30, 22,
+  14,  6, 61, 53, 45, 37, 29, 21, 13,  5, 28, 20, 12,  4
+};
+
+/*
+ * How much to rotate each 28 bit half of the pc1 permuted
+ *  56 bit key before using pc2 to give the i' key
+ */
+static int rots[16] = { 
+  1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 
+};
+
+/* 
+ * Permutation giving the key 
+ * of the i' DES round 
+ */
+static int pc2[48] = { 
+  14, 17, 11, 24,  1,  5,  3, 28, 15,  6, 21, 10,
+  23, 19, 12,  4, 26,  8, 16,  7, 27, 20, 13,  2,
+  41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48,
+  44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32
+};
+
+/*
+ * The E expansion table which selects
+ * bits from the 32 bit intermediate result.
+ */
+static int esel[48] = { 
+  32,  1,  2,  3,  4,  5,  4,  5,  6,  7,  8,  9,
+   8,  9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17,
+  16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25,
+  24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32,  1
+};
+static int e_inverse[64];
+
+/* 
+ * Permutation done on the 
+ * result of sbox lookups 
+ */
+static int perm32[32] = {
+  16,  7, 20, 21, 29, 12, 28, 17,  1, 15, 23, 26,  5, 18, 31, 10,
+  2,   8, 24, 14, 32, 27,  3,  9, 19, 13, 30,  6, 22, 11,  4, 25
+};
+
+/* 
+ * The sboxes
+ */
+static int sbox[8][4][16]= {
+        { { 14,  4, 13,  1,  2, 15, 11,  8,  3, 10,  6, 12,  5,  9,  0,  7 },
+          {  0, 15,  7,  4, 14,  2, 13,  1, 10,  6, 12, 11,  9,  5,  3,  8 },
+          {  4,  1, 14,  8, 13,  6,  2, 11, 15, 12,  9,  7,  3, 10,  5,  0 },
+          { 15, 12,  8,  2,  4,  9,  1,  7,  5, 11,  3, 14, 10,  0,  6, 13 }
+        },
+
+        { { 15,  1,  8, 14,  6, 11,  3,  4,  9,  7,  2, 13, 12,  0,  5, 10 },
+          {  3, 13,  4,  7, 15,  2,  8, 14, 12,  0,  1, 10,  6,  9, 11,  5 },
+          {  0, 14,  7, 11, 10,  4, 13,  1,  5,  8, 12,  6,  9,  3,  2, 15 },
+          { 13,  8, 10,  1,  3, 15,  4,  2, 11,  6,  7, 12,  0,  5, 14,  9 }
+        },
+
+        { { 10,  0,  9, 14,  6,  3, 15,  5,  1, 13, 12,  7, 11,  4,  2,  8 },
+          { 13,  7,  0,  9,  3,  4,  6, 10,  2,  8,  5, 14, 12, 11, 15,  1 },
+          { 13,  6,  4,  9,  8, 15,  3,  0, 11,  1,  2, 12,  5, 10, 14,  7 },
+          {  1, 10, 13,  0,  6,  9,  8,  7,  4, 15, 14,  3, 11,  5,  2, 12 }
+        },
+
+        { {  7, 13, 14,  3,  0,  6,  9, 10,  1,  2,  8,  5, 11, 12,  4, 15 },
+          { 13,  8, 11,  5,  6, 15,  0,  3,  4,  7,  2, 12,  1, 10, 14,  9 },
+          { 10,  6,  9,  0, 12, 11,  7, 13, 15,  1,  3, 14,  5,  2,  8,  4 },
+          {  3, 15,  0,  6, 10,  1, 13,  8,  9,  4,  5, 11, 12,  7,  2, 14 }
+        },
+
+        { {  2, 12,  4,  1,  7, 10, 11,  6,  8,  5,  3, 15, 13,  0, 14,  9 },
+          { 14, 11,  2, 12,  4,  7, 13,  1,  5,  0, 15, 10,  3,  9,  8,  6 },
+          {  4,  2,  1, 11, 10, 13,  7,  8, 15,  9, 12,  5,  6,  3,  0, 14 },
+          { 11,  8, 12,  7,  1, 14,  2, 13,  6, 15,  0,  9, 10,  4,  5,  3 }
+        },
+
+        { { 12,  1, 10, 15,  9,  2,  6,  8,  0, 13,  3,  4, 14,  7,  5, 11 },
+          { 10, 15,  4,  2,  7, 12,  9,  5,  6,  1, 13, 14,  0, 11,  3,  8 },
+          {  9, 14, 15,  5,  2,  8, 12,  3,  7,  0,  4, 10,  1, 13, 11,  6 },
+          {  4,  3,  2, 12,  9,  5, 15, 10, 11, 14,  1,  7,  6,  0,  8, 13 }
+        },
+
+        { {  4, 11,  2, 14, 15,  0,  8, 13,  3, 12,  9,  7,  5, 10,  6,  1 },
+          { 13,  0, 11,  7,  4,  9,  1, 10, 14,  3,  5, 12,  2, 15,  8,  6 },
+          {  1,  4, 11, 13, 12,  3,  7, 14, 10, 15,  6,  8,  0,  5,  9,  2 },
+          {  6, 11, 13,  8,  1,  4, 10,  7,  9,  5,  0, 15, 14,  2,  3, 12 }
+        },
+
+        { { 13,  2,  8,  4,  6, 15, 11,  1, 10,  9,  3, 14,  5,  0, 12,  7 },
+          {  1, 15, 13,  8, 10,  3,  7,  4, 12,  5,  6, 11,  0, 14,  9,  2 },
+          {  7, 11,  4,  1,  9, 12, 14,  2,  0,  6, 10, 13, 15,  3,  5,  8 },
+          {  2,  1, 14,  7,  4, 10,  8, 13, 15, 12,  9,  0,  3,  5,  6, 11 }
+        }
+};
+
+/* 
+ * This is the final 
+ * permutation matrix
+ */
+static int final_perm[64] = {
+  40,  8, 48, 16, 56, 24, 64, 32, 39,  7, 47, 15, 55, 23, 63, 31,
+  38,  6, 46, 14, 54, 22, 62, 30, 37,  5, 45, 13, 53, 21, 61, 29,
+  36,  4, 44, 12, 52, 20, 60, 28, 35,  3, 43, 11, 51, 19, 59, 27,
+  34,  2, 42, 10, 50, 18, 58, 26, 33,  1, 41,  9, 49, 17, 57, 25
+};
+
+/* 
+ * The 16 DES keys in BITMASK format 
+ */
+#ifdef _UFC_32_
+long32 _ufc_keytab[16][2];
+#endif
+
+#ifdef _UFC_64_
+long64 _ufc_keytab[16];
+#endif
+
+
+#define ascii_to_bin(c) ((c)>='a'?(c-59):(c)>='A'?((c)-53):(c)-'.')
+#define bin_to_ascii(c) ((c)>=38?((c)-38+'a'):(c)>=12?((c)-12+'A'):(c)+'.')
+
+/* Macro to set a bit (0..23) */
+#define BITMASK(i) ( (1<<(11-(i)%12+3)) << ((i)<12?16:0) )
+
+/*
+ * sb arrays:
+ *
+ * Workhorses of the inner loop of the DES implementation.
+ * They do sbox lookup, shifting of this  value, 32 bit
+ * permutation and E permutation for the next round.
+ *
+ * Kept in 'BITMASK' format.
+ */
+
+#ifdef _UFC_32_
+long32 _ufc_sb0[8192], _ufc_sb1[8192], _ufc_sb2[8192], _ufc_sb3[8192];
+static long32 *sb[4] = {_ufc_sb0, _ufc_sb1, _ufc_sb2, _ufc_sb3}; 
+#endif
+
+#ifdef _UFC_64_
+long64 _ufc_sb0[4096], _ufc_sb1[4096], _ufc_sb2[4096], _ufc_sb3[4096];
+static long64 *sb[4] = {_ufc_sb0, _ufc_sb1, _ufc_sb2, _ufc_sb3}; 
+#endif
+
+/* 
+ * eperm32tab: do 32 bit permutation and E selection
+ *
+ * The first index is the byte number in the 32 bit value to be permuted
+ *  -  second  -   is the value of this byte
+ *  -  third   -   selects the two 32 bit values
+ *
+ * The table is used and generated internally in init_des to speed it up
+ */
+static ufc_long eperm32tab[4][256][2];
+
+/* 
+ * do_pc1: perform pc1 permutation in the key schedule generation.
+ *
+ * The first   index is the byte number in the 8 byte ASCII key
+ *  -  second    -      -    the two 28 bit halves of the result
+ *  -  third     -   selects the 7 bits actually used of each byte
+ *
+ * The result is kept with 28 bit per 32 bit with the 4 most significant
+ * bits zero.
+ */
+static ufc_long do_pc1[8][2][128];
+
+/*
+ * do_pc2: perform pc2 permutation in the key schedule generation.
+ *
+ * The first   index is the septet number in the two 28 bit intermediate values
+ *  -  second    -    -  -  septet values
+ *
+ * Knowledge of the structure of the pc2 permutation is used.
+ *
+ * The result is kept with 28 bit per 32 bit with the 4 most significant
+ * bits zero.
+ */
+static ufc_long do_pc2[8][128];
+
+/*
+ * efp: undo an extra e selection and do final
+ *      permutation giving the DES result.
+ * 
+ *      Invoked 6 bit a time on two 48 bit values
+ *      giving two 32 bit longs.
+ */
+static ufc_long efp[16][64][2];
+
+static unsigned char bytemask[8]  = {
+  0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01
+};
+
+static ufc_long longmask[32] = {
+  0x80000000, 0x40000000, 0x20000000, 0x10000000,
+  0x08000000, 0x04000000, 0x02000000, 0x01000000,
+  0x00800000, 0x00400000, 0x00200000, 0x00100000,
+  0x00080000, 0x00040000, 0x00020000, 0x00010000,
+  0x00008000, 0x00004000, 0x00002000, 0x00001000,
+  0x00000800, 0x00000400, 0x00000200, 0x00000100,
+  0x00000080, 0x00000040, 0x00000020, 0x00000010,
+  0x00000008, 0x00000004, 0x00000002, 0x00000001
+};
+
+
+/*
+ * Silly rewrite of 'bzero'. I do so
+ * because some machines don't have
+ * bzero and some don't have memset.
+ */
+
+static void clearmem(char *start, int cnt)
+  { while(cnt--)
+      *start++ = '\0';
+  }
+
+static int initialized = 0;
+
+/* lookup a 6 bit value in sbox */
+
+#define s_lookup(i,s) sbox[(i)][(((s)>>4) & 0x2)|((s) & 0x1)][((s)>>1) & 0xf];
+
+/*
+ * Initialize unit - may be invoked directly
+ * by fcrypt users.
+ */
+
+static void ufc_init_des(void)
+  { int comes_from_bit;
+    int bit, sg;
+    ufc_long j;
+    ufc_long mask1, mask2;
+
+    /*
+     * Create the do_pc1 table used
+     * to affect pc1 permutation
+     * when generating keys
+     */
+    for(bit = 0; bit < 56; bit++) {
+      comes_from_bit  = pc1[bit] - 1;
+      mask1 = bytemask[comes_from_bit % 8 + 1];
+      mask2 = longmask[bit % 28 + 4];
+      for(j = 0; j < 128; j++) {
+	if(j & mask1) 
+	  do_pc1[comes_from_bit / 8][bit / 28][j] |= mask2;
+      }
+    }
+
+    /*
+     * Create the do_pc2 table used
+     * to affect pc2 permutation when
+     * generating keys
+     */
+    for(bit = 0; bit < 48; bit++) {
+      comes_from_bit  = pc2[bit] - 1;
+      mask1 = bytemask[comes_from_bit % 7 + 1];
+      mask2 = BITMASK(bit % 24);
+      for(j = 0; j < 128; j++) {
+	if(j & mask1)
+	  do_pc2[comes_from_bit / 7][j] |= mask2;
+      }
+    }
+
+    /* 
+     * Now generate the table used to do combined
+     * 32 bit permutation and e expansion
+     *
+     * We use it because we have to permute 16384 32 bit
+     * longs into 48 bit in order to initialize sb.
+     *
+     * Looping 48 rounds per permutation becomes 
+     * just too slow...
+     *
+     */
+
+    clearmem((char*)eperm32tab, sizeof(eperm32tab));
+
+    for(bit = 0; bit < 48; bit++) {
+      ufc_long inner_mask1,comes_from;
+	
+      comes_from = perm32[esel[bit]-1]-1;
+      inner_mask1      = bytemask[comes_from % 8];
+	
+      for(j = 256; j--;) {
+	if(j & inner_mask1)
+	  eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK(bit % 24);
+      }
+    }
+    
+    /* 
+     * Create the sb tables:
+     *
+     * For each 12 bit segment of an 48 bit intermediate
+     * result, the sb table precomputes the two 4 bit
+     * values of the sbox lookups done with the two 6
+     * bit halves, shifts them to their proper place,
+     * sends them through perm32 and finally E expands
+     * them so that they are ready for the next
+     * DES round.
+     *
+     */
+    for(sg = 0; sg < 4; sg++) {
+      int j1, j2;
+      int s1, s2;
+    
+      for(j1 = 0; j1 < 64; j1++) {
+	s1 = s_lookup(2 * sg, j1);
+	for(j2 = 0; j2 < 64; j2++) {
+	  ufc_long to_permute, inx;
+    
+	  s2         = s_lookup(2 * sg + 1, j2);
+	  to_permute = ((s1 << 4)  | s2) << (24 - 8 * sg);
+
+#ifdef _UFC_32_
+	  inx = ((j1 << 6)  | j2) << 1;
+	  sb[sg][inx  ]  = eperm32tab[0][(to_permute >> 24) & 0xff][0];
+	  sb[sg][inx+1]  = eperm32tab[0][(to_permute >> 24) & 0xff][1];
+	  sb[sg][inx  ] |= eperm32tab[1][(to_permute >> 16) & 0xff][0];
+	  sb[sg][inx+1] |= eperm32tab[1][(to_permute >> 16) & 0xff][1];
+  	  sb[sg][inx  ] |= eperm32tab[2][(to_permute >>  8) & 0xff][0];
+	  sb[sg][inx+1] |= eperm32tab[2][(to_permute >>  8) & 0xff][1];
+	  sb[sg][inx  ] |= eperm32tab[3][(to_permute)       & 0xff][0];
+	  sb[sg][inx+1] |= eperm32tab[3][(to_permute)       & 0xff][1];
+#endif
+#ifdef _UFC_64_
+	  inx = ((j1 << 6)  | j2);
+	  sb[sg][inx]  = 
+	    ((long64)eperm32tab[0][(to_permute >> 24) & 0xff][0] << 32) |
+	     (long64)eperm32tab[0][(to_permute >> 24) & 0xff][1];
+	  sb[sg][inx] |=
+	    ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
+	     (long64)eperm32tab[1][(to_permute >> 16) & 0xff][1];
+  	  sb[sg][inx] |= 
+	    ((long64)eperm32tab[2][(to_permute >>  8) & 0xff][0] << 32) |
+	     (long64)eperm32tab[2][(to_permute >>  8) & 0xff][1];
+	  sb[sg][inx] |=
+	    ((long64)eperm32tab[3][(to_permute)       & 0xff][0] << 32) |
+	     (long64)eperm32tab[3][(to_permute)       & 0xff][1];
+#endif
+	}
+      }
+    }  
+
+    /* 
+     * Create an inverse matrix for esel telling
+     * where to plug out bits if undoing it
+     */
+    for(bit=48; bit--;) {
+      e_inverse[esel[bit] - 1     ] = bit;
+      e_inverse[esel[bit] - 1 + 32] = bit + 48;
+    }
+
+    /* 
+     * create efp: the matrix used to
+     * undo the E expansion and effect final permutation
+     */
+    clearmem((char*)efp, sizeof efp);
+    for(bit = 0; bit < 64; bit++) {
+      int o_bit, o_long;
+      ufc_long word_value, inner_mask1, inner_mask2;
+      int comes_from_f_bit, comes_from_e_bit;
+      int comes_from_word, bit_within_word;
+
+      /* See where bit i belongs in the two 32 bit long's */
+      o_long = bit / 32; /* 0..1  */
+      o_bit  = bit % 32; /* 0..31 */
+
+      /* 
+       * And find a bit in the e permutated value setting this bit.
+       *
+       * Note: the e selection may have selected the same bit several
+       * times. By the initialization of e_inverse, we only look
+       * for one specific instance.
+       */
+      comes_from_f_bit = final_perm[bit] - 1;         /* 0..63 */
+      comes_from_e_bit = e_inverse[comes_from_f_bit]; /* 0..95 */
+      comes_from_word  = comes_from_e_bit / 6;        /* 0..15 */
+      bit_within_word  = comes_from_e_bit % 6;        /* 0..5  */
+
+      inner_mask1 = longmask[bit_within_word + 26];
+      inner_mask2 = longmask[o_bit];
+
+      for(word_value = 64; word_value--;) {
+	if(word_value & inner_mask1)
+	  efp[comes_from_word][word_value][o_long] |= inner_mask2;
+      }
+    }
+    initialized++;
+  }
+
+/* 
+ * Process the elements of the sb table permuting the
+ * bits swapped in the expansion by the current salt.
+ */
+
+#ifdef _UFC_32_
+static void shuffle_sb(long32 *k, ufc_long saltbits)
+  { ufc_long j;
+    long32 x;
+    for(j=4096; j--;) {
+      x = (k[0] ^ k[1]) & (long32)saltbits;
+      *k++ ^= x;
+      *k++ ^= x;
+    }
+  }
+#endif
+
+#ifdef _UFC_64_
+static void shuffle_sb(long64 *k, ufc_long saltbits)
+  { ufc_long j;
+    long64 x;
+    for(j=4096; j--;) {
+      x = ((*k >> 32) ^ *k) & (long64)saltbits;
+      *k++ ^= (x << 32) | x;
+    }
+  }
+#endif
+
+/* 
+ * Setup the unit for a new salt
+ * Hopefully we'll not see a new salt in each crypt call.
+ */


-- 
Samba Shared Repository

