pinentry-dmenu

a pinentry program based on dmenu
git clone anongit@rnpnr.xyz:pinentry-dmenu.git

secmem.c (9339B)


      1 /* secmem.c  -	memory allocation from a secure heap
      2  *	Copyright (C) 1998, 1999, 2003 Free Software Foundation, Inc.
      3  *      Copyright (C) 2015 g10 Code GmbH
      4  *
      5  * This file is part of GnuPG.
      6  *
      7  * GnuPG is free software; you can redistribute it and/or modify
      8  * it under the terms of the GNU General Public License as published by
      9  * the Free Software Foundation; either version 2 of the License, or
     10  * (at your option) any later version.
     11  *
     12  * GnuPG is distributed in the hope that it will be useful,
     13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
     14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     15  * GNU General Public License for more details.
     16  *
     17  * You should have received a copy of the GNU General Public License
     18  * along with this program; if not, write to the Free Software
     19  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
     20  */
     21 
     22 #include <stdio.h>
     23 #include <stdlib.h>
     24 #include <errno.h>
     25 #include <stdarg.h>
     26 #include <unistd.h>
     27 #if defined(HAVE_MLOCK) || defined(HAVE_MMAP)
     28 # include <sys/mman.h>
     29 # include <sys/types.h>
     30 # include <fcntl.h>
     31 # ifdef USE_CAPABILITIES
     32 #  include <sys/capability.h>
     33 # endif
     34 #endif
     35 #include <string.h>
     36 
     37 #include "memory.h"
     38 
     39 #ifdef ORIGINAL_GPG_VERSION
     40 #include "types.h"
     41 #include "util.h"
     42 #else /* ORIGINAL_GPG_VERSION */
     43 
     44 #include "util.h"
     45 
     46 typedef union {
     47     int a;
     48     short b;
     49     char c[1];
     50     long d;
     51 #ifdef HAVE_U64_TYPEDEF
     52     u64 e;
     53 #endif
     54     float f;
     55     double g;
     56 } PROPERLY_ALIGNED_TYPE;
     57 
     58 #define log_error log_info
     59 #define log_bug log_fatal
     60 
     61 void 
     62 log_info(char *template, ...)
     63 {
     64   va_list args;
     65   
     66   va_start(args, template);
     67   vfprintf(stderr, template, args);
     68   va_end(args);
     69 }
     70 
     71 void 
     72 log_fatal(char *template, ...)
     73 {
     74   va_list args;
     75   
     76   va_start(args, template);
     77   vfprintf(stderr, template, args);
     78   va_end(args);
     79   exit(EXIT_FAILURE);
     80 }
     81 
     82 #endif /* ORIGINAL_GPG_VERSION */
     83 
     84 #if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
     85 #  define MAP_ANONYMOUS MAP_ANON
     86 #endif
     87 
     88 #define DEFAULT_POOLSIZE 16384
     89 
     90 typedef struct memblock_struct MEMBLOCK;
     91 struct memblock_struct {
     92     unsigned size;
     93     union {
     94 	MEMBLOCK *next;
     95 	PROPERLY_ALIGNED_TYPE aligned;
     96     } u;
     97 };
     98 
     99 
    100 
    101 static void  *pool;
    102 static volatile int pool_okay; /* may be checked in an atexit function */
    103 static int   pool_is_mmapped;
    104 static size_t poolsize; /* allocated length */
    105 static size_t poollen;	/* used length */
    106 static MEMBLOCK *unused_blocks;
    107 static unsigned max_alloced;
    108 static unsigned cur_alloced;
    109 static unsigned max_blocks;
    110 static unsigned cur_blocks;
    111 static int disable_secmem;
    112 static int show_warning;
    113 static int no_warning;
    114 static int suspend_warning;
    115 
    116 
    117 static void
    118 print_warn(void)
    119 {
    120     if( !no_warning )
    121 	log_info("Warning: using insecure memory!\n");
    122 }
    123 
    124 
    125 static void
    126 lock_pool( void *p, size_t n )
    127 {
    128 #if defined(USE_CAPABILITIES) && defined(HAVE_MLOCK)
    129     int err;
    130 
    131     cap_set_proc( cap_from_text("cap_ipc_lock+ep") );
    132     err = mlock( p, n );
    133     if( err && errno )
    134 	err = errno;
    135     cap_set_proc( cap_from_text("cap_ipc_lock+p") );
    136 
    137     if( err ) {
    138 	if( errno != EPERM
    139 	  #ifdef EAGAIN  /* OpenBSD returns this */
    140 	    && errno != EAGAIN
    141 	  #endif
    142 	  )
    143 	    log_error("can't lock memory: %s\n", strerror(err));
    144 	show_warning = 1;
    145     }
    146 
    147 #elif defined(HAVE_MLOCK)
    148     uid_t uid;
    149     int err;
    150 
    151     uid = getuid();
    152 
    153 #ifdef HAVE_BROKEN_MLOCK
    154     if( uid ) {
    155 	errno = EPERM;
    156 	err = errno;
    157     }
    158     else {
    159 	err = mlock( p, n );
    160 	if( err && errno )
    161 	    err = errno;
    162     }
    163 #else
    164     err = mlock( p, n );
    165     if( err && errno )
    166 	err = errno;
    167 #endif
    168 
    169     if( uid && !geteuid() ) {
    170 	if( setuid( uid ) || getuid() != geteuid()  )
    171 	    log_fatal("failed to reset uid: %s\n", strerror(errno));
    172     }
    173 
    174     if( err ) {
    175 	if( errno != EPERM
    176 #ifdef EAGAIN  /* OpenBSD returns this */
    177 	    && errno != EAGAIN
    178 #endif
    179 	  )
    180 	    log_error("can't lock memory: %s\n", strerror(err));
    181 	show_warning = 1;
    182     }
    183 
    184 #else
    185     log_info("Please note that you don't have secure memory on this system\n");
    186 #endif
    187 }
    188 
    189 
    190 static void
    191 init_pool( size_t n)
    192 {
    193     size_t pgsize;
    194 
    195     poolsize = n;
    196 
    197     if( disable_secmem )
    198 	log_bug("secure memory is disabled");
    199 
    200 #ifdef HAVE_GETPAGESIZE
    201     pgsize = getpagesize();
    202 #else
    203     pgsize = 4096;
    204 #endif
    205 
    206 #if HAVE_MMAP
    207     poolsize = (poolsize + pgsize -1 ) & ~(pgsize-1);
    208 # ifdef MAP_ANONYMOUS
    209        pool = mmap( 0, poolsize, PROT_READ|PROT_WRITE,
    210 				 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    211 # else /* map /dev/zero instead */
    212     {	int fd;
    213 
    214 	fd = open("/dev/zero", O_RDWR);
    215 	if( fd == -1 ) {
    216 	    log_error("can't open /dev/zero: %s\n", strerror(errno) );
    217 	    pool = (void*)-1;
    218 	}
    219 	else {
    220 	    pool = mmap( 0, poolsize, PROT_READ|PROT_WRITE,
    221 				      MAP_PRIVATE, fd, 0);
    222 	    close (fd);
    223 	}
    224     }
    225 # endif
    226     if( pool == (void*)-1 )
    227 	log_info("can't mmap pool of %u bytes: %s - using malloc\n",
    228 			    (unsigned)poolsize, strerror(errno));
    229     else {
    230 	pool_is_mmapped = 1;
    231 	pool_okay = 1;
    232     }
    233 
    234 #endif
    235     if( !pool_okay ) {
    236 	pool = malloc( poolsize );
    237 	if( !pool )
    238 	    log_fatal("can't allocate memory pool of %u bytes\n",
    239 						       (unsigned)poolsize);
    240 	else
    241 	    pool_okay = 1;
    242     }
    243     lock_pool( pool, poolsize );
    244     poollen = 0;
    245 }
    246 
    247 
    248 /* concatenate unused blocks */
    249 static void
    250 compress_pool(void)
    251 {
    252     /* fixme: we really should do this */
    253 }
    254 
    255 void
    256 secmem_set_flags( unsigned flags )
    257 {
    258     int was_susp = suspend_warning;
    259 
    260     no_warning = flags & 1;
    261     suspend_warning = flags & 2;
    262 
    263     /* and now issue the warning if it is no longer suspended */
    264     if( was_susp && !suspend_warning && show_warning ) {
    265 	show_warning = 0;
    266 	print_warn();
    267     }
    268 }
    269 
    270 unsigned
    271 secmem_get_flags(void)
    272 {
    273     unsigned flags;
    274 
    275     flags  = no_warning      ? 1:0;
    276     flags |= suspend_warning ? 2:0;
    277     return flags;
    278 }
    279 
    280 void
    281 secmem_init( size_t n )
    282 {
    283     if( !n ) {
    284 #ifdef USE_CAPABILITIES
    285 	/* drop all capabilities */
    286 	cap_set_proc( cap_from_text("all-eip") );
    287 
    288 #elif !defined(HAVE_DOSISH_SYSTEM)
    289 	uid_t uid;
    290 
    291 	disable_secmem=1;
    292 	uid = getuid();
    293 	if( uid != geteuid() ) {
    294 	    if( setuid( uid ) || getuid() != geteuid() )
    295 		log_fatal("failed to drop setuid\n" );
    296 	}
    297 #endif
    298     }
    299     else {
    300 	if( n < DEFAULT_POOLSIZE )
    301 	    n = DEFAULT_POOLSIZE;
    302 	if( !pool_okay )
    303 	    init_pool(n);
    304 	else
    305 	    log_error("Oops, secure memory pool already initialized\n");
    306     }
    307 }
    308 
    309 
    310 void *
    311 secmem_malloc( size_t size )
    312 {
    313     MEMBLOCK *mb, *mb2;
    314     int compressed=0;
    315 
    316     if( !pool_okay ) {
    317 	log_info(
    318 	"operation is not possible without initialized secure memory\n");
    319 	log_info("(you may have used the wrong program for this task)\n");
    320 	exit(2);
    321     }
    322     if( show_warning && !suspend_warning ) {
    323 	show_warning = 0;
    324 	print_warn();
    325     }
    326 
    327     /* blocks are always a multiple of 32 */
    328     size += sizeof(MEMBLOCK);
    329     size = ((size + 31) / 32) * 32;
    330 
    331   retry:
    332     /* try to reuse a block from the list of freed (unused) blocks */
    333     for(mb = unused_blocks,mb2=NULL; mb; mb2=mb, mb = mb->u.next )
    334 	if( mb->size >= size ) {
    335 	    if( mb2 )
    336 		mb2->u.next = mb->u.next;
    337 	    else
    338 		unused_blocks = mb->u.next;
    339 	    goto leave;
    340 	}
    341     /* allocate a new block */
    342     if( (poollen + size <= poolsize) ) {
    343 	mb = (void*)((char*)pool + poollen);
    344 	poollen += size;
    345 	mb->size = size;
    346     }
    347     else if( !compressed ) {
    348 	compressed=1;
    349 	compress_pool();
    350 	goto retry;
    351     }
    352     else
    353 	return NULL;
    354 
    355   leave:
    356     cur_alloced += mb->size;
    357     cur_blocks++;
    358     if( cur_alloced > max_alloced )
    359 	max_alloced = cur_alloced;
    360     if( cur_blocks > max_blocks )
    361 	max_blocks = cur_blocks;
    362 
    363     memset (&mb->u.aligned.c, 0,
    364 	    size - (size_t) &((struct memblock_struct *) 0)->u.aligned.c);
    365 
    366     return &mb->u.aligned.c;
    367 }
    368 
    369 
    370 void *
    371 secmem_realloc( void *p, size_t newsize )
    372 {
    373     MEMBLOCK *mb;
    374     size_t size;
    375     void *a;
    376 
    377     if (! p)
    378       return secmem_malloc(newsize);
    379 
    380     mb = (MEMBLOCK*)((char*)p - ((size_t) &((MEMBLOCK*)0)->u.aligned.c));
    381     size = mb->size;
    382     if( newsize < size )
    383 	return p; /* it is easier not to shrink the memory */
    384     a = secmem_malloc( newsize );
    385     memcpy(a, p, size);
    386     memset((char*)a+size, 0, newsize-size);
    387     secmem_free(p);
    388     return a;
    389 }
    390 
    391 
    392 void
    393 secmem_free( void *a )
    394 {
    395     MEMBLOCK *mb;
    396     size_t size;
    397 
    398     if( !a )
    399 	return;
    400 
    401     mb = (MEMBLOCK*)((char*)a - ((size_t) &((MEMBLOCK*)0)->u.aligned.c));
    402     size = mb->size;
    403     /* This does not make much sense: probably this memory is held in the
    404      * cache. We do it anyway: */
    405     wipememory2(mb, 0xff, size );
    406     wipememory2(mb, 0xaa, size );
    407     wipememory2(mb, 0x55, size );
    408     wipememory2(mb, 0x00, size );
    409     mb->size = size;
    410     mb->u.next = unused_blocks;
    411     unused_blocks = mb;
    412     cur_blocks--;
    413     cur_alloced -= size;
    414 }
    415 
    416 int
    417 m_is_secure( const void *p )
    418 {
    419     return p >= pool && p < (void*)((char*)pool+poolsize);
    420 }
    421 
    422 void
    423 secmem_term()
    424 {
    425     if( !pool_okay )
    426 	return;
    427 
    428     wipememory2( pool, 0xff, poolsize);
    429     wipememory2( pool, 0xaa, poolsize);
    430     wipememory2( pool, 0x55, poolsize);
    431     wipememory2( pool, 0x00, poolsize);
    432 #if HAVE_MMAP
    433     if( pool_is_mmapped )
    434 	munmap( pool, poolsize );
    435 #endif
    436     pool = NULL;
    437     pool_okay = 0;
    438     poolsize=0;
    439     poollen=0;
    440     unused_blocks=NULL;
    441 }
    442 
    443 
    444 void
    445 secmem_dump_stats()
    446 {
    447     if( disable_secmem )
    448 	return;
    449     fprintf(stderr,
    450 		"secmem usage: %u/%u bytes in %u/%u blocks of pool %lu/%lu\n",
    451 		cur_alloced, max_alloced, cur_blocks, max_blocks,
    452 		(ulong)poollen, (ulong)poolsize );
    453 }
    454 
    455 
    456 size_t 
    457 secmem_get_max_size (void)
    458 {
    459   return poolsize;
    460 }
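
For reference, a minimal caller sketch (not part of the repository) showing how the allocator above is typically driven. It assumes the prototypes for secmem_init(), secmem_malloc(), m_is_secure(), secmem_free() and secmem_term() are exported through the "memory.h" header this file includes; the pool size, variable names and example string are illustrative only.

#include <stdio.h>
#include <string.h>

#include "memory.h"

int
main(void)
{
    char *pw;

    /* Reserve and mlock() the pool.  Passing 0 would instead disable
     * secure memory (and drop setuid privileges); sizes below 16384
     * (DEFAULT_POOLSIZE above) are rounded up to that minimum. */
    secmem_init(16384);

    /* secmem_malloc() returns a zeroed block carved out of the locked
     * pool, or NULL once the pool is exhausted. */
    pw = secmem_malloc(64);
    if (!pw)
        return 1;

    strcpy(pw, "hunter2");              /* hypothetical secret */
    printf("inside pool: %d\n", m_is_secure(pw));

    /* secmem_free() wipes the block and puts it on the unused list;
     * secmem_term() wipes and unmaps the whole pool at shutdown. */
    secmem_free(pw);
    secmem_term();
    return 0;
}

In an unprivileged process the mlock() call in lock_pool() may fail with EPERM; the allocator then prints the "using insecure memory" warning once on the first secmem_malloc() (unless suppressed via secmem_set_flags(1)) and continues with an unlocked pool.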