/*
 * Copyright (C) 2012, Aaron Lindsay <aaron@aclindsay.com>
 *
 * This file is part of Aedrix.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <kmalloc.h>
#include <mm.h>
#include <list.h>
#include <print.h>
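
/*
 * Each free region of the heap is prefixed with a struct kmalloc_region
 * header. 'list' links the region into kmalloc_free_regions (kept sorted
 * by address so physically adjacent regions can be coalesced), and 'end'
 * points to the last byte of the region. Allocated blocks keep their
 * header immediately before the pointer returned to the caller, so
 * kfree() can recover it.
 */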
struct kmalloc_region {
	struct dlist_node list;
	void *end;
};

#define KMALLOC_MIN_ALIGNMENT 4
#define KMALLOC_REGION_SIZE (sizeof(struct kmalloc_region) + (sizeof(struct kmalloc_region) % KMALLOC_MIN_ALIGNMENT ? KMALLOC_MIN_ALIGNMENT - sizeof(struct kmalloc_region) % KMALLOC_MIN_ALIGNMENT : 0)) //round actual struct size up so it's aligned
#define KMALLOC_MAX_TRIES 3 //Max number of times the heap can be grown for a single request

unsigned int curr_page_power;
struct dlist_node kmalloc_free_regions;

void kmalloc_init() {
	init_list(&kmalloc_free_regions);
	curr_page_power = 0;
}
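
/*
 * Return the first free region large enough to hold 'bytes' of payload
 * plus its region header, or a null pointer if none exists.
 */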
struct kmalloc_region *find_free_region(unsigned int bytes) {
	struct kmalloc_region *it;
	for_each_list(it, &kmalloc_free_regions, struct kmalloc_region, list) {
		unsigned int size = (char *)it->end - (char *)it + 1;
		if (size >= KMALLOC_REGION_SIZE + bytes) {
			return it;
		}
	}
	return (struct kmalloc_region *)0;
}
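
/*
 * Merge two physically adjacent free regions into one. 'second' must
 * begin immediately after 'first' ends.
 */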
struct kmalloc_region *join_regions(struct kmalloc_region *first, struct kmalloc_region *second) {
	first->end = second->end;
	remove(&second->list);
	//TODO free pages here if we get a big enough free space
	return first;
}
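
/*
 * Merge 'region' with its list neighbors if they are physically
 * contiguous with it. Because the free list is sorted by address, only
 * the immediately previous and next entries can be adjacent.
 */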
void coalesce(struct kmalloc_region *region) {
	struct kmalloc_region *prev = 0, *next = 0;

	if (region->list.next != &kmalloc_free_regions)
		next = container(region->list.next, struct kmalloc_region, list);
	if (region->list.prev != &kmalloc_free_regions)
		prev = container(region->list.prev, struct kmalloc_region, list);

	if (next && (char *)next == (char *)region->end + 1) {
		region = join_regions(region, next);
		coalesce(region);
	}
	if (prev && (char *)region == (char *)prev->end + 1) {
		region = join_regions(prev, region);
		coalesce(region);
	}
}
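
/*
 * Insert a free region into kmalloc_free_regions, keeping the list
 * sorted by address, then try to coalesce it with its neighbors.
 */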
void add_region(struct kmalloc_region *region) {
	struct kmalloc_region *it;
	for_each_list(it, &kmalloc_free_regions, struct kmalloc_region, list) {
		if (region < it) {
			insert_before(&it->list, &region->list);
			goto coalesce;
		}
	}
	insert_before(&kmalloc_free_regions, &region->list);
coalesce:
	coalesce(region);
}

void *_kmalloc(unsigned int bytes, unsigned int tries);
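
/*
 * Grow the heap by requesting 2^curr_page_power pages from the page
 * allocator, add the new chunk to the free list, and retry the
 * allocation. curr_page_power is then incremented, so each growth
 * doubles the size of the next request.
 */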
void *grow_and_retry(unsigned int bytes, unsigned int tries) {
	struct page *p;
	if ((p = mm_get_free_pages(curr_page_power))) {
		struct kmalloc_region *region = (struct kmalloc_region *)p->address;
		//TODO don't throw away p, but keep it in a list (allocate a little at the beginning of this chunk for a list element), so we can free pages later if we want to
		region->end = (char *)region + MM_PAGE_SIZE * (1 << curr_page_power) - 1;
		add_region(region);
		curr_page_power++;
		return _kmalloc(bytes, tries);
	}
	return 0;
}
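
/*
 * Allocation worker: round the request up to the minimum alignment,
 * search the free list, split the chosen region if the remainder is
 * large enough to be useful on its own, and fall back to growing the
 * heap (at most KMALLOC_MAX_TRIES times) when nothing fits.
 */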
void *_kmalloc(unsigned int bytes, unsigned int tries) {
	struct kmalloc_region *region;

	if (tries > KMALLOC_MAX_TRIES)
		return 0;

	if (bytes % KMALLOC_MIN_ALIGNMENT)
		bytes += KMALLOC_MIN_ALIGNMENT - (bytes % KMALLOC_MIN_ALIGNMENT);

	if ((region = find_free_region(bytes))) {
		//if there's enough space left over in the region after this
		//allocation for another, split the region.
		if ((unsigned int)((char *)region->end - (char *)region)
				>= 2 * KMALLOC_REGION_SIZE + KMALLOC_MIN_ALIGNMENT + bytes) {
			struct kmalloc_region *leftovers;
			leftovers = (struct kmalloc_region *)((char *)region + KMALLOC_REGION_SIZE + bytes);
			leftovers->end = region->end;
			region->end = (char *)leftovers - 1;
			insert_after(&region->list, &leftovers->list);
		}
		remove(&region->list);
		return (char *)region + KMALLOC_REGION_SIZE;
	} else {
		return grow_and_retry(bytes, tries + 1);
	}
}
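
/*
 * Allocate 'bytes' bytes of kernel heap memory. Returns a pointer
 * aligned to KMALLOC_MIN_ALIGNMENT, or a null pointer on failure.
 */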
void *kmalloc(unsigned int bytes) {
	return _kmalloc(bytes, 1);
}
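
/*
 * Return a block previously allocated with kmalloc() to the free list.
 * The region header sits KMALLOC_REGION_SIZE bytes before 'p'.
 */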
void kfree(void *p) {
	struct kmalloc_region *region;
	if (!p) {
		print("Error: kfree was passed a null pointer. Ignoring. This indicates a possible memory leak.\n");
		return;
	}
	region = (struct kmalloc_region *)((char *)p - KMALLOC_REGION_SIZE);
	add_region(region);
}
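
/*
 * Example usage (a sketch, not part of this file's API surface; it
 * assumes the mm layer and kmalloc_init() have already been brought up
 * during boot, and 'struct foo' is a hypothetical caller-defined type):
 *
 *	struct foo *f = kmalloc(sizeof(struct foo));
 *	if (!f)
 *		return; // allocation failed even after growing the heap
 *	// ... use f ...
 *	kfree(f);
 */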