reiserfs: cleanup, reformat comments to normal kernel style

This patch reformats comments in the reiserfs code to fit in 80 columns and
to follow the style rules.

There is no functional change but it helps make my eyes bleed less.
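
To illustrate, the most common transformation (taken verbatim from the
first bitmap.c hunk below) turns comments that open with text on the
/* line, like:

	/* It is in the bitmap block number equal to the block
	 * number divided by the number of bits in a block. */

into the usual kernel block style:

	/*
	 * It is in the bitmap block number equal to the block
	 * number divided by the number of bits in a block.
	 */

C99 // comments and comments trailing code at the end of long lines
are likewise converted to /* ... */ blocks placed above the code they
describe.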

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: Jan Kara <jack@suse.cz>
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index c3de650..70daba6 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -50,8 +50,10 @@
 				   unsigned int *bmap_nr,
 				   unsigned int *offset)
 {
-	/* It is in the bitmap block number equal to the block
-	 * number divided by the number of bits in a block. */
+	/*
+	 * It is in the bitmap block number equal to the block
+	 * number divided by the number of bits in a block.
+	 */
 	*bmap_nr = block >> (s->s_blocksize_bits + 3);
 	/* Within that bitmap block it is located at bit offset *offset. */
 	*offset = block & ((s->s_blocksize << 3) - 1);
@@ -71,8 +73,10 @@
 
 	get_bit_address(s, block, &bmap, &offset);
 
-	/* Old format filesystem? Unlikely, but the bitmaps are all up front so
-	 * we need to account for it. */
+	/*
+	 * Old format filesystem? Unlikely, but the bitmaps are all
+	 * up front so we need to account for it.
+	 */
 	if (unlikely(test_bit(REISERFS_OLD_FORMAT,
 			      &(REISERFS_SB(s)->s_properties)))) {
 		b_blocknr_t bmap1 = REISERFS_SB(s)->s_sbh->b_blocknr + 1;
@@ -108,8 +112,11 @@
 	return 1;
 }
 
-/* searches in journal structures for a given block number (bmap, off). If block
-   is found in reiserfs journal it suggests next free block candidate to test. */
+/*
+ * Searches in journal structures for a given block number (bmap, off).
+ * If block is found in reiserfs journal it suggests next free block
+ * candidate to test.
+ */
 static inline int is_block_in_journal(struct super_block *s, unsigned int bmap,
 				      int off, int *next)
 {
@@ -120,7 +127,7 @@
 			*next = tmp;
 			PROC_INFO_INC(s, scan_bitmap.in_journal_hint);
 		} else {
-			(*next) = off + 1;	/* inc offset to avoid looping. */
+			(*next) = off + 1;  /* inc offset to avoid looping. */
 			PROC_INFO_INC(s, scan_bitmap.in_journal_nohint);
 		}
 		PROC_INFO_INC(s, scan_bitmap.retry);
@@ -129,8 +136,10 @@
 	return 0;
 }
 
-/* it searches for a window of zero bits with given minimum and maximum lengths in one bitmap
- * block; */
+/*
+ * Searches for a window of zero bits with given minimum and maximum
+ * lengths in one bitmap block
+ */
 static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
 			     unsigned int bmap_n, int *beg, int boundary,
 			     int min, int max, int unfm)
@@ -146,10 +155,6 @@
 	RFALSE(bmap_n >= reiserfs_bmap_count(s), "Bitmap %u is out of "
 	       "range (0..%u)", bmap_n, reiserfs_bmap_count(s) - 1);
 	PROC_INFO_INC(s, scan_bitmap.bmap);
-/* this is unclear and lacks comments, explain how journal bitmaps
-   work here for the reader.  Convey a sense of the design here. What
-   is a window? */
-/* - I mean `a window of zero bits' as in description of this function - Zam. */
 
 	if (!bi) {
 		reiserfs_error(s, "jdm-4055", "NULL bitmap info pointer "
@@ -165,15 +170,18 @@
 	      cont:
 		if (bi->free_count < min) {
 			brelse(bh);
-			return 0;	// No free blocks in this bitmap
+			return 0;	/* No free blocks in this bitmap */
 		}
 
 		/* search for a first zero bit -- beginning of a window */
 		*beg = reiserfs_find_next_zero_le_bit
 		    ((unsigned long *)(bh->b_data), boundary, *beg);
 
-		if (*beg + min > boundary) {	/* search for a zero bit fails or the rest of bitmap block
-						 * cannot contain a zero window of minimum size */
+		/*
+		 * search for a zero bit fails or the rest of bitmap block
+		 * cannot contain a zero window of minimum size
+		 */
+		if (*beg + min > boundary) {
 			brelse(bh);
 			return 0;
 		}
@@ -187,37 +195,63 @@
 				next = end;
 				break;
 			}
-			/* finding the other end of zero bit window requires looking into journal structures (in
-			 * case of searching for free blocks for unformatted nodes) */
+
+			/*
+			 * finding the other end of zero bit window requires
+			 * looking into journal structures (in case of
+			 * searching for free blocks for unformatted nodes)
+			 */
 			if (unfm && is_block_in_journal(s, bmap_n, end, &next))
 				break;
 		}
 
-		/* now (*beg) points to beginning of zero bits window,
-		 * (end) points to one bit after the window end */
-		if (end - *beg >= min) {	/* it seems we have found window of proper size */
+		/*
+		 * now (*beg) points to beginning of zero bits window,
+		 * (end) points to one bit after the window end
+		 */
+
+		/* found window of proper size */
+		if (end - *beg >= min) {
 			int i;
 			reiserfs_prepare_for_journal(s, bh, 1);
-			/* try to set all blocks used checking are they still free */
+			/*
+			 * try to set all blocks as used, checking
+			 * whether they are still free
+			 */
 			for (i = *beg; i < end; i++) {
-				/* It seems that we should not check in journal again. */
+				/* Don't check in journal again. */
 				if (reiserfs_test_and_set_le_bit
 				    (i, bh->b_data)) {
-					/* bit was set by another process
-					 * while we slept in prepare_for_journal() */
+					/*
+					 * bit was set by another process while
+					 * we slept in prepare_for_journal()
+					 */
 					PROC_INFO_INC(s, scan_bitmap.stolen);
-					if (i >= *beg + min) {	/* we can continue with smaller set of allocated blocks,
-								 * if length of this set is more or equal to `min' */
+
+					/*
+					 * we can continue with smaller set
+					 * of allocated blocks, if length of
+					 * this set is more or equal to `min'
+					 */
+					if (i >= *beg + min) {
 						end = i;
 						break;
 					}
-					/* otherwise we clear all bit were set ... */
+
+					/*
+					 * otherwise we clear all bits that
+					 * were set ...
+					 */
 					while (--i >= *beg)
 						reiserfs_clear_le_bit
 						    (i, bh->b_data);
 					reiserfs_restore_prepared_buffer(s, bh);
 					*beg = org;
-					/* ... and search again in current block from beginning */
+
+					/*
+					 * Search again in current block
+					 * from beginning
+					 */
 					goto cont;
 				}
 			}
@@ -268,11 +302,13 @@
 	int bm = bmap_hash_id(s, id);
 	struct reiserfs_bitmap_info *info = &SB_AP_BITMAP(s)[bm];
 
-	/* If we don't have cached information on this bitmap block, we're
+	/*
+	 * If we don't have cached information on this bitmap block, we're
 	 * going to have to load it later anyway. Loading it here allows us
 	 * to make a better decision. This favors long-term performance gain
 	 * with a better on-disk layout vs. a short term gain of skipping the
-	 * read and potentially having a bad placement. */
+	 * read and potentially having a bad placement.
+	 */
 	if (info->free_count == UINT_MAX) {
 		struct buffer_head *bh = reiserfs_read_bitmap_block(s, bm);
 		brelse(bh);
@@ -305,17 +341,16 @@
 	return packing;
 }
 
-/* Tries to find contiguous zero bit window (given size) in given region of
- * bitmap and place new blocks there. Returns number of allocated blocks. */
+/*
+ * Tries to find contiguous zero bit window (given size) in given region of
+ * bitmap and place new blocks there. Returns number of allocated blocks.
+ */
 static int scan_bitmap(struct reiserfs_transaction_handle *th,
 		       b_blocknr_t * start, b_blocknr_t finish,
 		       int min, int max, int unfm, sector_t file_block)
 {
 	int nr_allocated = 0;
 	struct super_block *s = th->t_super;
-	/* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
-	 * - Hans, it is not a block number - Zam. */
-
 	unsigned int bm, off;
 	unsigned int end_bm, end_off;
 	unsigned int off_max = s->s_blocksize << 3;
@@ -323,8 +358,10 @@
 	BUG_ON(!th->t_trans_id);
 
 	PROC_INFO_INC(s, scan_bitmap.call);
+
+	/* No point in looking for more free blocks */
 	if (SB_FREE_BLOCKS(s) <= 0)
-		return 0;	// No point in looking for more free blocks
+		return 0;
 
 	get_bit_address(s, *start, &bm, &off);
 	get_bit_address(s, finish, &end_bm, &end_off);
@@ -333,7 +370,8 @@
 	if (end_bm > reiserfs_bmap_count(s))
 		end_bm = reiserfs_bmap_count(s);
 
-	/* When the bitmap is more than 10% free, anyone can allocate.
+	/*
+	 * When the bitmap is more than 10% free, anyone can allocate.
 	 * When it's less than 10% free, only files that already use the
 	 * bitmap are allowed. Once we pass 80% full, this restriction
 	 * is lifted.
@@ -532,7 +570,8 @@
 {
 	char *this_char, *value;
 
-	REISERFS_SB(s)->s_alloc_options.bits = 0;	/* clear default settings */
+	/* clear default settings */
+	REISERFS_SB(s)->s_alloc_options.bits = 0;
 
 	while ((this_char = strsep(&options, ":")) != NULL) {
 		if ((value = strchr(this_char, '=')) != NULL)
@@ -733,7 +772,7 @@
 		hash_in = (char *)&hint->key.k_dir_id;
 	} else {
 		if (!hint->inode) {
-			//hint->search_start = hint->beg;
+			/* hint->search_start = hint->beg; */
 			hash_in = (char *)&hint->key.k_dir_id;
 		} else
 		    if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
@@ -786,7 +825,8 @@
 
 		dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
 
-		/* keep the root dir and it's first set of subdirs close to
+		/*
+	 * keep the root dir and its first set of subdirs close to
 		 * the start of the disk
 		 */
 		if (dirid <= 2)
@@ -800,7 +840,8 @@
 	}
 }
 
-/* returns 1 if it finds an indirect item and gets valid hint info
+/*
+ * returns 1 if it finds an indirect item and gets valid hint info
  * from it, otherwise 0
  */
 static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
@@ -812,8 +853,11 @@
 	__le32 *item;
 	int ret = 0;
 
-	if (!hint->path)	/* reiserfs code can call this function w/o pointer to path
-				 * structure supplied; then we rely on supplied search_start */
+	/*
+	 * reiserfs code can call this function w/o pointer to path
+	 * structure supplied; then we rely on supplied search_start
+	 */
+	if (!hint->path)
 		return 0;
 
 	path = hint->path;
@@ -825,12 +869,13 @@
 
 	hint->search_start = bh->b_blocknr;
 
+	/*
+	 * for indirect item: go to left and look for the first non-hole entry
+	 * in the indirect item
+	 */
 	if (!hint->formatted_node && is_indirect_le_ih(ih)) {
-		/* for indirect item: go to left and look for the first non-hole entry
-		   in the indirect item */
 		if (pos_in_item == I_UNFM_NUM(ih))
 			pos_in_item--;
-//          pos_in_item = I_UNFM_NUM (ih) - 1;
 		while (pos_in_item >= 0) {
 			int t = get_block_num(item, pos_in_item);
 			if (t) {
@@ -846,10 +891,12 @@
 	return ret;
 }
 
-/* should be, if formatted node, then try to put on first part of the device
-   specified as number of percent with mount option device, else try to put
-   on last of device.  This is not to say it is good code to do so,
-   but the effect should be measured.  */
+/*
+ * should be, if formatted node, then try to put on first part of the device
+ * specified as number of percent with mount option device, else try to put
+ * on last of device.  This is not to say it is good code to do so,
+ * but the effect should be measured.
+ */
 static inline void set_border_in_hint(struct super_block *s,
 				      reiserfs_blocknr_hint_t * hint)
 {
@@ -975,21 +1022,27 @@
 		set_border_in_hint(s, hint);
 
 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
-	/* whenever we create a new directory, we displace it.  At first we will
-	   hash for location, later we might look for a moderately empty place for
-	   it */
+	/*
+	 * whenever we create a new directory, we displace it.  At first
+	 * we will hash for location, later we might look for a moderately
+	 * empty place for it
+	 */
 	if (displacing_new_packing_localities(s)
 	    && hint->th->displace_new_blocks) {
 		displace_new_packing_locality(hint);
 
-		/* we do not continue determine_search_start,
-		 * if new packing locality is being displaced */
+		/*
+		 * we do not continue determine_search_start,
+		 * if new packing locality is being displaced
+		 */
 		return;
 	}
 #endif
 
-	/* all persons should feel encouraged to add more special cases here and
-	 * test them */
+	/*
+	 * all persons should feel encouraged to add more special cases
+	 * here and test them
+	 */
 
 	if (displacing_large_files(s) && !hint->formatted_node
 	    && this_blocknr_allocation_would_make_it_a_large_file(hint)) {
@@ -997,8 +1050,10 @@
 		return;
 	}
 
-	/* if none of our special cases is relevant, use the left neighbor in the
-	   tree order of the new node we are allocating for */
+	/*
+	 * if none of our special cases is relevant, use the left
+	 * neighbor in the tree order of the new node we are allocating for
+	 */
 	if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes, s)) {
 		hash_formatted_node(hint);
 		return;
@@ -1006,10 +1061,13 @@
 
 	unfm_hint = get_left_neighbor(hint);
 
-	/* Mimic old block allocator behaviour, that is if VFS allowed for preallocation,
-	   new blocks are displaced based on directory ID. Also, if suggested search_start
-	   is less than last preallocated block, we start searching from it, assuming that
-	   HDD dataflow is faster in forward direction */
+	/*
+	 * Mimic old block allocator behaviour, that is, if VFS allowed for
+	 * preallocation, new blocks are displaced based on directory ID.
+	 * Also, if suggested search_start is less than last preallocated
+	 * block, we start searching from it, assuming that HDD dataflow
+	 * is faster in forward direction
+	 */
 	if (TEST_OPTION(old_way, s)) {
 		if (!hint->formatted_node) {
 			if (!reiserfs_hashed_relocation(s))
@@ -1038,11 +1096,13 @@
 	    TEST_OPTION(old_hashed_relocation, s)) {
 		old_hashed_relocation(hint);
 	}
+
 	/* new_hashed_relocation works with both formatted/unformatted nodes */
 	if ((!unfm_hint || hint->formatted_node) &&
 	    TEST_OPTION(new_hashed_relocation, s)) {
 		new_hashed_relocation(hint);
 	}
+
 	/* dirid grouping works only on unformatted nodes */
 	if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
 		dirid_groups(hint);
@@ -1080,8 +1140,6 @@
 	return CARRY_ON;
 }
 
-/* XXX I know it could be merged with upper-level function;
-   but may be result function would be too complex. */
 static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint,
 						 b_blocknr_t * new_blocknrs,
 						 b_blocknr_t start,
@@ -1109,7 +1167,10 @@
 
 		/* do we have something to fill prealloc. array also ? */
 		if (nr_allocated > 0) {
-			/* it means prealloc_size was greater that 0 and we do preallocation */
+			/*
+			 * it means prealloc_size was greater than 0 and
+			 * we do preallocation
+			 */
 			list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
 				 &SB_JOURNAL(hint->th->t_super)->
 				 j_prealloc_list);
@@ -1177,7 +1238,8 @@
 			start = 0;
 			finish = hint->beg;
 			break;
-		default:	/* We've tried searching everywhere, not enough space */
+		default:
+			/* We've tried searching everywhere, not enough space */
 			/* Free the blocks */
 			if (!hint->formatted_node) {
 #ifdef REISERQUOTA_DEBUG
@@ -1262,8 +1324,11 @@
 	return amount_needed;
 }
 
-int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs, int amount_needed, int reserved_by_us	/* Amount of blocks we have
-																	   already reserved */ )
+int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *hint,
+			       b_blocknr_t *new_blocknrs,
+			       int amount_needed,
+			       /* Amount of blocks we have already reserved */
+			       int reserved_by_us)
 {
 	int initial_amount_needed = amount_needed;
 	int ret;
@@ -1275,15 +1340,21 @@
 		return NO_DISK_SPACE;
 	/* should this be if !hint->inode &&  hint->preallocate? */
 	/* do you mean hint->formatted_node can be removed ? - Zam */
-	/* hint->formatted_node cannot be removed because we try to access
-	   inode information here, and there is often no inode assotiated with
-	   metadata allocations - green */
+	/*
+	 * hint->formatted_node cannot be removed because we try to access
+	 * inode information here, and there is often no inode associated with
+	 * metadata allocations - green
+	 */
 
 	if (!hint->formatted_node && hint->preallocate) {
 		amount_needed = use_preallocated_list_if_available
 		    (hint, new_blocknrs, amount_needed);
-		if (amount_needed == 0)	/* all blocknrs we need we got from
-					   prealloc. list */
+
+		/*
+		 * We have all the block numbers we need from the
+		 * prealloc list
+		 */
+		if (amount_needed == 0)
 			return CARRY_ON;
 		new_blocknrs += (initial_amount_needed - amount_needed);
 	}
@@ -1297,10 +1368,12 @@
 	ret = blocknrs_and_prealloc_arrays_from_search_start
 	    (hint, new_blocknrs, amount_needed);
 
-	/* we used prealloc. list to fill (partially) new_blocknrs array. If final allocation fails we
-	 * need to return blocks back to prealloc. list or just free them. -- Zam (I chose second
-	 * variant) */
-
+	/*
+	 * We used prealloc. list to fill (partially) new_blocknrs array.
+	 * If final allocation fails we need to return blocks back to
+	 * prealloc. list or just free them. -- Zam (I chose second
+	 * variant)
+	 */
 	if (ret != CARRY_ON) {
 		while (amount_needed++ < initial_amount_needed) {
 			reiserfs_free_block(hint->th, hint->inode,
@@ -1339,8 +1412,10 @@
 	struct reiserfs_bitmap_info *info = SB_AP_BITMAP(sb) + bitmap;
 	struct buffer_head *bh;
 
-	/* Way old format filesystems had the bitmaps packed up front.
-	 * I doubt there are any of these left, but just in case... */
+	/*
+	 * Way old format filesystems had the bitmaps packed up front.
+	 * I doubt there are any of these left, but just in case...
+	 */
 	if (unlikely(test_bit(REISERFS_OLD_FORMAT,
 	                      &(REISERFS_SB(sb)->s_properties))))
 		block = REISERFS_SB(sb)->s_sbh->b_blocknr + 1 + bitmap;
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 1fe5cde..8d51f28 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -59,7 +59,10 @@
 
 int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
 {
-	struct cpu_key pos_key;	/* key of current position in the directory (key of directory entry) */
+
+	/* key of current position in the directory (key of directory entry) */
+	struct cpu_key pos_key;
+
 	INITIALIZE_PATH(path_to_entry);
 	struct buffer_head *bh;
 	int item_num, entry_num;
@@ -77,21 +80,28 @@
 
 	reiserfs_check_lock_depth(inode->i_sb, "readdir");
 
-	/* form key for search the next directory entry using f_pos field of
-	   file structure */
+	/*
+	 * form key for search the next directory entry using
+	 * f_pos field of file structure
+	 */
 	make_cpu_key(&pos_key, inode, ctx->pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3);
 	next_pos = cpu_key_k_offset(&pos_key);
 
 	path_to_entry.reada = PATH_READA;
 	while (1) {
 	      research:
-		/* search the directory item, containing entry with specified key */
+		/*
+		 * search for the directory item containing the entry
+		 * with the specified key
+		 */
 		search_res =
 		    search_by_entry_key(inode->i_sb, &pos_key, &path_to_entry,
 					&de);
 		if (search_res == IO_ERROR) {
-			// FIXME: we could just skip part of directory which could
-			// not be read
+			/*
+			 * FIXME: we could just skip part of directory
+			 * which could not be read
+			 */
 			ret = -EIO;
 			goto out;
 		}
@@ -109,14 +119,20 @@
 		       "vs-9005 item_num == %d, item amount == %d",
 		       item_num, B_NR_ITEMS(bh));
 
-		/* and entry must be not more than number of entries in the item */
+		/*
+		 * and the entry number must be no more than the number
+		 * of entries in the item
+		 */
 		RFALSE(ih_entry_count(ih) < entry_num,
 		       "vs-9010: entry number is too big %d (%d)",
 		       entry_num, ih_entry_count(ih));
 
+		/*
+		 * go through all entries in the directory item beginning
+		 * from the entry that has been found
+		 */
 		if (search_res == POSITION_FOUND
 		    || entry_num < ih_entry_count(ih)) {
-			/* go through all entries in the directory item beginning from the entry, that has been found */
 			struct reiserfs_de_head *deh =
 			    B_I_DEH(bh, ih) + entry_num;
 
@@ -127,16 +143,18 @@
 				ino_t d_ino;
 				loff_t cur_pos = deh_offset(deh);
 
+				/* it is a hidden entry */
 				if (!de_visible(deh))
-					/* it is hidden entry */
 					continue;
 				d_reclen = entry_length(bh, ih, entry_num);
 				d_name = B_I_DEH_ENTRY_FILE_NAME(bh, ih, deh);
 
 				if (d_reclen <= 0 ||
 				    d_name + d_reclen > bh->b_data + bh->b_size) {
-					/* There is corrupted data in entry,
-					 * We'd better stop here */
+					/*
+					 * There is corrupted data in entry;
+					 * we'd better stop here
+					 */
 					pathrelse(&path_to_entry);
 					ret = -EIO;
 					goto out;
@@ -145,10 +163,10 @@
 				if (!d_name[d_reclen - 1])
 					d_reclen = strlen(d_name);
 
+				/* too big to send back to VFS */
 				if (d_reclen >
 				    REISERFS_MAX_NAME(inode->i_sb->
 						      s_blocksize)) {
-					/* too big to send back to VFS */
 					continue;
 				}
 
@@ -173,10 +191,14 @@
 						goto research;
 					}
 				}
-				// Note, that we copy name to user space via temporary
-				// buffer (local_buf) because filldir will block if
-				// user space buffer is swapped out. At that time
-				// entry can move to somewhere else
+
+				/*
+				 * Note that we copy name to user space via
+				 * temporary buffer (local_buf) because
+				 * filldir will block if user space buffer is
+				 * swapped out. At that time entry can move to
+				 * somewhere else
+				 */
 				memcpy(local_buf, d_name, d_reclen);
 
 				/*
@@ -209,22 +231,26 @@
 			}	/* for */
 		}
 
+		/* end of directory has been reached */
 		if (item_num != B_NR_ITEMS(bh) - 1)
-			// end of directory has been reached
 			goto end;
 
-		/* item we went through is last item of node. Using right
-		   delimiting key check is it directory end */
+		/*
+		 * item we went through is last item of node. Using right
+		 * delimiting key, check whether it is the directory end
+		 */
 		rkey = get_rkey(&path_to_entry, inode->i_sb);
 		if (!comp_le_keys(rkey, &MIN_KEY)) {
-			/* set pos_key to key, that is the smallest and greater
-			   that key of the last entry in the item */
+			/*
+			 * set pos_key to the key that is the smallest and
+			 * greater than the key of the last entry in the item
+			 */
 			set_cpu_key_k_offset(&pos_key, next_pos);
 			continue;
 		}
 
+		/* end of directory has been reached */
 		if (COMP_SHORT_KEYS(rkey, &pos_key)) {
-			// end of directory has been reached
 			goto end;
 		}
 
@@ -248,9 +274,10 @@
 	return reiserfs_readdir_inode(file_inode(file), ctx);
 }
 
-/* compose directory item containing "." and ".." entries (entries are
-   not aligned to 4 byte boundary) */
-/* the last four params are LE */
+/*
+ * compose directory item containing "." and ".." entries (entries are
+ * not aligned to 4 byte boundary)
+ */
 void make_empty_dir_item_v1(char *body, __le32 dirid, __le32 objid,
 			    __le32 par_dirid, __le32 par_objid)
 {
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 80b2b1b..399b200 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -2,18 +2,13 @@
  * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
  */
 
-/* Now we have all buffers that must be used in balancing of the tree 	*/
-/* Further calculations can not cause schedule(), and thus the buffer 	*/
-/* tree will be stable until the balancing will be finished 		*/
-/* balance the tree according to the analysis made before,		*/
-/* and using buffers obtained after all above.				*/
-
-/**
- ** balance_leaf_when_delete
- ** balance_leaf
- ** do_balance
- **
- **/
+/*
+ * Now we have all buffers that must be used in balancing of the tree.
+ * Further calculations can not cause schedule(), and thus the buffer
+ * tree will be stable until the balancing is finished.  We balance the
+ * tree according to the analysis made before, using the buffers
+ * obtained after all of the above.
+ */
 
 #include <asm/uaccess.h>
 #include <linux/time.h>
@@ -68,35 +63,39 @@
 #define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
 #define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
 
-/* summary:
- if deleting something ( tb->insert_size[0] < 0 )
-   return(balance_leaf_when_delete()); (flag d handled here)
- else
-   if lnum is larger than 0 we put items into the left node
-   if rnum is larger than 0 we put items into the right node
-   if snum1 is larger than 0 we put items into the new node s1
-   if snum2 is larger than 0 we put items into the new node s2
-Note that all *num* count new items being created.
+/*
+ * summary:
+ *  if deleting something ( tb->insert_size[0] < 0 )
+ *    return(balance_leaf_when_delete()); (flag d handled here)
+ *  else
+ *    if lnum is larger than 0 we put items into the left node
+ *    if rnum is larger than 0 we put items into the right node
+ *    if snum1 is larger than 0 we put items into the new node s1
+ *    if snum2 is larger than 0 we put items into the new node s2
+ * Note that all *num* count new items being created.
+ *
+ * It would be easier to read balance_leaf() if each of these summary
+ * lines was a separate procedure rather than being inlined.  I think
+ * that there are many passages here and in balance_leaf_when_delete() in
+ * which two calls to one procedure can replace two passages, and it
+ * might save cache space and improve software maintenance costs to do so.
+ *
+ * Vladimir made the perceptive comment that we should offload most of
+ * the decision making in this function into fix_nodes/check_balance, and
+ * then create some sort of structure in tb that says what actions should
+ * be performed by do_balance.
+ *
+ * -Hans
+ */
 
-It would be easier to read balance_leaf() if each of these summary
-lines was a separate procedure rather than being inlined.  I think
-that there are many passages here and in balance_leaf_when_delete() in
-which two calls to one procedure can replace two passages, and it
-might save cache space and improve software maintenance costs to do so.
-
-Vladimir made the perceptive comment that we should offload most of
-the decision making in this function into fix_nodes/check_balance, and
-then create some sort of structure in tb that says what actions should
-be performed by do_balance.
-
--Hans */
-
-/* Balance leaf node in case of delete or cut: insert_size[0] < 0
+/*
+ * Balance leaf node in case of delete or cut: insert_size[0] < 0
  *
  * lnum, rnum can have values >= -1
  *	-1 means that the neighbor must be joined with S
  *	 0 means that nothing should be done with the neighbor
- *	>0 means to shift entirely or partly the specified number of items to the neighbor
+ *	>0 means to shift entirely or partly the specified number of items
+ *         to the neighbor
  */
 static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
 {
@@ -149,8 +148,16 @@
 	case M_CUT:{		/* cut item in S[0] */
 			if (is_direntry_le_ih(ih)) {
 
-				/* UFS unlink semantics are such that you can only delete one directory entry at a time. */
-				/* when we cut a directory tb->insert_size[0] means number of entries to be cut (always 1) */
+				/*
+				 * UFS unlink semantics are such that you
+				 * can only delete one directory entry at
+				 * a time.
+				 */
+
+				/*
+				 * when we cut a directory tb->insert_size[0]
+				 * means number of entries to be cut (always 1)
+				 */
 				tb->insert_size[0] = -1;
 				leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
 						     -tb->insert_size[0]);
@@ -183,13 +190,22 @@
 						      "UNKNOWN"), flag);
 	}
 
-	/* the rule is that no shifting occurs unless by shifting a node can be freed */
+	/*
+	 * the rule is that no shifting occurs unless by shifting
+	 * a node can be freed
+	 */
 	n = B_NR_ITEMS(tbS0);
-	if (tb->lnum[0]) {	/* L[0] takes part in balancing */
-		if (tb->lnum[0] == -1) {	/* L[0] must be joined with S[0] */
-			if (tb->rnum[0] == -1) {	/* R[0] must be also joined with S[0] */
+	/* L[0] takes part in balancing */
+	if (tb->lnum[0]) {
+		/* L[0] must be joined with S[0] */
+		if (tb->lnum[0] == -1) {
+			/* R[0] must be also joined with S[0] */
+			if (tb->rnum[0] == -1) {
 				if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
-					/* all contents of all the 3 buffers will be in L[0] */
+					/*
+					 * all contents of all the 3 buffers
+					 * will be in L[0]
+					 */
 					if (PATH_H_POSITION(tb->tb_path, 1) == 0
 					    && 1 < B_NR_ITEMS(tb->FR[0]))
 						replace_key(tb, tb->CFL[0],
@@ -208,7 +224,10 @@
 
 					return 0;
 				}
-				/* all contents of all the 3 buffers will be in R[0] */
+				/*
+				 * all contents of all the 3 buffers will
+				 * be in R[0]
+				 */
 				leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1,
 						NULL);
 				leaf_move_items(LEAF_FROM_L_TO_R, tb,
@@ -233,7 +252,11 @@
 
 			return 0;
 		}
-		/* a part of contents of S[0] will be in L[0] and the rest part of S[0] will be in R[0] */
+
+		/*
+		 * a part of the contents of S[0] will be in L[0] and
+		 * the rest of S[0] will be in R[0]
+		 */
 
 		RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
 		       (tb->lnum[0] + tb->rnum[0] > n + 1),
@@ -1178,9 +1201,7 @@
 	return tb->used[i];
 }
 
-/* This is now used because reiserfs_free_block has to be able to
-** schedule.
-*/
+/* This is now used because reiserfs_free_block has to be able to schedule. */
 static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
 {
 	int i;
@@ -1335,8 +1356,10 @@
 			       "mount point.");
 	}
 
-	/* double check that buffers that we will modify are unlocked. (fix_nodes should already have
-	   prepped all of these for us). */
+	/*
+	 * double check that buffers that we will modify are unlocked.
+	 * (fix_nodes should already have prepped all of these for us).
+	 */
 	if (tb->lnum[0]) {
 		retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]");
 		retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]");
@@ -1429,49 +1452,51 @@
 
 #endif
 
-/* Now we have all of the buffers that must be used in balancing of
-   the tree.  We rely on the assumption that schedule() will not occur
-   while do_balance works. ( Only interrupt handlers are acceptable.)
-   We balance the tree according to the analysis made before this,
-   using buffers already obtained.  For SMP support it will someday be
-   necessary to add ordered locking of tb. */
+/*
+ * Now we have all of the buffers that must be used in balancing of
+ * the tree.  We rely on the assumption that schedule() will not occur
+ * while do_balance works. ( Only interrupt handlers are acceptable.)
+ * We balance the tree according to the analysis made before this,
+ * using buffers already obtained.  For SMP support it will someday be
+ * necessary to add ordered locking of tb.
+ */
 
-/* Some interesting rules of balancing:
-
-   we delete a maximum of two nodes per level per balancing: we never
-   delete R, when we delete two of three nodes L, S, R then we move
-   them into R.
-
-   we only delete L if we are deleting two nodes, if we delete only
-   one node we delete S
-
-   if we shift leaves then we shift as much as we can: this is a
-   deliberate policy of extremism in node packing which results in
-   higher average utilization after repeated random balance operations
-   at the cost of more memory copies and more balancing as a result of
-   small insertions to full nodes.
-
-   if we shift internal nodes we try to evenly balance the node
-   utilization, with consequent less balancing at the cost of lower
-   utilization.
-
-   one could argue that the policy for directories in leaves should be
-   that of internal nodes, but we will wait until another day to
-   evaluate this....  It would be nice to someday measure and prove
-   these assumptions as to what is optimal....
-
-*/
+/*
+ * Some interesting rules of balancing:
+ * we delete a maximum of two nodes per level per balancing: we never
+ * delete R, when we delete two of three nodes L, S, R then we move
+ * them into R.
+ *
+ * we only delete L if we are deleting two nodes, if we delete only
+ * one node we delete S
+ *
+ * if we shift leaves then we shift as much as we can: this is a
+ * deliberate policy of extremism in node packing which results in
+ * higher average utilization after repeated random balance operations
+ * at the cost of more memory copies and more balancing as a result of
+ * small insertions to full nodes.
+ *
+ * if we shift internal nodes we try to evenly balance the node
+ * utilization, with consequently less balancing at the cost of lower
+ * utilization.
+ *
+ * one could argue that the policy for directories in leaves should be
+ * that of internal nodes, but we will wait until another day to
+ * evaluate this....  It would be nice to someday measure and prove
+ * these assumptions as to what is optimal....
+ */
 
 static inline void do_balance_starts(struct tree_balance *tb)
 {
-	/* use print_cur_tb() to see initial state of struct
-	   tree_balance */
+	/* use print_cur_tb() to see initial state of struct tree_balance */
 
 	/* store_print_tb (tb); */
 
 	/* do not delete, just comment it out */
-/*    print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
-	     "check");*/
+	/*
+	print_tb(flag, PATH_LAST_POSITION(tb->tb_path),
+		 tb->tb_path->pos_in_item, tb, "check");
+	*/
 	RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
 #ifdef CONFIG_REISERFS_CHECK
 	REISERFS_SB(tb->tb_sb)->cur_tb = tb;
@@ -1487,9 +1512,10 @@
 	REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
 #endif
 
-	/* reiserfs_free_block is no longer schedule safe.  So, we need to
-	 ** put the buffers we want freed on the thrown list during do_balance,
-	 ** and then free them now
+	/*
+	 * reiserfs_free_block is no longer schedule safe.  So, we need to
+	 * put the buffers we want freed on the thrown list during do_balance,
+	 * and then free them now
 	 */
 
 	REISERFS_SB(tb->tb_sb)->s_do_balance++;
@@ -1500,36 +1526,40 @@
 	free_thrown(tb);
 }
 
-void do_balance(struct tree_balance *tb,	/* tree_balance structure */
-		struct item_head *ih,	/* item header of inserted item */
-		const char *body,	/* body  of inserted item or bytes to paste */
-		int flag)
-{				/* i - insert, d - delete
-				   c - cut, p - paste
+/*
+ * do_balance - balance the tree
+ *
+ * @tb: tree_balance structure
+ * @ih: item header of inserted item
+ * @body: body of inserted item or bytes to paste
+ * @flag: 'i' - insert, 'd' - delete, 'c' - cut, 'p' - paste
+ *
+ * Cut means delete part of an item (includes removing an entry from a
+ * directory).
+ *
+ * Delete means delete whole item.
+ *
+ * Insert means add a new item into the tree.
+ *
+ * Paste means to append to the end of an existing file or to
+ * insert a directory entry.
+ */
+void do_balance(struct tree_balance *tb, struct item_head *ih,
+		const char *body, int flag)
+{
+	int child_pos;		/* position of a child node in its parent */
+	int h;			/* level of the tree being processed */
 
-				   Cut means delete part of an item
-				   (includes removing an entry from a
-				   directory).
+	/*
+	 * in our processing of one level we sometimes determine what
+	 * must be inserted into the next higher level.  This insertion
+	 * consists of a key or two keys and their corresponding
+	 * pointers
+	 */
+	struct item_head insert_key[2];
 
-				   Delete means delete whole item.
-
-				   Insert means add a new item into the
-				   tree.
-
-				   Paste means to append to the end of an
-				   existing file or to insert a directory
-				   entry.  */
-	int child_pos,		/* position of a child node in its parent */
-	 h;			/* level of the tree being processed */
-	struct item_head insert_key[2];	/* in our processing of one level
-					   we sometimes determine what
-					   must be inserted into the next
-					   higher level.  This insertion
-					   consists of a key or two keys
-					   and their corresponding
-					   pointers */
-	struct buffer_head *insert_ptr[2];	/* inserted node-ptrs for the next
-						   level */
+	/* inserted node-ptrs for the next level */
+	struct buffer_head *insert_ptr[2];
 
 	tb->tb_mode = flag;
 	tb->need_balance_dirty = 0;
@@ -1549,9 +1579,11 @@
 	atomic_inc(&(fs_generation(tb->tb_sb)));
 	do_balance_starts(tb);
 
-	/* balance leaf returns 0 except if combining L R and S into
-	   one node.  see balance_internal() for explanation of this
-	   line of code. */
+	/*
+	 * balance_leaf returns 0 except if combining L, R and S into
+	 * one node.  See balance_internal() for explanation of this
+	 * line of code.
+	 */
 	child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
 	    balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);
 
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index ed58d84..2739943 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -15,20 +15,20 @@
 #include <linux/quotaops.h>
 
 /*
-** We pack the tails of files on file close, not at the time they are written.
-** This implies an unnecessary copy of the tail and an unnecessary indirect item
-** insertion/balancing, for files that are written in one write.
-** It avoids unnecessary tail packings (balances) for files that are written in
-** multiple writes and are small enough to have tails.
-**
-** file_release is called by the VFS layer when the file is closed.  If
-** this is the last open file descriptor, and the file
-** small enough to have a tail, and the tail is currently in an
-** unformatted node, the tail is converted back into a direct item.
-**
-** We use reiserfs_truncate_file to pack the tail, since it already has
-** all the conditions coded.
-*/
+ * We pack the tails of files on file close, not at the time they are written.
+ * This implies an unnecessary copy of the tail and an unnecessary indirect item
+ * insertion/balancing, for files that are written in one write.
+ * It avoids unnecessary tail packings (balances) for files that are written in
+ * multiple writes and are small enough to have tails.
+ *
+ * file_release is called by the VFS layer when the file is closed.  If
+ * this is the last open file descriptor, and the file
+ * small enough to have a tail, and the tail is currently in an
+ * unformatted node, the tail is converted back into a direct item.
+ *
+ * We use reiserfs_truncate_file to pack the tail, since it already has
+ * all the conditions coded.
+ */
 static int reiserfs_file_release(struct inode *inode, struct file *filp)
 {
 
@@ -57,14 +57,16 @@
 	}
 
 	reiserfs_write_lock(inode->i_sb);
-	/* freeing preallocation only involves relogging blocks that
+	/*
+	 * freeing preallocation only involves relogging blocks that
 	 * are already in the current transaction.  preallocation gets
 	 * freed at the end of each transaction, so it is impossible for
 	 * us to log any additional blocks (including quota blocks)
 	 */
 	err = journal_begin(&th, inode->i_sb, 1);
 	if (err) {
-		/* uh oh, we can't allow the inode to go away while there
+		/*
+		 * uh oh, we can't allow the inode to go away while there
 		 * is still preallocation blocks pending.  Try to join the
 		 * aborted transaction
 		 */
@@ -72,11 +74,13 @@
 		err = journal_join_abort(&th, inode->i_sb, 1);
 
 		if (err) {
-			/* hmpf, our choices here aren't good.  We can pin the inode
-			 * which will disallow unmount from every happening, we can
-			 * do nothing, which will corrupt random memory on unmount,
-			 * or we can forcibly remove the file from the preallocation
-			 * list, which will leak blocks on disk.  Lets pin the inode
+			/*
+			 * hmpf, our choices here aren't good.  We can pin
+			 * the inode which will disallow unmount from ever
+			 * happening, we can do nothing, which will corrupt
+			 * random memory on unmount, or we can forcibly
+			 * remove the file from the preallocation list, which
+			 * will leak blocks on disk.  Let's pin the inode
 			 * and let the admin know what is going on.
 			 */
 			igrab(inode);
@@ -102,10 +106,12 @@
 	    (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
 	    tail_has_to_be_packed(inode)) {
 
-		/* if regular file is released by last holder and it has been
-		   appended (we append by unformatted node only) or its direct
-		   item(s) had to be converted, then it may have to be
-		   indirect2direct converted */
+		/*
+		 * if regular file is released by last holder and it has been
+		 * appended (we append by unformatted node only) or its direct
+		 * item(s) had to be converted, then it may have to be
+		 * indirect2direct converted
+		 */
 		err = reiserfs_truncate_file(inode, 0);
 	}
       out:
@@ -117,8 +123,9 @@
 static int reiserfs_file_open(struct inode *inode, struct file *file)
 {
 	int err = dquot_file_open(inode, file);
+
+	/* somebody might be tailpacking on final close; wait for it */
         if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
-		/* somebody might be tailpacking on final close; wait for it */
 		mutex_lock(&(REISERFS_I(inode)->tailpack));
 		atomic_inc(&REISERFS_I(inode)->openers);
 		mutex_unlock(&(REISERFS_I(inode)->tailpack));
@@ -208,7 +215,8 @@
 				journal_mark_dirty(&th, s, bh);
 			} else if (!buffer_dirty(bh)) {
 				mark_buffer_dirty(bh);
-				/* do data=ordered on any page past the end
+				/*
+				 * do data=ordered on any page past the end
 				 * of file and any buffer marked BH_New.
 				 */
 				if (reiserfs_data_ordered(inode->i_sb) &&
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index b6a05a7..144bd62 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -2,59 +2,32 @@
  * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
  */
 
-/**
- ** old_item_num
- ** old_entry_num
- ** set_entry_sizes
- ** create_virtual_node
- ** check_left
- ** check_right
- ** directory_part_size
- ** get_num_ver
- ** set_parameters
- ** is_leaf_removable
- ** are_leaves_removable
- ** get_empty_nodes
- ** get_lfree
- ** get_rfree
- ** is_left_neighbor_in_cache
- ** decrement_key
- ** get_far_parent
- ** get_parents
- ** can_node_be_removed
- ** ip_check_balance
- ** dc_check_balance_internal
- ** dc_check_balance_leaf
- ** dc_check_balance
- ** check_balance
- ** get_direct_parent
- ** get_neighbors
- ** fix_nodes
- **
- **
- **/
-
 #include <linux/time.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include "reiserfs.h"
 #include <linux/buffer_head.h>
 
-/* To make any changes in the tree we find a node, that contains item
-   to be changed/deleted or position in the node we insert a new item
-   to. We call this node S. To do balancing we need to decide what we
-   will shift to left/right neighbor, or to a new node, where new item
-   will be etc. To make this analysis simpler we build virtual
-   node. Virtual node is an array of items, that will replace items of
-   node S. (For instance if we are going to delete an item, virtual
-   node does not contain it). Virtual node keeps information about
-   item sizes and types, mergeability of first and last items, sizes
-   of all entries in directory item. We use this array of items when
-   calculating what we can shift to neighbors and how many nodes we
-   have to have if we do not any shiftings, if we shift to left/right
-   neighbor or to both. */
+/*
+ * To make any changes in the tree we find a node that contains item
+ * to be changed/deleted or position in the node we insert a new item
+ * to. We call this node S. To do balancing we need to decide what we
+ * will shift to left/right neighbor, or to a new node, where new item
+ * will be etc. To make this analysis simpler we build virtual
+ * node. Virtual node is an array of items, that will replace items of
+ * node S. (For instance if we are going to delete an item, virtual
+ * node does not contain it). Virtual node keeps information about
+ * item sizes and types, mergeability of first and last items, sizes
+ * of all entries in directory item. We use this array of items when
+ * calculating what we can shift to neighbors and how many nodes we
+ * have to have if we do not do any shifting, if we shift to left/right
+ * neighbor or to both.
+ */
 
-/* taking item number in virtual node, returns number of item, that it has in source buffer */
+/*
+ * Takes item number in virtual node, returns number of item
+ * that it has in source buffer
+ */
 static inline int old_item_num(int new_num, int affected_item_num, int mode)
 {
 	if (mode == M_PASTE || mode == M_CUT || new_num < affected_item_num)
@@ -112,7 +85,10 @@
 	    && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num))
 		vn->vn_vi[0].vi_type |= VI_TYPE_LEFT_MERGEABLE;
 
-	/* go through all items those remain in the virtual node (except for the new (inserted) one) */
+	/*
+	 * go through all items that remain in the virtual
+	 * node (except for the new (inserted) one)
+	 */
 	for (new_num = 0; new_num < vn->vn_nr_item; new_num++) {
 		int j;
 		struct virtual_item *vi = vn->vn_vi + new_num;
@@ -131,8 +107,10 @@
 		vi->vi_item = ih_item_body(Sh, ih + j);
 		vi->vi_uarea = vn->vn_free_ptr;
 
-		// FIXME: there is no check, that item operation did not
-		// consume too much memory
+		/*
+		 * FIXME: there is no check that item operation did not
+		 * consume too much memory
+		 */
 		vn->vn_free_ptr +=
 		    op_create_vi(vn, vi, is_affected, tb->insert_size[0]);
 		if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)
@@ -145,7 +123,8 @@
 
 		if (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT) {
 			vn->vn_vi[new_num].vi_item_len += tb->insert_size[0];
-			vi->vi_new_data = vn->vn_data;	// pointer to data which is going to be pasted
+			/* pointer to data which is going to be pasted */
+			vi->vi_new_data = vn->vn_data;
 		}
 	}
 
@@ -164,7 +143,10 @@
 			     tb->insert_size[0]);
 	}
 
-	/* set right merge flag we take right delimiting key and check whether it is a mergeable item */
+	/*
+	 * To set the right merge flag, we take the right delimiting
+	 * key and check whether it is a mergeable item
+	 */
 	if (tb->CFR[0]) {
 		struct reiserfs_key *key;
 
@@ -179,12 +161,19 @@
 		if (op_is_left_mergeable(key, Sh->b_size) &&
 		    !(vn->vn_mode != M_DELETE
 		      || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) {
-			/* we delete last item and it could be merged with right neighbor's first item */
+			/*
+			 * we delete last item and it could be merged
+			 * with right neighbor's first item
+			 */
 			if (!
 			    (B_NR_ITEMS(Sh) == 1
 			     && is_direntry_le_ih(item_head(Sh, 0))
 			     && ih_entry_count(item_head(Sh, 0)) == 1)) {
-				/* node contains more than 1 item, or item is not directory item, or this item contains more than 1 entry */
+				/*
+				 * node contains more than 1 item, or item
+				 * is not directory item, or this item
+				 * contains more than 1 entry
+				 */
 				print_block(Sh, 0, -1, -1);
 				reiserfs_panic(tb->tb_sb, "vs-8045",
 					       "rdkey %k, affected item==%d "
@@ -198,8 +187,10 @@
 	}
 }
 
-/* using virtual node check, how many items can be shifted to left
-   neighbor */
+/*
+ * Using virtual node, check how many items can be
+ * shifted to left neighbor
+ */
 static void check_left(struct tree_balance *tb, int h, int cur_free)
 {
 	int i;
@@ -259,9 +250,13 @@
 		}
 
 		/* the item cannot be shifted entirely, try to split it */
-		/* check whether L[0] can hold ih and at least one byte of the item body */
+		/*
+		 * check whether L[0] can hold ih and at least one byte
+		 * of the item body
+		 */
+
+		/* cannot shift even a part of the current item */
 		if (cur_free <= ih_size) {
-			/* cannot shift even a part of the current item */
 			tb->lbytes = -1;
 			return;
 		}
@@ -278,8 +273,10 @@
 	return;
 }
 
-/* using virtual node check, how many items can be shifted to right
-   neighbor */
+/*
+ * Using virtual node, check how many items can be
+ * shifted to right neighbor
+ */
 static void check_right(struct tree_balance *tb, int h, int cur_free)
 {
 	int i;
@@ -338,13 +335,21 @@
 			continue;
 		}
 
-		/* check whether R[0] can hold ih and at least one byte of the item body */
-		if (cur_free <= ih_size) {	/* cannot shift even a part of the current item */
+		/*
+		 * check whether R[0] can hold ih and at least one
+		 * byte of the item body
+		 */
+
+		/* cannot shift even a part of the current item */
+		if (cur_free <= ih_size) {
 			tb->rbytes = -1;
 			return;
 		}
 
-		/* R[0] can hold the header of the item and at least one byte of its body */
+		/*
+		 * R[0] can hold the header of the item and at least
+		 * one byte of its body
+		 */
 		cur_free -= ih_size;	/* cur_free is still > 0 */
 
 		tb->rbytes = op_check_right(vi, cur_free);
@@ -361,45 +366,64 @@
 /*
  * from - number of items, which are shifted to left neighbor entirely
  * to - number of item, which are shifted to right neighbor entirely
- * from_bytes - number of bytes of boundary item (or directory entries) which are shifted to left neighbor
- * to_bytes - number of bytes of boundary item (or directory entries) which are shifted to right neighbor */
+ * from_bytes - number of bytes of boundary item (or directory entries)
+ *              which are shifted to left neighbor
+ * to_bytes - number of bytes of boundary item (or directory entries)
+ *            which are shifted to right neighbor
+ */
 static int get_num_ver(int mode, struct tree_balance *tb, int h,
 		       int from, int from_bytes,
 		       int to, int to_bytes, short *snum012, int flow)
 {
 	int i;
 	int cur_free;
-	//    int bytes;
 	int units;
 	struct virtual_node *vn = tb->tb_vn;
-	//    struct virtual_item * vi;
-
 	int total_node_size, max_node_size, current_item_size;
 	int needed_nodes;
-	int start_item,		/* position of item we start filling node from */
-	 end_item,		/* position of item we finish filling node by */
-	 start_bytes,		/* number of first bytes (entries for directory) of start_item-th item
-				   we do not include into node that is being filled */
-	 end_bytes;		/* number of last bytes (entries for directory) of end_item-th item
-				   we do node include into node that is being filled */
-	int split_item_positions[2];	/* these are positions in virtual item of
-					   items, that are split between S[0] and
-					   S1new and S1new and S2new */
+
+	/* position of item we start filling node from */
+	int start_item;
+
+	/* position of item we finish filling node by */
+	int end_item;
+
+	/*
+	 * number of first bytes (entries for directory) of start_item-th item
+	 * we do not include into node that is being filled
+	 */
+	int start_bytes;
+
+	/*
+	 * number of last bytes (entries for directory) of end_item-th item
+	 * we do not include into node that is being filled
+	 */
+	int end_bytes;
+
+	/*
+	 * these are positions in virtual item of items, that are split
+	 * between S[0] and S1new and S1new and S2new
+	 */
+	int split_item_positions[2];
 
 	split_item_positions[0] = -1;
 	split_item_positions[1] = -1;
 
-	/* We only create additional nodes if we are in insert or paste mode
-	   or we are in replace mode at the internal level. If h is 0 and
-	   the mode is M_REPLACE then in fix_nodes we change the mode to
-	   paste or insert before we get here in the code.  */
+	/*
+	 * We only create additional nodes if we are in insert or paste mode
+	 * or we are in replace mode at the internal level. If h is 0 and
+	 * the mode is M_REPLACE then in fix_nodes we change the mode to
+	 * paste or insert before we get here in the code.
+	 */
 	RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE),
 	       "vs-8100: insert_size < 0 in overflow");
 
 	max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h));
 
-	/* snum012 [0-2] - number of items, that lay
-	   to S[0], first new node and second new node */
+	/*
+	 * snum012 [0-2] - number of items that go
+	 * to S[0], first new node and second new node
+	 */
 	snum012[3] = -1;	/* s1bytes */
 	snum012[4] = -1;	/* s2bytes */
 
@@ -416,20 +440,22 @@
 	total_node_size = 0;
 	cur_free = max_node_size;
 
-	// start from 'from'-th item
+	/* start from 'from'-th item */
 	start_item = from;
-	// skip its first 'start_bytes' units
+	/* skip its first 'start_bytes' units */
 	start_bytes = ((from_bytes != -1) ? from_bytes : 0);
 
-	// last included item is the 'end_item'-th one
+	/* last included item is the 'end_item'-th one */
 	end_item = vn->vn_nr_item - to - 1;
-	// do not count last 'end_bytes' units of 'end_item'-th item
+	/* do not count last 'end_bytes' units of 'end_item'-th item */
 	end_bytes = (to_bytes != -1) ? to_bytes : 0;
 
-	/* go through all item beginning from the start_item-th item and ending by
-	   the end_item-th item. Do not count first 'start_bytes' units of
-	   'start_item'-th item and last 'end_bytes' of 'end_item'-th item */
-
+	/*
+	 * go through all items beginning with the start_item-th item
+	 * and ending with the end_item-th item. Do not count first
+	 * 'start_bytes' units of 'start_item'-th item and last
+	 * 'end_bytes' of 'end_item'-th item
+	 */
 	for (i = start_item; i <= end_item; i++) {
 		struct virtual_item *vi = vn->vn_vi + i;
 		int skip_from_end = ((i == end_item) ? end_bytes : 0);
@@ -439,7 +465,10 @@
 		/* get size of current item */
 		current_item_size = vi->vi_item_len;
 
-		/* do not take in calculation head part (from_bytes) of from-th item */
+		/*
+		 * do not take in calculation head part (from_bytes)
+		 * of from-th item
+		 */
 		current_item_size -=
 		    op_part_size(vi, 0 /*from start */ , start_bytes);
 
@@ -455,9 +484,11 @@
 			continue;
 		}
 
+		/*
+		 * virtual item length is longer than max size of item in
+		 * a node. It is impossible for direct item
+		 */
 		if (current_item_size > max_node_size) {
-			/* virtual item length is longer, than max size of item in
-			   a node. It is impossible for direct item */
 			RFALSE(is_direct_le_ih(vi->vi_ih),
 			       "vs-8110: "
 			       "direct item length is %d. It can not be longer than %d",
@@ -466,15 +497,18 @@
 			flow = 1;
 		}
 
+		/* as we do not split items, take new node and continue */
 		if (!flow) {
-			/* as we do not split items, take new node and continue */
 			needed_nodes++;
 			i--;
 			total_node_size = 0;
 			continue;
 		}
-		// calculate number of item units which fit into node being
-		// filled
+
+		/*
+		 * calculate number of item units which fit into node being
+		 * filled
+		 */
 		{
 			int free_space;
 
@@ -482,17 +516,17 @@
 			units =
 			    op_check_left(vi, free_space, start_bytes,
 					  skip_from_end);
+			/*
+			 * nothing fits into current node, take new
+			 * node and continue
+			 */
 			if (units == -1) {
-				/* nothing fits into current node, take new node and continue */
 				needed_nodes++, i--, total_node_size = 0;
 				continue;
 			}
 		}
 
 		/* something fits into the current node */
-		//if (snum012[3] != -1 || needed_nodes != 1)
-		//  reiserfs_panic (tb->tb_sb, "vs-8115: get_num_ver: too many nodes required");
-		//snum012[needed_nodes - 1 + 3] = op_unit_num (vi) - start_bytes - units;
 		start_bytes += units;
 		snum012[needed_nodes - 1 + 3] = units;
 
@@ -508,9 +542,11 @@
 		total_node_size = 0;
 	}
 
-	// sum012[4] (if it is not -1) contains number of units of which
-	// are to be in S1new, snum012[3] - to be in S0. They are supposed
-	// to be S1bytes and S2bytes correspondingly, so recalculate
+	/*
+	 * snum012[4] (if it is not -1) contains number of units which
+	 * are to be in S1new, snum012[3] - to be in S0. They are supposed
+	 * to be S1bytes and S2bytes correspondingly, so recalculate
+	 */
 	if (snum012[4] > 0) {
 		int split_item_num;
 		int bytes_to_r, bytes_to_l;
@@ -527,7 +563,7 @@
 		    ((split_item_positions[0] ==
 		      split_item_positions[1]) ? snum012[3] : 0);
 
-		// s2bytes
+		/* s2bytes */
 		snum012[4] =
 		    op_unit_num(&vn->vn_vi[split_item_num]) - snum012[4] -
 		    bytes_to_r - bytes_to_l - bytes_to_S1new;
@@ -555,7 +591,7 @@
 		    ((split_item_positions[0] == split_item_positions[1]
 		      && snum012[4] != -1) ? snum012[4] : 0);
 
-		// s1bytes
+		/* s1bytes */
 		snum012[3] =
 		    op_unit_num(&vn->vn_vi[split_item_num]) - snum012[3] -
 		    bytes_to_r - bytes_to_l - bytes_to_S2new;
@@ -565,7 +601,8 @@
 }
 
 
-/* Set parameters for balancing.
+/*
+ * Set parameters for balancing.
  * Performs write of results of analysis of balancing into structure tb,
  * where it will later be used by the functions that actually do the balancing.
  * Parameters:
@@ -575,11 +612,12 @@
  *	rnum	number of items from S[h] that must be shifted to R[h];
  *	blk_num	number of blocks that S[h] will be splitted into;
  *	s012	number of items that fall into splitted nodes.
- *	lbytes	number of bytes which flow to the left neighbor from the item that is not
- *		not shifted entirely
- *	rbytes	number of bytes which flow to the right neighbor from the item that is not
- *		not shifted entirely
- *	s1bytes	number of bytes which flow to the first  new node when S[0] splits (this number is contained in s012 array)
+ *	lbytes	number of bytes which flow to the left neighbor from the
+ *              item that is not shifted entirely
+ *	rbytes	number of bytes which flow to the right neighbor from the
+ *              item that is not not shifted entirely
+ *	s1bytes	number of bytes which flow to the first  new node when
+ *              S[0] splits (this number is contained in s012 array)
  */
 
 static void set_parameters(struct tree_balance *tb, int h, int lnum,
@@ -590,7 +628,8 @@
 	tb->rnum[h] = rnum;
 	tb->blknum[h] = blk_num;
 
-	if (h == 0) {		/* only for leaf level */
+	/* only for leaf level */
+	if (h == 0) {
 		if (s012 != NULL) {
 			tb->s0num = *s012++,
 			    tb->s1num = *s012++, tb->s2num = *s012++;
@@ -607,8 +646,10 @@
 	PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb);
 }
 
-/* check, does node disappear if we shift tb->lnum[0] items to left
-   neighbor and tb->rnum[0] to the right one. */
+/*
+ * check if node disappears if we shift tb->lnum[0] items to left
+ * neighbor and tb->rnum[0] to the right one.
+ */
 static int is_leaf_removable(struct tree_balance *tb)
 {
 	struct virtual_node *vn = tb->tb_vn;
@@ -616,8 +657,10 @@
 	int size;
 	int remain_items;
 
-	/* number of items, that will be shifted to left (right) neighbor
-	   entirely */
+	/*
+	 * number of items that will be shifted to left (right) neighbor
+	 * entirely
+	 */
 	to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0);
 	to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0);
 	remain_items = vn->vn_nr_item;
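
The whole-item accounting above reduces to a couple of subtractions; a
minimal userspace sketch, with every input value invented for
illustration:

	#include <stdio.h>

	int main(void)
	{
		int lnum = 3, lbytes = 120;	/* 2 whole items + part of a 3rd */
		int rnum = 2, rbytes = -1;	/* 2 whole items, nothing partial */
		int nr_item = 5;		/* items currently in S[0] */
		int to_left, to_right, remain;

		/* a partially shifted item does not leave S[0] */
		to_left = lnum - ((lbytes != -1) ? 1 : 0);
		to_right = rnum - ((rbytes != -1) ? 1 : 0);
		remain = nr_item - (to_left + to_right);

		printf("items remaining in S[0]: %d\n", remain);	/* 1 */
		return 0;
	}
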
@@ -625,18 +668,18 @@
 	/* how many items remain in S[0] after shiftings to neighbors */
 	remain_items -= (to_left + to_right);
 
+	/* all content of node can be shifted to neighbors */
 	if (remain_items < 1) {
-		/* all content of node can be shifted to neighbors */
 		set_parameters(tb, 0, to_left, vn->vn_nr_item - to_left, 0,
 			       NULL, -1, -1);
 		return 1;
 	}
 
+	/* S[0] is not removable */
 	if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1)
-		/* S[0] is not removable */
 		return 0;
 
-	/* check, whether we can divide 1 remaining item between neighbors */
+	/* check whether we can divide 1 remaining item between neighbors */
 
 	/* get size of remaining item (in item units) */
 	size = op_unit_num(&(vn->vn_vi[to_left]));
@@ -680,18 +723,23 @@
 		    && !comp_short_le_keys(&(ih->ih_key),
 					   internal_key(tb->CFR[0],
 							  tb->rkey[0])))
+			/*
+			 * Directory must be in correct state here: that is,
+			 * somewhere at the left side there should exist the
+			 * first directory item. But the item being deleted
+			 * cannot be that first one because its right
+			 * neighbor is an item of the same directory. (The
+			 * first item always gets deleted last.) So the
+			 * neighbors of the deleted item can be merged, and
+			 * we can save ih_size.
+			 */
 			if (is_direntry_le_ih(ih)) {
-				/* Directory must be in correct state here: that is
-				   somewhere at the left side should exist first directory
-				   item. But the item being deleted can not be that first
-				   one because its right neighbor is item of the same
-				   directory. (But first item always gets deleted in last
-				   turn). So, neighbors of deleted item can be merged, so
-				   we can save ih_size */
 				ih_size = IH_SIZE;
 
-				/* we might check that left neighbor exists and is of the
-				   same directory */
+				/*
+				 * we might check that left neighbor exists
+				 * and is of the same directory
+				 */
 				RFALSE(le_ih_k_offset(ih) == DOT_OFFSET,
 				       "vs-8130: first directory item can not be removed until directory is not empty");
 			}
@@ -770,7 +818,8 @@
 	}
 }
 
-/* Get new buffers for storing new nodes that are created while balancing.
+/*
+ * Get new buffers for storing new nodes that are created while balancing.
  * Returns:	SCHEDULE_OCCURRED - schedule occurred while the function worked;
  *	        CARRY_ON - schedule didn't occur while the function worked;
  *	        NO_DISK_SPACE - no disk space.
@@ -778,28 +827,33 @@
 /* The function is NOT SCHEDULE-SAFE! */
 static int get_empty_nodes(struct tree_balance *tb, int h)
 {
-	struct buffer_head *new_bh,
-	    *Sh = PATH_H_PBUFFER(tb->tb_path, h);
+	struct buffer_head *new_bh, *Sh = PATH_H_PBUFFER(tb->tb_path, h);
 	b_blocknr_t *blocknr, blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
-	int counter, number_of_freeblk, amount_needed,	/* number of needed empty blocks */
-	 retval = CARRY_ON;
+	int counter, number_of_freeblk;
+	int amount_needed;	/* number of needed empty blocks */
+	int retval = CARRY_ON;
 	struct super_block *sb = tb->tb_sb;
 
-	/* number_of_freeblk is the number of empty blocks which have been
-	   acquired for use by the balancing algorithm minus the number of
-	   empty blocks used in the previous levels of the analysis,
-	   number_of_freeblk = tb->cur_blknum can be non-zero if a schedule occurs
-	   after empty blocks are acquired, and the balancing analysis is
-	   then restarted, amount_needed is the number needed by this level
-	   (h) of the balancing analysis.
+	/*
+	 * number_of_freeblk is the number of empty blocks which have been
+	 * acquired for use by the balancing algorithm minus the number of
+	 * empty blocks used in the previous levels of the analysis;
+	 * number_of_freeblk = tb->cur_blknum can be non-zero if a schedule
+	 * occurs after empty blocks are acquired and the balancing analysis
+	 * is then restarted; amount_needed is the number needed by this
+	 * level (h) of the balancing analysis.
+	 *
+	 * Note that for systems with many processes writing, it would be
+	 * more layout optimal to calculate the total number needed by all
+	 * levels and then to run reiserfs_new_blocks to get all of them at
+	 * once.
+	 */
 
-	   Note that for systems with many processes writing, it would be
-	   more layout optimal to calculate the total number needed by all
-	   levels and then to run reiserfs_new_blocks to get all of them at once.  */
-
-	/* Initiate number_of_freeblk to the amount acquired prior to the restart of
-	   the analysis or 0 if not restarted, then subtract the amount needed
-	   by all of the levels of the tree below h. */
+	/*
+	 * Initialize number_of_freeblk to the amount acquired prior to the
+	 * restart of the analysis or 0 if not restarted, then subtract the
+	 * amount needed by all of the levels of the tree below h.
+	 */
 	/* blknum includes S[h], so we subtract 1 in this calculation */
 	for (counter = 0, number_of_freeblk = tb->cur_blknum;
 	     counter < h; counter++)
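
The free-block bookkeeping described in the comment above is plain
integer arithmetic; a standalone sketch of it, with hypothetical
blknum values rather than numbers from a real balance:

	#include <stdio.h>

	int main(void)
	{
		int blknum[] = { 2, 2, 1 };	/* splits per level, invented */
		int cur_blknum = 1;	/* acquired before an analysis restart */
		int h = 1;		/* level under analysis */
		int counter, number_of_freeblk, amount_needed;

		for (counter = 0, number_of_freeblk = cur_blknum;
		     counter < h; counter++)
			number_of_freeblk -= (blknum[counter] - 1);

		amount_needed = blknum[h] - 1;	/* blknum counts S[h] itself */
		if (amount_needed > number_of_freeblk)
			amount_needed -= number_of_freeblk;
		else
			amount_needed = 0;	/* enough acquired already */

		printf("need %d more empty block(s)\n", amount_needed);
		return 0;
	}
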
@@ -810,13 +864,19 @@
 	/* Allocate missing empty blocks. */
 	/* if Sh == 0  then we are getting a new root */
 	amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1;
-	/*  Amount_needed = the amount that we need more than the amount that we have. */
+	/*
+	 * amount_needed = the amount that we need more than the
+	 * amount that we have.
+	 */
 	if (amount_needed > number_of_freeblk)
 		amount_needed -= number_of_freeblk;
-	else			/* If we have enough already then there is nothing to do. */
+	else	/* If we have enough already then there is nothing to do. */
 		return CARRY_ON;
 
-	/* No need to check quota - is not allocated for blocks used for formatted nodes */
+	/*
+	 * No need to check quota - quota is not allocated for blocks
+	 * used for formatted nodes
+	 */
 	if (reiserfs_new_form_blocknrs(tb, blocknrs,
 				       amount_needed) == NO_DISK_SPACE)
 		return NO_DISK_SPACE;
@@ -849,8 +909,10 @@
 	return retval;
 }
 
-/* Get free space of the left neighbor, which is stored in the parent
- * node of the left neighbor.  */
+/*
+ * Get free space of the left neighbor, which is stored in the parent
+ * node of the left neighbor.
+ */
 static int get_lfree(struct tree_balance *tb, int h)
 {
 	struct buffer_head *l, *f;
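
The computation here is capacity minus the usage recorded in the
parent's per-child entry; a toy model, with a made-up capacity value
standing in for MAX_CHILD_SIZE:

	#include <stdio.h>

	struct disk_child {
		int dc_size;		/* bytes used inside the child */
	};

	static int child_free_space(int max_child_size, struct disk_child *dc)
	{
		return max_child_size - dc->dc_size;
	}

	int main(void)
	{
		struct disk_child dc = { .dc_size = 3900 };

		/* 4072 is an invented capacity: a 4K block minus a header */
		printf("free = %d\n", child_free_space(4072, &dc));
		return 0;
	}
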
@@ -870,7 +932,8 @@
 	return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));
 }
 
-/* Get free space of the right neighbor,
+/*
+ * Get free space of the right neighbor,
  * which is stored in the parent node of the right neighbor.
  */
 static int get_rfree(struct tree_balance *tb, int h)
@@ -916,7 +979,10 @@
 	       "vs-8165: F[h] (%b) or FL[h] (%b) is invalid",
 	       father, tb->FL[h]);
 
-	/* Get position of the pointer to the left neighbor into the left father. */
+	/*
+	 * Get the position of the pointer to the left neighbor
+	 * within the left father.
+	 */
 	left_neighbor_position = (father == tb->FL[h]) ?
 	    tb->lkey[h] : B_NR_ITEMS(tb->FL[h]);
 	/* Get left neighbor block number. */
@@ -940,17 +1006,20 @@
 
 static void decrement_key(struct cpu_key *key)
 {
-	// call item specific function for this key
+	/* call item specific function for this key */
 	item_ops[cpu_key_k_type(key)]->decrement_key(key);
 }
 
-/* Calculate far left/right parent of the left/right neighbor of the current node, that
- * is calculate the left/right (FL[h]/FR[h]) neighbor of the parent F[h].
+/*
+ * Calculate far left/right parent of the left/right neighbor of the
+ * current node, that is, calculate the left/right (FL[h]/FR[h]) neighbor
+ * of the parent F[h].
  * Calculate left/right common parent of the current node and L[h]/R[h].
  * Calculate left/right delimiting key position.
- * Returns:	PATH_INCORRECT   - path in the tree is not correct;
- 		SCHEDULE_OCCURRED - schedule occurred while the function worked;
- *	        CARRY_ON         - schedule didn't occur while the function worked;
+ * Returns:	PATH_INCORRECT    - path in the tree is not correct
+ *		SCHEDULE_OCCURRED - schedule occurred while the function worked
+ *	        CARRY_ON          - schedule didn't occur while the function
+ *				    worked
  */
 static int get_far_parent(struct tree_balance *tb,
 			  int h,
@@ -966,8 +1035,10 @@
 	    first_last_position = 0,
 	    path_offset = PATH_H_PATH_OFFSET(path, h);
 
-	/* Starting from F[h] go upwards in the tree, and look for the common
-	   ancestor of F[h], and its neighbor l/r, that should be obtained. */
+	/*
+	 * Starting from F[h] go upwards in the tree, and look for the common
+	 * ancestor of F[h], and its neighbor l/r, that should be obtained.
+	 */
 
 	counter = path_offset;
 
@@ -975,21 +1046,33 @@
 	       "PAP-8180: invalid path length");
 
 	for (; counter > FIRST_PATH_ELEMENT_OFFSET; counter--) {
-		/* Check whether parent of the current buffer in the path is really parent in the tree. */
+		/*
+		 * Check whether parent of the current buffer in the path
+		 * is really parent in the tree.
+		 */
 		if (!B_IS_IN_TREE
 		    (parent = PATH_OFFSET_PBUFFER(path, counter - 1)))
 			return REPEAT_SEARCH;
+
 		/* Check whether position in the parent is correct. */
 		if ((position =
 		     PATH_OFFSET_POSITION(path,
 					  counter - 1)) >
 		    B_NR_ITEMS(parent))
 			return REPEAT_SEARCH;
-		/* Check whether parent at the path really points to the child. */
+
+		/*
+		 * Check whether parent at the path really points
+		 * to the child.
+		 */
 		if (B_N_CHILD_NUM(parent, position) !=
 		    PATH_OFFSET_PBUFFER(path, counter)->b_blocknr)
 			return REPEAT_SEARCH;
-		/* Return delimiting key if position in the parent is not equal to first/last one. */
+
+		/*
+		 * Return delimiting key if position in the parent is not
+		 * equal to first/last one.
+		 */
 		if (c_lr_par == RIGHT_PARENTS)
 			first_last_position = B_NR_ITEMS(parent);
 		if (position != first_last_position) {
@@ -1002,7 +1085,10 @@
 
 	/* if we are in the root of the tree, then there is no common father */
 	if (counter == FIRST_PATH_ELEMENT_OFFSET) {
-		/* Check whether first buffer in the path is the root of the tree. */
+		/*
+		 * Check whether first buffer in the path is the
+		 * root of the tree.
+		 */
 		if (PATH_OFFSET_PBUFFER
 		    (tb->tb_path,
 		     FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
@@ -1031,8 +1117,11 @@
 		}
 	}
 
-	/* So, we got common parent of the current node and its left/right neighbor.
-	   Now we are geting the parent of the left/right neighbor. */
+	/*
+	 * So, we got common parent of the current node and its
+	 * left/right neighbor.  Now we are getting the parent of the
+	 * left/right neighbor.
+	 */
 
 	/* Form key to get parent of the left/right neighbor. */
 	le_key2cpu_key(&s_lr_father_key,
@@ -1050,7 +1139,7 @@
 	if (search_by_key
 	    (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father,
 	     h + 1) == IO_ERROR)
-		// path is released
+		/* path is released */
 		return IO_ERROR;
 
 	if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -1071,12 +1160,15 @@
 	return CARRY_ON;
 }
 
-/* Get parents of neighbors of node in the path(S[path_offset]) and common parents of
- * S[path_offset] and L[path_offset]/R[path_offset]: F[path_offset], FL[path_offset],
- * FR[path_offset], CFL[path_offset], CFR[path_offset].
- * Calculate numbers of left and right delimiting keys position: lkey[path_offset], rkey[path_offset].
- * Returns:	SCHEDULE_OCCURRED - schedule occurred while the function worked;
- *	        CARRY_ON - schedule didn't occur while the function worked;
+/*
+ * Get parents of neighbors of node in the path(S[path_offset]) and
+ * common parents of S[path_offset] and L[path_offset]/R[path_offset]:
+ * F[path_offset], FL[path_offset], FR[path_offset], CFL[path_offset],
+ * CFR[path_offset].
+ * Calculate numbers of left and right delimiting keys position:
+ * lkey[path_offset], rkey[path_offset].
+ * Returns:	SCHEDULE_OCCURRED - schedule occurred while the function worked
+ *	        CARRY_ON - schedule didn't occur while the function worked
  */
 static int get_parents(struct tree_balance *tb, int h)
 {
@@ -1088,8 +1180,11 @@
 
 	/* Current node is the root of the tree or will be root of the tree */
 	if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
-		/* The root can not have parents.
-		   Release nodes which previously were obtained as parents of the current node neighbors. */
+		/*
+		 * The root can not have parents.
+		 * Release nodes which previously were obtained as
+		 * parents of the current node neighbors.
+		 */
 		brelse(tb->FL[h]);
 		brelse(tb->CFL[h]);
 		brelse(tb->FR[h]);
@@ -1111,10 +1206,14 @@
 		get_bh(curf);
 		tb->lkey[h] = position - 1;
 	} else {
-		/* Calculate current parent of L[path_offset], which is the left neighbor of the current node.
-		   Calculate current common parent of L[path_offset] and the current node. Note that
-		   CFL[path_offset] not equal FL[path_offset] and CFL[path_offset] not equal F[path_offset].
-		   Calculate lkey[path_offset]. */
+		/*
+		 * Calculate current parent of L[path_offset], which is the
+		 * left neighbor of the current node.  Calculate current
+		 * common parent of L[path_offset] and the current node.
+		 * Note that CFL[path_offset] is not equal to FL[path_offset]
+		 * and CFL[path_offset] is not equal to F[path_offset].
+		 * Calculate lkey[path_offset].
+		 */
 		if ((ret = get_far_parent(tb, h + 1, &curf,
 						  &curcf,
 						  LEFT_PARENTS)) != CARRY_ON)
@@ -1130,19 +1229,22 @@
 	       (curcf && !B_IS_IN_TREE(curcf)),
 	       "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf);
 
-/* Get parent FR[h] of R[h]. */
+	/* Get parent FR[h] of R[h]. */
 
-/* Current node is the last child of F[h]. FR[h] != F[h]. */
+	/* Current node is the last child of F[h]. FR[h] != F[h]. */
 	if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) {
-/* Calculate current parent of R[h], which is the right neighbor of F[h].
-   Calculate current common parent of R[h] and current node. Note that CFR[h]
-   not equal FR[path_offset] and CFR[h] not equal F[h]. */
+		/*
+		 * Calculate current parent of R[h], which is the right
+		 * neighbor of F[h].  Calculate current common parent of
+		 * R[h] and current node. Note that CFR[h] is not equal to
+		 * FR[path_offset] and CFR[h] is not equal to F[h].
+		 */
 		if ((ret =
 		     get_far_parent(tb, h + 1, &curf, &curcf,
 				    RIGHT_PARENTS)) != CARRY_ON)
 			return ret;
 	} else {
-/* Current node is not the last child of its parent F[h]. */
+		/* Current node is not the last child of its parent F[h]. */
 		curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
 		curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
 		get_bh(curf);
@@ -1165,8 +1267,10 @@
 	return CARRY_ON;
 }
 
-/* it is possible to remove node as result of shiftings to
-   neighbors even when we insert or paste item. */
+/*
+ * it is possible to remove the node as a result of shifting to
+ * neighbors even when we insert or paste an item.
+ */
 static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
 				      struct tree_balance *tb, int h)
 {
@@ -1189,7 +1293,8 @@
 	      && op_is_left_mergeable(r_key, Sh->b_size)) ? IH_SIZE : 0)
 	    + ((h) ? KEY_SIZE : 0)) {
 		/* node can not be removed */
-		if (sfree >= levbytes) {	/* new item fits into node S[h] without any shifting */
+		if (sfree >= levbytes) {
+			/* new item fits into node S[h] without any shifting */
 			if (!h)
 				tb->s0num =
 				    B_NR_ITEMS(Sh) +
@@ -1202,7 +1307,8 @@
 	return !NO_BALANCING_NEEDED;
 }
 
-/* Check whether current node S[h] is balanced when increasing its size by
+/*
+ * Check whether current node S[h] is balanced when increasing its size by
  * Inserting or Pasting.
  * Calculate parameters for balancing for current level h.
  * Parameters:
@@ -1219,39 +1325,48 @@
 static int ip_check_balance(struct tree_balance *tb, int h)
 {
 	struct virtual_node *vn = tb->tb_vn;
-	int levbytes,		/* Number of bytes that must be inserted into (value
-				   is negative if bytes are deleted) buffer which
-				   contains node being balanced.  The mnemonic is
-				   that the attempted change in node space used level
-				   is levbytes bytes. */
-	 ret;
+	/*
+	 * Number of bytes that must be inserted into the buffer which
+	 * contains the node being balanced (the value is negative if
+	 * bytes are deleted).
+	 * The mnemonic is that the attempted change in node space used
+	 * level is levbytes bytes.
+	 */
+	int levbytes;
+	int ret;
 
 	int lfree, sfree, rfree /* free space in L, S and R */ ;
 
-	/* nver is short for number of vertixes, and lnver is the number if
-	   we shift to the left, rnver is the number if we shift to the
-	   right, and lrnver is the number if we shift in both directions.
-	   The goal is to minimize first the number of vertixes, and second,
-	   the number of vertixes whose contents are changed by shifting,
-	   and third the number of uncached vertixes whose contents are
-	   changed by shifting and must be read from disk.  */
+	/*
+	 * nver is short for number of vertices, and lnver is the number if
+	 * we shift to the left, rnver is the number if we shift to the
+	 * right, and lrnver is the number if we shift in both directions.
+	 * The goal is to minimize first the number of vertices, and second,
+	 * the number of vertices whose contents are changed by shifting,
+	 * and third the number of uncached vertices whose contents are
+	 * changed by shifting and must be read from disk.
+	 */
 	int nver, lnver, rnver, lrnver;
 
-	/* used at leaf level only, S0 = S[0] is the node being balanced,
-	   sInum [ I = 0,1,2 ] is the number of items that will
-	   remain in node SI after balancing.  S1 and S2 are new
-	   nodes that might be created. */
-
-	/* we perform 8 calls to get_num_ver().  For each call we calculate five parameters.
-	   where 4th parameter is s1bytes and 5th - s2bytes
+	/*
+	 * used at leaf level only, S0 = S[0] is the node being balanced,
+	 * sInum [ I = 0,1,2 ] is the number of items that will
+	 * remain in node SI after balancing.  S1 and S2 are new
+	 * nodes that might be created.
 	 */
-	short snum012[40] = { 0, };	/* s0num, s1num, s2num for 8 cases
-					   0,1 - do not shift and do not shift but bottle
-					   2 - shift only whole item to left
-					   3 - shift to left and bottle as much as possible
-					   4,5 - shift to right (whole items and as much as possible
-					   6,7 - shift to both directions (whole items and as much as possible)
-					 */
+
+	/*
+	 * we perform 8 calls to get_num_ver().  For each call we
+	 * calculate five parameters, where the 4th parameter is s1bytes
+	 * and the 5th is s2bytes.
+	 *
+	 * s0num, s1num, s2num for 8 cases
+	 * 0,1 - do not shift and do not shift but bottle
+	 * 2   - shift only whole item to left
+	 * 3   - shift to left and bottle as much as possible
+	 * 4,5 - shift to right (whole items and as much as possible)
+	 * 6,7 - shift to both directions (whole items and as much as possible)
+	 */
+	short snum012[40] = { 0, };
 
 	/* Sh is the node whose balance is currently being checked */
 	struct buffer_head *Sh;
@@ -1265,9 +1380,10 @@
 			reiserfs_panic(tb->tb_sb, "vs-8210",
 				       "S[0] can not be 0");
 		switch (ret = get_empty_nodes(tb, h)) {
+		/* no balancing for higher levels needed */
 		case CARRY_ON:
 			set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
-			return NO_BALANCING_NEEDED;	/* no balancing for higher levels needed */
+			return NO_BALANCING_NEEDED;
 
 		case NO_DISK_SPACE:
 		case REPEAT_SEARCH:
@@ -1278,7 +1394,9 @@
 		}
 	}
 
-	if ((ret = get_parents(tb, h)) != CARRY_ON)	/* get parents of S[h] neighbors. */
+	/* get parents of S[h] neighbors. */
+	ret = get_parents(tb, h);
+	if (ret != CARRY_ON)
 		return ret;
 
 	sfree = B_FREE_SPACE(Sh);
@@ -1287,38 +1405,44 @@
 	rfree = get_rfree(tb, h);
 	lfree = get_lfree(tb, h);
 
+	/* and new item fits into node S[h] without any shifting */
 	if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) ==
 	    NO_BALANCING_NEEDED)
-		/* and new item fits into node S[h] without any shifting */
 		return NO_BALANCING_NEEDED;
 
 	create_virtual_node(tb, h);
 
 	/*
-	   determine maximal number of items we can shift to the left neighbor (in tb structure)
-	   and the maximal number of bytes that can flow to the left neighbor
-	   from the left most liquid item that cannot be shifted from S[0] entirely (returned value)
+	 * determine maximal number of items we can shift to the left
+	 * neighbor (in tb structure) and the maximal number of bytes
+	 * that can flow to the left neighbor from the left most liquid
+	 * item that cannot be shifted from S[0] entirely (returned value)
 	 */
 	check_left(tb, h, lfree);
 
 	/*
-	   determine maximal number of items we can shift to the right neighbor (in tb structure)
-	   and the maximal number of bytes that can flow to the right neighbor
-	   from the right most liquid item that cannot be shifted from S[0] entirely (returned value)
+	 * determine maximal number of items we can shift to the right
+	 * neighbor (in tb structure) and the maximal number of bytes
+	 * that can flow to the right neighbor from the right most liquid
+	 * item that cannot be shifted from S[0] entirely (returned value)
 	 */
 	check_right(tb, h, rfree);
 
-	/* all contents of internal node S[h] can be moved into its
-	   neighbors, S[h] will be removed after balancing */
+	/*
+	 * all contents of internal node S[h] can be moved into its
+	 * neighbors, S[h] will be removed after balancing
+	 */
 	if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) {
 		int to_r;
 
-		/* Since we are working on internal nodes, and our internal
-		   nodes have fixed size entries, then we can balance by the
-		   number of items rather than the space they consume.  In this
-		   routine we set the left node equal to the right node,
-		   allowing a difference of less than or equal to 1 child
-		   pointer. */
+		/*
+		 * Since we are working on internal nodes, and our internal
+		 * nodes have fixed size entries, we can balance by the
+		 * number of items rather than the space they consume.  In this
+		 * routine we set the left node equal to the right node,
+		 * allowing a difference of less than or equal to 1 child
+		 * pointer.
+		 */
 		to_r =
 		    ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
 		     vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
@@ -1328,7 +1452,10 @@
 		return CARRY_ON;
 	}
 
-	/* this checks balance condition, that any two neighboring nodes can not fit in one node */
+	/*
+	 * this checks the balance condition: any two neighboring nodes
+	 * cannot fit in one node
+	 */
 	RFALSE(h &&
 	       (tb->lnum[h] >= vn->vn_nr_item + 1 ||
 		tb->rnum[h] >= vn->vn_nr_item + 1),
@@ -1337,16 +1464,22 @@
 		      (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))),
 	       "vs-8225: tree is not balanced on leaf level");
 
-	/* all contents of S[0] can be moved into its neighbors
-	   S[0] will be removed after balancing. */
+	/*
+	 * all contents of S[0] can be moved into its neighbors
+	 * S[0] will be removed after balancing.
+	 */
 	if (!h && is_leaf_removable(tb))
 		return CARRY_ON;
 
-	/* why do we perform this check here rather than earlier??
-	   Answer: we can win 1 node in some cases above. Moreover we
-	   checked it above, when we checked, that S[0] is not removable
-	   in principle */
-	if (sfree >= levbytes) {	/* new item fits into node S[h] without any shifting */
+	/*
+	 * why do we perform this check here rather than earlier?
+	 * Answer: we can win 1 node in some cases above. Moreover we
+	 * checked it above, when we checked that S[0] is not removable
+	 * in principle
+	 */
+
+	/* new item fits into node S[h] without any shifting */
+	if (sfree >= levbytes) {
 		if (!h)
 			tb->s0num = vn->vn_nr_item;
 		set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
@@ -1355,18 +1488,19 @@
 
 	{
 		int lpar, rpar, nset, lset, rset, lrset;
-		/*
-		 * regular overflowing of the node
-		 */
+		/* regular overflowing of the node */
 
-		/* get_num_ver works in 2 modes (FLOW & NO_FLOW)
-		   lpar, rpar - number of items we can shift to left/right neighbor (including splitting item)
-		   nset, lset, rset, lrset - shows, whether flowing items give better packing
+		/*
+		 * get_num_ver works in 2 modes (FLOW & NO_FLOW)
+		 * lpar, rpar - number of items we can shift to left/right
+		 *              neighbor (including splitting item)
+		 * nset, lset, rset, lrset - show whether flowing items
+		 *                           give better packing
 		 */
 #define FLOW 1
 #define NO_FLOW 0		/* do not any splitting */
 
-		/* we choose one the following */
+		/* we choose one of the following */
 #define NOTHING_SHIFT_NO_FLOW	0
 #define NOTHING_SHIFT_FLOW	5
 #define LEFT_SHIFT_NO_FLOW	10
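
The snum012[] layout the comments above describe is five slots per
case (s0num, s1num, s2num, s1bytes, s2bytes) at the offsets defined
here; a small userspace sketch, with all values invented:

	#include <stdio.h>

	#define LEFT_SHIFT_NO_FLOW	10	/* as in the defines above */

	int main(void)
	{
		short snum012[40] = { 0 };

		/* pretend get_num_ver() filled in this case */
		snum012[LEFT_SHIFT_NO_FLOW + 0] = 7;	/* s0num: items left in S0 */
		snum012[LEFT_SHIFT_NO_FLOW + 1] = 4;	/* s1num: items in S1new */
		snum012[LEFT_SHIFT_NO_FLOW + 2] = 0;	/* s2num: items in S2new */
		snum012[LEFT_SHIFT_NO_FLOW + 3] = -1;	/* s1bytes: no split item */
		snum012[LEFT_SHIFT_NO_FLOW + 4] = -1;	/* s2bytes: no split item */

		printf("s1bytes for this case: %d\n",
		       snum012[LEFT_SHIFT_NO_FLOW + 3]);
		return 0;
	}
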
@@ -1379,10 +1513,13 @@
 		lpar = tb->lnum[h];
 		rpar = tb->rnum[h];
 
-		/* calculate number of blocks S[h] must be split into when
-		   nothing is shifted to the neighbors,
-		   as well as number of items in each part of the split node (s012 numbers),
-		   and number of bytes (s1bytes) of the shared drop which flow to S1 if any */
+		/*
+		 * calculate number of blocks S[h] must be split into when
+		 * nothing is shifted to the neighbors, as well as number of
+		 * items in each part of the split node (s012 numbers),
+		 * and number of bytes (s1bytes) of the shared drop which
+		 * flow to S1 if any
+		 */
 		nset = NOTHING_SHIFT_NO_FLOW;
 		nver = get_num_ver(vn->vn_mode, tb, h,
 				   0, -1, h ? vn->vn_nr_item : 0, -1,
@@ -1391,7 +1528,10 @@
 		if (!h) {
 			int nver1;
 
-			/* note, that in this case we try to bottle between S[0] and S1 (S1 - the first new node) */
+			/*
+			 * note, that in this case we try to bottle
+			 * between S[0] and S1 (S1 - the first new node)
+			 */
 			nver1 = get_num_ver(vn->vn_mode, tb, h,
 					    0, -1, 0, -1,
 					    snum012 + NOTHING_SHIFT_FLOW, FLOW);
@@ -1399,11 +1539,13 @@
 				nset = NOTHING_SHIFT_FLOW, nver = nver1;
 		}
 
-		/* calculate number of blocks S[h] must be split into when
-		   l_shift_num first items and l_shift_bytes of the right most
-		   liquid item to be shifted are shifted to the left neighbor,
-		   as well as number of items in each part of the splitted node (s012 numbers),
-		   and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+		/*
+		 * calculate number of blocks S[h] must be split into when
+		 * l_shift_num first items and l_shift_bytes of the right
+		 * most liquid item to be shifted are shifted to the left
+		 * neighbor, as well as number of items in each part of the
+		 * split node (s012 numbers), and number of bytes
+		 * (s1bytes) of the shared drop which flow to S1 if any
 		 */
 		lset = LEFT_SHIFT_NO_FLOW;
 		lnver = get_num_ver(vn->vn_mode, tb, h,
@@ -1422,11 +1564,13 @@
 				lset = LEFT_SHIFT_FLOW, lnver = lnver1;
 		}
 
-		/* calculate number of blocks S[h] must be split into when
-		   r_shift_num first items and r_shift_bytes of the left most
-		   liquid item to be shifted are shifted to the right neighbor,
-		   as well as number of items in each part of the splitted node (s012 numbers),
-		   and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+		/*
+		 * calculate number of blocks S[h] must be split into when
+		 * r_shift_num first items and r_shift_bytes of the left most
+		 * liquid item to be shifted are shifted to the right neighbor,
+		 * as well as number of items in each part of the split
+		 * node (s012 numbers), and number of bytes (s1bytes) of the
+		 * shared drop which flow to S1 if any
 		 */
 		rset = RIGHT_SHIFT_NO_FLOW;
 		rnver = get_num_ver(vn->vn_mode, tb, h,
@@ -1451,10 +1595,12 @@
 				rset = RIGHT_SHIFT_FLOW, rnver = rnver1;
 		}
 
-		/* calculate number of blocks S[h] must be split into when
-		   items are shifted in both directions,
-		   as well as number of items in each part of the splitted node (s012 numbers),
-		   and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+		/*
+		 * calculate number of blocks S[h] must be split into when
+		 * items are shifted in both directions, as well as number
+		 * of items in each part of the split node (s012 numbers),
+		 * and number of bytes (s1bytes) of the shared drop which
+		 * flow to S1 if any
 		 */
 		lrset = LR_SHIFT_NO_FLOW;
 		lrnver = get_num_ver(vn->vn_mode, tb, h,
@@ -1481,10 +1627,12 @@
 				lrset = LR_SHIFT_FLOW, lrnver = lrnver1;
 		}
 
-		/* Our general shifting strategy is:
-		   1) to minimized number of new nodes;
-		   2) to minimized number of neighbors involved in shifting;
-		   3) to minimized number of disk reads; */
+		/*
+		 * Our general shifting strategy is:
+		 * 1) to minimize the number of new nodes;
+		 * 2) to minimize the number of neighbors involved in shifting;
+		 * 3) to minimize the number of disk reads;
+		 */
 
 		/* we can win TWO or ONE nodes by shifting in both directions */
 		if (lrnver < lnver && lrnver < rnver) {
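
The selection cascade that follows can be modelled in a few lines;
the node counts below are hypothetical and choose() merely mirrors
the order of the checks:

	#include <stdio.h>

	static const char *choose(int nver, int lnver, int rnver, int lrnver)
	{
		if (lrnver < lnver && lrnver < rnver)
			return "shift in both directions";
		if (nver == lrnver)
			return "do not shift";
		if (lnver < rnver)
			return "shift left";
		if (lnver > rnver)
			return "shift right";
		return "tie: use cached left neighbor, else shift right";
	}

	int main(void)
	{
		/* hypothetical node counts for the four plans */
		printf("%s\n", choose(3, 3, 2, 2));	/* shift right */
		return 0;
	}
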
@@ -1508,42 +1656,59 @@
 			return CARRY_ON;
 		}
 
-		/* if shifting doesn't lead to better packing then don't shift */
+		/*
+		 * if shifting doesn't lead to better packing
+		 * then don't shift
+		 */
 		if (nver == lrnver) {
 			set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1,
 				       -1);
 			return CARRY_ON;
 		}
 
-		/* now we know that for better packing shifting in only one
-		   direction either to the left or to the right is required */
+		/*
+		 * now we know that for better packing shifting in only one
+		 * direction either to the left or to the right is required
+		 */
 
-		/*  if shifting to the left is better than shifting to the right */
+		/*
+		 * if shifting to the left is better than
+		 * shifting to the right
+		 */
 		if (lnver < rnver) {
 			SET_PAR_SHIFT_LEFT;
 			return CARRY_ON;
 		}
 
-		/* if shifting to the right is better than shifting to the left */
+		/*
+		 * if shifting to the right is better than
+		 * shifting to the left
+		 */
 		if (lnver > rnver) {
 			SET_PAR_SHIFT_RIGHT;
 			return CARRY_ON;
 		}
 
-		/* now shifting in either direction gives the same number
-		   of nodes and we can make use of the cached neighbors */
+		/*
+		 * now shifting in either direction gives the same number
+		 * of nodes and we can make use of the cached neighbors
+		 */
 		if (is_left_neighbor_in_cache(tb, h)) {
 			SET_PAR_SHIFT_LEFT;
 			return CARRY_ON;
 		}
 
-		/* shift to the right independently on whether the right neighbor in cache or not */
+		/*
+		 * shift to the right regardless of whether the
+		 * right neighbor is in cache or not
+		 */
 		SET_PAR_SHIFT_RIGHT;
 		return CARRY_ON;
 	}
 }
 
-/* Check whether current node S[h] is balanced when Decreasing its size by
+/*
+ * Check whether current node S[h] is balanced when Decreasing its size by
  * Deleting or Cutting for INTERNAL node of S+tree.
  * Calculate parameters for balancing for current level h.
  * Parameters:
@@ -1563,8 +1728,10 @@
 {
 	struct virtual_node *vn = tb->tb_vn;
 
-	/* Sh is the node whose balance is currently being checked,
-	   and Fh is its father.  */
+	/*
+	 * Sh is the node whose balance is currently being checked,
+	 * and Fh is its father.
+	 */
 	struct buffer_head *Sh, *Fh;
 	int maxsize, ret;
 	int lfree, rfree /* free space in L and R */ ;
@@ -1574,19 +1741,25 @@
 
 	maxsize = MAX_CHILD_SIZE(Sh);
 
-/*   using tb->insert_size[h], which is negative in this case, create_virtual_node calculates: */
-/*   new_nr_item = number of items node would have if operation is */
-/* 	performed without balancing (new_nr_item); */
+	/*
+	 * using tb->insert_size[h], which is negative in this case,
+	 * create_virtual_node calculates:
+	 * new_nr_item = number of items the node would have if the
+	 * operation is performed without balancing
+	 */
 	create_virtual_node(tb, h);
 
 	if (!Fh) {		/* S[h] is the root. */
+		/* no balancing for higher levels needed */
 		if (vn->vn_nr_item > 0) {
 			set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
-			return NO_BALANCING_NEEDED;	/* no balancing for higher levels needed */
+			return NO_BALANCING_NEEDED;
 		}
-		/* new_nr_item == 0.
+		/*
+		 * new_nr_item == 0.
 		 * Current root will be deleted resulting in
-		 * decrementing the tree height. */
+		 * decrementing the tree height.
+		 */
 		set_parameters(tb, h, 0, 0, 0, NULL, -1, -1);
 		return CARRY_ON;
 	}
@@ -1602,12 +1775,18 @@
 	check_left(tb, h, lfree);
 	check_right(tb, h, rfree);
 
-	if (vn->vn_nr_item >= MIN_NR_KEY(Sh)) {	/* Balance condition for the internal node is valid.
-						 * In this case we balance only if it leads to better packing. */
-		if (vn->vn_nr_item == MIN_NR_KEY(Sh)) {	/* Here we join S[h] with one of its neighbors,
-							 * which is impossible with greater values of new_nr_item. */
+	/*
+	 * Balance condition for the internal node is valid.
+	 * In this case we balance only if it leads to better packing.
+	 */
+	if (vn->vn_nr_item >= MIN_NR_KEY(Sh)) {
+		/*
+		 * Here we join S[h] with one of its neighbors,
+		 * which is impossible with greater values of new_nr_item.
+		 */
+		if (vn->vn_nr_item == MIN_NR_KEY(Sh)) {
+			/* All contents of S[h] can be moved to L[h]. */
 			if (tb->lnum[h] >= vn->vn_nr_item + 1) {
-				/* All contents of S[h] can be moved to L[h]. */
 				int n;
 				int order_L;
 
@@ -1623,8 +1802,8 @@
 				return CARRY_ON;
 			}
 
+			/* All contents of S[h] can be moved to R[h]. */
 			if (tb->rnum[h] >= vn->vn_nr_item + 1) {
-				/* All contents of S[h] can be moved to R[h]. */
 				int n;
 				int order_R;
 
@@ -1641,8 +1820,11 @@
 			}
 		}
 
+		/*
+		 * All contents of S[h] can be moved to the neighbors
+		 * (L[h] & R[h]).
+		 */
 		if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
-			/* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
 			int to_r;
 
 			to_r =
@@ -1659,7 +1841,10 @@
 		return NO_BALANCING_NEEDED;
 	}
 
-	/* Current node contain insufficient number of items. Balancing is required. */
+	/*
+	 * Current node contains an insufficient number of items.
+	 * Balancing is required.
+	 */
 	/* Check whether we can merge S[h] with left neighbor. */
 	if (tb->lnum[h] >= vn->vn_nr_item + 1)
 		if (is_left_neighbor_in_cache(tb, h)
@@ -1726,7 +1911,8 @@
 	return CARRY_ON;
 }
 
-/* Check whether current node S[h] is balanced when Decreasing its size by
+/*
+ * Check whether current node S[h] is balanced when Decreasing its size by
  * Deleting or Truncating for LEAF node of S+tree.
  * Calculate parameters for balancing for current level h.
  * Parameters:
@@ -1743,15 +1929,21 @@
 {
 	struct virtual_node *vn = tb->tb_vn;
 
-	/* Number of bytes that must be deleted from
-	   (value is negative if bytes are deleted) buffer which
-	   contains node being balanced.  The mnemonic is that the
-	   attempted change in node space used level is levbytes bytes. */
+	/*
+	 * Number of bytes that must be deleted from the buffer which
+	 * contains the node being balanced (the value is negative if
+	 * bytes are deleted).  The mnemonic is that the
+	 * attempted change in node space used level is levbytes bytes.
+	 */
 	int levbytes;
+
 	/* the maximal item size */
 	int maxsize, ret;
-	/* S0 is the node whose balance is currently being checked,
-	   and F0 is its father.  */
+
+	/*
+	 * S0 is the node whose balance is currently being checked,
+	 * and F0 is its father.
+	 */
 	struct buffer_head *S0, *F0;
 	int lfree, rfree /* free space in L and R */ ;
 
@@ -1784,9 +1976,11 @@
 	if (are_leaves_removable(tb, lfree, rfree))
 		return CARRY_ON;
 
-	/* determine maximal number of items we can shift to the left/right  neighbor
-	   and the maximal number of bytes that can flow to the left/right neighbor
-	   from the left/right most liquid item that cannot be shifted from S[0] entirely
+	/*
+	 * determine maximal number of items we can shift to the left/right
+	 * neighbor and the maximal number of bytes that can flow to the
+	 * left/right neighbor from the left/right most liquid item that
+	 * cannot be shifted from S[0] entirely
 	 */
 	check_left(tb, h, lfree);
 	check_right(tb, h, rfree);
@@ -1810,7 +2004,10 @@
 		return CARRY_ON;
 	}
 
-	/* All contents of S[0] can be moved to the neighbors (L[0] & R[0]). Set parameters and return */
+	/*
+	 * All contents of S[0] can be moved to the neighbors (L[0] & R[0]).
+	 * Set parameters and return
+	 */
 	if (is_leaf_removable(tb))
 		return CARRY_ON;
 
@@ -1820,7 +2017,8 @@
 	return NO_BALANCING_NEEDED;
 }
 
-/* Check whether current node S[h] is balanced when Decreasing its size by
+/*
+ * Check whether current node S[h] is balanced when Decreasing its size by
  * Deleting or Cutting.
  * Calculate parameters for balancing for current level h.
  * Parameters:
@@ -1844,15 +2042,16 @@
 		return dc_check_balance_leaf(tb, h);
 }
 
-/* Check whether current node S[h] is balanced.
+/*
+ * Check whether current node S[h] is balanced.
  * Calculate parameters for balancing for current level h.
  * Parameters:
  *
  *	tb	tree_balance structure:
  *
- *              tb is a large structure that must be read about in the header file
- *              at the same time as this procedure if the reader is to successfully
- *              understand this procedure
+ *              tb is a large structure that must be read about in the header
+ *		file at the same time as this procedure if the reader is
+ *		to successfully understand this procedure
  *
  *	h	current level of the node;
  *	inum	item number in S[h];
@@ -1882,8 +2081,8 @@
 	RFALSE(mode == M_INSERT && !vn->vn_ins_ih,
 	       "vs-8255: ins_ih can not be 0 in insert mode");
 
+	/* Calculate balance parameters when size of node is increasing. */
 	if (tb->insert_size[h] > 0)
-		/* Calculate balance parameters when size of node is increasing. */
 		return ip_check_balance(tb, h);
 
 	/* Calculate balance parameters when  size of node is decreasing. */
@@ -1911,21 +2110,23 @@
 			PATH_OFFSET_POSITION(path, path_offset - 1) = 0;
 			return CARRY_ON;
 		}
-		return REPEAT_SEARCH;	/* Root is changed and we must recalculate the path. */
+		/* Root is changed and we must recalculate the path. */
+		return REPEAT_SEARCH;
 	}
 
+	/* Parent in the path is not in the tree. */
 	if (!B_IS_IN_TREE
 	    (bh = PATH_OFFSET_PBUFFER(path, path_offset - 1)))
-		return REPEAT_SEARCH;	/* Parent in the path is not in the tree. */
+		return REPEAT_SEARCH;
 
 	if ((position =
 	     PATH_OFFSET_POSITION(path,
 				  path_offset - 1)) > B_NR_ITEMS(bh))
 		return REPEAT_SEARCH;
 
+	/* Parent in the path is not parent of the current node in the tree. */
 	if (B_N_CHILD_NUM(bh, position) !=
 	    PATH_OFFSET_PBUFFER(path, path_offset)->b_blocknr)
-		/* Parent in the path is not parent of the current node in the tree. */
 		return REPEAT_SEARCH;
 
 	if (buffer_locked(bh)) {
@@ -1936,10 +2137,15 @@
 			return REPEAT_SEARCH;
 	}
 
-	return CARRY_ON;	/* Parent in the path is unlocked and really parent of the current node.  */
+	/*
+	 * Parent in the path is unlocked and really parent
+	 * of the current node.
+	 */
+	return CARRY_ON;
 }
 
-/* Using lnum[h] and rnum[h] we should determine what neighbors
+/*
+ * Using lnum[h] and rnum[h] we should determine what neighbors
  * of S[h] we
  * need in order to balance S[h], and get them if necessary.
  * Returns:	SCHEDULE_OCCURRED - schedule occurred while the function worked;
@@ -1997,7 +2203,7 @@
 	}
 
 	/* We need right neighbor to balance S[path_offset]. */
-	if (tb->rnum[h]) {	/* We need right neighbor to balance S[path_offset]. */
+	if (tb->rnum[h]) {
 		PROC_INFO_INC(sb, need_r_neighbor[h]);
 		bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
 
@@ -2053,9 +2259,11 @@
 		(max_num_of_entries - 1) * sizeof(__u16));
 }
 
-/* maybe we should fail balancing we are going to perform when kmalloc
-   fails several times. But now it will loop until kmalloc gets
-   required memory */
+/*
+ * maybe we should fail the balancing we are going to perform when
+ * kmalloc fails several times. But for now it will loop until kmalloc
+ * gets the required memory
+ */
 static int get_mem_for_virtual_node(struct tree_balance *tb)
 {
 	int check_fs = 0;
@@ -2064,8 +2272,8 @@
 
 	size = get_virtual_node_size(tb->tb_sb, PATH_PLAST_BUFFER(tb->tb_path));
 
+	/* we have to allocate more memory for virtual node */
 	if (size > tb->vn_buf_size) {
-		/* we have to allocate more memory for virtual node */
 		if (tb->vn_buf) {
 			/* free memory allocated before */
 			kfree(tb->vn_buf);
@@ -2079,10 +2287,12 @@
 		/* get memory for virtual item */
 		buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
 		if (!buf) {
-			/* getting memory with GFP_KERNEL priority may involve
-			   balancing now (due to indirect_to_direct conversion on
-			   dcache shrinking). So, release path and collected
-			   resources here */
+			/*
+			 * getting memory with GFP_KERNEL priority may involve
+			 * balancing now (due to indirect_to_direct conversion
+			 * on dcache shrinking). So, release path and collected
+			 * resources here
+			 */
 			free_buffers_in_tb(tb);
 			buf = kmalloc(size, GFP_NOFS);
 			if (!buf) {
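
The two-step allocation above - an opportunistic attempt first, then a
blocking retry once held resources have been released - has a simple
userspace analogue; release_resources() is a made-up stand-in for
free_buffers_in_tb():

	#include <stdio.h>
	#include <stdlib.h>

	static void release_resources(void)
	{
		/* stand-in for free_buffers_in_tb(tb) */
	}

	static void *alloc_virtual_node_buf(size_t size)
	{
		void *buf = malloc(size);	/* the GFP_ATOMIC-style try */

		if (!buf) {
			release_resources();	/* now it is safe to block */
			buf = malloc(size);	/* the GFP_NOFS-style retry */
		}
		return buf;
	}

	int main(void)
	{
		void *buf = alloc_virtual_node_buf(4096);

		printf(buf ? "allocated\n" : "out of memory\n");
		free(buf);
		return 0;
	}
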
@@ -2168,8 +2378,10 @@
 		for (i = tb->tb_path->path_length;
 		     !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i--) {
 			if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) {
-				/* if I understand correctly, we can only be sure the last buffer
-				 ** in the path is in the tree --clm
+				/*
+				 * if I understand correctly, we can only
+				 * be sure the last buffer in the path is
+				 * in the tree --clm
 				 */
 #ifdef CONFIG_REISERFS_CHECK
 				if (PATH_PLAST_BUFFER(tb->tb_path) ==
@@ -2256,13 +2468,15 @@
 				}
 			}
 		}
-		/* as far as I can tell, this is not required.  The FEB list seems
-		 ** to be full of newly allocated nodes, which will never be locked,
-		 ** dirty, or anything else.
-		 ** To be safe, I'm putting in the checks and waits in.  For the moment,
-		 ** they are needed to keep the code in journal.c from complaining
-		 ** about the buffer.  That code is inside CONFIG_REISERFS_CHECK as well.
-		 ** --clm
+
+		/*
+		 * as far as I can tell, this is not required.  The FEB list
+		 * seems to be full of newly allocated nodes, which will
+		 * never be locked, dirty, or anything else.
+		 * To be safe, I'm putting the checks and waits in.
+		 * For the moment, they are needed to keep the code in
+		 * journal.c from complaining about the buffer.
+		 * That code is inside CONFIG_REISERFS_CHECK as well.  --clm
 		 */
 		for (i = 0; !locked && i < MAX_FEB_SIZE; i++) {
 			if (tb->FEB[i]) {
@@ -2300,7 +2514,8 @@
 	return CARRY_ON;
 }
 
-/* Prepare for balancing, that is
+/*
+ * Prepare for balancing, that is
  *	get all necessary parents, and neighbors;
  *	analyze what and where should be moved;
  *	get sufficient number of new nodes;
@@ -2309,13 +2524,14 @@
  * When ported to SMP kernels, only at the last moment after all needed nodes
  * are collected in cache, will the resources be locked using the usual
  * textbook ordered lock acquisition algorithms.  Note that ensuring that
- * this code neither write locks what it does not need to write lock nor locks out of order
- * will be a pain in the butt that could have been avoided.  Grumble grumble. -Hans
+ * this code neither write locks what it does not need to write lock nor locks
+ * out of order will be a pain in the butt that could have been avoided.
+ * Grumble grumble. -Hans
  *
  * fix is meant in the sense of render unchanging
  *
- * Latency might be improved by first gathering a list of what buffers are needed
- * and then getting as many of them in parallel as possible? -Hans
+ * Latency might be improved by first gathering a list of what buffers
+ * are needed and then getting as many of them in parallel as possible? -Hans
  *
  * Parameters:
  *	op_mode	i - insert, d - delete, c - cut (truncate), p - paste (append)
@@ -2335,8 +2551,9 @@
 	int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path);
 	int pos_in_item;
 
-	/* we set wait_tb_buffers_run when we have to restore any dirty bits cleared
-	 ** during wait_tb_buffers_run
+	/*
+	 * we set wait_tb_buffers_run when we have to restore any dirty
+	 * bits cleared during wait_tb_buffers_run
 	 */
 	int wait_tb_buffers_run = 0;
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
@@ -2347,10 +2564,11 @@
 
 	tb->fs_gen = get_generation(tb->tb_sb);
 
-	/* we prepare and log the super here so it will already be in the
-	 ** transaction when do_balance needs to change it.
-	 ** This way do_balance won't have to schedule when trying to prepare
-	 ** the super for logging
+	/*
+	 * we prepare and log the super here so it will already be in the
+	 * transaction when do_balance needs to change it.
+	 * This way do_balance won't have to schedule when trying to prepare
+	 * the super for logging
 	 */
 	reiserfs_prepare_for_journal(tb->tb_sb,
 				     SB_BUFFER_WITH_SB(tb->tb_sb), 1);
@@ -2408,7 +2626,7 @@
 #endif
 
 	if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH)
-		// FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat
+		/* FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat */
 		return REPEAT_SEARCH;
 
 	/* Starting from the leaf level; for all levels h of the tree. */
@@ -2427,7 +2645,10 @@
 					goto repeat;
 				if (h != MAX_HEIGHT - 1)
 					tb->insert_size[h + 1] = 0;
-				/* ok, analysis and resource gathering are complete */
+				/*
+				 * ok, analysis and resource gathering
+				 * are complete
+				 */
 				break;
 			}
 			goto repeat;
@@ -2437,15 +2658,19 @@
 		if (ret != CARRY_ON)
 			goto repeat;
 
-		/* No disk space, or schedule occurred and analysis may be
-		 * invalid and needs to be redone. */
+		/*
+		 * No disk space, or schedule occurred and analysis may be
+		 * invalid and needs to be redone.
+		 */
 		ret = get_empty_nodes(tb, h);
 		if (ret != CARRY_ON)
 			goto repeat;
 
+		/*
+		 * We have a positive insert size but no nodes exist on this
+		 * level, this means that we are creating a new root.
+		 */
 		if (!PATH_H_PBUFFER(tb->tb_path, h)) {
-			/* We have a positive insert size but no nodes exist on this
-			   level, this means that we are creating a new root. */
 
 			RFALSE(tb->blknum[h] != 1,
 			       "PAP-8350: creating new empty root");
@@ -2453,11 +2678,13 @@
 			if (h < MAX_HEIGHT - 1)
 				tb->insert_size[h + 1] = 0;
 		} else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) {
+			/*
+			 * The tree needs to be grown, so this node S[h]
+			 * which is the root node is split into two nodes,
+			 * and a new node (S[h+1]) will be created to
+			 * become the root node.
+			 */
 			if (tb->blknum[h] > 1) {
-				/* The tree needs to be grown, so this node S[h]
-				   which is the root node is split into two nodes,
-				   and a new node (S[h+1]) will be created to
-				   become the root node.  */
 
 				RFALSE(h == MAX_HEIGHT - 1,
 				       "PAP-8355: attempt to create too high of a tree");
@@ -2488,11 +2715,13 @@
 	}
 
       repeat:
-	// fix_nodes was unable to perform its calculation due to
-	// filesystem got changed under us, lack of free disk space or i/o
-	// failure. If the first is the case - the search will be
-	// repeated. For now - free all resources acquired so far except
-	// for the new allocated nodes
+	/*
+	 * fix_nodes was unable to perform its calculation because the
+	 * filesystem got changed under us, free disk space ran out or an
+	 * i/o failure occurred. In the first case the search will be
+	 * repeated. For now - free all resources acquired so far except
+	 * for the newly allocated nodes
+	 */
 	{
 		int i;
 
@@ -2548,8 +2777,6 @@
 
 }
 
-/* Anatoly will probably forgive me renaming tb to tb. I just
-   wanted to make lines shorter */
 void unfix_nodes(struct tree_balance *tb)
 {
 	int i;
@@ -2578,8 +2805,10 @@
 	for (i = 0; i < MAX_FEB_SIZE; i++) {
 		if (tb->FEB[i]) {
 			b_blocknr_t blocknr = tb->FEB[i]->b_blocknr;
-			/* de-allocated block which was not used by balancing and
-			   bforget about buffer for it */
+			/*
+			 * de-allocate the block which was not used by
+			 * balancing and bforget about the buffer for it
+			 */
 			brelse(tb->FEB[i]);
 			reiserfs_free_block(tb->transaction_handle, NULL,
 					    blocknr, 0);
diff --git a/fs/reiserfs/hashes.c b/fs/reiserfs/hashes.c
index 91b0cc1..7a26c4f 100644
--- a/fs/reiserfs/hashes.c
+++ b/fs/reiserfs/hashes.c
@@ -12,12 +12,6 @@
  * Yura's function is added (04/07/2000)
  */
 
-//
-// keyed_hash
-// yura_hash
-// r5_hash
-//
-
 #include <linux/kernel.h>
 #include "reiserfs.h"
 #include <asm/types.h>
@@ -56,7 +50,7 @@
 	u32 pad;
 	int i;
 
-	//      assert(len >= 0 && len < 256);
+	/*      assert(len >= 0 && len < 256); */
 
 	pad = (u32) len | ((u32) len << 8);
 	pad |= pad << 16;
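
The pad computation here just replicates the length byte into all four
bytes of a 32-bit word; a quick standalone check:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t len = 0x0b;		/* example length, 11 bytes */
		uint32_t pad = len | (len << 8);

		pad |= pad << 16;
		printf("pad = 0x%08x\n", (unsigned)pad);	/* 0x0b0b0b0b */
		return 0;
	}
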
@@ -127,9 +121,10 @@
 	return h0 ^ h1;
 }
 
-/* What follows in this file is copyright 2000 by Hans Reiser, and the
- * licensing of what follows is governed by reiserfs/README */
-
+/*
+ * What follows in this file is copyright 2000 by Hans Reiser, and the
+ * licensing of what follows is governed by reiserfs/README
+ */
 u32 yura_hash(const signed char *msg, int len)
 {
 	int j, pow;
diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
index ae26a27..c4a6967 100644
--- a/fs/reiserfs/ibalance.c
+++ b/fs/reiserfs/ibalance.c
@@ -12,7 +12,10 @@
 int balance_internal(struct tree_balance *,
 		     int, int, struct item_head *, struct buffer_head **);
 
-/* modes of internal_shift_left, internal_shift_right and internal_insert_childs */
+/*
+ * modes of internal_shift_left, internal_shift_right and
+ * internal_insert_childs
+ */
 #define INTERNAL_SHIFT_FROM_S_TO_L 0
 #define INTERNAL_SHIFT_FROM_R_TO_S 1
 #define INTERNAL_SHIFT_FROM_L_TO_S 2
@@ -32,7 +35,9 @@
 	memset(src_bi, 0, sizeof(struct buffer_info));
 	/* define dest, src, dest parent, dest position */
 	switch (shift_mode) {
-	case INTERNAL_SHIFT_FROM_S_TO_L:	/* used in internal_shift_left */
+
+	/* used in internal_shift_left */
+	case INTERNAL_SHIFT_FROM_S_TO_L:
 		src_bi->tb = tb;
 		src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
 		src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
@@ -52,12 +57,14 @@
 		dest_bi->tb = tb;
 		dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
 		dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
-		dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);	/* dest position is analog of dest->b_item_order */
+		/* dest position is analog of dest->b_item_order */
+		dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
 		*d_key = tb->lkey[h];
 		*cf = tb->CFL[h];
 		break;
 
-	case INTERNAL_SHIFT_FROM_R_TO_S:	/* used in internal_shift_left */
+	/* used in internal_shift_left */
+	case INTERNAL_SHIFT_FROM_R_TO_S:
 		src_bi->tb = tb;
 		src_bi->bi_bh = tb->R[h];
 		src_bi->bi_parent = tb->FR[h];
@@ -111,7 +118,8 @@
 	}
 }
 
-/* Insert count node pointers into buffer cur before position to + 1.
+/*
+ * Insert count node pointers into buffer cur before position to + 1.
  * Insert count items into buffer cur before position to.
  * Items and node pointers are specified by inserted and bh respectively.
  */
@@ -190,8 +198,10 @@
 
 }
 
-/* Delete del_num items and node pointers from buffer cur starting from *
- * the first_i'th item and first_p'th pointers respectively.		*/
+/*
+ * Delete del_num items and node pointers from buffer cur starting from
+ * the first_i'th item and first_p'th pointers respectively.
+ */
 static void internal_delete_pointers_items(struct buffer_info *cur_bi,
 					   int first_p,
 					   int first_i, int del_num)
@@ -270,22 +280,30 @@
 
 	i_from = (from == 0) ? from : from - 1;
 
-	/* delete n pointers starting from `from' position in CUR;
-	   delete n keys starting from 'i_from' position in CUR;
+	/*
+	 * delete n pointers starting from `from' position in CUR;
+	 * delete n keys starting from 'i_from' position in CUR;
 	 */
 	internal_delete_pointers_items(cur_bi, from, i_from, n);
 }
 
-/* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest
-* last_first == FIRST_TO_LAST means, that we copy first items from src to tail of dest
- * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
+/*
+ * copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer
+ * dest
+ * last_first == FIRST_TO_LAST means that we copy first items
+ *                             from src to tail of dest
+ * last_first == LAST_TO_FIRST means that we copy last items
+ *                             from src to head of dest
  */
 static void internal_copy_pointers_items(struct buffer_info *dest_bi,
 					 struct buffer_head *src,
 					 int last_first, int cpy_num)
 {
-	/* ATTENTION! Number of node pointers in DEST is equal to number of items in DEST *
-	 * as delimiting key have already inserted to buffer dest.*/
+	/*
+	 * ATTENTION! Number of node pointers in DEST is equal to number
+	 * of items in DEST as the delimiting key has already been
+	 * inserted into buffer dest.
+	 */
 	struct buffer_head *dest = dest_bi->bi_bh;
 	int nr_dest, nr_src;
 	int dest_order, src_order;
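
The invariant behind this copy is that an internal node with n keys
holds n + 1 child pointers, so moving cpy_num pointers carries
cpy_num - 1 keys, with the missing delimiting key supplied by the
common parent; a toy illustration with invented numbers:

	#include <stdio.h>

	int main(void)
	{
		int nr_keys = 6;	/* keys in the source internal node */
		int cpy_num = 3;	/* node pointers being moved */

		printf("pointers in node: %d\n", nr_keys + 1);
		printf("keys moved along: %d (+1 delimiter from parent)\n",
		       cpy_num - 1);
		return 0;
	}
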
@@ -366,7 +384,9 @@
 
 }
 
-/* Copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest.
+/*
+ * Copy cpy_num node pointers and cpy_num - 1 items from buffer src to
+ * buffer dest.
  * Delete cpy_num - del_par items and node pointers from buffer src.
  * last_first == FIRST_TO_LAST means, that we copy/delete first items from src.
  * last_first == LAST_TO_FIRST means, that we copy/delete last items from src.
@@ -385,8 +405,10 @@
 	if (last_first == FIRST_TO_LAST) {	/* shift_left occurs */
 		first_pointer = 0;
 		first_item = 0;
-		/* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
-		   for key - with first_item */
+		/*
+		 * delete cpy_num - del_par pointers and keys, starting
+		 * for pointers with first_pointer and for keys with
+		 * first_item
+		 */
 		internal_delete_pointers_items(src_bi, first_pointer,
 					       first_item, cpy_num - del_par);
 	} else {		/* shift_right occurs */
@@ -404,7 +426,9 @@
 }
 
 /* Insert n_src'th key of buffer src before n_dest'th key of buffer dest. */
-static void internal_insert_key(struct buffer_info *dest_bi, int dest_position_before,	/* insert key before key with n_dest number */
+static void internal_insert_key(struct buffer_info *dest_bi,
+				/* insert key before key with n_dest number */
+				int dest_position_before,
 				struct buffer_head *src, int src_position)
 {
 	struct buffer_head *dest = dest_bi->bi_bh;
@@ -453,13 +477,19 @@
 	}
 }
 
-/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
- * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest.
+/*
+ * Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
+ * Copy pointer_amount node pointers and pointer_amount - 1 items from
+ * buffer src to buffer dest.
  * Replace  d_key'th key in buffer cfl.
  * Delete pointer_amount items and node pointers from buffer src.
  */
 /* this can be invoked both to shift from S to L and from R to S */
-static void internal_shift_left(int mode,	/* INTERNAL_FROM_S_TO_L | INTERNAL_FROM_R_TO_S */
+static void internal_shift_left(
+				/*
+				 * INTERNAL_FROM_S_TO_L | INTERNAL_FROM_R_TO_S
+				 */
+				int mode,
 				struct tree_balance *tb,
 				int h, int pointer_amount)
 {
@@ -473,7 +503,10 @@
 	/*printk("pointer_amount = %d\n",pointer_amount); */
 
 	if (pointer_amount) {
-		/* insert delimiting key from common father of dest and src to node dest into position B_NR_ITEM(dest) */
+		/*
+		 * insert delimiting key from common father of dest and
+		 * src to node dest into position B_NR_ITEM(dest)
+		 */
 		internal_insert_key(&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf,
 				    d_key_position);
 
@@ -492,7 +525,8 @@
 
 }
 
-/* Insert delimiting key to L[h].
+/*
+ * Insert delimiting key to L[h].
  * Copy n node pointers and n - 1 items from buffer S[h] to L[h].
  * Delete n - 1 items and node pointers from buffer S[h].
  */
@@ -507,23 +541,27 @@
 	internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
 				       &dest_bi, &src_bi, &d_key_position, &cf);
 
-	if (pointer_amount > 0)	/* insert lkey[h]-th key  from CFL[h] to left neighbor L[h] */
+	/* insert lkey[h]-th key from CFL[h] to left neighbor L[h] */
+	if (pointer_amount > 0)
 		internal_insert_key(&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf,
 				    d_key_position);
-	/*            internal_insert_key (tb->L[h], B_NR_ITEM(tb->L[h]), tb->CFL[h], tb->lkey[h]); */
 
 	/* last parameter is del_parameter */
 	internal_move_pointers_items(&dest_bi, &src_bi, FIRST_TO_LAST,
 				     pointer_amount, 1);
-	/*    internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1); */
 }
 
-/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
+/*
+ * Insert d_key'th (delimiting) key from buffer cfr to head of dest.
  * Copy n node pointers and n - 1 items from buffer src to buffer dest.
  * Replace  d_key'th key in buffer cfr.
  * Delete n items and node pointers from buffer src.
  */
-static void internal_shift_right(int mode,	/* INTERNAL_FROM_S_TO_R | INTERNAL_FROM_L_TO_S */
+static void internal_shift_right(
+				 /*
+				  * INTERNAL_FROM_S_TO_R | INTERNAL_FROM_L_TO_S
+				  */
+				 int mode,
 				 struct tree_balance *tb,
 				 int h, int pointer_amount)
 {
@@ -538,7 +576,10 @@
 	nr = B_NR_ITEMS(src_bi.bi_bh);
 
 	if (pointer_amount > 0) {
-		/* insert delimiting key from common father of dest and src to dest node into position 0 */
+		/*
+		 * insert delimiting key from common father of dest
+		 * and src to dest node into position 0
+		 */
 		internal_insert_key(&dest_bi, 0, cf, d_key_position);
 		if (nr == pointer_amount - 1) {
 			RFALSE(src_bi.bi_bh != PATH_H_PBUFFER(tb->tb_path, h) /*tb->S[h] */ ||
@@ -559,7 +600,8 @@
 				     pointer_amount, 0);
 }
 
-/* Insert delimiting key to R[h].
+/*
+ * Insert delimiting key to R[h].
  * Copy n node pointers and n - 1 items from buffer S[h] to R[h].
  * Delete n - 1 items and node pointers from buffer S[h].
  */
@@ -574,18 +616,19 @@
 	internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
 				       &dest_bi, &src_bi, &d_key_position, &cf);
 
-	if (pointer_amount > 0)	/* insert rkey from CFR[h] to right neighbor R[h] */
+	/* insert rkey from CFR[h] to right neighbor R[h] */
+	if (pointer_amount > 0)
 		internal_insert_key(&dest_bi, 0, cf, d_key_position);
-	/*            internal_insert_key (tb->R[h], 0, tb->CFR[h], tb->rkey[h]); */
 
 	/* last parameter is del_parameter */
 	internal_move_pointers_items(&dest_bi, &src_bi, LAST_TO_FIRST,
 				     pointer_amount, 1);
-	/*    internal_move_pointers_items (tb->R[h], tb->S[h], LAST_TO_FIRST, pointer_amount, 1); */
 }
 
-/* Delete insert_num node pointers together with their left items
- * and balance current node.*/
+/*
+ * Delete insert_num node pointers together with their left items
+ * and balance current node.
+ */
 static void balance_internal_when_delete(struct tree_balance *tb,
 					 int h, int child_pos)
 {
@@ -626,9 +669,11 @@
 				new_root = tb->R[h - 1];
 			else
 				new_root = tb->L[h - 1];
-			/* switch super block's tree root block number to the new value */
+			/*
+			 * switch super block's tree root block
+			 * number to the new value
+			 */
 			PUT_SB_ROOT_BLOCK(tb->tb_sb, new_root->b_blocknr);
-			//REISERFS_SB(tb->tb_sb)->s_rs->s_tree_height --;
+			/* REISERFS_SB(tb->tb_sb)->s_rs->s_tree_height--; */
 			PUT_SB_TREE_HEIGHT(tb->tb_sb,
 					   SB_TREE_HEIGHT(tb->tb_sb) - 1);
 
@@ -636,8 +681,8 @@
 						 REISERFS_SB(tb->tb_sb)->s_sbh,
 						 1);
 			/*&&&&&&&&&&&&&&&&&&&&&& */
+			/* use check_internal if new root is an internal node */
 			if (h > 1)
-				/* use check_internal if new root is an internal node */
 				check_internal(new_root);
 			/*&&&&&&&&&&&&&&&&&&&&&& */
 
@@ -648,7 +693,8 @@
 		return;
 	}
 
-	if (tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1) {	/* join S[h] with L[h] */
+	/* join S[h] with L[h] */
+	if (tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1) {
 
 		RFALSE(tb->rnum[h] != 0,
 		       "invalid tb->rnum[%d]==%d when joining S[h] with L[h]",
@@ -660,7 +706,8 @@
 		return;
 	}
 
-	if (tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1) {	/* join S[h] with R[h] */
+	/* join S[h] with R[h] */
+	if (tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1) {
 		RFALSE(tb->lnum[h] != 0,
 		       "invalid tb->lnum[%d]==%d when joining S[h] with R[h]",
 		       h, tb->lnum[h]);
@@ -671,17 +718,18 @@
 		return;
 	}
 
-	if (tb->lnum[h] < 0) {	/* borrow from left neighbor L[h] */
+	/* borrow from left neighbor L[h] */
+	if (tb->lnum[h] < 0) {
 		RFALSE(tb->rnum[h] != 0,
 		       "wrong tb->rnum[%d]==%d when borrow from L[h]", h,
 		       tb->rnum[h]);
-		/*internal_shift_right (tb, h, tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], -tb->lnum[h]); */
 		internal_shift_right(INTERNAL_SHIFT_FROM_L_TO_S, tb, h,
 				     -tb->lnum[h]);
 		return;
 	}
 
-	if (tb->rnum[h] < 0) {	/* borrow from right neighbor R[h] */
+	/* borrow from right neighbor R[h] */
+	if (tb->rnum[h] < 0) {
 		RFALSE(tb->lnum[h] != 0,
 		       "invalid tb->lnum[%d]==%d when borrow from R[h]",
 		       h, tb->lnum[h]);
@@ -689,7 +737,8 @@
 		return;
 	}
 
-	if (tb->lnum[h] > 0) {	/* split S[h] into two parts and put them into neighbors */
+	/* split S[h] into two parts and put them into neighbors */
+	if (tb->lnum[h] > 0) {
 		RFALSE(tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1,
 		       "invalid tb->lnum[%d]==%d or tb->rnum[%d]==%d when S[h](item number == %d) is split between them",
 		       h, tb->lnum[h], h, tb->rnum[h], n);
@@ -737,29 +786,36 @@
 	do_balance_mark_internal_dirty(tb, tb->CFR[h], 0);
 }
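
For review, the case analysis in balance_internal_when_delete() boils down to a short decision ladder. A hedged restatement in plain C; pick_fixup and the enum are illustrative only, with nr_left/nr_right standing in for B_NR_ITEMS() of the two neighbors:

	enum int_fixup {
		JOIN_LEFT, JOIN_RIGHT, BORROW_LEFT, BORROW_RIGHT,
		SPLIT_BOTH, NO_FIXUP
	};

	static enum int_fixup pick_fixup(int lnum, int rnum,
					 int nr_left, int nr_right)
	{
		if (lnum == -nr_left - 1)	/* S[h] folds into L[h] */
			return JOIN_LEFT;
		if (rnum == -nr_right - 1)	/* S[h] folds into R[h] */
			return JOIN_RIGHT;
		if (lnum < 0)		/* pull -lnum pointers out of L[h] */
			return BORROW_LEFT;
		if (rnum < 0)		/* pull -rnum pointers out of R[h] */
			return BORROW_RIGHT;
		if (lnum > 0)		/* push S[h] out to both neighbors */
			return SPLIT_BOTH;
		return NO_FIXUP;
	}
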
 
-int balance_internal(struct tree_balance *tb,	/* tree_balance structure               */
-		     int h,	/* level of the tree                    */
-		     int child_pos, struct item_head *insert_key,	/* key for insertion on higher level    */
-		     struct buffer_head **insert_ptr	/* node for insertion on higher level */
-    )
-    /* if inserting/pasting
-       {
-       child_pos is the position of the node-pointer in S[h] that        *
-       pointed to S[h-1] before balancing of the h-1 level;              *
-       this means that new pointers and items must be inserted AFTER *
-       child_pos
-       }
-       else
-       {
-       it is the position of the leftmost pointer that must be deleted (together with
-       its corresponding key to the left of the pointer)
-       as a result of the previous level's balancing.
-       }
-     */
+
+/*
+ * if inserting/pasting {
+ *   child_pos is the position of the node-pointer in S[h] that
+ *   pointed to S[h-1] before balancing of the h-1 level;
+ *   this means that new pointers and items must be inserted AFTER
+ *   child_pos
+ * } else {
+ *   it is the position of the leftmost pointer that must be deleted
+ *   (together with its corresponding key to the left of the pointer)
+ *   as a result of the previous level's balancing.
+ * }
+ */
+
+int balance_internal(struct tree_balance *tb,
+		     int h,	/* level of the tree */
+		     int child_pos,
+		     /* key for insertion on higher level */
+		     struct item_head *insert_key,
+		     /* node for insertion on higher level */
+		     struct buffer_head **insert_ptr)
 {
 	struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
 	struct buffer_info bi;
-	int order;		/* we return this: it is 0 if there is no S[h], else it is tb->S[h]->b_item_order */
+
+	/*
+	 * we return this: it is 0 if there is no S[h],
+	 * else it is tb->S[h]->b_item_order
+	 */
+	int order;
 	int insert_num, n, k;
 	struct buffer_head *S_new;
 	struct item_head new_insert_key;
@@ -774,8 +830,10 @@
 	    (tbSh) ? PATH_H_POSITION(tb->tb_path,
 				     h + 1) /*tb->S[h]->b_item_order */ : 0;
 
-	/* Using insert_size[h] calculate the number insert_num of items
-	   that must be inserted to or deleted from S[h]. */
+	/*
+	 * Using insert_size[h] calculate the number insert_num of items
+	 * that must be inserted to or deleted from S[h].
+	 */
 	insert_num = tb->insert_size[h] / ((int)(KEY_SIZE + DC_SIZE));
 
 	/* Check whether insert_num is proper * */
@@ -794,23 +852,21 @@
 
 	k = 0;
 	if (tb->lnum[h] > 0) {
-		/* shift lnum[h] items from S[h] to the left neighbor L[h].
-		   check how many of new items fall into L[h] or CFL[h] after
-		   shifting */
+		/*
+		 * shift lnum[h] items from S[h] to the left neighbor L[h].
+		 * check how many of new items fall into L[h] or CFL[h] after
+		 * shifting
+		 */
 		n = B_NR_ITEMS(tb->L[h]);	/* number of items in L[h] */
 		if (tb->lnum[h] <= child_pos) {
 			/* new items don't fall into L[h] or CFL[h] */
 			internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
 					    tb->lnum[h]);
-			/*internal_shift_left (tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,tb->lnum[h]); */
 			child_pos -= tb->lnum[h];
 		} else if (tb->lnum[h] > child_pos + insert_num) {
 			/* all new items fall into L[h] */
 			internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
 					    tb->lnum[h] - insert_num);
-			/*                  internal_shift_left(tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,
-			   tb->lnum[h]-insert_num);
-			 */
 			/* insert insert_num keys and node-pointers into L[h] */
 			bi.tb = tb;
 			bi.bi_bh = tb->L[h];
@@ -826,7 +882,10 @@
 		} else {
 			struct disk_child *dc;
 
-			/* some items fall into L[h] or CFL[h], but some don't fall */
+			/*
+			 * some items fall into L[h] or CFL[h],
+			 * but some don't fall
+			 */
 			internal_shift1_left(tb, h, child_pos + 1);
 			/* calculate number of new items that fall into L[h] */
 			k = tb->lnum[h] - child_pos - 1;
@@ -841,7 +900,10 @@
 
 			replace_lkey(tb, h, insert_key + k);
 
-			/* replace the first node-ptr in S[h] by node-ptr to insert_ptr[k] */
+			/*
+			 * replace the first node-ptr in S[h] by
+			 * node-ptr to insert_ptr[k]
+			 */
 			dc = B_N_CHILD(tbSh, 0);
 			put_dc_size(dc,
 				    MAX_CHILD_SIZE(insert_ptr[k]) -
@@ -860,17 +922,17 @@
 	/* tb->lnum[h] > 0 */
 	if (tb->rnum[h] > 0) {
 		/*shift rnum[h] items from S[h] to the right neighbor R[h] */
-		/* check how many of new items fall into R or CFR after shifting */
+		/*
+		 * check how many of new items fall into R or CFR
+		 * after shifting
+		 */
 		n = B_NR_ITEMS(tbSh);	/* number of items in S[h] */
 		if (n - tb->rnum[h] >= child_pos)
 			/* new items fall into S[h] */
-			/*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],tb->rnum[h]); */
 			internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
 					     tb->rnum[h]);
 		else if (n + insert_num - tb->rnum[h] < child_pos) {
 			/* all new items fall into R[h] */
-			/*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],
-			   tb->rnum[h] - insert_num); */
 			internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
 					     tb->rnum[h] - insert_num);
 
@@ -904,7 +966,10 @@
 
 			replace_rkey(tb, h, insert_key + insert_num - k - 1);
 
-			/* replace the first node-ptr in R[h] by node-ptr insert_ptr[insert_num-k-1] */
+			/*
+			 * replace the first node-ptr in R[h] by
+			 * node-ptr insert_ptr[insert_num-k-1]
+			 */
 			dc = B_N_CHILD(tb->R[h], 0);
 			put_dc_size(dc,
 				    MAX_CHILD_SIZE(insert_ptr
@@ -921,7 +986,7 @@
 		}
 	}
 
-    /** Fill new node that appears instead of S[h] **/
+	/* Fill new node that appears instead of S[h] */
 	RFALSE(tb->blknum[h] > 2, "blknum can not be > 2 for internal level");
 	RFALSE(tb->blknum[h] < 0, "blknum can not be < 0");
 
@@ -1002,11 +1067,13 @@
 			/* last parameter is del_par */
 			internal_move_pointers_items(&dest_bi, &src_bi,
 						     LAST_TO_FIRST, snum, 0);
-			/*            internal_move_pointers_items(S_new, tbSh, LAST_TO_FIRST, snum, 0); */
 		} else if (n + insert_num - snum < child_pos) {
 			/* all new items fall into S_new */
 			/*  store the delimiting key for the next level */
-			/* new_insert_key = (n + insert_item - snum)'th key in S[h] */
+			/*
+			 * new_insert_key = (n + insert_item - snum)'th
+			 * key in S[h]
+			 */
 			memcpy(&new_insert_key,
 			       internal_key(tbSh, n + insert_num - snum),
 			       KEY_SIZE);
@@ -1014,9 +1081,11 @@
 			internal_move_pointers_items(&dest_bi, &src_bi,
 						     LAST_TO_FIRST,
 						     snum - insert_num, 0);
-			/*                  internal_move_pointers_items(S_new,tbSh,1,snum - insert_num,0); */
 
-			/* insert insert_num keys and node-pointers into S_new */
+			/*
+			 * insert insert_num keys and node-pointers
+			 * into S_new
+			 */
 			internal_insert_childs(&dest_bi,
 					       /*S_new,tb->S[h-1]->b_next, */
 					       child_pos - n - insert_num +
@@ -1033,7 +1102,6 @@
 			internal_move_pointers_items(&dest_bi, &src_bi,
 						     LAST_TO_FIRST,
 						     n - child_pos + 1, 1);
-			/*                  internal_move_pointers_items(S_new,tbSh,1,n - child_pos + 1,1); */
 			/* calculate number of new items that fall into S_new */
 			k = snum - n + child_pos - 1;
 
@@ -1043,7 +1111,10 @@
 			/* new_insert_key = insert_key[insert_num - k - 1] */
 			memcpy(&new_insert_key, insert_key + insert_num - k - 1,
 			       KEY_SIZE);
-			/* replace first node-ptr in S_new by node-ptr to insert_ptr[insert_num-k-1] */
+			/*
+			 * replace first node-ptr in S_new by node-ptr
+			 * to insert_ptr[insert_num-k-1]
+			 */
 
 			dc = B_N_CHILD(S_new, 0);
 			put_dc_size(dc,
@@ -1066,7 +1137,7 @@
 		       || buffer_dirty(S_new), "cm-00001: bad S_new (%b)",
 		       S_new);
 
-		// S_new is released in unfix_nodes
+		/* S_new is released in unfix_nodes */
 	}
 
 	n = B_NR_ITEMS(tbSh);	/*number of items in S[h] */
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index b8d3ffb..cc20959 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -25,7 +25,10 @@
 
 void reiserfs_evict_inode(struct inode *inode)
 {
-	/* We need blocks for transaction + (user+group) quota update (possibly delete) */
+	/*
+	 * We need blocks for transaction + (user+group) quota
+	 * update (possibly delete)
+	 */
 	int jbegin_count =
 	    JOURNAL_PER_BALANCE_CNT * 2 +
 	    2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
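
As a rough worked example of this reservation (both constants depend on tree height and quota configuration, so treat the numbers as assumptions): with JOURNAL_PER_BALANCE_CNT == 18 and REISERFS_QUOTA_INIT_BLOCKS() == 2, jbegin_count comes to 2 * 18 + 2 * 2 = 40 journal blocks.
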
@@ -39,8 +42,12 @@
 	if (inode->i_nlink)
 		goto no_delete;
 
-	/* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
-	if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {	/* also handles bad_inode case */
+	/*
+	 * An objectid of 0 happens when we abort creating a new inode
+	 * for some reason like lack of space.  This also handles the
+	 * bad_inode case.
+	 */
+	if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {
 
 		reiserfs_delete_xattrs(inode);
 
@@ -54,9 +61,11 @@
 
 		err = reiserfs_delete_object(&th, inode);
 
-		/* Do quota update inside a transaction for journaled quotas. We must do that
-		 * after delete_object so that quota updates go into the same transaction as
-		 * stat data deletion */
+		/*
+		 * Do quota update inside a transaction for journaled quotas.
+		 * We must do that after delete_object so that quota updates
+		 * go into the same transaction as stat data deletion
+		 */
 		if (!err) {
 			int depth = reiserfs_write_unlock_nested(inode->i_sb);
 			dquot_free_inode(inode);
@@ -66,22 +75,29 @@
 		if (journal_end(&th, inode->i_sb, jbegin_count))
 			goto out;
 
-		/* check return value from reiserfs_delete_object after
+		/*
+		 * check return value from reiserfs_delete_object after
 		 * ending the transaction
 		 */
 		if (err)
 		    goto out;
 
-		/* all items of file are deleted, so we can remove "save" link */
-		remove_save_link(inode, 0 /* not truncate */ );	/* we can't do anything
-								 * about an error here */
+		/*
+		 * all items of file are deleted, so we can remove the
+		 * "save" link; we can't do anything about an error here
+		 */
+		remove_save_link(inode, 0 /* not truncate */);
 out:
 		reiserfs_write_unlock(inode->i_sb);
 	} else {
 		/* no object items are in the tree */
 		;
 	}
-	clear_inode(inode);	/* note this must go after the journal_end to prevent deadlock */
+
+	/* note this must go after the journal_end to prevent deadlock */
+	clear_inode(inode);
+
 	dquot_drop(inode);
 	inode->i_blocks = 0;
 	return;
@@ -103,8 +119,10 @@
 	key->key_length = length;
 }
 
-/* take base of inode_key (it comes from inode always) (dirid, objectid) and version from an inode, set
-   offset and type of key */
+/*
+ * take base of inode_key (it comes from inode always) (dirid, objectid)
+ * and version from an inode, set offset and type of key
+ */
 void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
 		  int type, int length)
 {
@@ -114,9 +132,7 @@
 		      length);
 }
 
-//
-// when key is 0, do not set version and short key
-//
+/* when key is 0, do not set version and short key */
 inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
 			      int version,
 			      loff_t offset, int type, int length,
@@ -132,43 +148,47 @@
 	set_le_ih_k_type(ih, type);
 	put_ih_item_len(ih, length);
 	/*    set_ih_free_space (ih, 0); */
-	// for directory items it is entry count, for directs and stat
-	// datas - 0xffff, for indirects - 0
+	/*
+	 * for directory items it is entry count, for directs and stat
+	 * datas - 0xffff, for indirects - 0
+	 */
 	put_ih_entry_count(ih, entry_count);
 }
 
-//
-// FIXME: we might cache recently accessed indirect item
+/*
+ * FIXME: we might cache recently accessed indirect item
+ * Ugh.  Not too eager for that....
+ * I cut the code until such time as I see a convincing argument (benchmark).
+ * I don't want a bloated inode struct..., and I don't like code complexity....
+ */
 
-// Ugh.  Not too eager for that....
-//  I cut the code until such time as I see a convincing argument (benchmark).
-// I don't want a bloated inode struct..., and I don't like code complexity....
+/*
+ * cutting the code is fine, since it really isn't in use yet and is easy
+ * to add back in.  But, Vladimir has a really good idea here.  Think
+ * about what happens for reading a file.  For each page,
+ * The VFS layer calls reiserfs_readpage, who searches the tree to find
+ * an indirect item.  This indirect item has X number of pointers, where
+ * X is a big number if we've done the block allocation right.  But,
+ * we only use one or two of these pointers during each call to readpage,
+ * needlessly researching again later on.
+ *
+ * The size of the cache could be dynamic based on the size of the file.
+ *
+ * I'd also like to see us cache the location of the stat data item, since
+ * we are needlessly researching for that frequently.
+ *
+ * --chris
+ */
 
-/* cutting the code is fine, since it really isn't in use yet and is easy
-** to add back in.  But, Vladimir has a really good idea here.  Think
-** about what happens for reading a file.  For each page,
-** The VFS layer calls reiserfs_readpage, who searches the tree to find
-** an indirect item.  This indirect item has X number of pointers, where
-** X is a big number if we've done the block allocation right.  But,
-** we only use one or two of these pointers during each call to readpage,
-** needlessly researching again later on.
-**
-** The size of the cache could be dynamic based on the size of the file.
-**
-** I'd also like to see us cache the location the stat data item, since
-** we are needlessly researching for that frequently.
-**
-** --chris
-*/
-
-/* If this page has a file tail in it, and
-** it was read in by get_block_create_0, the page data is valid,
-** but tail is still sitting in a direct item, and we can't write to
-** it.  So, look through this page, and check all the mapped buffers
-** to make sure they have valid block numbers.  Any that don't need
-** to be unmapped, so that __block_write_begin will correctly call
-** reiserfs_get_block to convert the tail into an unformatted node
-*/
+/*
+ * If this page has a file tail in it, and
+ * it was read in by get_block_create_0, the page data is valid,
+ * but tail is still sitting in a direct item, and we can't write to
+ * it.  So, look through this page, and check all the mapped buffers
+ * to make sure they have valid block numbers.  Any that don't need
+ * to be unmapped, so that __block_write_begin will correctly call
+ * reiserfs_get_block to convert the tail into an unformatted node
+ */
 static inline void fix_tail_page_for_writing(struct page *page)
 {
 	struct buffer_head *head, *next, *bh;
@@ -186,8 +206,10 @@
 	}
 }
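
The body of fix_tail_page_for_writing() is elided by the hunk above; for context, it is the usual walk over the page's buffer ring, unmapping any buffer that claims a mapping to block 0. A minimal sketch of that shape, using only generic buffer-head helpers (the function name is hypothetical, and the real unmap helper may do more than clear_buffer_mapped()):

	static void unmap_fake_tail_buffers(struct page *page)
	{
		struct buffer_head *head, *bh, *next;

		if (!page || !page_has_buffers(page))
			return;

		head = page_buffers(page);
		bh = head;
		do {
			next = bh->b_this_page;
			/* mapped to block 0 means tail data, not real io */
			if (buffer_mapped(bh) && bh->b_blocknr == 0)
				clear_buffer_mapped(bh);
			bh = next;
		} while (bh != head);
	}
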
 
-/* reiserfs_get_block does not need to allocate a block only if it has been
-   done already or non-hole position has been found in the indirect item */
+/*
+ * reiserfs_get_block does not need to allocate a block only if it has been
+ * done already or non-hole position has been found in the indirect item
+ */
 static inline int allocation_needed(int retval, b_blocknr_t allocated,
 				    struct item_head *ih,
 				    __le32 * item, int pos_in_item)
@@ -211,14 +233,16 @@
 	map_bh(bh, inode->i_sb, block);
 }
 
-//
-// files which were created in the earlier version can not be longer,
-// than 2 gb
-//
+/*
+ * files which were created in the earlier version can not be longer
+ * than 2 gb
+ */
 static int file_capable(struct inode *inode, sector_t block)
 {
-	if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||	// it is new file.
-	    block < (1 << (31 - inode->i_sb->s_blocksize_bits)))	// old file, but 'block' is inside of 2gb
+	/* it is new file. */
+	if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||
+	    /* old file, but 'block' is inside of 2gb */
+	    block < (1 << (31 - inode->i_sb->s_blocksize_bits)))
 		return 1;
 
 	return 0;
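
Concretely: with a 4KiB block size, s_blocksize_bits is 12, so the bound is block < (1 << 19) = 524288; 524288 blocks of 4096 bytes is 2^31 bytes, i.e. the 2GB ceiling for old-format (KEY_FORMAT_3_5) files.
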
@@ -250,14 +274,14 @@
 	return err;
 }
 
-// it is called by get_block when create == 0. Returns block number
-// for 'block'-th logical block of file. When it hits direct item it
-// returns 0 (being called from bmap) or read direct item into piece
-// of page (bh_result)
-
-// Please improve the english/clarity in the comment above, as it is
-// hard to understand.
-
+/*
+ * It is called by get_block when create == 0.  Returns the block number
+ * of the 'block'-th logical block of the file.  When it hits a direct
+ * item it returns 0 (if called from bmap) or reads the direct item into
+ * a piece of the page (bh_result).
+ */
 static int _get_block_create_0(struct inode *inode, sector_t block,
 			       struct buffer_head *bh_result, int args)
 {
@@ -273,7 +297,7 @@
 	int done = 0;
 	unsigned long offset;
 
-	// prepare the key to look for the 'block'-th block of file
+	/* prepare the key to look for the 'block'-th block of file */
 	make_cpu_key(&key, inode,
 		     (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
 		     3);
@@ -285,23 +309,28 @@
 			kunmap(bh_result->b_page);
 		if (result == IO_ERROR)
 			return -EIO;
-		// We do not return -ENOENT if there is a hole but page is uptodate, because it means
-		// That there is some MMAPED data associated with it that is yet to be written to disk.
+		/*
+		 * We do not return -ENOENT if there is a hole but page is
+		 * uptodate, because it means that there is some MMAPED data
+		 * associated with it that is yet to be written to disk.
+		 */
 		if ((args & GET_BLOCK_NO_HOLE)
 		    && !PageUptodate(bh_result->b_page)) {
 			return -ENOENT;
 		}
 		return 0;
 	}
-	//
+
 	bh = get_last_bh(&path);
 	ih = tp_item_head(&path);
 	if (is_indirect_le_ih(ih)) {
 		__le32 *ind_item = (__le32 *) ih_item_body(bh, ih);
 
-		/* FIXME: here we could cache indirect item or part of it in
-		   the inode to avoid search_by_key in case of subsequent
-		   access to file */
+		/*
+		 * FIXME: here we could cache indirect item or part of it in
+		 * the inode to avoid search_by_key in case of subsequent
+		 * access to file
+		 */
 		blocknr = get_block_num(ind_item, path.pos_in_item);
 		ret = 0;
 		if (blocknr) {
@@ -311,8 +340,12 @@
 				set_buffer_boundary(bh_result);
 			}
 		} else
-			// We do not return -ENOENT if there is a hole but page is uptodate, because it means
-			// That there is some MMAPED data associated with it that is yet to  be written to disk.
+			/*
+			 * We do not return -ENOENT if there is a hole but
+			 * page is uptodate, because it means that there is
+			 * some MMAPED data associated with it that is
+			 * yet to be written to disk.
+			 */
 		if ((args & GET_BLOCK_NO_HOLE)
 			    && !PageUptodate(bh_result->b_page)) {
 			ret = -ENOENT;
@@ -323,41 +356,45 @@
 			kunmap(bh_result->b_page);
 		return ret;
 	}
-	// requested data are in direct item(s)
+	/* requested data are in direct item(s) */
 	if (!(args & GET_BLOCK_READ_DIRECT)) {
-		// we are called by bmap. FIXME: we can not map block of file
-		// when it is stored in direct item(s)
+		/*
+		 * we are called by bmap. FIXME: we can not map block of file
+		 * when it is stored in direct item(s)
+		 */
 		pathrelse(&path);
 		if (p)
 			kunmap(bh_result->b_page);
 		return -ENOENT;
 	}
 
-	/* if we've got a direct item, and the buffer or page was uptodate,
-	 ** we don't want to pull data off disk again.  skip to the
-	 ** end, where we map the buffer and return
+	/*
+	 * if we've got a direct item, and the buffer or page was uptodate,
+	 * we don't want to pull data off disk again.  skip to the
+	 * end, where we map the buffer and return
 	 */
 	if (buffer_uptodate(bh_result)) {
 		goto finished;
 	} else
 		/*
-		 ** grab_tail_page can trigger calls to reiserfs_get_block on up to date
-		 ** pages without any buffers.  If the page is up to date, we don't want
-		 ** read old data off disk.  Set the up to date bit on the buffer instead
-		 ** and jump to the end
+		 * grab_tail_page can trigger calls to reiserfs_get_block on
+		 * up to date pages without any buffers.  If the page is up
+		 * to date, we don't want to read old data off disk.  Set
+		 * the up to date bit on the buffer instead and jump to the end
 		 */
 	if (!bh_result->b_page || PageUptodate(bh_result->b_page)) {
 		set_buffer_uptodate(bh_result);
 		goto finished;
 	}
-	// read file tail into part of page
+	/* read file tail into part of page */
 	offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1);
 	copy_item_head(&tmp_ih, ih);
 
-	/* we only want to kmap if we are reading the tail into the page.
-	 ** this is not the common case, so we don't kmap until we are
-	 ** sure we need to.  But, this means the item might move if
-	 ** kmap schedules
+	/*
+	 * we only want to kmap if we are reading the tail into the page.
+	 * this is not the common case, so we don't kmap until we are
+	 * sure we need to.  But, this means the item might move if
+	 * kmap schedules
 	 */
 	if (!p)
 		p = (char *)kmap(bh_result->b_page);
@@ -368,10 +405,11 @@
 		if (!is_direct_le_ih(ih)) {
 			BUG();
 		}
-		/* make sure we don't read more bytes than actually exist in
-		 ** the file.  This can happen in odd cases where i_size isn't
-		 ** correct, and when direct item padding results in a few
-		 ** extra bytes at the end of the direct item
+		/*
+		 * make sure we don't read more bytes than actually exist in
+		 * the file.  This can happen in odd cases where i_size isn't
+		 * correct, and when direct item padding results in a few
+		 * extra bytes at the end of the direct item
 		 */
 		if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
 			break;
@@ -390,18 +428,20 @@
 
 		p += chars;
 
+		/*
+		 * we are done if the direct item we read is not the last
+		 * item of the node.  FIXME: we could try to check the right
+		 * delimiting key to see whether the direct item continues
+		 * in the right neighbor, or rely on i_size
+		 */
 		if (PATH_LAST_POSITION(&path) != (B_NR_ITEMS(bh) - 1))
-			// we done, if read direct item is not the last item of
-			// node FIXME: we could try to check right delimiting key
-			// to see whether direct item continues in the right
-			// neighbor or rely on i_size
 			break;
 
-		// update key to look for the next piece
+		/* update key to look for the next piece */
 		set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + chars);
 		result = search_for_position_by_key(inode->i_sb, &key, &path);
 		if (result != POSITION_FOUND)
-			// i/o error most likely
+			/* i/o error most likely */
 			break;
 		bh = get_last_bh(&path);
 		ih = tp_item_head(&path);
@@ -416,7 +456,8 @@
 	if (result == IO_ERROR)
 		return -EIO;
 
-	/* this buffer has valid data, but isn't valid for io.  mapping it to
+	/*
+	 * this buffer has valid data, but isn't valid for io.  mapping it to
 	 * block #0 tells the rest of reiserfs it just has a tail in it
 	 */
 	map_bh(bh_result, inode->i_sb, 0);
@@ -424,8 +465,10 @@
 	return 0;
 }
 
-// this is called to create file map. So, _get_block_create_0 will not
-// read direct item
+/*
+ * this is called to create file map. So, _get_block_create_0 will not
+ * read direct item
+ */
 static int reiserfs_bmap(struct inode *inode, sector_t block,
 			 struct buffer_head *bh_result, int create)
 {
@@ -439,22 +482,23 @@
 	return 0;
 }
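
This is the path the generic bmap() helper (and therefore the FIBMAP ioctl) ends up exercising; holes and tails stored in direct items come back as block 0. A one-line usage sketch, assuming the generic VFS helper:

	sector_t phys = bmap(inode, block);	/* 0 for holes and unmapped tails */
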
 
-/* special version of get_block that is only used by grab_tail_page right
-** now.  It is sent to __block_write_begin, and when you try to get a
-** block past the end of the file (or a block from a hole) it returns
-** -ENOENT instead of a valid buffer.  __block_write_begin expects to
-** be able to do i/o on the buffers returned, unless an error value
-** is also returned.
-**
-** So, this allows __block_write_begin to be used for reading a single block
-** in a page.  Where it does not produce a valid page for holes, or past the
-** end of the file.  This turns out to be exactly what we need for reading
-** tails for conversion.
-**
-** The point of the wrapper is forcing a certain value for create, even
-** though the VFS layer is calling this function with create==1.  If you
-** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
-** don't use this function.
+/*
+ * special version of get_block that is only used by grab_tail_page right
+ * now.  It is sent to __block_write_begin, and when you try to get a
+ * block past the end of the file (or a block from a hole) it returns
+ * -ENOENT instead of a valid buffer.  __block_write_begin expects to
+ * be able to do i/o on the buffers returned, unless an error value
+ * is also returned.
+ *
+ * So, this allows __block_write_begin to be used for reading a single block
+ * in a page, where it does not produce a valid page for holes, or past the
+ * end of the file.  This turns out to be exactly what we need for reading
+ * tails for conversion.
+ *
+ * The point of the wrapper is forcing a certain value for create, even
+ * though the VFS layer is calling this function with create==1.  If you
+ * don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
+ * don't use this function.
 */
 static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
 				       struct buffer_head *bh_result,
@@ -463,8 +507,10 @@
 	return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
 }
 
-/* This is special helper for reiserfs_get_block in case we are executing
-   direct_IO request. */
+/*
+ * This is a special helper for reiserfs_get_block in case we are executing
+ * direct_IO request.
+ */
 static int reiserfs_get_blocks_direct_io(struct inode *inode,
 					 sector_t iblock,
 					 struct buffer_head *bh_result,
@@ -474,9 +520,11 @@
 
 	bh_result->b_page = NULL;
 
-	/* We set the b_size before reiserfs_get_block call since it is
-	   referenced in convert_tail_for_hole() that may be called from
-	   reiserfs_get_block() */
+	/*
+	 * We set the b_size before reiserfs_get_block call since it is
+	 * referenced in convert_tail_for_hole() that may be called from
+	 * reiserfs_get_block()
+	 */
 	bh_result->b_size = (1 << inode->i_blkbits);
 
 	ret = reiserfs_get_block(inode, iblock, bh_result,
@@ -486,14 +534,18 @@
 
 	/* don't allow direct io onto tail pages */
 	if (buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
-		/* make sure future calls to the direct io funcs for this offset
-		 ** in the file fail by unmapping the buffer
+		/*
+		 * make sure future calls to the direct io funcs for this
+		 * offset in the file fail by unmapping the buffer
 		 */
 		clear_buffer_mapped(bh_result);
 		ret = -EINVAL;
 	}
-	/* Possible unpacked tail. Flush the data before pages have
-	   disappeared */
+
+	/*
+	 * Possible unpacked tail. Flush the data before pages have
+	 * disappeared
+	 */
 	if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
 		int err;
 
@@ -512,15 +564,15 @@
 }
 
 /*
-** helper function for when reiserfs_get_block is called for a hole
-** but the file tail is still in a direct item
-** bh_result is the buffer head for the hole
-** tail_offset is the offset of the start of the tail in the file
-**
-** This calls prepare_write, which will start a new transaction
-** you should not be in a transaction, or have any paths held when you
-** call this.
-*/
+ * helper function for when reiserfs_get_block is called for a hole
+ * but the file tail is still in a direct item
+ * bh_result is the buffer head for the hole
+ * tail_offset is the offset of the start of the tail in the file
+ *
+ * This calls prepare_write, which will start a new transaction;
+ * you should not be in a transaction, or have any paths held when you
+ * call this.
+ */
 static int convert_tail_for_hole(struct inode *inode,
 				 struct buffer_head *bh_result,
 				 loff_t tail_offset)
@@ -540,9 +592,10 @@
 	tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
 
 	index = tail_offset >> PAGE_CACHE_SHIFT;
-	/* hole_page can be zero in case of direct_io, we are sure
-	   that we cannot get here if we write with O_DIRECT into
-	   tail page */
+	/*
+	 * hole_page can be zero in case of direct_io, we are sure
+	 * that we cannot get here if we write with O_DIRECT into tail page
+	 */
 	if (!hole_page || index != hole_page->index) {
 		tail_page = grab_cache_page(inode->i_mapping, index);
 		retval = -ENOMEM;
@@ -553,14 +606,15 @@
 		tail_page = hole_page;
 	}
 
-	/* we don't have to make sure the conversion did not happen while
-	 ** we were locking the page because anyone that could convert
-	 ** must first take i_mutex.
-	 **
-	 ** We must fix the tail page for writing because it might have buffers
-	 ** that are mapped, but have a block number of 0.  This indicates tail
-	 ** data that has been read directly into the page, and
-	 ** __block_write_begin won't trigger a get_block in this case.
+	/*
+	 * we don't have to make sure the conversion did not happen while
+	 * we were locking the page because anyone that could convert
+	 * must first take i_mutex.
+	 *
+	 * We must fix the tail page for writing because it might have buffers
+	 * that are mapped, but have a block number of 0.  This indicates tail
+	 * data that has been read directly into the page, and
+	 * __block_write_begin won't trigger a get_block in this case.
 	 */
 	fix_tail_page_for_writing(tail_page);
 	retval = __reiserfs_write_begin(tail_page, tail_start,
@@ -604,7 +658,8 @@
 		       struct buffer_head *bh_result, int create)
 {
 	int repeat, retval = 0;
-	b_blocknr_t allocated_block_nr = 0;	// b_blocknr_t is (unsigned) 32 bit int
+	/* b_blocknr_t is (unsigned) 32 bit int */
+	b_blocknr_t allocated_block_nr = 0;
 	INITIALIZE_PATH(path);
 	int pos_in_item;
 	struct cpu_key key;
@@ -614,12 +669,14 @@
 	int done;
 	int fs_gen;
 	struct reiserfs_transaction_handle *th = NULL;
-	/* space reserved in transaction batch:
-	   . 3 balancings in direct->indirect conversion
-	   . 1 block involved into reiserfs_update_sd()
-	   XXX in practically impossible worst case direct2indirect()
-	   can incur (much) more than 3 balancings.
-	   quota update for user, group */
+	/*
+	 * space reserved in transaction batch:
+	 * . 3 balancings in direct->indirect conversion
+	 * . 1 block involved into reiserfs_update_sd()
+	 * XXX in practically impossible worst case direct2indirect()
+	 * can incur (much) more than 3 balancings.
+	 * quota update for user, group
+	 */
 	int jbegin_count =
 	    JOURNAL_PER_BALANCE_CNT * 3 + 1 +
 	    2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
@@ -636,8 +693,9 @@
 		return -EFBIG;
 	}
 
-	/* if !create, we aren't changing the FS, so we don't need to
-	 ** log anything, so we don't need to start a transaction
+	/*
+	 * if !create, we aren't changing the FS, so we don't need to
+	 * log anything, so we don't need to start a transaction
 	 */
 	if (!(create & GET_BLOCK_CREATE)) {
 		int ret;
@@ -647,6 +705,7 @@
 		reiserfs_write_unlock(inode->i_sb);
 		return ret;
 	}
+
 	/*
 	 * if we're already in a transaction, make sure to close
 	 * any new transactions we start in this func
@@ -655,8 +714,10 @@
 	    reiserfs_transaction_running(inode->i_sb))
 		dangle = 0;
 
-	/* If file is of such a size, that it might have a tail and tails are enabled
-	 ** we should mark it as possibly needing tail packing on close
+	/*
+	 * If file is of such a size that it might have a tail and
+	 * tails are enabled, we should mark it as possibly needing
+	 * tail packing on close
 	 */
 	if ((have_large_tails(inode->i_sb)
 	     && inode->i_size < i_block_size(inode) * 4)
@@ -703,11 +764,12 @@
 		    _allocate_block(th, block, inode, &allocated_block_nr,
 				    &path, create);
 
+		/*
+		 * restart the transaction to give the journal a chance to free
+		 * some blocks.  releases the path, so we have to go back to
+		 * research if we succeed on the second try
+		 */
 		if (repeat == NO_DISK_SPACE || repeat == QUOTA_EXCEEDED) {
-			/* restart the transaction to give the journal a chance to free
-			 ** some blocks.  releases the path, so we have to go back to
-			 ** research if we succeed on the second try
-			 */
 			SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1;
 			retval = restart_transaction(th, inode, &path);
 			if (retval)
@@ -734,9 +796,11 @@
 
 	if (indirect_item_found(retval, ih)) {
 		b_blocknr_t unfm_ptr;
-		/* 'block'-th block is in the file already (there is
-		   corresponding cell in some indirect item). But it may be
-		   zero unformatted node pointer (hole) */
+		/*
+		 * 'block'-th block is in the file already (there is
+		 * corresponding cell in some indirect item). But it may be
+		 * zero unformatted node pointer (hole)
+		 */
 		unfm_ptr = get_block_num(item, pos_in_item);
 		if (unfm_ptr == 0) {
 			/* use allocated block to plug the hole */
@@ -764,9 +828,10 @@
 
 		reiserfs_write_unlock(inode->i_sb);
 
-		/* the item was found, so new blocks were not added to the file
-		 ** there is no need to make sure the inode is updated with this
-		 ** transaction
+		/*
+		 * the item was found, so new blocks were not added to the file
+		 * there is no need to make sure the inode is updated with this
+		 * transaction
 		 */
 		return retval;
 	}
@@ -776,9 +841,11 @@
 		goto start_trans;
 	}
 
-	/* desired position is not found or is in the direct item. We have
-	   to append file with holes up to 'block'-th block converting
-	   direct items to indirect one if necessary */
+	/*
+	 * desired position is not found or is in the direct item. We have
+	 * to append file with holes up to 'block'-th block converting
+	 * direct items to an indirect one if necessary
+	 */
 	done = 0;
 	do {
 		if (is_statdata_le_ih(ih)) {
@@ -790,16 +857,18 @@
 					  TYPE_INDIRECT, UNFM_P_SIZE,
 					  0 /* free_space */ );
 
+			/*
+			 * we are going to add 'block'-th block to the file.
+			 * Use allocated block for that
+			 */
 			if (cpu_key_k_offset(&key) == 1) {
-				/* we are going to add 'block'-th block to the file. Use
-				   allocated block for that */
 				unp = cpu_to_le32(allocated_block_nr);
 				set_block_dev_mapped(bh_result,
 						     allocated_block_nr, inode);
 				set_buffer_new(bh_result);
 				done = 1;
 			}
-			tmp_key = key;	// ;)
+			tmp_key = key;	/* ;) */
 			set_cpu_key_k_offset(&tmp_key, 1);
 			PATH_LAST_POSITION(&path)++;
 
@@ -809,9 +878,12 @@
 			if (retval) {
 				reiserfs_free_block(th, inode,
 						    allocated_block_nr, 1);
-				goto failure;	// retval == -ENOSPC, -EDQUOT or -EIO or -EEXIST
+				/*
+				 * retval == -ENOSPC, -EDQUOT or -EIO
+				 * or -EEXIST
+				 */
+				goto failure;
 			}
-			//mark_tail_converted (inode);
 		} else if (is_direct_le_ih(ih)) {
 			/* direct item has to be converted */
 			loff_t tail_offset;
@@ -819,18 +891,24 @@
 			tail_offset =
 			    ((le_ih_k_offset(ih) -
 			      1) & ~(inode->i_sb->s_blocksize - 1)) + 1;
+
+			/*
+			 * direct item we just found fits into block we have
+			 * to map. Convert it into unformatted node: use
+			 * bh_result for the conversion
+			 */
 			if (tail_offset == cpu_key_k_offset(&key)) {
-				/* direct item we just found fits into block we have
-				   to map. Convert it into unformatted node: use
-				   bh_result for the conversion */
 				set_block_dev_mapped(bh_result,
 						     allocated_block_nr, inode);
 				unbh = bh_result;
 				done = 1;
 			} else {
-				/* we have to padd file tail stored in direct item(s)
-				   up to block size and convert it to unformatted
-				   node. FIXME: this should also get into page cache */
+				/*
+				 * we have to pad file tail stored in direct
+				 * item(s) up to block size and convert it
+				 * to unformatted node. FIXME: this should
+				 * also get into page cache
+				 */
 
 				pathrelse(&path);
 				/*
@@ -859,7 +937,10 @@
 							inode->i_ino,
 							retval);
 					if (allocated_block_nr) {
-						/* the bitmap, the super, and the stat data == 3 */
+						/*
+						 * the bitmap, the super,
+						 * and the stat data == 3
+						 */
 						if (!th)
 							th = reiserfs_persistent_transaction(inode->i_sb, 3);
 						if (th)
@@ -881,43 +962,57 @@
 						    allocated_block_nr, 1);
 				goto failure;
 			}
-			/* it is important the set_buffer_uptodate is done after
-			 ** the direct2indirect.  The buffer might contain valid
-			 ** data newer than the data on disk (read by readpage, changed,
-			 ** and then sent here by writepage).  direct2indirect needs
-			 ** to know if unbh was already up to date, so it can decide
-			 ** if the data in unbh needs to be replaced with data from
-			 ** the disk
+			/*
+			 * it is important the set_buffer_uptodate is done
+			 * after the direct2indirect.  The buffer might
+			 * contain valid data newer than the data on disk
+			 * (read by readpage, changed, and then sent here by
+			 * writepage).  direct2indirect needs to know if unbh
+			 * was already up to date, so it can decide if the
+			 * data in unbh needs to be replaced with data from
+			 * the disk
 			 */
 			set_buffer_uptodate(unbh);
 
-			/* unbh->b_page == NULL in case of DIRECT_IO request, this means
-			   buffer will disappear shortly, so it should not be added to
+			/*
+			 * unbh->b_page == NULL in case of DIRECT_IO request,
+			 * this means buffer will disappear shortly, so it
+			 * should not be added to the tail list
 			 */
 			if (unbh->b_page) {
-				/* we've converted the tail, so we must
-				 ** flush unbh before the transaction commits
+				/*
+				 * we've converted the tail, so we must
+				 * flush unbh before the transaction commits
 				 */
 				reiserfs_add_tail_list(inode, unbh);
 
-				/* mark it dirty now to prevent commit_write from adding
-				 ** this buffer to the inode's dirty buffer list
+				/*
+				 * mark it dirty now to prevent commit_write
+				 * from adding this buffer to the inode's
+				 * dirty buffer list
 				 */
 				/*
-				 * AKPM: changed __mark_buffer_dirty to mark_buffer_dirty().
-				 * It's still atomic, but it sets the page dirty too,
-				 * which makes it eligible for writeback at any time by the
-				 * VM (which was also the case with __mark_buffer_dirty())
+				 * AKPM: changed __mark_buffer_dirty to
+				 * mark_buffer_dirty().  It's still atomic,
+				 * but it sets the page dirty too, which makes
+				 * it eligible for writeback at any time by the
+				 * VM (which was also the case with
+				 * __mark_buffer_dirty())
 				 */
 				mark_buffer_dirty(unbh);
 			}
 		} else {
-			/* append indirect item with holes if needed, when appending
-			   pointer to 'block'-th block use block, which is already
-			   allocated */
+			/*
+			 * append indirect item with holes if needed; when
+			 * appending pointer to 'block'-th block, use block,
+			 * which is already allocated
+			 */
 			struct cpu_key tmp_key;
-			unp_t unf_single = 0;	// We use this in case we need to allocate only
-			// one block which is a fastpath
+			/*
+			 * We use this in case we need to allocate
+			 * only one block which is a fastpath
+			 */
+			unp_t unf_single = 0;
 			unp_t *un;
 			__u64 max_to_insert =
 			    MAX_ITEM_LEN(inode->i_sb->s_blocksize) /
@@ -926,14 +1021,17 @@
 
 			RFALSE(pos_in_item != ih_item_len(ih) / UNFM_P_SIZE,
 			       "vs-804: invalid position for append");
-			/* indirect item has to be appended, set up key of that position */
+			/*
+			 * indirect item has to be appended,
+			 * set up key of that position
+			 * (key type is unimportant)
+			 */
 			make_cpu_key(&tmp_key, inode,
 				     le_key_k_offset(version,
 						     &(ih->ih_key)) +
 				     op_bytes_number(ih,
 						     inode->i_sb->s_blocksize),
-				     //pos_in_item * inode->i_sb->s_blocksize,
-				     TYPE_INDIRECT, 3);	// key type is unimportant
+				     TYPE_INDIRECT, 3);
 
 			RFALSE(cpu_key_k_offset(&tmp_key) > cpu_key_k_offset(&key),
 			       "green-805: invalid offset");
@@ -954,8 +1052,10 @@
 				}
 			}
 			if (blocks_needed <= max_to_insert) {
-				/* we are going to add target block to the file. Use allocated
-				   block for that */
+				/*
+				 * we are going to add target block to
+				 * the file. Use allocated block for that
+				 */
 				un[blocks_needed - 1] =
 				    cpu_to_le32(allocated_block_nr);
 				set_block_dev_mapped(bh_result,
@@ -964,8 +1064,11 @@
 				done = 1;
 			} else {
 				/* paste hole to the indirect item */
-				/* If kmalloc failed, max_to_insert becomes zero and it means we
-				   only have space for one block */
+				/*
+				 * If kmalloc failed, max_to_insert becomes
+				 * zero and it means we only have space for
+				 * one block
+				 */
 				blocks_needed =
 				    max_to_insert ? max_to_insert : 1;
 			}
@@ -984,9 +1087,12 @@
 				goto failure;
 			}
 			if (!done) {
-				/* We need to mark new file size in case this function will be
-				   interrupted/aborted later on. And we may do this only for
-				   holes. */
+				/*
+				 * We need to mark new file size in case
+				 * this function will be interrupted/aborted
+				 * later on. And we may do this only for
+				 * holes.
+				 */
 				inode->i_size +=
 				    inode->i_sb->s_blocksize * blocks_needed;
 			}
@@ -995,13 +1101,13 @@
 		if (done == 1)
 			break;
 
-		/* this loop could log more blocks than we had originally asked
-		 ** for.  So, we have to allow the transaction to end if it is
-		 ** too big or too full.  Update the inode so things are
-		 ** consistent if we crash before the function returns
-		 **
-		 ** release the path so that anybody waiting on the path before
-		 ** ending their transaction will be able to continue.
+		/*
+		 * this loop could log more blocks than we had originally
+		 * asked for.  So, we have to allow the transaction to end
+		 * if it is too big or too full.  Update the inode so things
+		 * are consistent if we crash before the function returns.
+		 *
+		 * release the path so that anybody waiting on the path before
+		 * ending their transaction will be able to continue.
 		 */
 		if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
 			retval = restart_transaction(th, inode, &path);
@@ -1060,8 +1166,10 @@
 	return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block);
 }
 
-/* Compute real number of used bytes by file
- * Following three functions can go away when we'll have enough space in stat item
+/*
+ * Compute real number of used bytes by file
+ * The following three functions can go away when we have enough space
+ * in the stat item
  */
 static int real_space_diff(struct inode *inode, int sd_size)
 {
@@ -1071,13 +1179,14 @@
 	if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode))
 		return sd_size;
 
-	/* End of file is also in full block with indirect reference, so round
-	 ** up to the next block.
-	 **
-	 ** there is just no way to know if the tail is actually packed
-	 ** on the file, so we have to assume it isn't.  When we pack the
-	 ** tail, we add 4 bytes to pretend there really is an unformatted
-	 ** node pointer
+	/*
+	 * End of file is also in full block with indirect reference, so round
+	 * up to the next block.
+	 *
+	 * there is just no way to know if the tail is actually packed
+	 * on the file, so we have to assume it isn't.  When we pack the
+	 * tail, we add 4 bytes to pretend there really is an unformatted
+	 * node pointer
 	 */
 	bytes =
 	    ((inode->i_size +
@@ -1108,29 +1217,29 @@
 		bytes += (loff_t) 511;
 	}
 
-	/* files from before the quota patch might i_blocks such that
-	 ** bytes < real_space.  Deal with that here to prevent it from
-	 ** going negative.
+	/*
+	 * files from before the quota patch might have i_blocks such that
+	 * bytes < real_space.  Deal with that here to prevent it from
+	 * going negative.
 	 */
 	if (bytes < real_space)
 		return 0;
 	return (bytes - real_space) >> 9;
 }
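
The >> 9 here is because i_blocks and these helpers count 512-byte units: for example, three 4KiB blocks hold 12288 bytes, and 12288 >> 9 = 24 such sectors.
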
 
-//
-// BAD: new directories have stat data of new type and all other items
-// of old type. Version stored in the inode says about body items, so
-// in update_stat_data we can not rely on inode, but have to check
-// item version directly
-//
+/*
+ * BAD: new directories have stat data of new type and all other items
+ * of old type. Version stored in the inode describes the body items, so
+ * in update_stat_data we can not rely on the inode, but have to check
+ * item version directly
+ */
 
-// called by read_locked_inode
+/* called by read_locked_inode */
 static void init_inode(struct inode *inode, struct treepath *path)
 {
 	struct buffer_head *bh;
 	struct item_head *ih;
 	__u32 rdev;
-	//int version = ITEM_VERSION_1;
 
 	bh = PATH_PLAST_BUFFER(path);
 	ih = tp_item_head(path);
@@ -1168,20 +1277,26 @@
 		inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
 		blocks = (inode->i_size + 511) >> 9;
 		blocks = _ROUND_UP(blocks, inode->i_sb->s_blocksize >> 9);
+
+		/*
+		 * there was a bug in <=3.5.23 when i_blocks could take
+		 * negative values. Starting from 3.5.17 this value could
+		 * even be stored in stat data. For such files we set
+		 * i_blocks based on file size. Just 2 notes: this can be
+		 * wrong for sparse files. The on-disk value will only be
+		 * updated if the file's inode ever changes
+		 */
 		if (inode->i_blocks > blocks) {
-			// there was a bug in <=3.5.23 when i_blocks could take negative
-			// values. Starting from 3.5.17 this value could even be stored in
-			// stat data. For such files we set i_blocks based on file
-			// size. Just 2 notes: this can be wrong for sparce files. On-disk value will be
-			// only updated if file's inode will ever change
 			inode->i_blocks = blocks;
 		}
 
 		rdev = sd_v1_rdev(sd);
 		REISERFS_I(inode)->i_first_direct_byte =
 		    sd_v1_first_direct_byte(sd);
-		/* an early bug in the quota code can give us an odd number for the
-		 ** block count.  This is incorrect, fix it here.
+
+		/*
+		 * an early bug in the quota code can give us an odd
+		 * number for the block count.  This is incorrect, fix it here.
 		 */
 		if (inode->i_blocks & 1) {
 			inode->i_blocks++;
@@ -1189,12 +1304,16 @@
 		inode_set_bytes(inode,
 				to_real_used_space(inode, inode->i_blocks,
 						   SD_V1_SIZE));
-		/* nopack is initially zero for v1 objects. For v2 objects,
-		   nopack is initialised from sd_attrs */
+		/*
+		 * nopack is initially zero for v1 objects. For v2 objects,
+		 * nopack is initialised from sd_attrs
+		 */
 		REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
 	} else {
-		// new stat data found, but object may have old items
-		// (directories and symlinks)
+		/*
+		 * new stat data found, but object may have old items
+		 * (directories and symlinks)
+		 */
 		struct stat_data *sd = (struct stat_data *)ih_item_body(bh, ih);
 
 		inode->i_mode = sd_v2_mode(sd);
@@ -1225,8 +1344,10 @@
 		inode_set_bytes(inode,
 				to_real_used_space(inode, inode->i_blocks,
 						   SD_V2_SIZE));
-		/* read persistent inode attributes from sd and initialise
-		   generic inode flags from them */
+		/*
+		 * read persistent inode attributes from sd and initialise
+		 * generic inode flags from them
+		 */
 		REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd);
 		sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
 	}
@@ -1249,7 +1370,7 @@
 	}
 }
 
-// update new stat data with inode fields
+/* update new stat data with inode fields */
 static void inode2sd(void *sd, struct inode *inode, loff_t size)
 {
 	struct stat_data *sd_v2 = (struct stat_data *)sd;
@@ -1273,7 +1394,7 @@
 	set_sd_v2_attrs(sd_v2, flags);
 }
 
-// used to copy inode's fields to old stat data
+/* used to copy inode's fields to old stat data */
 static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
 {
 	struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd;
@@ -1292,14 +1413,15 @@
 	else
 		set_sd_v1_blocks(sd_v1, to_fake_used_blocks(inode, SD_V1_SIZE));
 
-	// Sigh. i_first_direct_byte is back
+	/* Sigh. i_first_direct_byte is back */
 	set_sd_v1_first_direct_byte(sd_v1,
 				    REISERFS_I(inode)->i_first_direct_byte);
 }
 
-/* NOTE, you must prepare the buffer head before sending it here,
-** and then log it after the call
-*/
+/*
+ * NOTE, you must prepare the buffer head before sending it here,
+ * and then log it after the call
+ */
 static void update_stat_data(struct treepath *path, struct inode *inode,
 			     loff_t size)
 {
@@ -1313,8 +1435,8 @@
 		reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h",
 			       INODE_PKEY(inode), ih);
 
+	/* path points to old stat data */
 	if (stat_data_v1(ih)) {
-		// path points to old stat data
 		inode2sd_v1(ih_item_body(bh, ih), inode, size);
 	} else {
 		inode2sd(ih_item_body(bh, ih), inode, size);
@@ -1335,7 +1457,8 @@
 
 	BUG_ON(!th->t_trans_id);
 
-	make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);	//key type is unimportant
+	/* key type is unimportant */
+	make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);
 
 	for (;;) {
 		int pos;
@@ -1363,19 +1486,22 @@
 			return;
 		}
 
-		/* sigh, prepare_for_journal might schedule.  When it schedules the
-		 ** FS might change.  We have to detect that, and loop back to the
-		 ** search if the stat data item has moved
+		/*
+		 * sigh, prepare_for_journal might schedule.  When it
+		 * schedules the FS might change.  We have to detect that,
+		 * and loop back to the search if the stat data item has moved
 		 */
 		bh = get_last_bh(&path);
 		ih = tp_item_head(&path);
 		copy_item_head(&tmp_ih, ih);
 		fs_gen = get_generation(inode->i_sb);
 		reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
+
+		/* Stat_data item has been moved after scheduling. */
 		if (fs_changed(fs_gen, inode->i_sb)
 		    && item_moved(&tmp_ih, &path)) {
 			reiserfs_restore_prepared_buffer(inode->i_sb, bh);
-			continue;	/* Stat_data item has been moved after scheduling. */
+			continue;
 		}
 		break;
 	}
@@ -1385,23 +1511,23 @@
 	return;
 }
 
-/* reiserfs_read_locked_inode is called to read the inode off disk, and it
-** does a make_bad_inode when things go wrong.  But, we need to make sure
-** and clear the key in the private portion of the inode, otherwise a
-** corresponding iput might try to delete whatever object the inode last
-** represented.
-*/
+/*
+ * reiserfs_read_locked_inode is called to read the inode off disk, and it
+ * does a make_bad_inode when things go wrong.  But, we need to make sure
+ * and clear the key in the private portion of the inode, otherwise a
+ * corresponding iput might try to delete whatever object the inode last
+ * represented.
+ */
 static void reiserfs_make_bad_inode(struct inode *inode)
 {
 	memset(INODE_PKEY(inode), 0, KEY_SIZE);
 	make_bad_inode(inode);
 }
 
-//
-// initially this function was derived from minix or ext2's analog and
-// evolved as the prototype did
-//
-
+/*
+ * initially this function was derived from minix or ext2's analog and
+ * evolved as the prototype did
+ */
 int reiserfs_init_locked_inode(struct inode *inode, void *p)
 {
 	struct reiserfs_iget_args *args = (struct reiserfs_iget_args *)p;
@@ -1410,8 +1536,10 @@
 	return 0;
 }
 
-/* looks for stat data in the tree, and fills up the fields of in-core
-   inode stat data fields */
+/*
+ * looks for stat data in the tree, and fills up the stat data fields
+ * of the in-core inode
+ */
 void reiserfs_read_locked_inode(struct inode *inode,
 				struct reiserfs_iget_args *args)
 {
@@ -1422,8 +1550,10 @@
 
 	dirino = args->dirid;
 
-	/* set version 1, version 2 could be used too, because stat data
-	   key is the same in both versions */
+	/*
+	 * set version 1, version 2 could be used too, because stat data
+	 * key is the same in both versions
+	 */
 	key.version = KEY_FORMAT_3_5;
 	key.on_disk_key.k_dir_id = dirino;
 	key.on_disk_key.k_objectid = inode->i_ino;
@@ -1439,8 +1569,9 @@
 		reiserfs_make_bad_inode(inode);
 		return;
 	}
+
+	/* a stale NFS handle can trigger this without it being an error */
 	if (retval != ITEM_FOUND) {
-		/* a stale NFS handle can trigger this without it being an error */
 		pathrelse(&path_to_sd);
 		reiserfs_make_bad_inode(inode);
 		clear_nlink(inode);
@@ -1449,20 +1580,25 @@
 
 	init_inode(inode, &path_to_sd);
 
-	/* It is possible that knfsd is trying to access inode of a file
-	   that is being removed from the disk by some other thread. As we
-	   update sd on unlink all that is required is to check for nlink
-	   here. This bug was first found by Sizif when debugging
-	   SquidNG/Butterfly, forgotten, and found again after Philippe
-	   Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
+	/*
+	 * It is possible that knfsd is trying to access inode of a file
+	 * that is being removed from the disk by some other thread. As we
+	 * update sd on unlink all that is required is to check for nlink
+	 * here. This bug was first found by Sizif when debugging
+	 * SquidNG/Butterfly, forgotten, and found again after Philippe
+	 * Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
-
+	 *
-	   More logical fix would require changes in fs/inode.c:iput() to
-	   remove inode from hash-table _after_ fs cleaned disk stuff up and
-	   in iget() to return NULL if I_FREEING inode is found in
-	   hash-table. */
-	/* Currently there is one place where it's ok to meet inode with
-	   nlink==0: processing of open-unlinked and half-truncated files
-	   during mount (fs/reiserfs/super.c:finish_unfinished()). */
+	 * More logical fix would require changes in fs/inode.c:iput() to
+	 * remove inode from hash-table _after_ fs cleaned disk stuff up and
+	 * in iget() to return NULL if I_FREEING inode is found in
+	 * hash-table.
+	 */
+
+	/*
+	 * Currently there is one place where it's ok to meet inode with
+	 * nlink==0: processing of open-unlinked and half-truncated files
+	 * during mount (fs/reiserfs/super.c:finish_unfinished()).
+	 */
 	if ((inode->i_nlink == 0) &&
 	    !REISERFS_SB(inode->i_sb)->s_is_unlinked_ok) {
 		reiserfs_warning(inode->i_sb, "vs-13075",
@@ -1472,7 +1608,8 @@
 		reiserfs_make_bad_inode(inode);
 	}
 
-	reiserfs_check_path(&path_to_sd);	/* init inode should be relsing */
+	/* init inode should be relsing */
+	reiserfs_check_path(&path_to_sd);
 
 	/*
 	 * Stat data v1 doesn't support ACLs.
@@ -1481,7 +1618,7 @@
 		cache_no_acl(inode);
 }
 
-/**
+/*
  * reiserfs_find_actor() - "find actor" reiserfs supplies to iget5_locked().
  *
  * @inode:    inode from hash table to check
@@ -1556,7 +1693,8 @@
 struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
 		int fh_len, int fh_type)
 {
-	/* fhtype happens to reflect the number of u32s encoded.
+	/*
+	 * fhtype happens to reflect the number of u32s encoded.
 	 * due to a bug in earlier code, fhtype might indicate there
 	 * are more u32s then actually fitted.
 	 * so if fhtype seems to be more than len, reduce fhtype.
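
The clamp the comment asks for amounts to one compare; a minimal sketch, assuming fh_type and fh_len are the two counts named in the comment:

	/* an old bug could claim more u32s than were actually sent */
	if (fh_type > fh_len)
		fh_type = fh_len;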
@@ -1625,13 +1763,16 @@
 	return *lenp;
 }
 
-/* looks for stat data, then copies fields to it, marks the buffer
-   containing stat data as dirty */
-/* reiserfs inodes are never really dirty, since the dirty inode call
-** always logs them.  This call allows the VFS inode marking routines
-** to properly mark inodes for datasync and such, but only actually
-** does something when called for a synchronous update.
-*/
+/*
+ * looks for stat data, then copies fields to it, marks the buffer
+ * containing stat data as dirty
+ */
+/*
+ * reiserfs inodes are never really dirty, since the dirty inode call
+ * always logs them.  This call allows the VFS inode marking routines
+ * to properly mark inodes for datasync and such, but only actually
+ * does something when called for a synchronous update.
+ */
 int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	struct reiserfs_transaction_handle th;
@@ -1639,10 +1780,12 @@
 
 	if (inode->i_sb->s_flags & MS_RDONLY)
 		return -EROFS;
-	/* memory pressure can sometimes initiate write_inode calls with sync == 1,
-	 ** these cases are just when the system needs ram, not when the
-	 ** inode needs to reach disk for safety, and they can safely be
-	 ** ignored because the altered inode has already been logged.
+	/*
+	 * memory pressure can sometimes initiate write_inode calls with
+	 * sync == 1.  These cases are just when the system needs ram, not
+	 * when the inode needs to reach disk for safety, and they can
+	 * safely be ignored because the altered inode has already been
+	 * logged.
 	 */
 	if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) {
 		reiserfs_write_lock(inode->i_sb);
@@ -1655,8 +1798,10 @@
 	return 0;
 }
 
-/* stat data of new object is inserted already, this inserts the item
-   containing "." and ".." entries */
+/*
+ * stat data of new object is inserted already, this inserts the item
+ * containing "." and ".." entries
+ */
 static int reiserfs_new_directory(struct reiserfs_transaction_handle *th,
 				  struct inode *inode,
 				  struct item_head *ih, struct treepath *path,
@@ -1674,9 +1819,11 @@
 		      le32_to_cpu(ih->ih_key.k_objectid), DOT_OFFSET,
 		      TYPE_DIRENTRY, 3 /*key length */ );
 
-	/* compose item head for new item. Directories consist of items of
-	   old type (ITEM_VERSION_1). Do not set key (second arg is 0), it
-	   is done by reiserfs_new_inode */
+	/*
+	 * compose item head for new item. Directories consist of items of
+	 * old type (ITEM_VERSION_1). Do not set key (second arg is 0), it
+	 * is done by reiserfs_new_inode
+	 */
 	if (old_format_only(sb)) {
 		make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
 				  TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);
@@ -1714,9 +1861,12 @@
 	return reiserfs_insert_item(th, path, &key, ih, inode, body);
 }
 
-/* stat data of object has been inserted, this inserts the item
-   containing the body of symlink */
-static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct inode *inode,	/* Inode of symlink */
+/*
+ * stat data of object has been inserted, this inserts the item
+ * containing the body of symlink
+ */
+static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th,
+				struct inode *inode,
 				struct item_head *ih,
 				struct treepath *path, const char *symname,
 				int item_len)
@@ -1754,15 +1904,26 @@
 	return reiserfs_insert_item(th, path, &key, ih, inode, symname);
 }
 
-/* inserts the stat data into the tree, and then calls
-   reiserfs_new_directory (to insert ".", ".." item if new object is
-   directory) or reiserfs_new_symlink (to insert symlink body if new
-   object is symlink) or nothing (if new object is regular file)
+/*
+ * inserts the stat data into the tree, and then calls
+ * reiserfs_new_directory (to insert ".", ".." item if new object is
+ * directory) or reiserfs_new_symlink (to insert symlink body if new
+ * object is symlink) or nothing (if new object is regular file)
-
+ *
-   NOTE! uid and gid must already be set in the inode.  If we return
-   non-zero due to an error, we have to drop the quota previously allocated
-   for the fresh inode.  This can only be done outside a transaction, so
-   if we return non-zero, we also end the transaction.  */
+ * NOTE! uid and gid must already be set in the inode.  If we return
+ * non-zero due to an error, we have to drop the quota previously allocated
+ * for the fresh inode.  This can only be done outside a transaction, so
+ * if we return non-zero, we also end the transaction.
+ *
+ * @th: active transaction handle
+ * @dir: parent directory for new inode
+ * @mode: mode of new inode
+ * @symname: symlink contents if inode is symlink
+ * @isize: 0 for regular file, EMPTY_DIR_SIZE for dirs, strlen(symname) for
+ *         symlinks
+ * @inode: inode to be filled
+ * @security: optional security context to associate with this inode
+ */
 int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 		       struct inode *dir, umode_t mode, const char *symname,
 		       /* 0 for regular, EMTRY_DIR_SIZE for dirs,
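
The error contract spelled out above matters to every caller: on a non-zero return the transaction is already ended and the quota already dropped. A hypothetical caller sketch (the argument list is abbreviated to the parameters the kernel-doc names; the in-tree signature carries more):

	err = reiserfs_new_inode(&th, dir, mode, NULL /* no symname */,
				 0 /* isize: regular file */, inode,
				 &security);
	if (err) {
		/* transaction ended and quota dropped inside the callee;
		 * do NOT call journal_end() on 'th' again */
		goto out_failed;
	}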
@@ -1820,10 +1981,11 @@
 	}
 
 	if (old_format_only(sb))
-		/* not a perfect generation count, as object ids can be reused, but
-		 ** this is as good as reiserfs can do right now.
-		 ** note that the private part of inode isn't filled in yet, we have
-		 ** to use the directory.
+		/*
+		 * not a perfect generation count, as object ids can be reused,
+		 * but this is as good as reiserfs can do right now.
+		 * note that the private part of inode isn't filled in yet,
+		 * we have to use the directory.
 		 */
 		inode->i_generation = le32_to_cpu(INODE_PKEY(dir)->k_objectid);
 	else
@@ -1878,9 +2040,9 @@
 		goto out_bad_inode;
 	}
 	if (old_format_only(sb)) {
+		/* i_uid or i_gid is too big to be stored in stat data v3.5 */
 		if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) {
 			pathrelse(&path_to_key);
-			/* i_uid or i_gid is too big to be stored in stat data v3.5 */
 			err = -EINVAL;
 			goto out_bad_inode;
 		}
@@ -1888,9 +2050,11 @@
 	} else {
 		inode2sd(&sd, inode, inode->i_size);
 	}
-	// store in in-core inode the key of stat data and version all
-	// object items will have (directory items will have old offset
-	// format, other new objects will consist of new items)
+	/*
+	 * store in in-core inode the key of stat data and version all
+	 * object items will have (directory items will have old offset
+	 * format, other new objects will consist of new items)
+	 */
 	if (old_format_only(sb) || S_ISDIR(mode) || S_ISLNK(mode))
 		set_inode_item_key_version(inode, KEY_FORMAT_3_5);
 	else
@@ -1975,10 +2139,6 @@
 
 	return 0;
 
-/* it looks like you can easily compress these two goto targets into
- * one.  Keeping it like this doesn't actually hurt anything, and they
- * are place holders for what the quota code actually needs.
- */
       out_bad_inode:
 	/* Invalidate the object, nothing was inserted yet */
 	INODE_PKEY(inode)->k_objectid = 0;
@@ -1990,7 +2150,10 @@
 
       out_end_trans:
 	journal_end(th, th->t_super, th->t_blocks_allocated);
-	/* Drop can be outside and it needs more credits so it's better to have it outside */
+	/*
+	 * Drop can be outside and it needs more credits so it's better
+	 * to have it outside
+	 */
 	depth = reiserfs_write_unlock_nested(inode->i_sb);
 	dquot_drop(inode);
 	reiserfs_write_lock_nested(inode->i_sb, depth);
@@ -2006,25 +2169,26 @@
 }
 
 /*
-** finds the tail page in the page cache,
-** reads the last block in.
-**
-** On success, page_result is set to a locked, pinned page, and bh_result
-** is set to an up to date buffer for the last block in the file.  returns 0.
-**
-** tail conversion is not done, so bh_result might not be valid for writing
-** check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before
-** trying to write the block.
-**
-** on failure, nonzero is returned, page_result and bh_result are untouched.
-*/
+ * finds the tail page in the page cache,
+ * reads the last block in.
+ *
+ * On success, page_result is set to a locked, pinned page, and bh_result
+ * is set to an up to date buffer for the last block in the file.  returns 0.
+ *
+ * tail conversion is not done, so bh_result might not be valid for writing;
+ * check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before
+ * trying to write the block.
+ *
+ * on failure, nonzero is returned, page_result and bh_result are untouched.
+ */
 static int grab_tail_page(struct inode *inode,
 			  struct page **page_result,
 			  struct buffer_head **bh_result)
 {
 
-	/* we want the page with the last byte in the file,
-	 ** not the page that will hold the next byte for appending
+	/*
+	 * we want the page with the last byte in the file,
+	 * not the page that will hold the next byte for appending
 	 */
 	unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
 	unsigned long pos = 0;
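
A sketch of the calling convention just described: the page comes back locked and pinned, and because tail conversion is not done the caller must test the buffer before writing it (cleanup shown for the success path only):

	error = grab_tail_page(inode, &page, &bh);
	if (!error) {
		if (buffer_mapped(bh) && bh->b_blocknr != 0) {
			/* unformatted node: the block may be written */
		} else {
			/* direct item: nothing to write out on disk */
		}
		unlock_page(page);
		page_cache_release(page);
	}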
@@ -2036,10 +2200,11 @@
 	struct page *page;
 	int error;
 
-	/* we know that we are only called with inode->i_size > 0.
-	 ** we also know that a file tail can never be as big as a block
-	 ** If i_size % blocksize == 0, our file is currently block aligned
-	 ** and it won't need converting or zeroing after a truncate.
+	/*
+	 * we know that we are only called with inode->i_size > 0.
+	 * we also know that a file tail can never be as big as a block
+	 * If i_size % blocksize == 0, our file is currently block aligned
+	 * and it won't need converting or zeroing after a truncate.
 	 */
 	if ((offset & (blocksize - 1)) == 0) {
 		return -ENOENT;
@@ -2068,10 +2233,11 @@
 	} while (bh != head);
 
 	if (!buffer_uptodate(bh)) {
-		/* note, this should never happen, prepare_write should
-		 ** be taking care of this for us.  If the buffer isn't up to date,
-		 ** I've screwed up the code to find the buffer, or the code to
-		 ** call prepare_write
+		/*
+		 * note, this should never happen, prepare_write should be
+		 * taking care of this for us.  If the buffer isn't up to
+		 * date, I've screwed up the code to find the buffer, or the
+		 * code to call prepare_write
 		 */
 		reiserfs_error(inode->i_sb, "clm-6000",
 			       "error reading block %lu", bh->b_blocknr);
@@ -2091,11 +2257,11 @@
 }
 
 /*
-** vfs version of truncate file.  Must NOT be called with
-** a transaction already started.
-**
-** some code taken from block_truncate_page
-*/
+ * vfs version of truncate file.  Must NOT be called with
+ * a transaction already started.
+ *
+ * some code taken from block_truncate_page
+ */
 int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
 {
 	struct reiserfs_transaction_handle th;
@@ -2113,9 +2279,11 @@
 	if (inode->i_size > 0) {
 		error = grab_tail_page(inode, &page, &bh);
 		if (error) {
-			// -ENOENT means we truncated past the end of the file,
-			// and get_block_create_0 could not find a block to read in,
-			// which is ok.
+			/*
+			 * -ENOENT means we truncated past the end of the
+			 * file, and get_block_create_0 could not find a
+			 * block to read in, which is ok.
+			 */
 			if (error != -ENOENT)
 				reiserfs_error(inode->i_sb, "clm-6001",
 					       "grab_tail_page failed %d",
@@ -2125,25 +2293,30 @@
 		}
 	}
 
-	/* so, if page != NULL, we have a buffer head for the offset at
-	 ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
-	 ** then we have an unformatted node.  Otherwise, we have a direct item,
-	 ** and no zeroing is required on disk.  We zero after the truncate,
-	 ** because the truncate might pack the item anyway
-	 ** (it will unmap bh if it packs).
+	/*
+	 * so, if page != NULL, we have a buffer head for the offset at
+	 * the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
+	 * then we have an unformatted node.  Otherwise, we have a direct item,
+	 * and no zeroing is required on disk.  We zero after the truncate,
+	 * because the truncate might pack the item anyway
+	 * (it will unmap bh if it packs).
+	 *
+	 * it is enough to reserve space in transaction for 2 balancings:
+	 * one for "save" link adding and another for the first
+	 * cut_from_item. 1 is for update_sd
 	 */
-	/* it is enough to reserve space in transaction for 2 balancings:
-	   one for "save" link adding and another for the first
-	   cut_from_item. 1 is for update_sd */
 	error = journal_begin(&th, inode->i_sb,
 			      JOURNAL_PER_BALANCE_CNT * 2 + 1);
 	if (error)
 		goto out;
 	reiserfs_update_inode_transaction(inode);
 	if (update_timestamps)
-		/* we are doing real truncate: if the system crashes before the last
-		   transaction of truncating gets committed - on reboot the file
-		   either appears truncated properly or not truncated at all */
+		/*
+		 * we are doing real truncate: if the system crashes
+		 * before the last transaction of truncating gets committed
+		 * - on reboot the file either appears truncated properly
+		 * or not truncated at all
+		 */
 		add_save_link(&th, inode, 1);
 	err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps);
 	error =
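
The reservation passed to journal_begin() above decomposes exactly as the comment says:

	/*
	 *   "save" link insertion:  1 balancing = JOURNAL_PER_BALANCE_CNT
	 *   first cut_from_item:    1 balancing = JOURNAL_PER_BALANCE_CNT
	 *   update_sd:                                               + 1
	 *   total:                  JOURNAL_PER_BALANCE_CNT * 2 + 1
	 */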
@@ -2212,7 +2385,10 @@
 	int copy_size;
 	int trans_running = 0;
 
-	/* catch places below that try to log something without starting a trans */
+	/*
+	 * catch places below that try to log something without
+	 * starting a trans
+	 */
 	th.t_trans_id = 0;
 
 	if (!buffer_uptodate(bh_result)) {
@@ -2331,7 +2507,8 @@
 	kunmap(bh_result->b_page);
 
 	if (!retval && buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
-		/* we've copied data from the page into the direct item, so the
+		/*
+		 * we've copied data from the page into the direct item, so the
 		 * buffer in the page is now clean, mark it to reflect that.
 		 */
 		lock_buffer(bh_result);
@@ -2370,7 +2547,8 @@
 		return 0;
 	}
 
-	/* The page dirty bit is cleared before writepage is called, which
+	/*
+	 * The page dirty bit is cleared before writepage is called, which
 	 * means we have to tell create_empty_buffers to make dirty buffers
 	 * The page really should be up to date at this point, so tossing
 	 * in the BH_Uptodate is just a sanity check.
@@ -2381,8 +2559,9 @@
 	}
 	head = page_buffers(page);
 
-	/* last page in the file, zero out any contents past the
-	 ** last byte in the file
+	/*
+	 * last page in the file, zero out any contents past the
+	 * last byte in the file
 	 */
 	if (page->index >= end_index) {
 		unsigned last_offset;
@@ -2412,7 +2591,8 @@
 		           (!buffer_mapped(bh) || (buffer_mapped(bh)
 						       && bh->b_blocknr ==
 						       0))) {
-			/* not mapped yet, or it points to a direct item, search
+			/*
+			 * not mapped yet, or it points to a direct item, search
 			 * the btree for the mapping info, and log any direct
 			 * items found
 			 */
@@ -2453,7 +2633,8 @@
 			journal_mark_dirty(&th, s, bh);
 			continue;
 		}
-		/* from this point on, we know the buffer is mapped to a
+		/*
+		 * from this point on, we know the buffer is mapped to a
 		 * real block and not a direct item
 		 */
 		if (wbc->sync_mode != WB_SYNC_NONE) {
@@ -2520,7 +2701,8 @@
 	return error;
 
       fail:
-	/* catches various errors, we need to make sure any valid dirty blocks
+	/*
+	 * catches various errors, we need to make sure any valid dirty blocks
 	 * get to the media.  The page is currently locked and not marked for
 	 * writeback
 	 */
@@ -2533,8 +2715,8 @@
 			mark_buffer_async_write(bh);
 		} else {
 			/*
-			 * clear any dirty bits that might have come from getting
-			 * attached to a dirty page
+			 * clear any dirty bits that might have come from
+			 * getting attached to a dirty page
 			 */
 			clear_buffer_dirty(bh);
 		}
@@ -2614,15 +2796,18 @@
 	ret = __block_write_begin(page, pos, len, reiserfs_get_block);
 	if (ret && reiserfs_transaction_running(inode->i_sb)) {
 		struct reiserfs_transaction_handle *th = current->journal_info;
-		/* this gets a little ugly.  If reiserfs_get_block returned an
-		 * error and left a transacstion running, we've got to close it,
-		 * and we've got to free handle if it was a persistent transaction.
+		/*
+		 * this gets a little ugly.  If reiserfs_get_block returned an
+		 * error and left a transaction running, we've got to close
+		 * it, and we've got to free handle if it was a persistent
+		 * transaction.
 		 *
 		 * But, if we had nested into an existing transaction, we need
 		 * to just drop the ref count on the handle.
 		 *
 		 * If old_ref == 0, the transaction is from reiserfs_get_block,
-		 * and it was a persistent trans.  Otherwise, it was nested above.
+		 * and it was a persistent trans.  Otherwise, it was nested
+		 * above.
 		 */
 		if (th->t_refcount > old_ref) {
 			if (old_ref)
@@ -2671,15 +2856,18 @@
 	ret = __block_write_begin(page, from, len, reiserfs_get_block);
 	if (ret && reiserfs_transaction_running(inode->i_sb)) {
 		struct reiserfs_transaction_handle *th = current->journal_info;
-		/* this gets a little ugly.  If reiserfs_get_block returned an
-		 * error and left a transacstion running, we've got to close it,
-		 * and we've got to free handle if it was a persistent transaction.
+		/*
+		 * this gets a little ugly.  If reiserfs_get_block returned an
+		 * error and left a transaction running, we've got to close
+		 * it, and we've got to free handle if it was a persistent
+		 * transaction.
 		 *
 		 * But, if we had nested into an existing transaction, we need
 		 * to just drop the ref count on the handle.
 		 *
 		 * If old_ref == 0, the transaction is from reiserfs_get_block,
-		 * and it was a persistent trans.  Otherwise, it was nested above.
+		 * and it was a persistent trans.  Otherwise, it was nested
+		 * above.
 		 */
 		if (th->t_refcount > old_ref) {
 			if (old_ref)
@@ -2734,17 +2922,20 @@
 
 	reiserfs_commit_page(inode, page, start, start + copied);
 
-	/* generic_commit_write does this for us, but does not update the
-	 ** transaction tracking stuff when the size changes.  So, we have
-	 ** to do the i_size updates here.
+	/*
+	 * generic_commit_write does this for us, but does not update the
+	 * transaction tracking stuff when the size changes.  So, we have
+	 * to do the i_size updates here.
 	 */
 	if (pos + copied > inode->i_size) {
 		struct reiserfs_transaction_handle myth;
 		reiserfs_write_lock(inode->i_sb);
 		locked = true;
-		/* If the file have grown beyond the border where it
-		   can have a tail, unmark it as needing a tail
-		   packing */
+		/*
+		 * If the file has grown beyond the border where it
+		 * can have a tail, unmark it as needing a tail
+		 * packing
+		 */
 		if ((have_large_tails(inode->i_sb)
 		     && inode->i_size > i_block_size(inode) * 4)
 		    || (have_small_tails(inode->i_sb)
@@ -2759,8 +2950,8 @@
 		inode->i_size = pos + copied;
 		/*
 		 * this will just nest into our transaction.  It's important
-		 * to use mark_inode_dirty so the inode gets pushed around on the
-		 * dirty lists, and so that O_SYNC works as expected
+		 * to use mark_inode_dirty so the inode gets pushed around on
+		 * the dirty lists, and so that O_SYNC works as expected
 		 */
 		mark_inode_dirty(inode);
 		reiserfs_update_sd(&myth, inode);
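
The threshold test above is worth seeing in isolation: with large tails a file may keep its tail up to four block sizes, with small tails only up to one. A sketch (the cleared flag is an assumption here; it is not shown in this hunk):

	if ((have_large_tails(inode->i_sb)
	     && inode->i_size > i_block_size(inode) * 4)
	    || (have_small_tails(inode->i_sb)
		&& inode->i_size > i_block_size(inode)))
		/* assumed flag name; file is too big to pack again */
		REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;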
@@ -2822,15 +3013,18 @@
 	}
 	reiserfs_commit_page(inode, page, from, to);
 
-	/* generic_commit_write does this for us, but does not update the
-	 ** transaction tracking stuff when the size changes.  So, we have
-	 ** to do the i_size updates here.
+	/*
+	 * generic_commit_write does this for us, but does not update the
+	 * transaction tracking stuff when the size changes.  So, we have
+	 * to do the i_size updates here.
 	 */
 	if (pos > inode->i_size) {
 		struct reiserfs_transaction_handle myth;
-		/* If the file have grown beyond the border where it
-		   can have a tail, unmark it as needing a tail
-		   packing */
+		/*
+		 * If the file has grown beyond the border where it
+		 * can have a tail, unmark it as needing a tail
+		 * packing
+		 */
 		if ((have_large_tails(inode->i_sb)
 		     && inode->i_size > i_block_size(inode) * 4)
 		    || (have_small_tails(inode->i_sb)
@@ -2845,8 +3039,8 @@
 		inode->i_size = pos;
 		/*
 		 * this will just nest into our transaction.  It's important
-		 * to use mark_inode_dirty so the inode gets pushed around on the
-		 * dirty lists, and so that O_SYNC works as expected
+		 * to use mark_inode_dirty so the inode gets pushed around
+		 * on the dirty lists, and so that O_SYNC works as expected
 		 */
 		mark_inode_dirty(inode);
 		reiserfs_update_sd(&myth, inode);
@@ -2924,9 +3118,10 @@
 	}
 }
 
-/* decide if this buffer needs to stay around for data logging or ordered
-** write purposes
-*/
+/*
+ * decide if this buffer needs to stay around for data logging or ordered
+ * write purposes
+ */
 static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
 {
 	int ret = 1;
@@ -2937,7 +3132,8 @@
 	if (!buffer_mapped(bh)) {
 		goto free_jh;
 	}
-	/* the page is locked, and the only places that log a data buffer
+	/*
+	 * the page is locked, and the only places that log a data buffer
 	 * also lock the page.
 	 */
 	if (reiserfs_file_data_log(inode)) {
@@ -2952,7 +3148,8 @@
 		struct reiserfs_journal_list *jl;
 		struct reiserfs_jh *jh = bh->b_private;
 
-		/* why is this safe?
+		/*
+		 * why is this safe?
 		 * reiserfs_setattr updates i_size in the on disk
 		 * stat data before allowing vmtruncate to be called.
 		 *
@@ -3080,8 +3277,10 @@
 	return ret;
 }
 
-/* We thank Mingming Cao for helping us understand in great detail what
-   to do in this section of the code. */
+/*
+ * We thank Mingming Cao for helping us understand in great detail what
+ * to do in this section of the code.
+ */
 static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
 				  const struct iovec *iov, loff_t offset,
 				  unsigned long nr_segs)
@@ -3127,8 +3326,9 @@
 		dquot_initialize(inode);
 	reiserfs_write_lock(inode->i_sb);
 	if (attr->ia_valid & ATTR_SIZE) {
-		/* version 2 items will be caught by the s_maxbytes check
-		 ** done for us in vmtruncate
+		/*
+		 * version 2 items will be caught by the s_maxbytes check
+		 * done for us in vmtruncate
 		 */
 		if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
 		    attr->ia_size > MAX_NON_LFS) {
@@ -3189,7 +3389,10 @@
 		if (error)
 			return error;
 
-		/* (user+group)*(old+new) structure - we count quota info and , inode write (sb, inode) */
+		/*
+		 * (user+group)*(old+new) structure - we count quota
+		 * info and inode write (sb, inode)
+		 */
 		reiserfs_write_lock(inode->i_sb);
 		error = journal_begin(&th, inode->i_sb, jbegin_count);
 		reiserfs_write_unlock(inode->i_sb);
@@ -3203,8 +3406,10 @@
 			goto out;
 		}
 
-		/* Update corresponding info in inode so that everything is in
-		 * one transaction */
+		/*
+		 * Update corresponding info in inode so that everything
+		 * is in one transaction
+		 */
 		if (attr->ia_valid & ATTR_UID)
 			inode->i_uid = attr->ia_uid;
 		if (attr->ia_valid & ATTR_GID)
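
To make the one-transaction promise above concrete, a hedged sketch of the owner-change path (dquot_transfer() is the generic quota helper; locking and the remaining error paths are trimmed):

	error = journal_begin(&th, inode->i_sb, jbegin_count);
	if (error)
		goto out;
	error = dquot_transfer(inode, attr);	/* move the quota charges */
	if (error) {
		journal_end(&th, inode->i_sb, jbegin_count);
		goto out;
	}
	/* inode fields change under the same transaction as the quota */
	if (attr->ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (attr->ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	mark_inode_dirty(inode);
	error = journal_end(&th, inode->i_sb, jbegin_count);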
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 946ccbf..a4197c3 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -15,7 +15,8 @@
  * reiserfs_ioctl - handler for ioctl for inode
  * supported commands:
  *  1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
- *                           and prevent packing file (argument arg has to be non-zero)
+ *                           and prevent packing file (argument arg has to
+ *                           be non-zero)
  *  2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
  *  3) That's all for a while ...
  */
@@ -132,7 +133,10 @@
 long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
 				unsigned long arg)
 {
-	/* These are just misnamed, they actually get/put from/to user an int */
+	/*
+	 * These are just misnamed, they actually
+	 * get/put an int from/to user space
+	 */
 	switch (cmd) {
 	case REISERFS_IOC32_UNPACK:
 		cmd = REISERFS_IOC_UNPACK;
@@ -160,10 +164,10 @@
 int reiserfs_commit_write(struct file *f, struct page *page,
 			  unsigned from, unsigned to);
 /*
-** reiserfs_unpack
-** Function try to convert tail from direct item into indirect.
-** It set up nopack attribute in the REISERFS_I(inode)->nopack
-*/
+ * reiserfs_unpack
+ * Function tries to convert tail from direct item into indirect.
+ * It sets up the nopack attribute in REISERFS_I(inode)->nopack
+ */
 int reiserfs_unpack(struct inode *inode, struct file *filp)
 {
 	int retval = 0;
@@ -194,9 +198,10 @@
 		goto out;
 	}
 
-	/* we unpack by finding the page with the tail, and calling
-	 ** __reiserfs_write_begin on that page.  This will force a
-	 ** reiserfs_get_block to unpack the tail for us.
+	/*
+	 * we unpack by finding the page with the tail, and calling
+	 * __reiserfs_write_begin on that page.  This will force a
+	 * reiserfs_get_block to unpack the tail for us.
 	 */
 	index = inode->i_size >> PAGE_CACHE_SHIFT;
 	mapping = inode->i_mapping;
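
A sketch of the trick the comment describes: a zero-length write_begin on the tail's page forces reiserfs_get_block() to convert the direct item ('write_from', the tail's byte offset within that page, is assumed; error handling abbreviated):

	index = inode->i_size >> PAGE_CACHE_SHIFT;
	mapping = inode->i_mapping;
	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	/* zero-length write at the tail: forces the conversion */
	retval = __reiserfs_write_begin(page, write_from, 0);
	if (retval == 0)
		retval = reiserfs_commit_write(NULL, page, write_from,
					       write_from);
	unlock_page(page);
	page_cache_release(page);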
diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
index c9f1365..cb6b826 100644
--- a/fs/reiserfs/item_ops.c
+++ b/fs/reiserfs/item_ops.c
@@ -5,15 +5,17 @@
 #include <linux/time.h>
 #include "reiserfs.h"
 
-// this contains item handlers for old item types: sd, direct,
-// indirect, directory
+/*
+ * this contains item handlers for old item types: sd, direct,
+ * indirect, directory
+ */
 
-/* and where are the comments? how about saying where we can find an
-   explanation of each item handler method? -Hans */
+/*
+ * and where are the comments? how about saying where we can find an
+ * explanation of each item handler method? -Hans
+ */
 
-//////////////////////////////////////////////////////////////////////////////
-// stat data functions
-//
+/* stat data functions */
 static int sd_bytes_number(struct item_head *ih, int block_size)
 {
 	return 0;
@@ -60,7 +62,7 @@
 
 static void sd_check_item(struct item_head *ih, char *item)
 {
-	// FIXME: type something here!
+	/* unused */
 }
 
 static int sd_create_vi(struct virtual_node *vn,
@@ -68,7 +70,6 @@
 			int is_affected, int insert_size)
 {
 	vi->vi_index = TYPE_STAT_DATA;
-	//vi->vi_type |= VI_TYPE_STAT_DATA;// not needed?
 	return 0;
 }
 
@@ -117,15 +118,13 @@
 	.print_vi = sd_print_vi
 };
 
-//////////////////////////////////////////////////////////////////////////////
-// direct item functions
-//
+/* direct item functions */
 static int direct_bytes_number(struct item_head *ih, int block_size)
 {
 	return ih_item_len(ih);
 }
 
-// FIXME: this should probably switch to indirect as well
+/* FIXME: this should probably switch to indirect as well */
 static void direct_decrement_key(struct cpu_key *key)
 {
 	cpu_key_k_offset_dec(key);
@@ -144,7 +143,7 @@
 {
 	int j = 0;
 
-//    return;
+/*    return; */
 	printk("\"");
 	while (j < ih_item_len(ih))
 		printk("%c", item[j++]);
@@ -153,7 +152,7 @@
 
 static void direct_check_item(struct item_head *ih, char *item)
 {
-	// FIXME: type something here!
+	/* unused */
 }
 
 static int direct_create_vi(struct virtual_node *vn,
@@ -161,7 +160,6 @@
 			    int is_affected, int insert_size)
 {
 	vi->vi_index = TYPE_DIRECT;
-	//vi->vi_type |= VI_TYPE_DIRECT;
 	return 0;
 }
 
@@ -211,16 +209,13 @@
 	.print_vi = direct_print_vi
 };
 
-//////////////////////////////////////////////////////////////////////////////
-// indirect item functions
-//
-
+/* indirect item functions */
 static int indirect_bytes_number(struct item_head *ih, int block_size)
 {
-	return ih_item_len(ih) / UNFM_P_SIZE * block_size;	//- get_ih_free_space (ih);
+	return ih_item_len(ih) / UNFM_P_SIZE * block_size;
 }
 
-// decrease offset, if it becomes 0, change type to stat data
+/* decrease offset, if it becomes 0, change type to stat data */
 static void indirect_decrement_key(struct cpu_key *key)
 {
 	cpu_key_k_offset_dec(key);
@@ -228,7 +223,7 @@
 		set_cpu_key_k_type(key, TYPE_STAT_DATA);
 }
 
-// if it is not first item of the body, then it is mergeable
+/* if it is not first item of the body, then it is mergeable */
 static int indirect_is_left_mergeable(struct reiserfs_key *key,
 				      unsigned long bsize)
 {
@@ -236,7 +231,7 @@
 	return (le_key_k_offset(version, key) != 1);
 }
 
-// printing of indirect item
+/* printing of indirect item */
 static void start_new_sequence(__u32 * start, int *len, __u32 new)
 {
 	*start = new;
@@ -295,7 +290,7 @@
 
 static void indirect_check_item(struct item_head *ih, char *item)
 {
-	// FIXME: type something here!
+	/* unused */
 }
 
 static int indirect_create_vi(struct virtual_node *vn,
@@ -303,7 +298,6 @@
 			      int is_affected, int insert_size)
 {
 	vi->vi_index = TYPE_INDIRECT;
-	//vi->vi_type |= VI_TYPE_INDIRECT;
 	return 0;
 }
 
@@ -321,16 +315,19 @@
 	return indirect_check_left(vi, free, 0, 0);
 }
 
-// return size in bytes of 'units' units. If first == 0 - calculate from the head (left), otherwise - from tail (right)
+/*
+ * return size in bytes of 'units' units. If first == 0 - calculate
+ * from the head (left), otherwise - from tail (right)
+ */
 static int indirect_part_size(struct virtual_item *vi, int first, int units)
 {
-	// unit of indirect item is byte (yet)
+	/* unit of indirect item is byte (yet) */
 	return units;
 }
 
 static int indirect_unit_num(struct virtual_item *vi)
 {
-	// unit of indirect item is byte (yet)
+	/* unit of indirect item is byte (yet) */
 	return vi->vi_item_len - IH_SIZE;
 }
 
@@ -356,10 +353,7 @@
 	.print_vi = indirect_print_vi
 };
 
-//////////////////////////////////////////////////////////////////////////////
-// direntry functions
-//
-
+/* direntry functions */
 static int direntry_bytes_number(struct item_head *ih, int block_size)
 {
 	reiserfs_warning(NULL, "vs-16090",
@@ -428,7 +422,7 @@
 	int i;
 	struct reiserfs_de_head *deh;
 
-	// FIXME: type something here!
+	/* unused */
 	deh = (struct reiserfs_de_head *)item;
 	for (i = 0; i < ih_entry_count(ih); i++, deh++) {
 		;
@@ -439,7 +433,8 @@
 
 /*
  * function returns old entry number in directory item in real node
- * using new entry number in virtual item in virtual node */
+ * using new entry number in virtual item in virtual node
+ */
 static inline int old_entry_num(int is_affected, int virtual_entry_num,
 				int pos_in_item, int mode)
 {
@@ -463,9 +458,11 @@
 	return virtual_entry_num - 1;
 }
 
-/* Create an array of sizes of directory entries for virtual
-   item. Return space used by an item. FIXME: no control over
-   consuming of space used by this item handler */
+/*
+ * Create an array of sizes of directory entries for virtual
+ * item. Return space used by an item. FIXME: no control over
+ * consuming of space used by this item handler
+ */
 static int direntry_create_vi(struct virtual_node *vn,
 			      struct virtual_item *vi,
 			      int is_affected, int insert_size)
@@ -529,10 +526,10 @@
 
 }
 
-//
-// return number of entries which may fit into specified amount of
-// free space, or -1 if free space is not enough even for 1 entry
-//
+/*
+ * return number of entries which may fit into specified amount of
+ * free space, or -1 if free space is not enough even for 1 entry
+ */
 static int direntry_check_left(struct virtual_item *vi, int free,
 			       int start_skip, int end_skip)
 {
@@ -541,8 +538,8 @@
 	struct direntry_uarea *dir_u = vi->vi_uarea;
 
 	for (i = start_skip; i < dir_u->entry_count - end_skip; i++) {
+		/* i-th entry doesn't fit into the remaining free space */
 		if (dir_u->entry_sizes[i] > free)
-			/* i-th entry doesn't fit into the remaining free space */
 			break;
 
 		free -= dir_u->entry_sizes[i];
@@ -570,8 +567,8 @@
 	struct direntry_uarea *dir_u = vi->vi_uarea;
 
 	for (i = dir_u->entry_count - 1; i >= 0; i--) {
+		/* i-th entry doesn't fit into the remaining free space */
 		if (dir_u->entry_sizes[i] > free)
-			/* i-th entry doesn't fit into the remaining free space */
 			break;
 
 		free -= dir_u->entry_sizes[i];
@@ -643,9 +640,7 @@
 	.print_vi = direntry_print_vi
 };
 
-//////////////////////////////////////////////////////////////////////////////
-// Error catching functions to catch errors caused by incorrect item types.
-//
+/* Error catching functions to catch errors caused by incorrect item types. */
 static int errcatch_bytes_number(struct item_head *ih, int block_size)
 {
 	reiserfs_warning(NULL, "green-16001",
@@ -685,8 +680,12 @@
 {
 	reiserfs_warning(NULL, "green-16006",
 			 "Invalid item type observed, run fsck ASAP");
-	return 0;		// We might return -1 here as well, but it won't help as create_virtual_node() from where
-	// this operation is called from is of return type void.
+	/*
+	 * We might return -1 here as well, but it won't help as
+	 * create_virtual_node(), from where this operation is called,
+	 * is of return type void.
+	 */
+	return 0;
 }
 
 static int errcatch_check_left(struct virtual_item *vi, int free,
@@ -739,9 +738,6 @@
 	errcatch_print_vi
 };
 
-//////////////////////////////////////////////////////////////////////////////
-//
-//
 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
 #error Item types must use disk-format assigned values.
 #endif
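
For orientation, the reason the #error insists on those exact values: the on-disk item type doubles as the index into the handler table built from the ops structs in this file. A sketch of that table (array and struct names are assumptions; the in-tree table also reserves a slot for unknown types):

struct item_operations *item_ops[] = {
	&stat_data_ops,		/* TYPE_STAT_DATA == 0 */
	&indirect_ops,		/* TYPE_INDIRECT  == 1 */
	&direct_ops,		/* TYPE_DIRECT    == 2 */
	&direntry_ops,		/* TYPE_DIRENTRY  == 3 */
	&errcatch_ops,		/* everything else lands here */
};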
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 2259211..48f03e5 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1,38 +1,38 @@
 /*
-** Write ahead logging implementation copyright Chris Mason 2000
-**
-** The background commits make this code very interrelated, and
-** overly complex.  I need to rethink things a bit....The major players:
-**
-** journal_begin -- call with the number of blocks you expect to log.
-**                  If the current transaction is too
-** 		    old, it will block until the current transaction is
-** 		    finished, and then start a new one.
-**		    Usually, your transaction will get joined in with
-**                  previous ones for speed.
-**
-** journal_join  -- same as journal_begin, but won't block on the current
-**                  transaction regardless of age.  Don't ever call
-**                  this.  Ever.  There are only two places it should be
-**                  called from, and they are both inside this file.
-**
-** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
-**                       that might make them get sent to disk
-**                       and then marks them BH_JDirty.  Puts the buffer head
-**                       into the current transaction hash.
-**
-** journal_end -- if the current transaction is batchable, it does nothing
-**                   otherwise, it could do an async/synchronous commit, or
-**                   a full flush of all log and real blocks in the
-**                   transaction.
-**
-** flush_old_commits -- if the current transaction is too old, it is ended and
-**                      commit blocks are sent to disk.  Forces commit blocks
-**                      to disk for all backgrounded commits that have been
-**                      around too long.
-**		     -- Note, if you call this as an immediate flush from
-**		        from within kupdate, it will ignore the immediate flag
-*/
+ * Write ahead logging implementation copyright Chris Mason 2000
+ *
+ * The background commits make this code very interrelated, and
+ * overly complex.  I need to rethink things a bit....The major players:
+ *
+ * journal_begin -- call with the number of blocks you expect to log.
+ *                  If the current transaction is too
+ *		    old, it will block until the current transaction is
+ *		    finished, and then start a new one.
+ *		    Usually, your transaction will get joined in with
+ *                  previous ones for speed.
+ *
+ * journal_join  -- same as journal_begin, but won't block on the current
+ *                  transaction regardless of age.  Don't ever call
+ *                  this.  Ever.  There are only two places it should be
+ *                  called from, and they are both inside this file.
+ *
+ * journal_mark_dirty -- adds blocks into this transaction.  clears any flags
+ *                       that might make them get sent to disk
+ *                       and then marks them BH_JDirty.  Puts the buffer head
+ *                       into the current transaction hash.
+ *
+ * journal_end -- if the current transaction is batchable, it does nothing
+ *                   otherwise, it could do an async/synchronous commit, or
+ *                   a full flush of all log and real blocks in the
+ *                   transaction.
+ *
+ * flush_old_commits -- if the current transaction is too old, it is ended and
+ *                      commit blocks are sent to disk.  Forces commit blocks
+ *                      to disk for all backgrounded commits that have been
+ *                      around too long.
+ *		     -- Note, if you call this as an immediate flush
+ *		        from within kupdate, it will ignore the immediate flag
+ */
 
 #include <linux/time.h>
 #include <linux/semaphore.h>
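
Read together, the entry points described above compose into one pattern; a minimal caller sketch logging a single buffer (error handling trimmed; 'nblocks' is the count the caller expects to log):

static int log_one_buffer(struct super_block *sb,
			  struct buffer_head *bh, int nblocks)
{
	struct reiserfs_transaction_handle th;
	int err;

	err = journal_begin(&th, sb, nblocks);	/* may join an older trans */
	if (err)
		return err;

	reiserfs_prepare_for_journal(sb, bh, 1);
	journal_mark_dirty(&th, sb, bh);	/* hash bh, set BH_JDirty */

	/* batchable transactions return quickly; flushing is deferred */
	return journal_end(&th, sb, nblocks);
}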
@@ -58,16 +58,19 @@
 #define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                                j_working_list))
 
-#define JOURNAL_TRANS_HALF 1018	/* must be correct to keep the desc and commit
-				   structs at 4k */
+/* must be correct to keep the desc and commit structs at 4k */
+#define JOURNAL_TRANS_HALF 1018
 #define BUFNR 64		/*read ahead */
 
 /* cnode stat bits.  Move these into reiserfs_fs.h */
 
-#define BLOCK_FREED 2		/* this block was freed, and can't be written.  */
-#define BLOCK_FREED_HOLDER 3	/* this block was freed during this transaction, and can't be written */
+/* this block was freed, and can't be written.  */
+#define BLOCK_FREED 2
+/* this block was freed during this transaction, and can't be written */
+#define BLOCK_FREED_HOLDER 3
 
-#define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
+/* used in flush_journal_list */
+#define BLOCK_NEEDS_FLUSH 4
 #define BLOCK_DIRTIED 5
 
 /* journal list state bits */
@@ -100,8 +103,10 @@
 /* values for join in do_journal_begin_r */
 enum {
 	JBEGIN_REG = 0,		/* regular journal begin */
-	JBEGIN_JOIN = 1,	/* join the running transaction if at all possible */
-	JBEGIN_ABORT = 2,	/* called from cleanup code, ignores aborted flag */
+	/* join the running transaction if at all possible */
+	JBEGIN_JOIN = 1,
+	/* called from cleanup code, ignores aborted flag */
+	JBEGIN_ABORT = 2,
 };
 
 static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
@@ -116,10 +121,11 @@
 }
 
 /*
-** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
-** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
-** more details.
-*/
+ * clears BH_Dirty and sticks the buffer on the clean list.  Called because
+ * I can't allow refile_buffer to make schedule happen after I've freed a
+ * block.  Look at remove_from_transaction and journal_mark_freed for
+ * more details.
+ */
 static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
 {
 	if (bh) {
@@ -197,7 +203,8 @@
 			list_add(&bn->list, &journal->j_bitmap_nodes);
 			journal->j_free_bitmap_nodes++;
 		} else {
-			break;	/* this is ok, we'll try again when more are needed */
+			/* this is ok, we'll try again when more are needed */
+			break;
 		}
 	}
 }
@@ -232,8 +239,8 @@
 }
 
 /*
-** only call this on FS unmount.
-*/
+ * only call this on FS unmount.
+ */
 static int free_list_bitmaps(struct super_block *sb,
 			     struct reiserfs_list_bitmap *jb_array)
 {
@@ -268,9 +275,9 @@
 }
 
 /*
-** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
-** jb_array is the array to be filled in.
-*/
+ * get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
+ * jb_array is the array to be filled in.
+ */
 int reiserfs_allocate_list_bitmaps(struct super_block *sb,
 				   struct reiserfs_list_bitmap *jb_array,
 				   unsigned int bmap_nr)
@@ -299,9 +306,9 @@
 }
 
 /*
-** find an available list bitmap.  If you can't find one, flush a commit list
-** and try again
-*/
+ * find an available list bitmap.  If you can't find one, flush a commit list
+ * and try again
+ */
 static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
 						    struct reiserfs_journal_list
 						    *jl)
@@ -325,18 +332,18 @@
 			break;
 		}
 	}
-	if (jb->journal_list) {	/* double check to make sure if flushed correctly */
+	/* double check to make sure it was flushed correctly */
+	if (jb->journal_list)
 		return NULL;
-	}
 	jb->journal_list = jl;
 	return jb;
 }
 
 /*
-** allocates a new chunk of X nodes, and links them all together as a list.
-** Uses the cnode->next and cnode->prev pointers
-** returns NULL on failure
-*/
+ * allocates a new chunk of X nodes, and links them all together as a list.
+ * Uses the cnode->next and cnode->prev pointers
+ * returns NULL on failure
+ */
 static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
 {
 	struct reiserfs_journal_cnode *head;
@@ -358,9 +365,7 @@
 	return head;
 }
 
-/*
-** pulls a cnode off the free list, or returns NULL on failure
-*/
+/* pulls a cnode off the free list, or returns NULL on failure */
 static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
 {
 	struct reiserfs_journal_cnode *cn;
@@ -386,8 +391,8 @@
 }
 
 /*
-** returns a cnode to the free list
-*/
+ * returns a cnode to the free list
+ */
 static void free_cnode(struct super_block *sb,
 		       struct reiserfs_journal_cnode *cn)
 {
@@ -412,7 +417,10 @@
 	clear_buffer_journal_restore_dirty(bh);
 }
 
-/* return a cnode with same dev, block number and size in table, or null if not found */
+/*
+ * return a cnode with same dev, block number and size in table,
+ * or null if not found
+ */
 static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
 								  super_block
 								  *sb,
@@ -432,23 +440,24 @@
 }
 
 /*
-** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
-** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
-** being overwritten by a replay after crashing.
-**
-** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
-** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
-** sure you never write the block without logging it.
-**
-** next_zero_bit is a suggestion about the next block to try for find_forward.
-** when bl is rejected because it is set in a journal list bitmap, we search
-** for the next zero bit in the bitmap that rejected bl.  Then, we return that
-** through next_zero_bit for find_forward to try.
-**
-** Just because we return something in next_zero_bit does not mean we won't
-** reject it on the next call to reiserfs_in_journal
-**
-*/
+ * this actually means 'can this block be reallocated yet?'.  If you set
+ * search_all, a block can only be allocated if it is not in the current
+ * transaction, was not freed by the current transaction, and has no chance
+ * of ever being overwritten by a replay after crashing.
+ *
+ * If you don't set search_all, a block can only be allocated if it is not
+ * in the current transaction.  Since deleting a block removes it from the
+ * current transaction, this case should never happen.  If you don't set
+ * search_all, make sure you never write the block without logging it.
+ *
+ * next_zero_bit is a suggestion about the next block to try for find_forward.
+ * when bl is rejected because it is set in a journal list bitmap, we search
+ * for the next zero bit in the bitmap that rejected bl.  Then, we return
+ * that through next_zero_bit for find_forward to try.
+ *
+ * Just because we return something in next_zero_bit does not mean we won't
+ * reject it on the next call to reiserfs_in_journal
+ */
 int reiserfs_in_journal(struct super_block *sb,
 			unsigned int bmap_nr, int bit_nr, int search_all,
 			b_blocknr_t * next_zero_bit)
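
The next_zero_bit protocol above implies a retry loop on the allocator side; a hypothetical caller sketch (find_forward itself is not part of this patch):

	b_blocknr_t next = 0;

	while (reiserfs_in_journal(sb, bmap_nr, bit_nr, 1, &next)) {
		/* 'next' is only a hint; it may be rejected again */
		if (next <= (b_blocknr_t)bit_nr)
			break;		/* no forward progress possible */
		bit_nr = next;
	}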
@@ -462,9 +471,11 @@
 	*next_zero_bit = 0;	/* always start this at zero. */
 
 	PROC_INFO_INC(sb, journal.in_journal);
-	/* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
-	 ** if we crash before the transaction that freed it commits,  this transaction won't
-	 ** have committed either, and the block will never be written
+	/*
+	 * If we aren't doing a search_all, this is a metablock, and it
+	 * will be logged before use.  If we crash before the transaction
+	 * that freed it commits, this transaction won't have committed
+	 * either, and the block will never be written
 	 */
 	if (search_all) {
 		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
@@ -504,8 +515,7 @@
 	return 0;
 }
 
-/* insert cn into table
-*/
+/* insert cn into table */
 static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
 				       struct reiserfs_journal_cnode *cn)
 {
@@ -551,10 +561,10 @@
 }
 
 /*
-** this used to be much more involved, and I'm keeping it just in case things get ugly again.
-** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
-** transaction.
-*/
+ * this used to be much more involved, and I'm keeping it just in case
+ * things get ugly again.  it gets called by flush_commit_list, and
+ * cleans up any data stored about blocks freed during a transaction.
+ */
 static void cleanup_freed_for_journal_list(struct super_block *sb,
 					   struct reiserfs_journal_list *jl)
 {
@@ -753,7 +763,8 @@
 		get_bh(bh);
 		jh = alloc_jh();
 		spin_lock(&j->j_dirty_buffers_lock);
-		/* buffer must be locked for __add_jh, should be able to have
+		/*
+		 * buffer must be locked for __add_jh, should be able to have
 		 * two adds at the same time
 		 */
 		BUG_ON(bh->b_private);
@@ -811,7 +822,8 @@
 			spin_lock(lock);
 			goto loop_next;
 		}
-		/* in theory, dirty non-uptodate buffers should never get here,
+		/*
+		 * in theory, dirty non-uptodate buffers should never get here,
 		 * but the upper layer io error paths still have a few quirks.
 		 * Handle them here as gracefully as we can
 		 */
@@ -849,13 +861,14 @@
 		if (!buffer_uptodate(bh)) {
 			ret = -EIO;
 		}
-		/* ugly interaction with invalidatepage here.
-		 * reiserfs_invalidate_page will pin any buffer that has a valid
-		 * journal head from an older transaction.  If someone else sets
-		 * our buffer dirty after we write it in the first loop, and
-		 * then someone truncates the page away, nobody will ever write
-		 * the buffer. We're safe if we write the page one last time
-		 * after freeing the journal header.
+		/*
+		 * ugly interaction with invalidatepage here.
+		 * reiserfs_invalidate_page will pin any buffer that has a
+		 * valid journal head from an older transaction.  If someone
+		 * else sets our buffer dirty after we write it in the first
+		 * loop, and then someone truncates the page away, nobody
+		 * will ever write the buffer. We're safe if we write the
+		 * page one last time after freeing the journal header.
 		 */
 		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
 			spin_unlock(lock);
@@ -916,9 +929,11 @@
 				if (!journal_list_still_alive(s, trans_id))
 					return 1;
 
-				/* the one we just flushed is gone, this means all
-				 * older lists are also gone, so first_jl is no longer
-				 * valid either.  Go back to the beginning.
+				/*
+				 * the one we just flushed is gone, this means
+				 * all older lists are also gone, so first_jl
+				 * is no longer valid either.  Go back to the
+				 * beginning.
 				 */
 				if (!journal_list_still_alive
 				    (s, other_trans_id)) {
@@ -951,12 +966,12 @@
 }
 
 /*
-** if this journal list still has commit blocks unflushed, send them to disk.
-**
-** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
-** Before the commit block can by written, every other log block must be safely on disk
-**
-*/
+ * if this journal list still has commit blocks unflushed, send them to disk.
+ *
+ * log areas must be flushed in order (transaction 2 can't commit before
+ * transaction 1).  Before the commit block can be written, every other log
+ * block must be safely on disk
+ */
 static int flush_commit_list(struct super_block *s,
 			     struct reiserfs_journal_list *jl, int flushall)
 {
@@ -975,8 +990,9 @@
 		return 0;
 	}
 
-	/* before we can put our commit blocks on disk, we have to make sure everyone older than
-	 ** us is on disk too
+	/*
+	 * before we can put our commit blocks on disk, we have to make
+	 * sure everyone older than us is on disk too
 	 */
 	BUG_ON(jl->j_len <= 0);
 	BUG_ON(trans_id == journal->j_trans_id);
@@ -984,7 +1000,10 @@
 	get_journal_list(jl);
 	if (flushall) {
 		if (flush_older_commits(s, jl) == 1) {
-			/* list disappeared during flush_older_commits.  return */
+			/*
+			 * list disappeared during flush_older_commits.
+			 * return
+			 */
 			goto put_jl;
 		}
 	}
@@ -1056,9 +1075,10 @@
 		depth = reiserfs_write_unlock_nested(s);
 		__wait_on_buffer(tbh);
 		reiserfs_write_lock_nested(s, depth);
-		// since we're using ll_rw_blk above, it might have skipped over
-		// a locked buffer.  Double check here
-		//
+		/*
+		 * since we're using ll_rw_blk above, it might have skipped
+		 * over a locked buffer.  Double check here
+		 */
 		/* redundant, sync_dirty_buffer() checks */
 		if (buffer_dirty(tbh)) {
 			depth = reiserfs_write_unlock_nested(s);
@@ -1072,17 +1092,21 @@
 #endif
 			retval = -EIO;
 		}
-		put_bh(tbh);	/* once for journal_find_get_block */
-		put_bh(tbh);	/* once due to original getblk in do_journal_end */
+		/* once for journal_find_get_block */
+		put_bh(tbh);
+		/* once due to original getblk in do_journal_end */
+		put_bh(tbh);
 		atomic_dec(&(jl->j_commit_left));
 	}
 
 	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);
 
-	/* If there was a write error in the journal - we can't commit
+	/*
+	 * If there was a write error in the journal - we can't commit
 	 * this transaction - it will be invalid and, if successful,
 	 * will just end up propagating the write error out to
-	 * the file system. */
+	 * the file system.
+	 */
 	if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {
 		if (buffer_dirty(jl->j_commit_bh))
 			BUG();
@@ -1095,9 +1119,11 @@
 		reiserfs_write_lock_nested(s, depth);
 	}
 
-	/* If there was a write error in the journal - we can't commit this
+	/*
+	 * If there was a write error in the journal - we can't commit this
 	 * transaction - it will be invalid and, if successful, will just end
-	 * up propagating the write error out to the filesystem. */
+	 * up propagating the write error out to the filesystem.
+	 */
 	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
 #ifdef CONFIG_REISERFS_CHECK
 		reiserfs_warning(s, "journal-615", "buffer write failed");
@@ -1112,7 +1138,10 @@
 	}
 	journal->j_last_commit_id = jl->j_trans_id;
 
-	/* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
+	/*
+	 * now, every commit block is on the disk.  It is safe to allow
+	 * blocks freed during this transaction to be reallocated
+	 */
 	cleanup_freed_for_journal_list(s, jl);
 
 	retval = retval ? retval : journal->j_errno;
@@ -1136,9 +1165,9 @@
 }
 
 /*
-** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
-** returns NULL if it can't find anything
-*/
+ * flush_journal_list frequently needs to find a newer transaction for a
+ * given block.  This does that, or returns NULL if it can't find anything
+ */
 static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
 							  reiserfs_journal_cnode
 							  *cn)
@@ -1162,10 +1191,11 @@
 				int);
 
 /*
-** once all the real blocks have been flushed, it is safe to remove them from the
-** journal list for this transaction.  Aside from freeing the cnode, this also allows the
-** block to be reallocated for data blocks if it had been deleted.
-*/
+ * once all the real blocks have been flushed, it is safe to remove them
+ * from the journal list for this transaction.  Aside from freeing the
+ * cnode, this also allows the block to be reallocated for data blocks
+ * if it had been deleted.
+ */
 static void remove_all_from_journal_list(struct super_block *sb,
 					 struct reiserfs_journal_list *jl,
 					 int debug)
@@ -1174,8 +1204,9 @@
 	struct reiserfs_journal_cnode *cn, *last;
 	cn = jl->j_realblock;
 
-	/* which is better, to lock once around the whole loop, or
-	 ** to lock for each call to remove_journal_hash?
+	/*
+	 * which is better, to lock once around the whole loop, or
+	 * to lock for each call to remove_journal_hash?
 	 */
 	while (cn) {
 		if (cn->blocknr != 0) {
@@ -1197,12 +1228,13 @@
 }
 
 /*
-** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
-** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
-** releasing blocks in this transaction for reuse as data blocks.
-** called by flush_journal_list, before it calls remove_all_from_journal_list
-**
-*/
+ * if this timestamp is greater than the timestamp we wrote last to the
+ * header block, write it to the header block.  once this is done, I can
+ * safely say the log area for this transaction won't ever be replayed,
+ * and I can start releasing blocks in this transaction for reuse as data
+ * blocks.  called by flush_journal_list, before it calls
+ * remove_all_from_journal_list
+ */
 static int _update_journal_header_block(struct super_block *sb,
 					unsigned long offset,
 					unsigned int trans_id)
@@ -1272,7 +1304,8 @@
 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
 	unsigned int trans_id = jl->j_trans_id;
 
-	/* we know we are the only ones flushing things, no extra race
+	/*
+	 * we know we are the only ones flushing things, no extra race
 	 * protection is required.
 	 */
       restart:
@@ -1302,15 +1335,16 @@
 	}
 }
 
-/* flush a journal list, both commit and real blocks
-**
-** always set flushall to 1, unless you are calling from inside
-** flush_journal_list
-**
-** IMPORTANT.  This can only be called while there are no journal writers,
-** and the journal is locked.  That means it can only be called from
-** do_journal_end, or by journal_release
-*/
+/*
+ * flush a journal list, both commit and real blocks
+ *
+ * always set flushall to 1, unless you are calling from inside
+ * flush_journal_list
+ *
+ * IMPORTANT.  This can only be called while there are no journal writers,
+ * and the journal is locked.  That means it can only be called from
+ * do_journal_end, or by journal_release
+ */
 static int flush_journal_list(struct super_block *s,
 			      struct reiserfs_journal_list *jl, int flushall)
 {
@@ -1352,8 +1386,9 @@
 		goto flush_older_and_return;
 	}
 
-	/* start by putting the commit list on disk.  This will also flush
-	 ** the commit lists of any olders transactions
+	/*
+	 * start by putting the commit list on disk.  This will also flush
+	 * the commit lists of any older transactions
 	 */
 	flush_commit_list(s, jl, 1);
 
@@ -1367,8 +1402,9 @@
 		goto flush_older_and_return;
 	}
 
-	/* loop through each cnode, see if we need to write it,
-	 ** or wait on a more recent transaction, or just ignore it
+	/*
+	 * loop through each cnode, see if we need to write it,
+	 * or wait on a more recent transaction, or just ignore it
 	 */
 	if (atomic_read(&(journal->j_wcount)) != 0) {
 		reiserfs_panic(s, "journal-844", "journal list is flushing, "
@@ -1384,20 +1420,25 @@
 			goto free_cnode;
 		}
 
-		/* This transaction failed commit. Don't write out to the disk */
+		/*
+		 * This transaction failed commit.
+		 * Don't write out to the disk
+		 */
 		if (!(jl->j_state & LIST_DIRTY))
 			goto free_cnode;
 
 		pjl = find_newer_jl_for_cn(cn);
-		/* the order is important here.  We check pjl to make sure we
-		 ** don't clear BH_JDirty_wait if we aren't the one writing this
-		 ** block to disk
+		/*
+		 * the order is important here.  We check pjl to make sure we
+		 * don't clear BH_JDirty_wait if we aren't the one writing this
+		 * block to disk
 		 */
 		if (!pjl && cn->bh) {
 			saved_bh = cn->bh;
 
-			/* we do this to make sure nobody releases the buffer while
-			 ** we are working with it
+			/*
+			 * we do this to make sure nobody releases the
+			 * buffer while we are working with it
 			 */
 			get_bh(saved_bh);
 
@@ -1406,13 +1447,17 @@
 				was_jwait = 1;
 				was_dirty = 1;
 			} else if (can_dirty(cn)) {
-				/* everything with !pjl && jwait should be writable */
+				/*
+				 * everything with !pjl && jwait
+				 * should be writable
+				 */
 				BUG();
 			}
 		}
 
-		/* if someone has this block in a newer transaction, just make
-		 ** sure they are committed, and don't try writing it to disk
+		/*
+		 * if someone has this block in a newer transaction, just make
+		 * sure they are committed, and don't try writing it to disk
 		 */
 		if (pjl) {
 			if (atomic_read(&pjl->j_commit_left))
@@ -1420,16 +1465,18 @@
 			goto free_cnode;
 		}
 
-		/* bh == NULL when the block got to disk on its own, OR,
-		 ** the block got freed in a future transaction
+		/*
+		 * bh == NULL when the block got to disk on its own, OR,
+		 * the block got freed in a future transaction
 		 */
 		if (saved_bh == NULL) {
 			goto free_cnode;
 		}
 
-		/* this should never happen.  kupdate_one_transaction has this list
-		 ** locked while it works, so we should never see a buffer here that
-		 ** is not marked JDirty_wait
+		/*
+		 * this should never happen.  kupdate_one_transaction has
+		 * this list locked while it works, so we should never see a
+		 * buffer here that is not marked JDirty_wait
 		 */
 		if ((!was_jwait) && !buffer_locked(saved_bh)) {
 			reiserfs_warning(s, "journal-813",
@@ -1440,7 +1487,10 @@
 					 was_jwait ? ' ' : '!');
 		}
 		if (was_dirty) {
-			/* we inc again because saved_bh gets decremented at free_cnode */
+			/*
+			 * we inc again because saved_bh gets decremented
+			 * at free_cnode
+			 */
 			get_bh(saved_bh);
 			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
 			lock_buffer(saved_bh);
@@ -1460,7 +1510,10 @@
 		last = cn;
 		cn = cn->next;
 		if (saved_bh) {
-			/* we incremented this to keep others from taking the buffer head away */
+			/*
+			 * we incremented this to keep others from
+			 * taking the buffer head away
+			 */
 			put_bh(saved_bh);
 			if (atomic_read(&(saved_bh->b_count)) < 0) {
 				reiserfs_warning(s, "journal-945",
@@ -1492,8 +1545,10 @@
 #endif
 					err = -EIO;
 				}
-				/* note, we must clear the JDirty_wait bit after the up to date
-				 ** check, otherwise we race against our flushpage routine
+				/*
+				 * note, we must clear the JDirty_wait bit
+				 * after the up to date check, otherwise we
+				 * race against our flushpage routine
 				 */
 				BUG_ON(!test_clear_buffer_journal_dirty
 				       (cn->bh));
@@ -1513,23 +1568,25 @@
 			       __func__);
       flush_older_and_return:
 
-	/* before we can update the journal header block, we _must_ flush all
-	 ** real blocks from all older transactions to disk.  This is because
-	 ** once the header block is updated, this transaction will not be
-	 ** replayed after a crash
+	/*
+	 * before we can update the journal header block, we _must_ flush all
+	 * real blocks from all older transactions to disk.  This is because
+	 * once the header block is updated, this transaction will not be
+	 * replayed after a crash
 	 */
 	if (flushall) {
 		flush_older_journal_lists(s, jl);
 	}
 
 	err = journal->j_errno;
-	/* before we can remove everything from the hash tables for this
-	 ** transaction, we must make sure it can never be replayed
-	 **
-	 ** since we are only called from do_journal_end, we know for sure there
-	 ** are no allocations going on while we are flushing journal lists.  So,
-	 ** we only need to update the journal header block for the last list
-	 ** being flushed
+	/*
+	 * before we can remove everything from the hash tables for this
+	 * transaction, we must make sure it can never be replayed
+	 *
+	 * since we are only called from do_journal_end, we know for sure there
+	 * are no allocations going on while we are flushing journal lists.  So,
+	 * we only need to update the journal header block for the last list
+	 * being flushed
 	 */
 	if (!err && flushall) {
 		err =
@@ -1554,7 +1611,8 @@
 	}
 	journal->j_last_flush_id = jl->j_trans_id;
 
-	/* not strictly required since we are freeing the list, but it should
+	/*
+	 * not strictly required since we are freeing the list, but it should
 	 * help find code using dead lists later on
 	 */
 	jl->j_len = 0;
@@ -1585,15 +1643,17 @@
 
 	cn = jl->j_realblock;
 	while (cn) {
-		/* if the blocknr == 0, this has been cleared from the hash,
-		 ** skip it
+		/*
+		 * if the blocknr == 0, this has been cleared from the hash,
+		 * skip it
 		 */
 		if (cn->blocknr == 0) {
 			goto next;
 		}
 		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
 			struct buffer_head *tmp_bh;
-			/* we can race against journal_mark_freed when we try
+			/*
+			 * we can race against journal_mark_freed when we try
 			 * to lock_buffer(cn->bh), so we have to inc the buffer
 			 * count, and recheck things after locking
 			 */
@@ -1630,15 +1690,17 @@
 	jl->j_state |= LIST_DIRTY;
 	cn = jl->j_realblock;
 	while (cn) {
-		/* look for a more recent transaction that logged this
-		 ** buffer.  Only the most recent transaction with a buffer in
-		 ** it is allowed to send that buffer to disk
+		/*
+		 * look for a more recent transaction that logged this
+		 * buffer.  Only the most recent transaction with a buffer in
+		 * it is allowed to send that buffer to disk
 		 */
 		pjl = find_newer_jl_for_cn(cn);
 		if (!pjl && cn->blocknr && cn->bh
 		    && buffer_journal_dirty(cn->bh)) {
 			BUG_ON(!can_dirty(cn));
-			/* if the buffer is prepared, it will either be logged
+			/*
+			 * if the buffer is prepared, it will either be logged
 			 * or restored.  If restored, we need to make sure
 			 * it actually gets marked dirty
 			 */
@@ -1675,7 +1737,8 @@
 		goto done;
 	}
 
-	/* we've got j_flush_mutex held, nobody is going to delete any
+	/*
+	 * we've got j_flush_mutex held, nobody is going to delete any
 	 * of these lists out from underneath us
 	 */
 	while ((num_trans && transactions_flushed < num_trans) ||
@@ -1714,15 +1777,16 @@
 	return ret;
 }
 
-/* for o_sync and fsync heavy applications, they tend to use
-** all the journa list slots with tiny transactions.  These
-** trigger lots and lots of calls to update the header block, which
-** adds seeks and slows things down.
-**
-** This function tries to clear out a large chunk of the journal lists
-** at once, which makes everything faster since only the newest journal
-** list updates the header block
-*/
+/*
+ * o_sync and fsync heavy applications tend to use all the
+ * journal list slots with tiny transactions.  These
+ * trigger lots and lots of calls to update the header block, which
+ * adds seeks and slows things down.
+ *
+ * This function tries to clear out a large chunk of the journal lists
+ * at once, which makes everything faster since only the newest journal
+ * list updates the header block
+ */
 static int flush_used_journal_lists(struct super_block *s,
 				    struct reiserfs_journal_list *jl)
 {
@@ -1759,9 +1823,11 @@
 	}
 	get_journal_list(jl);
 	get_journal_list(flush_jl);
-	/* try to find a group of blocks we can flush across all the
-	 ** transactions, but only bother if we've actually spanned
-	 ** across multiple lists
+
+	/*
+	 * try to find a group of blocks we can flush across all the
+	 * transactions, but only bother if we've actually spanned
+	 * across multiple lists
 	 */
 	if (flush_jl != jl) {
 		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
@@ -1773,9 +1839,9 @@
 }
 
 /*
-** removes any nodes in table with name block and dev as bh.
-** only touchs the hnext and hprev pointers.
-*/
+ * removes any nodes in table with the same block and dev as bh.
+ * only touches the hnext and hprev pointers.
+ */
 void remove_journal_hash(struct super_block *sb,
 			 struct reiserfs_journal_cnode **table,
 			 struct reiserfs_journal_list *jl,
@@ -1804,7 +1870,11 @@
 			cur->blocknr = 0;
 			cur->sb = NULL;
 			cur->state = 0;
-			if (cur->bh && cur->jlist)	/* anybody who clears the cur->bh will also dec the nonzerolen */
+			/*
+			 * anybody who clears the cur->bh will also
+			 * dec the nonzerolen
+			 */
+			if (cur->bh && cur->jlist)
 				atomic_dec(&(cur->jlist->j_nonzerolen));
 			cur->bh = NULL;
 			cur->jlist = NULL;
@@ -1825,17 +1895,18 @@
 	if (journal->j_header_bh) {
 		brelse(journal->j_header_bh);
 	}
-	/* j_header_bh is on the journal dev, make sure not to release the journal
-	 * dev until we brelse j_header_bh
+	/*
+	 * j_header_bh is on the journal dev, make sure
+	 * not to release the journal dev until we brelse j_header_bh
 	 */
 	release_journal_dev(sb, journal);
 	vfree(journal);
 }
 
 /*
-** call on unmount.  Only set error to 1 if you haven't made your way out
-** of read_super() yet.  Any other caller must keep error at 0.
-*/
+ * call on unmount.  Only set error to 1 if you haven't made your way out
+ * of read_super() yet.  Any other caller must keep error at 0.
+ */
 static int do_journal_release(struct reiserfs_transaction_handle *th,
 			      struct super_block *sb, int error)
 {
@@ -1843,14 +1914,19 @@
 	int flushed = 0;
 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
 
-	/* we only want to flush out transactions if we were called with error == 0
+	/*
+	 * we only want to flush out transactions if we were
+	 * called with error == 0
 	 */
 	if (!error && !(sb->s_flags & MS_RDONLY)) {
 		/* end the current trans */
 		BUG_ON(!th->t_trans_id);
 		do_journal_end(th, sb, 10, FLUSH_ALL);
 
-		/* make sure something gets logged to force our way into the flush code */
+		/*
+		 * make sure something gets logged to force
+		 * our way into the flush code
+		 */
 		if (!journal_join(&myth, sb, 1)) {
 			reiserfs_prepare_for_journal(sb,
 						     SB_BUFFER_WITH_SB(sb),
@@ -1894,25 +1970,24 @@
 	return 0;
 }
 
-/*
-** call on unmount.  flush all journal trans, release all alloc'd ram
-*/
+/* call on unmount.  flush all journal trans, release all alloc'd ram */
 int journal_release(struct reiserfs_transaction_handle *th,
 		    struct super_block *sb)
 {
 	return do_journal_release(th, sb, 0);
 }
 
-/*
-** only call from an error condition inside reiserfs_read_super!
-*/
+/* only call from an error condition inside reiserfs_read_super! */
 int journal_release_error(struct reiserfs_transaction_handle *th,
 			  struct super_block *sb)
 {
 	return do_journal_release(th, sb, 1);
 }
 
-/* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
+/*
+ * compares description block with commit block.
+ * returns 1 if they differ, 0 if they are the same
+ */
 static int journal_compare_desc_commit(struct super_block *sb,
 				       struct reiserfs_journal_desc *desc,
 				       struct reiserfs_journal_commit *commit)
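
The body is skipped by this hunk; the check amounts to roughly the following
sketch, using the get_desc_*/get_commit_* accessors this file uses elsewhere:

	if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
	    get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
	    get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
	    get_commit_trans_len(commit) <= 0)
		return 1;	/* not a matching desc/commit pair */
	return 0;
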
@@ -1926,11 +2001,12 @@
 	return 0;
 }
 
-/* returns 0 if it did not find a description block
-** returns -1 if it found a corrupt commit block
-** returns 1 if both desc and commit were valid
-** NOTE: only called during fs mount
-*/
+/*
+ * returns 0 if it did not find a description block
+ * returns -1 if it found a corrupt commit block
+ * returns 1 if both desc and commit were valid
+ * NOTE: only called during fs mount
+ */
 static int journal_transaction_is_valid(struct super_block *sb,
 					struct buffer_head *d_bh,
 					unsigned int *oldest_invalid_trans_id,
@@ -1976,7 +2052,10 @@
 		}
 		offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
 
-		/* ok, we have a journal description block, lets see if the transaction was valid */
+		/*
+		 * ok, we have a journal description block,
+		 * let's see if the transaction was valid
+		 */
 		c_bh =
 		    journal_bread(sb,
 				  SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
@@ -2028,11 +2107,11 @@
 }
 
 /*
-** given the start, and values for the oldest acceptable transactions,
-** this either reads in a replays a transaction, or returns because the
-** transaction is invalid, or too old.
-** NOTE: only called during fs mount
-*/
+ * given the start, and values for the oldest acceptable transactions,
+ * this either reads in and replays a transaction, or returns because the
+ * transaction is invalid, or too old.
+ * NOTE: only called during fs mount
+ */
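
Once a transaction is judged valid, the replay step itself (not visible in
this hunk) does something like the sketch below.  Heavily simplified: error
handling, bounds checks, and the second half of the block list carried in the
commit block are omitted, and trans_offset stands for the transaction's
offset into the log:

	struct buffer_head *log_bh, *real_bh;
	int i;

	for (i = 0; i < get_desc_trans_len(desc); i++) {
		/* i-th log block of this transaction, wrapping the log */
		log_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				       (trans_offset + 1 + i) %
				       SB_ONDISK_JOURNAL_SIZE(sb));
		/* home location recorded in the desc block */
		real_bh = sb_getblk(sb, le32_to_cpu(desc->j_realblock[i]));

		memcpy(real_bh->b_data, log_bh->b_data, real_bh->b_size);
		set_buffer_uptodate(real_bh);
		mark_buffer_dirty(real_bh);
		brelse(log_bh);
		brelse(real_bh);
	}
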
 static int journal_read_transaction(struct super_block *sb,
 				    unsigned long cur_dblock,
 				    unsigned long oldest_start,
@@ -2106,7 +2185,10 @@
 	}
 
 	trans_id = get_desc_trans_id(desc);
-	/* now we know we've got a good transaction, and it was inside the valid time ranges */
+	/*
+	 * now we know we've got a good transaction, and it was
+	 * inside the valid time ranges
+	 */
 	log_blocks = kmalloc(get_desc_trans_len(desc) *
 			     sizeof(struct buffer_head *), GFP_NOFS);
 	real_blocks = kmalloc(get_desc_trans_len(desc) *
@@ -2213,7 +2295,10 @@
 		       "journal-1095: setting journal " "start to offset %ld",
 		       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));
 
-	/* init starting values for the first transaction, in case this is the last transaction to be replayed. */
+	/*
+	 * init starting values for the first transaction, in case
+	 * this is the last transaction to be replayed.
+	 */
 	journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
 	journal->j_last_flush_trans_id = trans_id;
 	journal->j_trans_id = trans_id + 1;
@@ -2227,12 +2312,14 @@
 	return 0;
 }
 
-/* This function reads blocks starting from block and to max_block of bufsize
-   size (but no more than BUFNR blocks at a time). This proved to improve
-   mounting speed on self-rebuilding raid5 arrays at least.
-   Right now it is only used from journal code. But later we might use it
-   from other places.
-   Note: Do not use journal_getblk/sb_getblk functions here! */
+/*
+ * This function reads blocks starting from block up to max_block of bufsize
+ * size (but no more than BUFNR blocks at a time). This proved to improve
+ * mounting speed on self-rebuilding raid5 arrays at least.
+ * Right now it is only used from journal code. But later we might use it
+ * from other places.
+ * Note: Do not use journal_getblk/sb_getblk functions here!
+ */
 static struct buffer_head *reiserfs_breada(struct block_device *dev,
 					   b_blocknr_t block, int bufsize,
 					   b_blocknr_t max_block)
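
As a sketch of the batching described above (illustrative only; the real
function also keeps the batch aligned to BUFNR and cleans up on allocation
failure):

	struct buffer_head *bhlist[BUFNR];
	int i, nr = 1;

	bhlist[0] = __getblk(dev, block, bufsize);
	if (buffer_uptodate(bhlist[0]))
		return bhlist[0];

	/* collect read-ahead buffers, but never past max_block */
	while (nr < BUFNR && block + nr < max_block) {
		bhlist[nr] = __getblk(dev, block + nr, bufsize);
		nr++;
	}

	ll_rw_block(READ, nr, bhlist);	/* one batched submission */
	for (i = 1; i < nr; i++)
		brelse(bhlist[i]);	/* read-ahead: fire and forget */

	wait_on_buffer(bhlist[0]);	/* only wait for the block we need */
	if (buffer_uptodate(bhlist[0]))
		return bhlist[0];
	brelse(bhlist[0]);
	return NULL;
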
@@ -2271,15 +2358,17 @@
 }
 
 /*
-** read and replay the log
-** on a clean unmount, the journal header's next unflushed pointer will
-** be to an invalid transaction.  This tests that before finding all the
-** transactions in the log, which makes normal mount times fast.
-** After a crash, this starts with the next unflushed transaction, and
-** replays until it finds one too old, or invalid.
-** On exit, it sets things up so the first transaction will work correctly.
-** NOTE: only called during fs mount
-*/
+ * read and replay the log
+ * on a clean unmount, the journal header's next unflushed pointer will be
+ * to an invalid transaction.  This tests that before finding all the
+ * transactions in the log, which makes normal mount times fast.
+ *
+ * After a crash, this starts with the next unflushed transaction, and
+ * replays until it finds one too old, or invalid.
+ *
+ * On exit, it sets things up so the first transaction will work correctly.
+ * NOTE: only called during fs mount
+ */
 static int journal_read(struct super_block *sb)
 {
 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
@@ -2303,9 +2392,10 @@
 		      bdevname(journal->j_dev_bd, b));
 	start = get_seconds();
 
-	/* step 1, read in the journal header block.  Check the transaction it says
-	 ** is the first unflushed, and if that transaction is not valid,
-	 ** replay is done
+	/*
+	 * step 1, read in the journal header block.  Check the transaction
+	 * it says is the first unflushed, and if that transaction is not
+	 * valid, replay is done
 	 */
 	journal->j_header_bh = journal_bread(sb,
 					     SB_ONDISK_JOURNAL_1st_BLOCK(sb)
@@ -2329,9 +2419,10 @@
 			       le32_to_cpu(jh->j_last_flush_trans_id));
 		valid_journal_header = 1;
 
-		/* now, we try to read the first unflushed offset.  If it is not valid,
-		 ** there is nothing more we can do, and it makes no sense to read
-		 ** through the whole log.
+		/*
+		 * now, we try to read the first unflushed offset.  If it
+		 * is not valid, there is nothing more we can do, and it
+		 * makes no sense to read through the whole log.
 		 */
 		d_bh =
 		    journal_bread(sb,
@@ -2345,15 +2436,19 @@
 		goto start_log_replay;
 	}
 
-	/* ok, there are transactions that need to be replayed.  start with the first log block, find
-	 ** all the valid transactions, and pick out the oldest.
+	/*
+	 * ok, there are transactions that need to be replayed.  start
+	 * with the first log block, find all the valid transactions, and
+	 * pick out the oldest.
 	 */
 	while (continue_replay
 	       && cur_dblock <
 	       (SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
 		SB_ONDISK_JOURNAL_SIZE(sb))) {
-		/* Note that it is required for blocksize of primary fs device and journal
-		   device to be the same */
+		/*
+		 * Note that the blocksize of the primary fs device and
+		 * of the journal device is required to be the same
+		 */
 		d_bh =
 		    reiserfs_breada(journal->j_dev_bd, cur_dblock,
 				    sb->s_blocksize,
@@ -2431,9 +2526,11 @@
 		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
 			       "journal-1225: No valid " "transactions found");
 	}
-	/* j_start does not get set correctly if we don't replay any transactions.
-	 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
-	 ** copy the trans_id from the header
+	/*
+	 * j_start does not get set correctly if we don't replay any
+	 * transactions.  if we had a valid journal_header, set j_start
+	 * to the first unflushed transaction value, copy the trans_id
+	 * from the header
 	 */
 	if (valid_journal_header && replay_count == 0) {
 		journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
@@ -2462,8 +2559,9 @@
 	    _update_journal_header_block(sb, journal->j_start,
 					 journal->j_last_flush_trans_id)) {
 		reiserfs_write_unlock(sb);
-		/* replay failed, caller must call free_journal_ram and abort
-		 ** the mount
+		/*
+		 * replay failed, caller must call free_journal_ram and abort
+		 * the mount
 		 */
 		return -1;
 	}
@@ -2556,7 +2654,7 @@
 	return 0;
 }
 
-/**
+/*
  * When creating/tuning a file system user can assign some
  * journal params within boundaries which depend on the ratio
  * blocksize/standard_blocksize.
@@ -2574,8 +2672,7 @@
 				     struct reiserfs_journal *journal)
 {
         if (journal->j_trans_max) {
-	        /* Non-default journal params.
-		   Do sanity check for them. */
+		/* Non-default journal params.  Do sanity check for them. */
 	        int ratio = 1;
 		if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
 		        ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;
@@ -2597,10 +2694,12 @@
 			return 1;
 		}
 	} else {
-		/* Default journal params.
-                   The file system was created by old version
-		   of mkreiserfs, so some fields contain zeros,
-		   and we need to advise proper values for them */
+		/*
+		 * Default journal params.
+	 * The file system was created by an old version
+	 * of mkreiserfs, so some fields contain zeros,
+		 * and we need to advise proper values for them
+		 */
 		if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
 			reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
 					 sb->s_blocksize);
@@ -2613,9 +2712,7 @@
 	return 0;
 }
 
-/*
-** must be called once on fs mount.  calls journal_read for you
-*/
+/* must be called once on fs mount.  calls journal_read for you */
 int journal_init(struct super_block *sb, const char *j_dev_name,
 		 int old_format, unsigned int commit_max_age)
 {
@@ -2654,8 +2751,10 @@
 						 REISERFS_DISK_OFFSET_IN_BYTES /
 						 sb->s_blocksize + 2);
 
-	/* Sanity check to see is the standard journal fitting within first bitmap
-	   (actual for small blocksizes) */
+	/*
+	 * Sanity check to see if the standard journal fits
+	 * within the first bitmap (relevant for small blocksizes)
+	 */
 	if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
 	    (SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
 	     SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) {
@@ -2803,10 +2902,10 @@
 }
 
 /*
-** test for a polite end of the current transaction.  Used by file_write, and should
-** be used by delete to make sure they don't write more than can fit inside a single
-** transaction
-*/
+ * test for a polite end of the current transaction.  Used by file_write,
+ * and should be used by delete to make sure they don't write more than
+ * can fit inside a single transaction
+ */
 int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
 				   int new_alloc)
 {
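
A typical caller pattern looks like the hypothetical sketch below (names such
as blocks_needed/blocks_held are assumptions, and real callers need their own
error handling): close the transaction politely and open a fresh one before
doing more work.

	if (journal_transaction_should_end(th, blocks_needed)) {
		int err = journal_end(th, s, blocks_held);

		if (!err)
			err = journal_begin(th, s, blocks_needed);
		if (err)
			return err;
	}
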
@@ -2829,8 +2928,7 @@
 	return 0;
 }
 
-/* this must be called inside a transaction
-*/
+/* this must be called inside a transaction */
 void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
 {
 	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
@@ -2840,8 +2938,7 @@
 	return;
 }
 
-/* this must be called without a transaction started
-*/
+/* this must be called without a transaction started */
 void reiserfs_allow_writes(struct super_block *s)
 {
 	struct reiserfs_journal *journal = SB_JOURNAL(s);
@@ -2849,8 +2946,7 @@
 	wake_up(&journal->j_join_wait);
 }
 
-/* this must be called without a transaction started
-*/
+/* this must be called without a transaction started */
 void reiserfs_wait_on_write_block(struct super_block *s)
 {
 	struct reiserfs_journal *journal = SB_JOURNAL(s);
@@ -2912,11 +3008,12 @@
 	}
 }
 
-/* join == true if you must join an existing transaction.
-** join == false if you can deal with waiting for others to finish
-**
-** this will block until the transaction is joinable.  send the number of blocks you
-** expect to use in nblocks.
+/*
+ * join == true if you must join an existing transaction.
+ * join == false if you can deal with waiting for others to finish
+ *
+ * this will block until the transaction is joinable.  send the number of
+ * blocks you expect to use in nblocks.
 */
 static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
 			      struct super_block *sb, unsigned long nblocks,
@@ -2957,9 +3054,11 @@
 	}
 	now = get_seconds();
 
-	/* if there is no room in the journal OR
-	 ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
-	 ** we don't sleep if there aren't other writers
+	/*
+	 * if there is no room in the journal OR
+	 * if this transaction is too old, and we weren't called joinable,
+	 * wait for it to finish before beginning.  we don't sleep if there
+	 * aren't other writers
 	 */
 
 	if ((!join && journal->j_must_wait > 0) ||
@@ -2973,7 +3072,8 @@
 	    || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
 
 		old_trans_id = journal->j_trans_id;
-		unlock_journal(sb);	/* allow others to finish this transaction */
+		/* allow others to finish this transaction */
+		unlock_journal(sb);
 
 		if (!join && (journal->j_len_alloc + nblocks + 2) >=
 		    journal->j_max_batch &&
@@ -2985,8 +3085,9 @@
 				goto relock;
 			}
 		}
-		/* don't mess with joining the transaction if all we have to do is
-		 * wait for someone else to do a commit
+		/*
+		 * don't mess with joining the transaction if all we
+		 * have to do is wait for someone else to do a commit
 		 */
 		if (atomic_read(&journal->j_jlock)) {
 			while (journal->j_trans_id == old_trans_id &&
@@ -3027,9 +3128,11 @@
 
       out_fail:
 	memset(th, 0, sizeof(*th));
-	/* Re-set th->t_super, so we can properly keep track of how many
+	/*
+	 * Re-set th->t_super, so we can properly keep track of how many
 	 * persistent transactions there are. We need to do this so if this
-	 * call is part of a failed restart_transaction, we can free it later */
+	 * call is part of a failed restart_transaction, we can free it later
+	 */
 	th->t_super = sb;
 	return retval;
 }
@@ -3042,14 +3145,15 @@
 	int ret;
 	struct reiserfs_transaction_handle *th;
 
-	/* if we're nesting into an existing transaction.  It will be
-	 ** persistent on its own
+	/*
+	 * if we're nesting into an existing transaction, it will be
+	 * persistent on its own
 	 */
 	if (reiserfs_transaction_running(s)) {
 		th = current->journal_info;
 		th->t_refcount++;
 		BUG_ON(th->t_refcount < 2);
-		
+
 		return th;
 	}
 	th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
@@ -3085,8 +3189,9 @@
 {
 	struct reiserfs_transaction_handle *cur_th = current->journal_info;
 
-	/* this keeps do_journal_end from NULLing out the current->journal_info
-	 ** pointer
+	/*
+	 * this keeps do_journal_end from NULLing out the
+	 * current->journal_info pointer
 	 */
 	th->t_handle_save = cur_th;
 	BUG_ON(cur_th && cur_th->t_refcount > 1);
@@ -3098,8 +3203,9 @@
 {
 	struct reiserfs_transaction_handle *cur_th = current->journal_info;
 
-	/* this keeps do_journal_end from NULLing out the current->journal_info
-	 ** pointer
+	/*
+	 * this keeps do_journal_end from NULLing out the
+	 * current->journal_info pointer
 	 */
 	th->t_handle_save = cur_th;
 	BUG_ON(cur_th && cur_th->t_refcount > 1);
@@ -3125,9 +3231,10 @@
 						 "journal_info != 0");
 			return 0;
 		} else {
-			/* we've ended up with a handle from a different filesystem.
-			 ** save it and restore on journal_end.  This should never
-			 ** really happen...
+			/*
+			 * we've ended up with a handle from a different
+			 * filesystem.  save it and restore on journal_end.
+			 * This should never really happen...
 			 */
 			reiserfs_warning(sb, "clm-2100",
 					 "nesting info a different FS");
@@ -3140,9 +3247,10 @@
 	ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
 	BUG_ON(current->journal_info != th);
 
-	/* I guess this boils down to being the reciprocal of clm-2100 above.
-	 * If do_journal_begin_r fails, we need to put it back, since journal_end
-	 * won't be called to do it. */
+	/*
+	 * I guess this boils down to being the reciprocal of clm-2100 above.
+	 * If do_journal_begin_r fails, we need to put it back, since
+	 * journal_end won't be called to do it.
+	 */
 	if (ret)
 		current->journal_info = th->t_handle_save;
 	else
@@ -3152,14 +3260,15 @@
 }
 
 /*
-** puts bh into the current transaction.  If it was already there, reorders removes the
-** old pointers from the hash, and puts new ones in (to make sure replay happen in the right order).
-**
-** if it was dirty, cleans and files onto the clean list.  I can't let it be dirty again until the
-** transaction is committed.
-**
-** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
-*/
+ * puts bh into the current transaction.  If it was already there, it
+ * reorders: removes the old pointers from the hash and puts new ones in
+ * (to make sure replay happens in the right order).
+ *
+ * if it was dirty, cleans and files onto the clean list.  I can't let it
+ * be dirty again until the transaction is committed.
+ *
+ * if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
+ */
 int journal_mark_dirty(struct reiserfs_transaction_handle *th,
 		       struct super_block *sb, struct buffer_head *bh)
 {
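
The pairing visible throughout this file applies here: a buffer must go
through reiserfs_prepare_for_journal() before it is logged.  A minimal usage
sketch:

	reiserfs_prepare_for_journal(sb, bh, 1);  /* clean it, wait on it */
	/* ... modify bh->b_data ... */
	journal_mark_dirty(th, sb, bh);           /* log the change */
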
@@ -3184,9 +3293,10 @@
 		return 0;
 	}
 
-	/* this must be turned into a panic instead of a warning.  We can't allow
-	 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
-	 ** could get to disk too early.  NOT GOOD.
+	/*
+	 * this must be turned into a panic instead of a warning.  We can't
+	 * allow a dirty or journal_dirty or locked buffer to be logged, as
+	 * some changes could get to disk too early.  NOT GOOD.
 	 */
 	if (!prepared || buffer_dirty(bh)) {
 		reiserfs_warning(sb, "journal-1777",
@@ -3205,8 +3315,10 @@
 				 atomic_read(&(journal->j_wcount)));
 		return 1;
 	}
-	/* this error means I've screwed up, and we've overflowed the transaction.
-	 ** Nothing can be done here, except make the FS readonly or panic.
+	/*
+	 * this error means I've screwed up, and we've overflowed
+	 * the transaction.  Nothing can be done here, except make the
+	 * FS readonly or panic.
 	 */
 	if (journal->j_len >= journal->j_trans_max) {
 		reiserfs_panic(th->t_super, "journal-1413",
@@ -3280,8 +3392,9 @@
 		struct reiserfs_transaction_handle *cur_th =
 		    current->journal_info;
 
-		/* we aren't allowed to close a nested transaction on a different
-		 ** filesystem from the one in the task struct
+		/*
+		 * we aren't allowed to close a nested transaction on a
+		 * different filesystem from the one in the task struct
 		 */
 		BUG_ON(cur_th->t_super != th->t_super);
 
@@ -3295,13 +3408,14 @@
 	}
 }
 
-/* removes from the current transaction, relsing and descrementing any counters.
-** also files the removed buffer directly onto the clean list
-**
-** called by journal_mark_freed when a block has been deleted
-**
-** returns 1 if it cleaned and relsed the buffer. 0 otherwise
-*/
+/*
+ * removes from the current transaction, relsing and decrementing any
+ * counters.  also files the removed buffer directly onto the clean list
+ *
+ * called by journal_mark_freed when a block has been deleted
+ *
+ * returns 1 if it cleaned and relsed the buffer. 0 otherwise
+ */
 static int remove_from_transaction(struct super_block *sb,
 				   b_blocknr_t blocknr, int already_cleaned)
 {
@@ -3350,15 +3464,16 @@
 }
 
 /*
-** for any cnode in a journal list, it can only be dirtied of all the
-** transactions that include it are committed to disk.
-** this checks through each transaction, and returns 1 if you are allowed to dirty,
-** and 0 if you aren't
-**
-** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
-** blocks for a given transaction on disk
-**
-*/
+ * for any cnode in a journal list, it can only be dirtied if all the
+ * transactions that include it are committed to disk.
+ * this checks through each transaction, and returns 1 if you are allowed
+ * to dirty, and 0 if you aren't
+ *
+ * it is called by dirty_journal_list, which is called after
+ * flush_commit_list has gotten all the log blocks for a given
+ * transaction on disk
+ */
 static int can_dirty(struct reiserfs_journal_cnode *cn)
 {
 	struct super_block *sb = cn->sb;
@@ -3366,9 +3481,10 @@
 	struct reiserfs_journal_cnode *cur = cn->hprev;
 	int can_dirty = 1;
 
-	/* first test hprev.  These are all newer than cn, so any node here
-	 ** with the same block number and dev means this node can't be sent
-	 ** to disk right now.
+	/*
+	 * first test hprev.  These are all newer than cn, so any node here
+	 * with the same block number and dev means this node can't be sent
+	 * to disk right now.
 	 */
 	while (cur && can_dirty) {
 		if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
@@ -3377,8 +3493,9 @@
 		}
 		cur = cur->hprev;
 	}
-	/* then test hnext.  These are all older than cn.  As long as they
-	 ** are committed to the log, it is safe to write cn to disk
+	/*
+	 * then test hnext.  These are all older than cn.  As long as they
+	 * are committed to the log, it is safe to write cn to disk
 	 */
 	cur = cn->hnext;
 	while (cur && can_dirty) {
@@ -3392,9 +3509,10 @@
 	return can_dirty;
 }
 
-/* syncs the commit blocks, but does not force the real buffers to disk
-** will wait until the current transaction is done/committed before returning
-*/
+/*
+ * syncs the commit blocks, but does not force the real buffers to disk.
+ * will wait until the current transaction is done/committed before returning
+ */
 int journal_end_sync(struct reiserfs_transaction_handle *th,
 		     struct super_block *sb, unsigned long nblocks)
 {
@@ -3411,9 +3529,7 @@
 	return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT);
 }
 
-/*
-** writeback the pending async commits to disk
-*/
+/* writeback the pending async commits to disk */
 static void flush_async_commits(struct work_struct *work)
 {
 	struct reiserfs_journal *journal =
@@ -3433,9 +3549,9 @@
 }
 
 /*
-** flushes any old transactions to disk
-** ends the current transaction if it is too old
-*/
+ * flushes any old transactions to disk
+ * ends the current transaction if it is too old
+ */
 void reiserfs_flush_old_commits(struct super_block *sb)
 {
 	time_t now;
@@ -3443,13 +3559,15 @@
 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
 
 	now = get_seconds();
-	/* safety check so we don't flush while we are replaying the log during
+	/*
+	 * safety check so we don't flush while we are replaying the log during
 	 * mount
 	 */
 	if (list_empty(&journal->j_journal_list))
 		return;
 
-	/* check the current transaction.  If there are no writers, and it is
+	/*
+	 * check the current transaction.  If there are no writers, and it is
 	 * too old, finish it, and force the commit blocks to disk
 	 */
 	if (atomic_read(&journal->j_wcount) <= 0 &&
@@ -3463,8 +3581,10 @@
 			journal_mark_dirty(&th, sb,
 					   SB_BUFFER_WITH_SB(sb));
 
-			/* we're only being called from kreiserfsd, it makes no sense to do
-			 ** an async commit so that kreiserfsd can do it later
+			/*
+			 * we're only being called from kreiserfsd, it makes
+			 * no sense to do an async commit so that kreiserfsd
+			 * can do it later
 			 */
 			do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
 		}
@@ -3472,16 +3592,20 @@
 }
 
 /*
-** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
-**
-** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
-** the writers are done.  By the time it wakes up, the transaction it was called has already ended, so it just
-** flushes the commit list and returns 0.
-**
-** Won't batch when flush or commit_now is set.  Also won't batch when others are waiting on j_join_wait.
-**
-** Note, we can't allow the journal_end to proceed while there are still writers in the log.
-*/
+ * returns 0 if do_journal_end should return right away, returns 1 if
+ * do_journal_end should finish the commit
+ *
+ * if the current transaction is too old, but still has writers, this will
+ * wait on j_join_wait until all the writers are done.  By the time it
+ * wakes up, the transaction it was called on has already ended, so it just
+ * flushes the commit list and returns 0.
+ *
+ * Won't batch when flush or commit_now is set.  Also won't batch when
+ * others are waiting on j_join_wait.
+ *
+ * Note, we can't allow the journal_end to proceed while there are still
+ * writers in the log.
+ */
 static int check_journal_end(struct reiserfs_transaction_handle *th,
 			     struct super_block *sb, unsigned long nblocks,
 			     int flags)
@@ -3503,21 +3627,25 @@
 	}
 
 	journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
-	if (atomic_read(&(journal->j_wcount)) > 0) {	/* <= 0 is allowed.  unmounting might not call begin */
+	/* <= 0 is allowed.  unmounting might not call begin */
+	if (atomic_read(&(journal->j_wcount)) > 0)
 		atomic_dec(&(journal->j_wcount));
-	}
 
-	/* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
-	 ** will be dealt with by next transaction that actually writes something, but should be taken
-	 ** care of in this trans
+	/*
+	 * BUG, deal with the case where j_len is 0, but blocks people
+	 * previously freed still need to be released.  This will be dealt
+	 * with by the next transaction that actually writes something, but
+	 * should be taken care of in this trans
 	 */
 	BUG_ON(journal->j_len == 0);
 
-	/* if wcount > 0, and we are called to with flush or commit_now,
-	 ** we wait on j_join_wait.  We will wake up when the last writer has
-	 ** finished the transaction, and started it on its way to the disk.
-	 ** Then, we flush the commit or journal list, and just return 0
-	 ** because the rest of journal end was already done for this transaction.
+	/*
+	 * if wcount > 0, and we are called with flush or commit_now,
+	 * we wait on j_join_wait.  We will wake up when the last writer has
+	 * finished the transaction, and started it on its way to the disk.
+	 * Then, we flush the commit or journal list, and just return 0
+	 * because the rest of journal end was already done for this
+	 * transaction.
 	 */
 	if (atomic_read(&(journal->j_wcount)) > 0) {
 		if (flush || commit_now) {
@@ -3533,7 +3661,10 @@
 			}
 			unlock_journal(sb);
 
-			/* sleep while the current transaction is still j_jlocked */
+			/*
+			 * sleep while the current transaction is
+			 * still j_jlocked
+			 */
 			while (journal->j_trans_id == trans_id) {
 				if (atomic_read(&journal->j_jlock)) {
 					queue_log_writer(sb);
@@ -3547,7 +3678,7 @@
 				}
 			}
 			BUG_ON(journal->j_trans_id == trans_id);
-			
+
 			if (commit_now
 			    && journal_list_still_alive(sb, trans_id)
 			    && wait_on_commit) {
@@ -3585,19 +3716,22 @@
 }
 
 /*
-** Does all the work that makes deleting blocks safe.
-** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on.
-**
-** otherwise:
-** set a bit for the block in the journal bitmap.  That will prevent it from being allocated for unformatted nodes
-** before this transaction has finished.
-**
-** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.  That will prevent any old transactions with
-** this block from trying to flush to the real location.  Since we aren't removing the cnode from the journal_list_hash,
-** the block can't be reallocated yet.
-**
-** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
-*/
+ * Does all the work that makes deleting blocks safe.
+ * when deleting a block marked BH_JNew, just remove it from the current
+ * transaction, clean its buffer_head and move on.
+ *
+ * otherwise:
+ * set a bit for the block in the journal bitmap.  That will prevent it from
+ * being allocated for unformatted nodes before this transaction has finished.
+ *
+ * mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.
+ * That will prevent any old transactions with this block from trying to flush
+ * to the real location.  Since we aren't removing the cnode from the
+ * journal_list_hash, the block can't be reallocated yet.
+ *
+ * Then remove it from the current transaction, decrementing any counters and
+ * filing it on the clean list.
+ */
 int journal_mark_freed(struct reiserfs_transaction_handle *th,
 		       struct super_block *sb, b_blocknr_t blocknr)
 {
@@ -3620,7 +3754,10 @@
 		reiserfs_clean_and_file_buffer(bh);
 		cleaned = remove_from_transaction(sb, blocknr, cleaned);
 	} else {
-		/* set the bit for this block in the journal bitmap for this transaction */
+		/*
+		 * set the bit for this block in the journal bitmap
+		 * for this transaction
+		 */
 		jb = journal->j_current_jl->j_list_bitmap;
 		if (!jb) {
 			reiserfs_panic(sb, "journal-1702",
@@ -3636,17 +3773,22 @@
 		}
 		cleaned = remove_from_transaction(sb, blocknr, cleaned);
 
-		/* find all older transactions with this block, make sure they don't try to write it out */
+		/*
+		 * find all older transactions with this block,
+		 * make sure they don't try to write it out
+		 */
 		cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
 					  blocknr);
 		while (cn) {
 			if (sb == cn->sb && blocknr == cn->blocknr) {
 				set_bit(BLOCK_FREED, &cn->state);
 				if (cn->bh) {
+					/*
+					 * remove_from_transaction will brelse
+					 * the buffer if it was in the current
+					 * trans
+					 */
 					if (!cleaned) {
-						/* remove_from_transaction will brelse the buffer if it was 
-						 ** in the current trans
-						 */
 						clear_buffer_journal_dirty(cn->
 									   bh);
 						clear_buffer_dirty(cn->bh);
@@ -3661,7 +3803,11 @@
 								 "cn->bh->b_count < 0");
 						}
 					}
-					if (cn->jlist) {	/* since we are clearing the bh, we MUST dec nonzerolen */
+					/*
+					 * since we are clearing the bh,
+					 * we MUST dec nonzerolen
+					 */
+					if (cn->jlist) {
 						atomic_dec(&
 							   (cn->jlist->
 							    j_nonzerolen));
@@ -3697,10 +3843,16 @@
 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
 	int ret = 0;
 
-	/* is it from the current transaction, or from an unknown transaction? */
+	/*
+	 * is it from the current transaction,
+	 * or from an unknown transaction?
+	 */
 	if (id == journal->j_trans_id) {
 		jl = journal->j_current_jl;
-		/* try to let other writers come in and grow this transaction */
+		/*
+		 * try to let other writers come in and
+		 * grow this transaction
+		 */
 		let_transaction_grow(sb, id);
 		if (journal->j_trans_id != id) {
 			goto flush_commit_only;
@@ -3724,7 +3876,8 @@
 			ret = 1;
 
 	} else {
-		/* this gets tricky, we have to make sure the journal list in
+		/*
+		 * this gets tricky, we have to make sure the journal list in
 		 * the inode still exists.  We know the list is still around
 		 * if we've got a larger transaction id than the oldest list
 		 */
@@ -3751,7 +3904,8 @@
 	unsigned int id = REISERFS_I(inode)->i_trans_id;
 	struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
 
-	/* for the whole inode, assume unset id means it was
+	/*
+	 * for the whole inode, assume unset id means it was
 	 * changed in the current transaction.  More conservative
 	 */
 	if (!id || !jl) {
@@ -3789,12 +3943,11 @@
 
 extern struct tree_balance *cur_tb;
 /*
-** before we can change a metadata block, we have to make sure it won't
-** be written to disk while we are altering it.  So, we must:
-** clean it
-** wait on it.
-**
-*/
+ * before we can change a metadata block, we have to make sure it won't
+ * be written to disk while we are altering it.  So, we must:
+ * clean it
+ * wait on it.
+ */
 int reiserfs_prepare_for_journal(struct super_block *sb,
 				 struct buffer_head *bh, int wait)
 {
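
Schematically the function boils down to the sketch below (simplified; the
buffer_journal_* helpers are reiserfs-private buffer-flag wrappers):

	if (!trylock_buffer(bh)) {
		if (!wait)
			return 0;	/* caller asked not to block */
		lock_buffer(bh);
	}
	set_buffer_journal_prepared(bh);
	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh))
		set_buffer_journal_restore_dirty(bh);
	unlock_buffer(bh);
	return 1;
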
@@ -3815,15 +3968,15 @@
 }
 
 /*
-** long and ugly.  If flush, will not return until all commit
-** blocks and all real buffers in the trans are on disk.
-** If no_async, won't return until all commit blocks are on disk.
-**
-** keep reading, there are comments as you go along
-**
-** If the journal is aborted, we just clean up. Things like flushing
-** journal lists, etc just won't happen.
-*/
+ * long and ugly.  If flush, will not return until all commit
+ * blocks and all real buffers in the trans are on disk.
+ * If no_async, won't return until all commit blocks are on disk.
+ *
+ * keep reading, there are comments as you go along
+ *
+ * If the journal is aborted, we just clean up. Things like flushing
+ * journal lists, etc just won't happen.
+ */
 static int do_journal_end(struct reiserfs_transaction_handle *th,
 			  struct super_block *sb, unsigned long nblocks,
 			  int flags)
@@ -3850,8 +4003,10 @@
 	BUG_ON(th->t_refcount > 1);
 	BUG_ON(!th->t_trans_id);
 
-	/* protect flush_older_commits from doing mistakes if the
-           transaction ID counter gets overflowed.  */
+	/*
+	 * protect flush_older_commits from doing mistakes if the
+	 * transaction ID counter gets overflowed.
+	 */
 	if (th->t_trans_id == ~0U)
 		flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
 	flush = flags & FLUSH_ALL;
@@ -3875,8 +4030,10 @@
 		wait_on_commit = 1;
 	}
 
-	/* check_journal_end locks the journal, and unlocks if it does not return 1
-	 ** it tells us if we should continue with the journal_end, or just return
+	/*
+	 * check_journal_end locks the journal, and unlocks if it does
+	 * not return 1 it tells us if we should continue with the
+	 * journal_end, or just return
 	 */
 	if (!check_journal_end(th, sb, nblocks, flags)) {
 		reiserfs_schedule_old_flush(sb);
@@ -3891,19 +4048,23 @@
 	}
 
 	/*
-	 ** j must wait means we have to flush the log blocks, and the real blocks for
-	 ** this transaction
+	 * j_must_wait means we have to flush the log blocks, and the
+	 * real blocks for this transaction
 	 */
 	if (journal->j_must_wait > 0) {
 		flush = 1;
 	}
 #ifdef REISERFS_PREALLOCATE
-	/* quota ops might need to nest, setup the journal_info pointer for them
-	 * and raise the refcount so that it is > 0. */
+	/*
+	 * quota ops might need to nest, setup the journal_info pointer
+	 * for them and raise the refcount so that it is > 0.
+	 */
 	current->journal_info = th;
 	th->t_refcount++;
-	reiserfs_discard_all_prealloc(th);	/* it should not involve new blocks into
-						 * the transaction */
+
+	/* it should not involve new blocks into the transaction */
+	reiserfs_discard_all_prealloc(th);
+
 	th->t_refcount--;
 	current->journal_info = th->t_handle_save;
 #endif
@@ -3919,7 +4080,10 @@
 	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
 	set_desc_trans_id(desc, journal->j_trans_id);
 
-	/* setup commit block.  Don't write (keep it clean too) this one until after everyone else is written */
+	/*
+	 * setup commit block.  Don't write (keep it clean too) this one
+	 * until after everyone else is written
+	 */
 	c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
 			      ((journal->j_start + journal->j_len +
 				1) % SB_ONDISK_JOURNAL_SIZE(sb)));
@@ -3931,7 +4095,8 @@
 	/* init this journal list */
 	jl = journal->j_current_jl;
 
-	/* we lock the commit before doing anything because
+	/*
+	 * we lock the commit before doing anything because
 	 * we want to make sure nobody tries to run flush_commit_list until
 	 * the new transaction is fully setup, and we've already flushed the
 	 * ordered bh list
@@ -3951,9 +4116,10 @@
 	atomic_set(&jl->j_commit_left, journal->j_len + 2);
 	jl->j_realblock = NULL;
 
-	/* The ENTIRE FOR LOOP MUST not cause schedule to occur.
-	 **  for each real block, add it to the journal list hash,
-	 ** copy into real block index array in the commit or desc block
+	/*
+	 * The ENTIRE FOR LOOP MUST not cause schedule to occur.
+	 * for each real block, add it to the journal list hash,
+	 * copy into real block index array in the commit or desc block
 	 */
 	trans_half = journal_trans_half(sb->s_blocksize);
 	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
@@ -3972,9 +4138,10 @@
 				last_cn->next = jl_cn;
 			}
 			last_cn = jl_cn;
-			/* make sure the block we are trying to log is not a block
-			   of journal or reserved area */
-
+			/*
+			 * make sure the block we are trying to log
+			 * is not a block of journal or reserved area
+			 */
 			if (is_block_in_log_or_reserved_area
 			    (sb, cn->bh->b_blocknr)) {
 				reiserfs_panic(sb, "journal-2332",
@@ -4004,19 +4171,26 @@
 	set_desc_trans_id(desc, journal->j_trans_id);
 	set_commit_trans_len(commit, journal->j_len);
 
-	/* special check in case all buffers in the journal were marked for not logging */
+	/*
+	 * special check in case all buffers in the journal
+	 * were marked for not logging
+	 */
 	BUG_ON(journal->j_len == 0);
 
-	/* we're about to dirty all the log blocks, mark the description block
+	/*
+	 * we're about to dirty all the log blocks, mark the description block
 	 * dirty now too.  Don't mark the commit block dirty until all the
 	 * others are on disk
 	 */
 	mark_buffer_dirty(d_bh);
 
-	/* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
+	/*
+	 * first data block is j_start + 1, so add one to
+	 * cur_write_start wherever you use it
+	 */
 	cur_write_start = journal->j_start;
 	cn = journal->j_first;
-	jindex = 1;		/* start at one so we don't get the desc again */
+	jindex = 1;	/* start at one so we don't get the desc again */
 	while (cn) {
 		clear_buffer_journal_new(cn->bh);
 		/* copy all the real blocks into log area.  dirty log blocks */
@@ -4042,7 +4216,10 @@
 			set_buffer_journal_dirty(cn->bh);
 			clear_buffer_journaled(cn->bh);
 		} else {
-			/* JDirty cleared sometime during transaction.  don't log this one */
+			/*
+			 * JDirty cleared sometime during transaction.
+			 * don't log this one
+			 */
 			reiserfs_warning(sb, "journal-2048",
 					 "BAD, buffer in journal hash, "
 					 "but not JDirty!");
@@ -4054,9 +4231,10 @@
 		reiserfs_cond_resched(sb);
 	}
 
-	/* we are done  with both the c_bh and d_bh, but
-	 ** c_bh must be written after all other commit blocks,
-	 ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
+	/*
+	 * we are done with both the c_bh and d_bh, but
+	 * c_bh must be written after all other commit blocks,
+	 * so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
 	 */
 
 	journal->j_current_jl = alloc_journal_list(sb);
@@ -4087,15 +4265,18 @@
 	journal->j_next_async_flush = 0;
 	init_journal_hash(sb);
 
-	// make sure reiserfs_add_jh sees the new current_jl before we
-	// write out the tails
+	/*
+	 * make sure reiserfs_add_jh sees the new current_jl before we
+	 * write out the tails
+	 */
 	smp_mb();
 
-	/* tail conversion targets have to hit the disk before we end the
+	/*
+	 * tail conversion targets have to hit the disk before we end the
 	 * transaction.  Otherwise a later transaction might repack the tail
-	 * before this transaction commits, leaving the data block unflushed and
-	 * clean, if we crash before the later transaction commits, the data block
-	 * is lost.
+	 * before this transaction commits, leaving the data block unflushed
+	 * and clean; if we crash before the later transaction commits, the
+	 * data block is lost.
 	 */
 	if (!list_empty(&jl->j_tail_bh_list)) {
 		depth = reiserfs_write_unlock_nested(sb);
@@ -4106,12 +4287,13 @@
 	BUG_ON(!list_empty(&jl->j_tail_bh_list));
 	mutex_unlock(&jl->j_commit_mutex);
 
-	/* honor the flush wishes from the caller, simple commits can
-	 ** be done outside the journal lock, they are done below
-	 **
-	 ** if we don't flush the commit list right now, we put it into
-	 ** the work queue so the people waiting on the async progress work
-	 ** queue don't wait for this proc to flush journal lists and such.
+	/*
+	 * honor the flush wishes from the caller, simple commits can
+	 * be done outside the journal lock, they are done below
+	 *
+	 * if we don't flush the commit list right now, we put it into
+	 * the work queue so the people waiting on the async progress work
+	 * queue don't wait for this proc to flush journal lists and such.
 	 */
 	if (flush) {
 		flush_commit_list(sb, jl, 1);
@@ -4120,9 +4302,10 @@
 		queue_delayed_work(REISERFS_SB(sb)->commit_wq,
 				   &journal->j_work, HZ / 10);
 
-	/* if the next transaction has any chance of wrapping, flush
-	 ** transactions that might get overwritten.  If any journal lists are very
-	 ** old flush them as well.
+	/*
+	 * if the next transaction has any chance of wrapping, flush
+	 * transactions that might get overwritten.  If any journal lists
+	 * are very old flush them as well.
 	 */
       first_jl:
 	list_for_each_safe(entry, safe, &journal->j_journal_list) {
@@ -4135,8 +4318,10 @@
 			} else if ((journal->j_start +
 				    journal->j_trans_max + 1) <
 				   SB_ONDISK_JOURNAL_SIZE(sb)) {
-				/* if we don't cross into the next transaction and we don't
-				 * wrap, there is no way we can overlap any later transactions
+				/*
+				 * if we don't cross into the next
+				 * transaction and we don't wrap, there is no
+				 * way we can overlap any later transactions.
 				 * break now
 				 */
 				break;
@@ -4150,10 +4335,12 @@
 				flush_used_journal_lists(sb, temp_jl);
 				goto first_jl;
 			} else {
-				/* we don't overlap anything from out start to the end of the
-				 * log, and our wrapped portion doesn't overlap anything at
-				 * the start of the log.  We can break
-				 */
+				/*
+				 * we don't overlap anything from our start
+				 * to the end of the log, and our wrapped
+				 * portion doesn't overlap anything at
+				 * the start of the log.  We can break
+				 */
 				break;
 			}
 		}
@@ -4181,9 +4368,11 @@
 	reiserfs_check_lock_depth(sb, "journal end2");
 
 	memset(th, 0, sizeof(*th));
-	/* Re-set th->t_super, so we can properly keep track of how many
+	/*
+	 * Re-set th->t_super, so we can properly keep track of how many
 	 * persistent transactions there are. We need to do this so if this
-	 * call is part of a failed restart_transaction, we can free it later */
+	 * call is part of a failed restart_transaction, we can free it later
+	 */
 	th->t_super = sb;
 
 	return journal->j_errno;
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index b46399d..d48a9e7 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -8,28 +8,21 @@
 #include "reiserfs.h"
 #include <linux/buffer_head.h>
 
-/* these are used in do_balance.c */
-
-/* leaf_move_items
-   leaf_shift_left
-   leaf_shift_right
-   leaf_delete_items
-   leaf_insert_into_buf
-   leaf_paste_in_buffer
-   leaf_cut_from_buffer
-   leaf_paste_entries
-   */
-
-/* copy copy_count entries from source directory item to dest buffer (creating new item if needed) */
+/*
+ * copy copy_count entries from source directory item to dest buffer
+ * (creating new item if needed)
+ */
 static void leaf_copy_dir_entries(struct buffer_info *dest_bi,
 				  struct buffer_head *source, int last_first,
 				  int item_num, int from, int copy_count)
 {
 	struct buffer_head *dest = dest_bi->bi_bh;
-	int item_num_in_dest;	/* either the number of target item,
-				   or if we must create a new item,
-				   the number of the item we will
-				   create it next to */
+	/*
+	 * either the number of target item, or if we must create a
+	 * new item, the number of the item we will create it next to
+	 */
+	int item_num_in_dest;
+
 	struct item_head *ih;
 	struct reiserfs_de_head *deh;
 	int copy_records_len;	/* length of all records in item to be copied */
@@ -39,7 +32,10 @@
 
 	RFALSE(!is_direntry_le_ih(ih), "vs-10000: item must be directory item");
 
-	/* length of all record to be copied and first byte of the last of them */
+	/*
+	 * length of all records to be copied and the first byte
+	 * of the last of them
+	 */
 	deh = B_I_DEH(source, ih);
 	if (copy_count) {
 		copy_records_len = (from ? deh_location(&(deh[from - 1])) :
@@ -59,7 +55,10 @@
 	     LAST_TO_FIRST) ? ((B_NR_ITEMS(dest)) ? 0 : -1) : (B_NR_ITEMS(dest)
 							       - 1);
 
-	/* if there are no items in dest or the first/last item in dest is not item of the same directory */
+	/*
+	 * if there are no items in dest or the first/last item in
+	 * dest is not item of the same directory
+	 */
 	if ((item_num_in_dest == -1) ||
 	    (last_first == FIRST_TO_LAST && le_ih_k_offset(ih) == DOT_OFFSET) ||
 	    (last_first == LAST_TO_FIRST
@@ -83,11 +82,17 @@
 			if (from < ih_entry_count(ih)) {
 				set_le_ih_k_offset(&new_ih,
 						   deh_offset(&(deh[from])));
-				/*memcpy (&new_ih.ih_key.k_offset, &deh[from].deh_offset, SHORT_KEY_SIZE); */
 			} else {
-				/* no entries will be copied to this item in this function */
+				/*
+				 * no entries will be copied to this
+				 * item in this function
+				 */
 				set_le_ih_k_offset(&new_ih, U32_MAX);
-				/* this item is not yet valid, but we want I_IS_DIRECTORY_ITEM to return 1 for it, so we -1 */
+				/*
+				 * this item is not yet valid, but we
+				 * want I_IS_DIRECTORY_ITEM to return 1
+				 * for it, so we use -1 (U32_MAX)
+				 */
 			}
 			set_le_key_k_type(KEY_FORMAT_3_5, &(new_ih.ih_key),
 					  TYPE_DIRENTRY);
@@ -119,30 +124,38 @@
 			   DEH_SIZE * copy_count + copy_records_len);
 }
 
-/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or
-   part of it or nothing (see the return 0 below) from SOURCE to the end
-   (if last_first) or beginning (!last_first) of the DEST */
+/*
+ * Copy the first (if last_first == FIRST_TO_LAST) or last
+ * (last_first == LAST_TO_FIRST) item or part of it or nothing
+ * (see the return 0 below) from SOURCE to the end (if last_first)
+ * or beginning (!last_first) of the DEST
+ */
 /* returns 1 if anything was copied, else 0 */
 static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
 				   struct buffer_head *src, int last_first,
 				   int bytes_or_entries)
 {
 	struct buffer_head *dest = dest_bi->bi_bh;
-	int dest_nr_item, src_nr_item;	/* number of items in the source and destination buffers */
+	/* number of items in the source and destination buffers */
+	int dest_nr_item, src_nr_item;
 	struct item_head *ih;
 	struct item_head *dih;
 
 	dest_nr_item = B_NR_ITEMS(dest);
 
+	/*
+	 * if ( DEST is empty or first item of SOURCE and last item of
+	 * DEST are the items of different objects or of different types )
+	 * then there is no need to treat this item differently from the
+	 * other items that we copy, so we return
+	 */
 	if (last_first == FIRST_TO_LAST) {
-		/* if ( DEST is empty or first item of SOURCE and last item of DEST are the items of different objects
-		   or of different types ) then there is no need to treat this item differently from the other items
-		   that we copy, so we return */
 		ih = item_head(src, 0);
 		dih = item_head(dest, dest_nr_item - 1);
+
+		/* there is nothing to merge */
 		if (!dest_nr_item
 		    || (!op_is_left_mergeable(&(ih->ih_key), src->b_size)))
-			/* there is nothing to merge */
 			return 0;
 
 		RFALSE(!ih_item_len(ih),
@@ -157,8 +170,11 @@
 			return 1;
 		}
 
-		/* copy part of the body of the first item of SOURCE to the end of the body of the last item of the DEST
-		   part defined by 'bytes_or_entries'; if bytes_or_entries == -1 copy whole body; don't create new item header
+		/*
+		 * copy part of the body of the first item of SOURCE
+		 * to the end of the body of the last item of the DEST
+		 * part defined by 'bytes_or_entries'; if bytes_or_entries
+		 * == -1 copy whole body; don't create new item header
 		 */
 		if (bytes_or_entries == -1)
 			bytes_or_entries = ih_item_len(ih);
@@ -176,8 +192,10 @@
 		}
 #endif
 
-		/* merge first item (or its part) of src buffer with the last
-		   item of dest buffer. Both are of the same file */
+		/*
+		 * merge first item (or its part) of src buffer with the last
+		 * item of dest buffer. Both are of the same file
+		 */
 		leaf_paste_in_buffer(dest_bi,
 				     dest_nr_item - 1, ih_item_len(dih),
 				     bytes_or_entries, ih_item_body(src, ih), 0);
@@ -195,8 +213,9 @@
 
 	/* copy boundary item to right (last_first == LAST_TO_FIRST) */
 
-	/* ( DEST is empty or last item of SOURCE and first item of DEST
-	   are the items of different object or of different types )
+	/*
+	 * (DEST is empty or last item of SOURCE and first item of DEST
+	 * are the items of different objects or of different types)
 	 */
 	src_nr_item = B_NR_ITEMS(src);
 	ih = item_head(src, src_nr_item - 1);
@@ -206,8 +225,11 @@
 		return 0;
 
 	if (is_direntry_le_ih(ih)) {
+		/*
+		 * bytes_or_entries = entries number in last
+		 * item body of SOURCE
+		 */
 		if (bytes_or_entries == -1)
-			/* bytes_or_entries = entries number in last item body of SOURCE */
 			bytes_or_entries = ih_entry_count(ih);
 
 		leaf_copy_dir_entries(dest_bi, src, LAST_TO_FIRST,
@@ -217,9 +239,11 @@
 		return 1;
 	}
 
-	/* copy part of the body of the last item of SOURCE to the begin of the body of the first item of the DEST;
-	   part defined by 'bytes_or_entries'; if byte_or_entriess == -1 copy whole body; change first item key of the DEST;
-	   don't create new item header
+	/*
+	 * copy part of the body of the last item of SOURCE to the
+	 * begin of the body of the first item of the DEST; part defined
+	 * by 'bytes_or_entries'; if bytes_or_entries == -1 copy whole body;
+	 * change first item key of the DEST; don't create new item header
 	 */
 
 	RFALSE(is_indirect_le_ih(ih) && get_ih_free_space(ih),
@@ -276,9 +300,12 @@
 	return 1;
 }
 
-/* copy cpy_mun items from buffer src to buffer dest
- * last_first == FIRST_TO_LAST means, that we copy cpy_num  items beginning from first-th item in src to tail of dest
- * last_first == LAST_TO_FIRST means, that we copy cpy_num  items beginning from first-th item in src to head of dest
+/*
+ * copy cpy_num items from buffer src to buffer dest
+ * last_first == FIRST_TO_LAST means, that we copy cpy_num items beginning
+ *                             from first-th item in src to tail of dest
+ * last_first == LAST_TO_FIRST means, that we copy cpy_num items beginning
+ *                             from first-th item in src to head of dest
  */
 static void leaf_copy_items_entirely(struct buffer_info *dest_bi,
 				     struct buffer_head *src, int last_first,
@@ -311,7 +338,10 @@
 	nr = blkh_nr_item(blkh);
 	free_space = blkh_free_space(blkh);
 
-	/* we will insert items before 0-th or nr-th item in dest buffer. It depends of last_first parameter */
+	/*
+	 * we will insert items before 0-th or nr-th item in dest buffer.
+	 * It depends on the last_first parameter
+	 */
 	dest_before = (last_first == LAST_TO_FIRST) ? 0 : nr;
 
 	/* location of head of first new item */
@@ -377,8 +407,10 @@
 	}
 }
 
-/* This function splits the (liquid) item into two items (useful when
-   shifting part of an item into another node.) */
+/*
+ * This function splits the (liquid) item into two items (useful when
+ * shifting part of an item into another node.)
+ */
 static void leaf_item_bottle(struct buffer_info *dest_bi,
 			     struct buffer_head *src, int last_first,
 			     int item_num, int cpy_bytes)
@@ -390,7 +422,10 @@
 	       "vs-10170: bytes == - 1 means: do not split item");
 
 	if (last_first == FIRST_TO_LAST) {
-		/* if ( if item in position item_num in buffer SOURCE is directory item ) */
+		/*
+		 * if ( item in position item_num in buffer SOURCE
+		 * is directory item )
+		 */
 		ih = item_head(src, item_num);
 		if (is_direntry_le_ih(ih))
 			leaf_copy_dir_entries(dest_bi, src, FIRST_TO_LAST,
@@ -398,9 +433,11 @@
 		else {
 			struct item_head n_ih;
 
-			/* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
-			   part defined by 'cpy_bytes'; create new item header; change old item_header (????);
-			   n_ih = new item_header;
+			/*
+			 * copy part of the body of the item number 'item_num'
+			 * of SOURCE to the end of the DEST part defined by
+			 * 'cpy_bytes'; create new item header; change old
+			 * item_header (????); n_ih = new item_header;
 			 */
 			memcpy(&n_ih, ih, IH_SIZE);
 			put_ih_item_len(&n_ih, cpy_bytes);
@@ -419,7 +456,10 @@
 					     item_body(src, item_num), 0);
 		}
 	} else {
-		/*  if ( if item in position item_num in buffer SOURCE is directory item ) */
+		/*
+		 * if ( item in position item_num in buffer
+		 * SOURCE is directory item )
+		 */
 		ih = item_head(src, item_num);
 		if (is_direntry_le_ih(ih))
 			leaf_copy_dir_entries(dest_bi, src, LAST_TO_FIRST,
@@ -429,13 +469,16 @@
 		else {
 			struct item_head n_ih;
 
-			/* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST
-			   part defined by 'cpy_bytes'; create new item header;
-			   n_ih = new item_header;
+			/*
+			 * copy part of the body of the item number 'item_num'
+			 * of SOURCE to the begin of the DEST part defined by
+			 * 'cpy_bytes'; create new item header;
+			 * n_ih = new item_header;
 			 */
 			memcpy(&n_ih, ih, SHORT_KEY_SIZE);
 
-			n_ih.ih_version = ih->ih_version;	/* JDM Endian safe, both le */
+			/* Endian safe, both le */
+			n_ih.ih_version = ih->ih_version;
 
 			if (is_direct_le_ih(ih)) {
 				set_le_ih_k_offset(&n_ih,
@@ -459,7 +502,8 @@
 			/* set item length */
 			put_ih_item_len(&n_ih, cpy_bytes);
 
-			n_ih.ih_version = ih->ih_version;	/* JDM Endian safe, both le */
+			/* Endian safe, both le */
+			n_ih.ih_version = ih->ih_version;
 
 			leaf_insert_into_buf(dest_bi, 0, &n_ih,
 					     item_body(src, item_num) +
@@ -468,10 +512,12 @@
 	}
 }
 
-/* If cpy_bytes equals minus one than copy cpy_num whole items from SOURCE to DEST.
-   If cpy_bytes not equal to minus one than copy cpy_num-1 whole items from SOURCE to DEST.
-   From last item copy cpy_num bytes for regular item and cpy_num directory entries for
-   directory item. */
+/*
+ * If cpy_bytes equals minus one then copy cpy_num whole items from SOURCE
+ * to DEST.  If cpy_bytes is not equal to minus one then copy cpy_num-1 whole
+ * items from SOURCE to DEST.  From last item copy cpy_num bytes for regular
+ * item and cpy_num directory entries for directory item.
+ */
 static int leaf_copy_items(struct buffer_info *dest_bi, struct buffer_head *src,
 			   int last_first, int cpy_num, int cpy_bytes)
 {
@@ -498,22 +544,34 @@
 		else
 			bytes = -1;
 
-		/* copy the first item or it part or nothing to the end of the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,0,bytes)) */
+		/*
+		 * copy the first item or its part or nothing to the end of
+		 * the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,0,bytes))
+		 */
 		i = leaf_copy_boundary_item(dest_bi, src, FIRST_TO_LAST, bytes);
 		cpy_num -= i;
 		if (cpy_num == 0)
 			return i;
 		pos += i;
 		if (cpy_bytes == -1)
-			/* copy first cpy_num items starting from position 'pos' of SOURCE to end of DEST */
+			/*
+			 * copy first cpy_num items starting from position
+			 * 'pos' of SOURCE to end of DEST
+			 */
 			leaf_copy_items_entirely(dest_bi, src, FIRST_TO_LAST,
 						 pos, cpy_num);
 		else {
-			/* copy first cpy_num-1 items starting from position 'pos-1' of the SOURCE to the end of the DEST */
+			/*
+			 * copy first cpy_num-1 items starting from position
+			 * 'pos' of the SOURCE to the end of the DEST
+			 */
 			leaf_copy_items_entirely(dest_bi, src, FIRST_TO_LAST,
 						 pos, cpy_num - 1);
 
-			/* copy part of the item which number is cpy_num+pos-1 to the end of the DEST */
+			/*
+			 * copy part of the item which number is
+			 * cpy_num+pos-1 to the end of the DEST
+			 */
 			leaf_item_bottle(dest_bi, src, FIRST_TO_LAST,
 					 cpy_num + pos - 1, cpy_bytes);
 		}
@@ -525,7 +583,11 @@
 		else
 			bytes = -1;
 
-		/* copy the last item or it part or nothing to the begin of the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,1,bytes)); */
+		/*
+		 * copy the last item or its part or nothing to the
+		 * begin of the DEST
+		 * (i = leaf_copy_boundary_item(DEST,SOURCE,1,bytes));
+		 */
 		i = leaf_copy_boundary_item(dest_bi, src, LAST_TO_FIRST, bytes);
 
 		cpy_num -= i;
@@ -534,15 +596,24 @@
 
 		pos = src_nr_item - cpy_num - i;
 		if (cpy_bytes == -1) {
-			/* starting from position 'pos' copy last cpy_num items of SOURCE to begin of DEST */
+			/*
+			 * starting from position 'pos' copy last cpy_num
+			 * items of SOURCE to begin of DEST
+			 */
 			leaf_copy_items_entirely(dest_bi, src, LAST_TO_FIRST,
 						 pos, cpy_num);
 		} else {
-			/* copy last cpy_num-1 items starting from position 'pos+1' of the SOURCE to the begin of the DEST; */
+			/*
+			 * copy last cpy_num-1 items starting from position
+			 * 'pos+1' of the SOURCE to the begin of the DEST;
+			 */
 			leaf_copy_items_entirely(dest_bi, src, LAST_TO_FIRST,
 						 pos + 1, cpy_num - 1);
 
-			/* copy part of the item which number is pos to the begin of the DEST */
+			/*
+			 * copy part of the item which number is pos to
+			 * the begin of the DEST
+			 */
 			leaf_item_bottle(dest_bi, src, LAST_TO_FIRST, pos,
 					 cpy_bytes);
 		}
@@ -550,9 +621,11 @@
 	return i;
 }
 
-/* there are types of coping: from S[0] to L[0], from S[0] to R[0],
-   from R[0] to L[0]. for each of these we have to define parent and
-   positions of destination and source buffers */
+/*
+ * there are three types of copying: from S[0] to L[0], from S[0] to R[0],
+ * from R[0] to L[0]. for each of these we have to define parent and
+ * positions of destination and source buffers
+ */
 static void leaf_define_dest_src_infos(int shift_mode, struct tree_balance *tb,
 				       struct buffer_info *dest_bi,
 				       struct buffer_info *src_bi,
@@ -568,7 +641,9 @@
 		src_bi->tb = tb;
 		src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
 		src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
-		src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0);	/* src->b_item_order */
+
+		/* src->b_item_order */
+		src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0);
 		dest_bi->tb = tb;
 		dest_bi->bi_bh = tb->L[0];
 		dest_bi->bi_parent = tb->FL[0];
@@ -633,8 +708,10 @@
 	       shift_mode, src_bi->bi_bh, dest_bi->bi_bh);
 }
 
-/* copy mov_num items and mov_bytes of the (mov_num-1)th item to
-   neighbor. Delete them from source */
+/*
+ * copy mov_num items and mov_bytes of the (mov_num-1)th item to
+ * neighbor. Delete them from source
+ */
 int leaf_move_items(int shift_mode, struct tree_balance *tb, int mov_num,
 		    int mov_bytes, struct buffer_head *Snew)
 {
@@ -657,18 +734,24 @@
 	return ret_value;
 }
 
-/* Shift shift_num items (and shift_bytes of last shifted item if shift_bytes != -1)
-   from S[0] to L[0] and replace the delimiting key */
+/*
+ * Shift shift_num items (and shift_bytes of last shifted item if
+ * shift_bytes != -1) from S[0] to L[0] and replace the delimiting key
+ */
 int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes)
 {
 	struct buffer_head *S0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int i;
 
-	/* move shift_num (and shift_bytes bytes) items from S[0] to left neighbor L[0] */
+	/*
+	 * move shift_num (and shift_bytes bytes) items from S[0]
+	 * to left neighbor L[0]
+	 */
 	i = leaf_move_items(LEAF_FROM_S_TO_L, tb, shift_num, shift_bytes, NULL);
 
 	if (shift_num) {
-		if (B_NR_ITEMS(S0) == 0) {	/* number of items in S[0] == 0 */
+		/* number of items in S[0] == 0 */
+		if (B_NR_ITEMS(S0) == 0) {
 
 			RFALSE(shift_bytes != -1,
 			       "vs-10270: S0 is empty now, but shift_bytes != -1 (%d)",
@@ -704,13 +787,18 @@
 
 /* CLEANING STOPPED HERE */
 
-/* Shift shift_num (shift_bytes) items from S[0] to the right neighbor, and replace the delimiting key */
+/*
+ * Shift shift_num (shift_bytes) items from S[0] to the right neighbor,
+ * and replace the delimiting key
+ */
 int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes)
 {
-	//  struct buffer_head * S0 = PATH_PLAST_BUFFER (tb->tb_path);
 	int ret_value;
 
-	/* move shift_num (and shift_bytes) items from S[0] to right neighbor R[0] */
+	/*
+	 * move shift_num (and shift_bytes) items from S[0] to
+	 * right neighbor R[0]
+	 */
 	ret_value =
 	    leaf_move_items(LEAF_FROM_S_TO_R, tb, shift_num, shift_bytes, NULL);
 
@@ -725,12 +813,16 @@
 
 static void leaf_delete_items_entirely(struct buffer_info *bi,
 				       int first, int del_num);
-/*  If del_bytes == -1, starting from position 'first' delete del_num items in whole in buffer CUR.
-    If not.
-    If last_first == 0. Starting from position 'first' delete del_num-1 items in whole. Delete part of body of
-    the first item. Part defined by del_bytes. Don't delete first item header
-    If last_first == 1. Starting from position 'first+1' delete del_num-1 items in whole. Delete part of body of
-    the last item . Part defined by del_bytes. Don't delete last item header.
+/*
+ * If del_bytes == -1, starting from position 'first' delete del_num
+ * items in whole in buffer CUR.
+ *   If not.
+ *   If last_first == 0. Starting from position 'first' delete del_num-1
+ *   items in whole. Delete part of body of the first item. Part defined by
+ *   del_bytes. Don't delete first item header
+ *   If last_first == 1. Starting from position 'first+1' delete del_num-1
+ *   items in whole. Delete part of body of the last item. Part defined by
+ *   del_bytes. Don't delete last item header.
 */
 void leaf_delete_items(struct buffer_info *cur_bi, int last_first,
 		       int first, int del_num, int del_bytes)
@@ -761,32 +853,43 @@
 		leaf_delete_items_entirely(cur_bi, first, del_num);
 	else {
 		if (last_first == FIRST_TO_LAST) {
-			/* delete del_num-1 items beginning from item in position first  */
+			/*
+			 * delete del_num-1 items beginning from
+			 * item in position first
+			 */
 			leaf_delete_items_entirely(cur_bi, first, del_num - 1);
 
-			/* delete the part of the first item of the bh
-			   do not delete item header
+			/*
+			 * delete the part of the first item of the bh;
+			 * do not delete the item header
 			 */
 			leaf_cut_from_buffer(cur_bi, 0, 0, del_bytes);
 		} else {
 			struct item_head *ih;
 			int len;
 
-			/* delete del_num-1 items beginning from item in position first+1  */
+			/*
+			 * delete del_num-1 items beginning from
+			 * item in position first+1
+			 */
 			leaf_delete_items_entirely(cur_bi, first + 1,
 						   del_num - 1);
 
 			ih = item_head(bh, B_NR_ITEMS(bh) - 1);
 			if (is_direntry_le_ih(ih))
 				/* the last item is directory  */
-				/* len = numbers of directory entries in this item */
+				/*
+				 * len = number of directory entries
+				 * in this item
+				 */
 				len = ih_entry_count(ih);
 			else
 				/* len = body len of item */
 				len = ih_item_len(ih);
 
-			/* delete the part of the last item of the bh
-			   do not delete item header
+			/*
+			 * delete the part of the last item of the bh;
+			 * do not delete the item header
 			 */
 			leaf_cut_from_buffer(cur_bi, B_NR_ITEMS(bh) - 1,
 					     len - del_bytes, del_bytes);
@@ -867,8 +970,10 @@
 	}
 }
 
-/* paste paste_size bytes to affected_item_num-th item.
-   When item is a directory, this only prepare space for new entries */
+/*
+ * paste paste_size bytes to affected_item_num-th item.
+ * When item is a directory, this only prepares space for new entries
+ */
 void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num,
 			  int pos_in_item, int paste_size,
 			  const char *body, int zeros_number)
@@ -957,10 +1062,12 @@
 	}
 }
 
-/* cuts DEL_COUNT entries beginning from FROM-th entry. Directory item
-   does not have free space, so it moves DEHs and remaining records as
-   necessary. Return value is size of removed part of directory item
-   in bytes. */
+/*
+ * cuts DEL_COUNT entries beginning from FROM-th entry. Directory item
+ * does not have free space, so it moves DEHs and remaining records as
+ * necessary. Return value is size of removed part of directory item
+ * in bytes.
+ */
 static int leaf_cut_entries(struct buffer_head *bh,
 			    struct item_head *ih, int from, int del_count)
 {
@@ -971,8 +1078,10 @@
 	int cut_records_len;	/* length of all removed records */
 	int i;
 
-	/* make sure, that item is directory and there are enough entries to
-	   remove */
+	/*
+	 * make sure that item is directory and there are enough entries to
+	 * remove
+	 */
 	RFALSE(!is_direntry_le_ih(ih), "10180: item is not directory item");
 	RFALSE(ih_entry_count(ih) < from + del_count,
 	       "10185: item contains not enough entries: entry_count = %d, from = %d, to delete = %d",
@@ -987,8 +1096,10 @@
 	/* entry head array */
 	deh = B_I_DEH(bh, ih);
 
-	/* first byte of remaining entries, those are BEFORE cut entries
-	   (prev_record) and length of all removed records (cut_records_len) */
+	/*
+	 * first byte of remaining entries, those are BEFORE cut entries
+	 * (prev_record) and length of all removed records (cut_records_len)
+	 */
 	prev_record_offset =
 	    (from ? deh_location(&(deh[from - 1])) : ih_item_len(ih));
 	cut_records_len = prev_record_offset /*from_record */  -
@@ -1021,14 +1132,15 @@
 	return DEH_SIZE * del_count + cut_records_len;
 }
 
-/*  when cut item is part of regular file
-        pos_in_item - first byte that must be cut
-        cut_size - number of bytes to be cut beginning from pos_in_item
-
-   when cut item is part of directory
-        pos_in_item - number of first deleted entry
-        cut_size - count of deleted entries
-    */
+/*
+ * when cut item is part of regular file
+ *      pos_in_item - first byte that must be cut
+ *      cut_size - number of bytes to be cut beginning from pos_in_item
+ *
+ * when cut item is part of directory
+ *      pos_in_item - number of first deleted entry
+ *      cut_size - count of deleted entries
+ */
 void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
 			  int pos_in_item, int cut_size)
 {
@@ -1055,7 +1167,6 @@
 			       cut_item_num);
 			/* change item key by key of first entry in the item */
 			set_le_ih_k_offset(ih, deh_offset(B_I_DEH(bh, ih)));
-			/*memcpy (&ih->ih_key.k_offset, &(B_I_DEH (bh, ih)->deh_offset), SHORT_KEY_SIZE); */
 		}
 	} else {
 		/* item is direct or indirect */
@@ -1195,7 +1306,10 @@
 	}
 }
 
-/* paste new_entry_count entries (new_dehs, records) into position before to item_num-th item */
+/*
+ * paste new_entry_count entries (new_dehs, records) into position
+ * 'before' in the item_num-th item
+ */
 void leaf_paste_entries(struct buffer_info *bi,
 			int item_num,
 			int before,
@@ -1215,7 +1329,10 @@
 
 	ih = item_head(bh, item_num);
 
-	/* make sure, that item is directory, and there are enough records in it */
+	/*
+	 * make sure that item is directory, and there are enough
+	 * records in it
+	 */
 	RFALSE(!is_direntry_le_ih(ih), "10225: item is not directory item");
 	RFALSE(ih_entry_count(ih) < before,
 	       "10230: there are no entry we paste entries before. entry_count = %d, before = %d",
@@ -1277,8 +1394,6 @@
 	/* change item key if necessary (when we paste before 0-th entry */
 	if (!before) {
 		set_le_ih_k_offset(ih, deh_offset(new_dehs));
-/*      memcpy (&ih->ih_key.k_offset,
-		       &new_dehs->deh_offset, SHORT_KEY_SIZE);*/
 	}
 #ifdef CONFIG_REISERFS_CHECK
 	{
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 1ce8fbe..6bc38de 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -22,8 +22,10 @@
 #define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { inc_nlink(i); if (i->i_nlink >= REISERFS_LINK_MAX) set_nlink(i, 1); }
 #define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) drop_nlink(i);
 
-// directory item contains array of entry headers. This performs
-// binary search through that array
+/*
+ * directory item contains array of entry headers. This performs
+ * binary search through that array
+ */
 static int bin_search_in_dir_item(struct reiserfs_dir_entry *de, loff_t off)
 {
 	struct item_head *ih = de->de_ih;
@@ -43,7 +45,7 @@
 			lbound = j + 1;
 			continue;
 		}
-		// this is not name found, but matched third key component
+		/* this is not name found, but matched third key component */
 		de->de_entry_num = j;
 		return NAME_FOUND;
 	}
@@ -52,7 +54,9 @@
 	return NAME_NOT_FOUND;
 }
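
The loop above follows the standard binary-search shape over the
entry-header array; a self-contained sketch of the same pattern over a
plain sorted array (a hypothetical helper, not the actual reiserfs
structures) would be:

	/* sketch: binary search over sorted offsets, cf. deh_offset() */
	static int bin_search_offsets(const unsigned int *offsets, int count,
				      unsigned int off)
	{
		int lo = 0, hi = count - 1;

		while (lo <= hi) {
			int mid = (lo + hi) / 2;

			if (offsets[mid] < off)
				lo = mid + 1;	/* cf. lbound = j + 1 */
			else if (offsets[mid] > off)
				hi = mid - 1;
			else
				return mid;	/* cf. de->de_entry_num = j */
		}
		return -1;			/* cf. NAME_NOT_FOUND */
	}
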
 
-// comment?  maybe something like set de to point to what the path points to?
+/* set de to point to what the path points to */
 static inline void set_de_item_location(struct reiserfs_dir_entry *de,
 					struct treepath *path)
 {
@@ -62,7 +66,9 @@
 	de->de_item_num = PATH_LAST_POSITION(path);
 }
 
-// de_bh, de_ih, de_deh (points to first element of array), de_item_num is set
+/*
+ * de_bh, de_ih, de_deh (points to first element of array), de_item_num are set
+ */
 inline void set_de_name_and_namelen(struct reiserfs_dir_entry *de)
 {
 	struct reiserfs_de_head *deh = de->de_deh + de->de_entry_num;
@@ -76,7 +82,7 @@
 		de->de_namelen = strlen(de->de_name);
 }
 
-// what entry points to
+/* what entry points to */
 static inline void set_de_object_key(struct reiserfs_dir_entry *de)
 {
 	BUG_ON(de->de_entry_num >= ih_entry_count(de->de_ih));
@@ -100,17 +106,16 @@
 	set_cpu_key_k_type(&(de->de_entry_key), TYPE_DIRENTRY);
 }
 
-/* We assign a key to each directory item, and place multiple entries
-in a single directory item.  A directory item has a key equal to the
-key of the first directory entry in it.
+/*
+ * We assign a key to each directory item, and place multiple entries in a
+ * single directory item.  A directory item has a key equal to the key of
+ * the first directory entry in it.
 
-This function first calls search_by_key, then, if item whose first
-entry matches is not found it looks for the entry inside directory
-item found by search_by_key. Fills the path to the entry, and to the
-entry position in the item
-
-*/
-
+ * This function first calls search_by_key, then, if the item whose first
+ * entry matches is not found, it looks for the entry inside the directory
+ * item found by search_by_key. Fills the path to the entry, and to the
+ * entry position in the item.
+ */
 /* The function is NOT SCHEDULE-SAFE! */
 int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
 			struct treepath *path, struct reiserfs_dir_entry *de)
@@ -152,12 +157,17 @@
 	}
 #endif				/* CONFIG_REISERFS_CHECK */
 
-	/* binary search in directory item by third componen t of the
-	   key. sets de->de_entry_num of de */
+	/*
+	 * binary search in directory item by third component of the
+	 * key. sets de->de_entry_num of de
+	 */
 	retval = bin_search_in_dir_item(de, cpu_key_k_offset(key));
 	path->pos_in_item = de->de_entry_num;
 	if (retval != NAME_NOT_FOUND) {
-		// ugly, but rename needs de_bh, de_deh, de_name, de_namelen, de_objectid set
+		/*
+		 * ugly, but rename needs de_bh, de_deh, de_name,
+		 * de_namelen, de_objectid set
+		 */
 		set_de_name_and_namelen(de);
 		set_de_object_key(de);
 	}
@@ -166,11 +176,12 @@
 
 /* Keyed 32-bit hash function using TEA in a Davis-Meyer function */
 
-/* The third component is hashed, and you can choose from more than
-   one hash function.  Per directory hashes are not yet implemented
-   but are thought about. This function should be moved to hashes.c
-   Jedi, please do so.  -Hans */
-
+/*
+ * The third component is hashed, and you can choose from more than
+ * one hash function.  Per directory hashes are not yet implemented
+ * but are thought about. This function should be moved to hashes.c
+ * Jedi, please do so.  -Hans
+ */
 static __u32 get_third_component(struct super_block *s,
 				 const char *name, int len)
 {
@@ -183,11 +194,13 @@
 
 	res = REISERFS_SB(s)->s_hash_function(name, len);
 
-	// take bits from 7-th to 30-th including both bounds
+	/* take bits from 7-th to 30-th including both bounds */
 	res = GET_HASH_VALUE(res);
 	if (res == 0)
-		// needed to have no names before "." and ".." those have hash
-		// value == 0 and generation conters 1 and 2 accordingly
+		/*
+		 * needed to have no names before "." and "..", which have
+		 * hash value == 0 and generation counters 1 and 2 respectively
+		 */
 		res = 128;
 	return res + MAX_GENERATION_NUMBER;
 }
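
The comments above describe the directory-offset layout: bits 7 through 30
carry the hash, the low bits carry a generation number used on collisions,
and a zero hash is remapped to 128 so nothing sorts before "." and "..".
A hedged sketch of composing such an offset (the masks are illustrative
assumptions, not copied from the reiserfs headers):

	#define SKETCH_HASH_MASK	0x7fffff80u	/* bits 7..30, assumed */
	#define SKETCH_GEN_MASK		0x0000007fu	/* bits 0..6, assumed */

	/* illustration only: combine a hash with a generation number */
	static unsigned int sketch_entry_offset(unsigned int hash,
						unsigned int generation)
	{
		return (hash & SKETCH_HASH_MASK) | (generation & SKETCH_GEN_MASK);
	}
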
@@ -208,7 +221,7 @@
 
 /* de's de_bh, de_ih, de_deh, de_item_num, de_entry_num are set already */
 
-				/* used when hash collisions exist */
+/* used when hash collisions exist */
 
 static int linear_search_in_dir_item(struct cpu_key *key,
 				     struct reiserfs_dir_entry *de,
@@ -232,43 +245,50 @@
 	deh += i;
 
 	for (; i >= 0; i--, deh--) {
+		/* hash value does not match, no need to check whole name */
 		if (GET_HASH_VALUE(deh_offset(deh)) !=
 		    GET_HASH_VALUE(cpu_key_k_offset(key))) {
-			// hash value does not match, no need to check whole name
 			return NAME_NOT_FOUND;
 		}
 
-		/* mark, that this generation number is used */
+		/* mark that this generation number is used */
 		if (de->de_gen_number_bit_string)
 			set_bit(GET_GENERATION_NUMBER(deh_offset(deh)),
 				de->de_gen_number_bit_string);
 
-		// calculate pointer to name and namelen
+		/* calculate pointer to name and namelen */
 		de->de_entry_num = i;
 		set_de_name_and_namelen(de);
 
+		/*
+		 * de's de_name, de_namelen, de_recordlen are set.
+		 * Fill the rest.
+		 */
 		if ((retval =
 		     reiserfs_match(de, name, namelen)) != NAME_NOT_FOUND) {
-			// de's de_name, de_namelen, de_recordlen are set. Fill the rest:
 
-			// key of pointed object
+			/* key of pointed object */
 			set_de_object_key(de);
 
 			store_de_entry_key(de);
 
-			// retval can be NAME_FOUND or NAME_FOUND_INVISIBLE
+			/* retval can be NAME_FOUND or NAME_FOUND_INVISIBLE */
 			return retval;
 		}
 	}
 
 	if (GET_GENERATION_NUMBER(le_ih_k_offset(de->de_ih)) == 0)
-		/* we have reached left most entry in the node. In common we
-		   have to go to the left neighbor, but if generation counter
-		   is 0 already, we know for sure, that there is no name with
-		   the same hash value */
-		// FIXME: this work correctly only because hash value can not
-		// be 0. Btw, in case of Yura's hash it is probably possible,
-		// so, this is a bug
+		/*
+		 * we have reached left most entry in the node. In common we
+		 * have to go to the left neighbor, but if generation counter
+		 * is 0 already, we know for sure, that there is no name with
+		 * the same hash value
+		 */
+		/*
+		 * FIXME: this works correctly only because hash value can
+		 * not be 0.  Btw, in case of Yura's hash it is probably
+		 * possible, so this is a bug
+		 */
 		return NAME_NOT_FOUND;
 
 	RFALSE(de->de_item_num,
@@ -277,8 +297,10 @@
 	return GOTO_PREVIOUS_ITEM;
 }
 
-// may return NAME_FOUND, NAME_FOUND_INVISIBLE, NAME_NOT_FOUND
-// FIXME: should add something like IOERROR
+/*
+ * may return NAME_FOUND, NAME_FOUND_INVISIBLE, NAME_NOT_FOUND
+ * FIXME: should add something like IOERROR
+ */
 static int reiserfs_find_entry(struct inode *dir, const char *name, int namelen,
 			       struct treepath *path_to_entry,
 			       struct reiserfs_dir_entry *de)
@@ -307,13 +329,19 @@
 		retval =
 		    linear_search_in_dir_item(&key_to_search, de, name,
 					      namelen);
+		/*
+		 * there is no need to scan the directory anymore.
+		 * The given entry was found or does not exist
+		 */
 		if (retval != GOTO_PREVIOUS_ITEM) {
-			/* there is no need to scan directory anymore. Given entry found or does not exist */
 			path_to_entry->pos_in_item = de->de_entry_num;
 			return retval;
 		}
 
-		/* there is left neighboring item of this directory and given entry can be there */
+		/*
+		 * there is a left neighboring item of this directory
+		 * and the given entry can be there
+		 */
 		set_cpu_key_k_offset(&key_to_search,
 				     le_ih_k_offset(de->de_ih) - 1);
 		pathrelse(path_to_entry);
@@ -347,8 +375,10 @@
 			return ERR_PTR(-EACCES);
 		}
 
-		/* Propagate the private flag so we know we're
-		 * in the priv tree */
+		/*
+		 * Propagate the private flag so we know we're
+		 * in the priv tree
+		 */
 		if (IS_PRIVATE(dir))
 			inode->i_flags |= S_PRIVATE;
 	}
@@ -361,9 +391,9 @@
 }
 
 /*
-** looks up the dentry of the parent directory for child.
-** taken from ext2_get_parent
-*/
+ * looks up the dentry of the parent directory for child.
+ * taken from ext2_get_parent
+ */
 struct dentry *reiserfs_get_parent(struct dentry *child)
 {
 	int retval;
@@ -406,8 +436,13 @@
 	struct reiserfs_dir_entry de;
 	DECLARE_BITMAP(bit_string, MAX_GENERATION_NUMBER + 1);
 	int gen_number;
-	char small_buf[32 + DEH_SIZE];	/* 48 bytes now and we avoid kmalloc
-					   if we create file with short name */
+
+	/*
+	 * 48 bytes now and we avoid kmalloc if we
+	 * create a file with a short name
+	 */
+	char small_buf[32 + DEH_SIZE];
+
 	char *buffer;
 	int buflen, paste_size;
 	int retval;
@@ -439,21 +474,30 @@
 	    (get_inode_sd_version(dir) ==
 	     STAT_DATA_V1) ? (DEH_SIZE + namelen) : buflen;
 
-	/* fill buffer : directory entry head, name[, dir objectid | , stat data | ,stat data, dir objectid ] */
+	/*
+	 * fill buffer: directory entry head, name[, dir objectid | ,
+	 * stat data | ,stat data, dir objectid ]
+	 */
 	deh = (struct reiserfs_de_head *)buffer;
 	deh->deh_location = 0;	/* JDM Endian safe if 0 */
 	put_deh_offset(deh, cpu_key_k_offset(&entry_key));
 	deh->deh_state = 0;	/* JDM Endian safe if 0 */
 	/* put key (ino analog) to de */
-	deh->deh_dir_id = INODE_PKEY(inode)->k_dir_id;	/* safe: k_dir_id is le */
-	deh->deh_objectid = INODE_PKEY(inode)->k_objectid;	/* safe: k_objectid is le */
+
+	/* safe: k_dir_id is le */
+	deh->deh_dir_id = INODE_PKEY(inode)->k_dir_id;
+	/* safe: k_objectid is le */
+	deh->deh_objectid = INODE_PKEY(inode)->k_objectid;
 
 	/* copy name */
 	memcpy((char *)(deh + 1), name, namelen);
 	/* padd by 0s to the 4 byte boundary */
 	padd_item((char *)(deh + 1), ROUND_UP(namelen), namelen);
 
-	/* entry is ready to be pasted into tree, set 'visibility' and 'stat data in entry' attributes */
+	/*
+	 * entry is ready to be pasted into tree, set 'visibility'
+	 * and 'stat data in entry' attributes
+	 */
 	mark_de_without_sd(deh);
 	visible ? mark_de_visible(deh) : mark_de_hidden(deh);
 
@@ -499,7 +543,8 @@
 	/* update max-hash-collisions counter in reiserfs_sb_info */
 	PROC_INFO_MAX(th->t_super, max_hash_collisions, gen_number);
 
-	if (gen_number != 0) {	/* we need to re-search for the insertion point */
+	/* we need to re-search for the insertion point */
+	if (gen_number != 0) {
 		if (search_by_entry_key(dir->i_sb, &entry_key, &path, &de) !=
 		    NAME_NOT_FOUND) {
 			reiserfs_warning(dir->i_sb, "vs-7032",
@@ -527,18 +572,19 @@
 	dir->i_size += paste_size;
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
 	if (!S_ISDIR(inode->i_mode) && visible)
-		// reiserfs_mkdir or reiserfs_rename will do that by itself
+		/* reiserfs_mkdir or reiserfs_rename will do that by itself */
 		reiserfs_update_sd(th, dir);
 
 	reiserfs_check_path(&path);
 	return 0;
 }
 
-/* quota utility function, call if you've had to abort after calling
-** new_inode_init, and have not called reiserfs_new_inode yet.
-** This should only be called on inodes that do not have stat data
-** inserted into the tree yet.
-*/
+/*
+ * quota utility function, call if you've had to abort after calling
+ * new_inode_init, and have not called reiserfs_new_inode yet.
+ * This should only be called on inodes that do not have stat data
+ * inserted into the tree yet.
+ */
 static int drop_new_inode(struct inode *inode)
 {
 	dquot_drop(inode);
@@ -548,18 +594,23 @@
 	return 0;
 }
 
-/* utility function that does setup for reiserfs_new_inode.
-** dquot_initialize needs lots of credits so it's better to have it
-** outside of a transaction, so we had to pull some bits of
-** reiserfs_new_inode out into this func.
-*/
+/*
+ * utility function that does setup for reiserfs_new_inode.
+ * dquot_initialize needs lots of credits so it's better to have it
+ * outside of a transaction, so we had to pull some bits of
+ * reiserfs_new_inode out into this func.
+ */
 static int new_inode_init(struct inode *inode, struct inode *dir, umode_t mode)
 {
-	/* Make inode invalid - just in case we are going to drop it before
-	 * the initialization happens */
+	/*
+	 * Make inode invalid - just in case we are going to drop it before
+	 * the initialization happens
+	 */
 	INODE_PKEY(inode)->k_objectid = 0;
-	/* the quota init calls have to know who to charge the quota to, so
-	 ** we have to set uid and gid here
+
+	/*
+	 * the quota init calls have to know who to charge the quota to, so
+	 * we have to set uid and gid here
 	 */
 	inode_init_owner(inode, dir, mode);
 	dquot_initialize(inode);
@@ -571,7 +622,10 @@
 {
 	int retval;
 	struct inode *inode;
-	/* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+	/*
+	 * We need blocks for transaction + (user+group)*(quotas
+	 * for new inode + update of quota for directory owner)
+	 */
 	int jbegin_count =
 	    JOURNAL_PER_BALANCE_CNT * 2 +
 	    2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
@@ -644,7 +698,10 @@
 	struct inode *inode;
 	struct reiserfs_transaction_handle th;
 	struct reiserfs_security_handle security;
-	/* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+	/*
+	 * We need blocks for transaction + (user+group)*(quotas
+	 * for new inode + update of quota for directory owner)
+	 */
 	int jbegin_count =
 	    JOURNAL_PER_BALANCE_CNT * 3 +
 	    2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
@@ -685,7 +742,7 @@
 	inode->i_op = &reiserfs_special_inode_operations;
 	init_special_inode(inode, inode->i_mode, rdev);
 
-	//FIXME: needed for block and char devices only
+	/* FIXME: needed for block and char devices only */
 	reiserfs_update_sd(&th, inode);
 
 	reiserfs_update_inode_transaction(inode);
@@ -721,7 +778,10 @@
 	struct inode *inode;
 	struct reiserfs_transaction_handle th;
 	struct reiserfs_security_handle security;
-	/* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+	/*
+	 * We need blocks for transaction + (user+group)*(quotas
+	 * for new inode + update of quota for directory owner)
+	 */
 	int jbegin_count =
 	    JOURNAL_PER_BALANCE_CNT * 3 +
 	    2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
@@ -730,7 +790,10 @@
 	dquot_initialize(dir);
 
 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
-	/* set flag that new packing locality created and new blocks for the content     * of that directory are not displaced yet */
+	/*
+	 * set flag that a new packing locality was created and new blocks
+	 * for the content of that directory are not displaced yet
+	 */
 	REISERFS_I(dir)->new_packing_locality = 1;
 #endif
 	mode = S_IFDIR | mode;
@@ -754,8 +817,9 @@
 		goto out_failed;
 	}
 
-	/* inc the link count now, so another writer doesn't overflow it while
-	 ** we sleep later on.
+	/*
+	 * inc the link count now, so another writer doesn't overflow
+	 * it while we sleep later on.
 	 */
 	INC_DIR_INODE_NLINK(dir)
 
@@ -774,7 +838,7 @@
 	inode->i_op = &reiserfs_dir_inode_operations;
 	inode->i_fop = &reiserfs_dir_operations;
 
-	// note, _this_ add_entry will not update dir's stat data
+	/* note, _this_ add_entry will not update dir's stat data */
 	retval =
 	    reiserfs_add_entry(&th, dir, dentry->d_name.name,
 			       dentry->d_name.len, inode, 1 /*visible */ );
@@ -790,7 +854,7 @@
 		iput(inode);
 		goto out_failed;
 	}
-	// the above add_entry did not update dir's stat data
+	/* the above add_entry did not update dir's stat data */
 	reiserfs_update_sd(&th, dir);
 
 	unlock_new_inode(inode);
@@ -803,10 +867,11 @@
 
 static inline int reiserfs_empty_dir(struct inode *inode)
 {
-	/* we can cheat because an old format dir cannot have
-	 ** EMPTY_DIR_SIZE, and a new format dir cannot have
-	 ** EMPTY_DIR_SIZE_V1.  So, if the inode is either size,
-	 ** regardless of disk format version, the directory is empty.
+	/*
+	 * we can cheat because an old format dir cannot have
+	 * EMPTY_DIR_SIZE, and a new format dir cannot have
+	 * EMPTY_DIR_SIZE_V1.  So, if the inode is either size,
+	 * regardless of disk format version, the directory is empty.
 	 */
 	if (inode->i_size != EMPTY_DIR_SIZE &&
 	    inode->i_size != EMPTY_DIR_SIZE_V1) {
@@ -824,10 +889,12 @@
 	INITIALIZE_PATH(path);
 	struct reiserfs_dir_entry de;
 
-	/* we will be doing 2 balancings and update 2 stat data, we change quotas
-	 * of the owner of the directory and of the owner of the parent directory.
-	 * The quota structure is possibly deleted only on last iput => outside
-	 * of this transaction */
+	/*
+	 * we will be doing 2 balancings and update 2 stat data, we
+	 * change quotas of the owner of the directory and of the owner
+	 * of the parent directory.  The quota structure is possibly
+	 * deleted only on last iput => outside of this transaction
+	 */
 	jbegin_count =
 	    JOURNAL_PER_BALANCE_CNT * 2 + 2 +
 	    4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
@@ -856,8 +923,9 @@
 	reiserfs_update_inode_transaction(dir);
 
 	if (de.de_objectid != inode->i_ino) {
-		// FIXME: compare key of an object and a key found in the
-		// entry
+		/*
+		 * FIXME: compare key of an object and a key found in the entry
+		 */
 		retval = -EIO;
 		goto end_rmdir;
 	}
@@ -895,9 +963,11 @@
 	return retval;
 
       end_rmdir:
-	/* we must release path, because we did not call
-	   reiserfs_cut_from_item, or reiserfs_cut_from_item does not
-	   release path if operation was not complete */
+	/*
+	 * we must release path, because we did not call
+	 * reiserfs_cut_from_item, or reiserfs_cut_from_item does not
+	 * release path if operation was not complete
+	 */
 	pathrelse(&path);
 	err = journal_end(&th, dir->i_sb, jbegin_count);
 	reiserfs_write_unlock(dir->i_sb);
@@ -918,10 +988,13 @@
 
 	inode = dentry->d_inode;
 
-	/* in this transaction we can be doing at max two balancings and update
-	 * two stat datas, we change quotas of the owner of the directory and of
-	 * the owner of the parent directory. The quota structure is possibly
-	 * deleted only on iput => outside of this transaction */
+	/*
+	 * in this transaction we can be doing at max two balancings and
+	 * update two stat datas, we change quotas of the owner of the
+	 * directory and of the owner of the parent directory. The quota
+	 * structure is possibly deleted only on iput => outside of
+	 * this transaction
+	 */
 	jbegin_count =
 	    JOURNAL_PER_BALANCE_CNT * 2 + 2 +
 	    4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
@@ -946,8 +1019,9 @@
 	reiserfs_update_inode_transaction(dir);
 
 	if (de.de_objectid != inode->i_ino) {
-		// FIXME: compare key of an object and a key found in the
-		// entry
+		/*
+		 * FIXME: compare key of an object and a key found in the entry
+		 */
 		retval = -EIO;
 		goto end_unlink;
 	}
@@ -1011,7 +1085,10 @@
 	struct reiserfs_transaction_handle th;
 	struct reiserfs_security_handle security;
 	int mode = S_IFLNK | S_IRWXUGO;
-	/* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+	/*
+	 * We need blocks for transaction + (user+group)*(quotas for
+	 * new inode + update of quota for directory owner)
+	 */
 	int jbegin_count =
 	    JOURNAL_PER_BALANCE_CNT * 3 +
 	    2 * (REISERFS_QUOTA_INIT_BLOCKS(parent_dir->i_sb) +
@@ -1070,10 +1147,6 @@
 	inode->i_op = &reiserfs_symlink_inode_operations;
 	inode->i_mapping->a_ops = &reiserfs_address_space_operations;
 
-	// must be sure this inode is written with this transaction
-	//
-	//reiserfs_update_sd (&th, inode, READ_BLOCKS);
-
 	retval = reiserfs_add_entry(&th, parent_dir, dentry->d_name.name,
 				    dentry->d_name.len, inode, 1 /*visible */ );
 	if (retval) {
@@ -1102,7 +1175,10 @@
 	int retval;
 	struct inode *inode = old_dentry->d_inode;
 	struct reiserfs_transaction_handle th;
-	/* We need blocks for transaction + update of quotas for the owners of the directory */
+	/*
+	 * We need blocks for transaction + update of quotas for
+	 * the owners of the directory
+	 */
 	int jbegin_count =
 	    JOURNAL_PER_BALANCE_CNT * 3 +
 	    2 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
@@ -1111,7 +1187,7 @@
 
 	reiserfs_write_lock(dir->i_sb);
 	if (inode->i_nlink >= REISERFS_LINK_MAX) {
-		//FIXME: sd_nlink is 32 bit for new files
+		/* FIXME: sd_nlink is 32 bit for new files */
 		reiserfs_write_unlock(dir->i_sb);
 		return -EMLINK;
 	}
@@ -1158,9 +1234,9 @@
 {
 	struct reiserfs_dir_entry tmp = *de;
 
-	// recalculate pointer to name and name length
+	/* recalculate pointer to name and name length */
 	set_de_name_and_namelen(&tmp);
-	// FIXME: could check more
+	/* FIXME: could check more */
 	if (tmp.de_namelen != len || memcmp(name, de->de_name, len))
 		return 0;
 	return 1;
@@ -1217,14 +1293,16 @@
 	unsigned long savelink = 1;
 	struct timespec ctime;
 
-	/* three balancings: (1) old name removal, (2) new name insertion
-	   and (3) maybe "save" link insertion
-	   stat data updates: (1) old directory,
-	   (2) new directory and (3) maybe old object stat data (when it is
-	   directory) and (4) maybe stat data of object to which new entry
-	   pointed initially and (5) maybe block containing ".." of
-	   renamed directory
-	   quota updates: two parent directories */
+	/*
+	 * three balancings: (1) old name removal, (2) new name insertion
+	 * and (3) maybe "save" link insertion
+	 * stat data updates: (1) old directory,
+	 * (2) new directory and (3) maybe old object stat data (when it is
+	 * directory) and (4) maybe stat data of object to which new entry
+	 * pointed initially and (5) maybe block containing ".." of
+	 * renamed directory
+	 * quota updates: two parent directories
+	 */
 	jbegin_count =
 	    JOURNAL_PER_BALANCE_CNT * 3 + 5 +
 	    4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb);
@@ -1235,8 +1313,10 @@
 	old_inode = old_dentry->d_inode;
 	new_dentry_inode = new_dentry->d_inode;
 
-	// make sure, that oldname still exists and points to an object we
-	// are going to rename
+	/*
+	 * make sure that oldname still exists and points to an object we
+	 * are going to rename
+	 */
 	old_de.de_gen_number_bit_string = NULL;
 	reiserfs_write_lock(old_dir->i_sb);
 	retval =
@@ -1256,10 +1336,11 @@
 
 	old_inode_mode = old_inode->i_mode;
 	if (S_ISDIR(old_inode_mode)) {
-		// make sure, that directory being renamed has correct ".."
-		// and that its new parent directory has not too many links
-		// already
-
+		/*
+		 * make sure that directory being renamed has correct ".."
+		 * and that its new parent directory has not too many links
+		 * already
+		 */
 		if (new_dentry_inode) {
 			if (!reiserfs_empty_dir(new_dentry_inode)) {
 				reiserfs_write_unlock(old_dir->i_sb);
@@ -1267,8 +1348,9 @@
 			}
 		}
 
-		/* directory is renamed, its parent directory will be changed,
-		 ** so find ".." entry
+		/*
+		 * directory is renamed, its parent directory will be changed,
+		 * so find ".." entry
 		 */
 		dot_dot_de.de_gen_number_bit_string = NULL;
 		retval =
@@ -1311,8 +1393,9 @@
 	reiserfs_update_inode_transaction(old_dir);
 	reiserfs_update_inode_transaction(new_dir);
 
-	/* this makes it so an fsync on an open fd for the old name will
-	 ** commit the rename operation
+	/*
+	 * this makes it so an fsync on an open fd for the old name will
+	 * commit the rename operation
 	 */
 	reiserfs_update_inode_transaction(old_inode);
 
@@ -1320,7 +1403,10 @@
 		reiserfs_update_inode_transaction(new_dentry_inode);
 
 	while (1) {
-		// look for old name using corresponding entry key (found by reiserfs_find_entry)
+		/*
+		 * look for old name using corresponding entry key
+		 * (found by reiserfs_find_entry)
+		 */
 		if ((retval =
 		     search_by_entry_key(new_dir->i_sb, &old_de.de_entry_key,
 					 &old_entry_path,
@@ -1335,14 +1421,18 @@
 
 		reiserfs_prepare_for_journal(old_inode->i_sb, old_de.de_bh, 1);
 
-		// look for new name by reiserfs_find_entry
+		/* look for new name by reiserfs_find_entry */
 		new_de.de_gen_number_bit_string = NULL;
 		retval =
 		    reiserfs_find_entry(new_dir, new_dentry->d_name.name,
 					new_dentry->d_name.len, &new_entry_path,
 					&new_de);
-		// reiserfs_add_entry should not return IO_ERROR, because it is called with essentially same parameters from
-		// reiserfs_add_entry above, and we'll catch any i/o errors before we get here.
+		/*
+		 * reiserfs_find_entry should not return IO_ERROR,
+		 * because it is called with essentially the same parameters
+		 * from reiserfs_add_entry above, and we'll catch any i/o
+		 * errors before we get here.
+		 */
 		if (retval != NAME_FOUND_INVISIBLE && retval != NAME_FOUND) {
 			pathrelse(&new_entry_path);
 			pathrelse(&old_entry_path);
@@ -1370,22 +1460,26 @@
 			}
 			copy_item_head(&dot_dot_ih,
 				       tp_item_head(&dot_dot_entry_path));
-			// node containing ".." gets into transaction
+			/* node containing ".." gets into transaction */
 			reiserfs_prepare_for_journal(old_inode->i_sb,
 						     dot_dot_de.de_bh, 1);
 		}
-		/* we should check seals here, not do
-		   this stuff, yes? Then, having
-		   gathered everything into RAM we
-		   should lock the buffers, yes?  -Hans */
-		/* probably.  our rename needs to hold more
-		 ** than one path at once.  The seals would
-		 ** have to be written to deal with multi-path
-		 ** issues -chris
+		/*
+		 * we should check seals here, not do
+		 * this stuff, yes? Then, having
+		 * gathered everything into RAM we
+		 * should lock the buffers, yes?  -Hans
 		 */
-		/* sanity checking before doing the rename - avoid races many
-		 ** of the above checks could have scheduled.  We have to be
-		 ** sure our items haven't been shifted by another process.
+		/*
+		 * probably.  our rename needs to hold more
+		 * than one path at once.  The seals would
+		 * have to be written to deal with multi-path
+		 * issues -chris
+		 */
+		/*
+		 * sanity checking before doing the rename - avoid races many
+		 * of the above checks could have scheduled.  We have to be
+		 * sure our items haven't been shifted by another process.
 		 */
 		if (item_moved(&new_entry_ih, &new_entry_path) ||
 		    !entry_points_to_object(new_dentry->d_name.name,
@@ -1430,8 +1524,10 @@
 		break;
 	}
 
-	/* ok, all the changes can be done in one fell swoop when we
-	   have claimed all the buffers needed. */
+	/*
+	 * ok, all the changes can be done in one fell swoop when we
+	 * have claimed all the buffers needed.
+	 */
 
 	mark_de_visible(new_de.de_deh + new_de.de_entry_num);
 	set_ino_in_dir_entry(&new_de, INODE_PKEY(old_inode));
@@ -1442,12 +1538,14 @@
 	ctime = CURRENT_TIME_SEC;
 	old_dir->i_ctime = old_dir->i_mtime = ctime;
 	new_dir->i_ctime = new_dir->i_mtime = ctime;
-	/* thanks to Alex Adriaanse <alex_a@caltech.edu> for patch which adds ctime update of
-	   renamed object */
+	/*
+	 * thanks to Alex Adriaanse <alex_a@caltech.edu> for patch
+	 * which adds ctime update of renamed object
+	 */
 	old_inode->i_ctime = ctime;
 
 	if (new_dentry_inode) {
-		// adjust link number of the victim
+		/* adjust link number of the victim */
 		if (S_ISDIR(new_dentry_inode->i_mode)) {
 			clear_nlink(new_dentry_inode);
 		} else {
@@ -1462,21 +1560,28 @@
 		set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir));
 		journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh);
 
+		/*
+		 * there (in new_dir) was no directory, so it got new link
+		 * (".."  of renamed directory)
+		 */
 		if (!new_dentry_inode)
-			/* there (in new_dir) was no directory, so it got new link
-			   (".."  of renamed directory) */
 			INC_DIR_INODE_NLINK(new_dir);
 
 		/* old directory lost one link - ".. " of renamed directory */
 		DEC_DIR_INODE_NLINK(old_dir);
 	}
-	// looks like in 2.3.99pre3 brelse is atomic. so we can use pathrelse
+	/*
+	 * looks like in 2.3.99pre3 brelse is atomic,
+	 * so we can use pathrelse
+	 */
 	pathrelse(&new_entry_path);
 	pathrelse(&dot_dot_entry_path);
 
-	// FIXME: this reiserfs_cut_from_item's return value may screw up
-	// anybody, but it will panic if will not be able to find the
-	// entry. This needs one more clean up
+	/*
+	 * FIXME: this reiserfs_cut_from_item's return value may screw up
+	 * anybody, but it will panic if it will not be able to find the
+	 * entry. This needs one more cleanup
+	 */
 	if (reiserfs_cut_from_item
 	    (&th, &old_entry_path, &(old_de.de_entry_key), old_dir, NULL,
 	     0) < 0)
@@ -1501,11 +1606,8 @@
 	return retval;
 }
 
-/*
- * directories can handle most operations...
- */
+/* directories can handle most operations... */
 const struct inode_operations reiserfs_dir_inode_operations = {
-	//&reiserfs_dir_operations,   /* default_file_ops */
 	.create = reiserfs_create,
 	.lookup = reiserfs_lookup,
 	.link = reiserfs_link,
diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c
index f732d6a..99f66f8 100644
--- a/fs/reiserfs/objectid.c
+++ b/fs/reiserfs/objectid.c
@@ -7,7 +7,7 @@
 #include <linux/time.h>
 #include "reiserfs.h"
 
-// find where objectid map starts
+/* find where objectid map starts */
 #define objectid_map(s,rs) (old_format_only (s) ? \
                          (__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
 			 (__le32 *)((rs) + 1))
@@ -20,7 +20,7 @@
 		reiserfs_panic(s, "vs-15010", "map corrupted: %lx",
 			       (long unsigned int)le32_to_cpu(map[0]));
 
-	// FIXME: add something else here
+	/* FIXME: add something else here */
 }
 
 #else
@@ -29,19 +29,21 @@
 }
 #endif
 
-/* When we allocate objectids we allocate the first unused objectid.
-   Each sequence of objectids in use (the odd sequences) is followed
-   by a sequence of objectids not in use (the even sequences).  We
-   only need to record the last objectid in each of these sequences
-   (both the odd and even sequences) in order to fully define the
-   boundaries of the sequences.  A consequence of allocating the first
-   objectid not in use is that under most conditions this scheme is
-   extremely compact.  The exception is immediately after a sequence
-   of operations which deletes a large number of objects of
-   non-sequential objectids, and even then it will become compact
-   again as soon as more objects are created.  Note that many
-   interesting optimizations of layout could result from complicating
-   objectid assignment, but we have deferred making them for now. */
+/*
+ * When we allocate objectids we allocate the first unused objectid.
+ * Each sequence of objectids in use (the odd sequences) is followed
+ * by a sequence of objectids not in use (the even sequences).  We
+ * only need to record the last objectid in each of these sequences
+ * (both the odd and even sequences) in order to fully define the
+ * boundaries of the sequences.  A consequence of allocating the first
+ * objectid not in use is that under most conditions this scheme is
+ * extremely compact.  The exception is immediately after a sequence
+ * of operations which deletes a large number of objects of
+ * non-sequential objectids, and even then it will become compact
+ * again as soon as more objects are created.  Note that many
+ * interesting optimizations of layout could result from complicating
+ * objectid assignment, but we have deferred making them for now.
+ */
 
 /* get unique object identifier */
 __u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
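
One way to picture the scheme described above, with invented boundary
values (host-endian here for clarity; on disk the map entries are __le32):

	u32 map[] = { 1, 5, 10, 12 };	/* hypothetical contents */

	/* one reading: ids 1..4 in use, 5..9 free, 10..11 in use */
	u32 unused = map[1];	/* 5 is the first unused objectid */
	map[1] = unused + 1;	/* allocating it bumps the boundary */

Once map[1] catches up to map[2], the pair of boundaries collapses and the
map shrinks by two entries, which is what the code below does.
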
@@ -64,19 +66,23 @@
 		return 0;
 	}
 
-	/* This incrementation allocates the first unused objectid. That
-	   is to say, the first entry on the objectid map is the first
-	   unused objectid, and by incrementing it we use it.  See below
-	   where we check to see if we eliminated a sequence of unused
-	   objectids.... */
+	/*
+	 * This incrementation allocates the first unused objectid. That
+	 * is to say, the first entry on the objectid map is the first
+	 * unused objectid, and by incrementing it we use it.  See below
+	 * where we check to see if we eliminated a sequence of unused
+	 * objectids....
+	 */
 	map[1] = cpu_to_le32(unused_objectid + 1);
 
-	/* Now we check to see if we eliminated the last remaining member of
-	   the first even sequence (and can eliminate the sequence by
-	   eliminating its last objectid from oids), and can collapse the
-	   first two odd sequences into one sequence.  If so, then the net
-	   result is to eliminate a pair of objectids from oids.  We do this
-	   by shifting the entire map to the left. */
+	/*
+	 * Now we check to see if we eliminated the last remaining member of
+	 * the first even sequence (and can eliminate the sequence by
+	 * eliminating its last objectid from oids), and can collapse the
+	 * first two odd sequences into one sequence.  If so, then the net
+	 * result is to eliminate a pair of objectids from oids.  We do this
+	 * by shifting the entire map to the left.
+	 */
 	if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
 		memmove(map + 1, map + 3,
 			(sb_oid_cursize(rs) - 3) * sizeof(__u32));
@@ -97,30 +103,33 @@
 	int i = 0;
 
 	BUG_ON(!th->t_trans_id);
-	//return;
+	/* return; */
 	check_objectid_map(s, map);
 
 	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
 	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
 
-	/* start at the beginning of the objectid map (i = 0) and go to
-	   the end of it (i = disk_sb->s_oid_cursize).  Linear search is
-	   what we use, though it is possible that binary search would be
-	   more efficient after performing lots of deletions (which is
-	   when oids is large.)  We only check even i's. */
+	/*
+	 * start at the beginning of the objectid map (i = 0) and go to
+	 * the end of it (i = disk_sb->s_oid_cursize).  Linear search is
+	 * what we use, though it is possible that binary search would be
+	 * more efficient after performing lots of deletions (which is
+	 * when oids is large).  We only check even i's.
+	 */
 	while (i < sb_oid_cursize(rs)) {
 		if (objectid_to_release == le32_to_cpu(map[i])) {
 			/* This incrementation unallocates the objectid. */
-			//map[i]++;
 			le32_add_cpu(&map[i], 1);
 
-			/* Did we unallocate the last member of an odd sequence, and can shrink oids? */
+			/*
+			 * Did we unallocate the last member of an
+			 * odd sequence, and can shrink oids?
+			 */
 			if (map[i] == map[i + 1]) {
 				/* shrink objectid map */
 				memmove(map + i, map + i + 2,
 					(sb_oid_cursize(rs) - i -
 					 2) * sizeof(__u32));
-				//disk_sb->s_oid_cursize -= 2;
 				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
 
 				RFALSE(sb_oid_cursize(rs) < 2 ||
@@ -135,14 +144,19 @@
 		    objectid_to_release < le32_to_cpu(map[i + 1])) {
 			/* size of objectid map is not changed */
 			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
-				//objectid_map[i+1]--;
 				le32_add_cpu(&map[i + 1], -1);
 				return;
 			}
 
-			/* JDM comparing two little-endian values for equality -- safe */
+			/*
+			 * JDM comparing two little-endian values for
+			 * equality -- safe
+			 */
+			/*
+			 * objectid map must be expanded, but
+			 * there is no space
+			 */
 			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
-				/* objectid map must be expanded, but there is no space */
 				PROC_INFO_INC(s, leaked_oid);
 				return;
 			}
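Extending the same hypothetical sketch, the three cases handled by the loop above (free the first member of a run, free the last member, or split a run in the middle) look roughly like this:

static void oid_release(unsigned int id)
{
	int i;

	for (i = 0; i < oid_cursize; i += 2) {
		if (id == oid_map[i]) {
			/* first member of a used run: bump the boundary */
			oid_map[i]++;
			if (oid_map[i] == oid_map[i + 1]) {
				/* the run is now empty: drop the pair */
				memmove(oid_map + i, oid_map + i + 2,
					(oid_cursize - i - 2) *
					sizeof(oid_map[0]));
				oid_cursize -= 2;
			}
			return;
		}
		if (oid_map[i] < id && id < oid_map[i + 1]) {
			if (id + 1 == oid_map[i + 1]) {
				/* last member of the run: shorten it */
				oid_map[i + 1]--;
				return;
			}
			if (oid_cursize == OID_MAP_MAX)
				return;	/* no room: leak the objectid */
			/* middle of a run: insert a new boundary pair */
			memmove(oid_map + i + 3, oid_map + i + 1,
				(oid_cursize - i - 1) * sizeof(oid_map[0]));
			oid_map[i + 1] = id;
			oid_map[i + 2] = id + 1;
			oid_cursize += 2;
			return;
		}
	}
}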
@@ -178,8 +192,9 @@
 	new_objectid_map = (__le32 *) (disk_sb + 1);
 
 	if (cur_size > new_size) {
-		/* mark everyone used that was listed as free at the end of the objectid
-		 ** map
+		/*
+	 * mark as used everything that was listed as free at
+	 * the end of the objectid map
 		 */
 		objectid_map[new_size - 1] = objectid_map[cur_size - 1];
 		set_sb_oid_cursize(disk_sb, new_size);
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 41f7881..c7425fd 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -172,18 +172,19 @@
 	return k;
 }
 
-/* debugging reiserfs we used to print out a lot of different
-   variables, like keys, item headers, buffer heads etc. Values of
-   most fields matter. So it took a long time just to write
-   appropriative printk. With this reiserfs_warning you can use format
-   specification for complex structures like you used to do with
-   printfs for integers, doubles and pointers. For instance, to print
-   out key structure you have to write just:
-   reiserfs_warning ("bad key %k", key);
-   instead of
-   printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
-           key->k_offset, key->k_uniqueness);
-*/
+/*
+ * When debugging reiserfs we used to print out a lot of different
+ * variables, like keys, item headers, buffer heads etc. Values of
+ * most fields matter, so it took a long time just to write an
+ * appropriate printk. With this reiserfs_warning you can use format
+ * specification for complex structures like you used to do with
+ * printfs for integers, doubles and pointers. For instance, to print
+ * out key structure you have to write just:
+ * reiserfs_warning ("bad key %k", key);
+ * instead of
+ * printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
+ *         key->k_offset, key->k_uniqueness);
+ */
 static DEFINE_SPINLOCK(error_lock);
 static void prepare_error_buf(const char *fmt, va_list args)
 {
@@ -243,15 +244,16 @@
 
 }
 
-/* in addition to usual conversion specifiers this accepts reiserfs
-   specific conversion specifiers:
-   %k to print little endian key,
-   %K to print cpu key,
-   %h to print item_head,
-   %t to print directory entry
-   %z to print block head (arg must be struct buffer_head *
-   %b to print buffer_head
-*/
+/*
+ * in addition to usual conversion specifiers this accepts reiserfs
+ * specific conversion specifiers:
+ * %k to print little endian key,
+ * %K to print cpu key,
+ * %h to print item_head,
+ * %t to print directory entry,
+ * %z to print block head (arg must be struct buffer_head *),
+ * %b to print buffer_head
+ */
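As a hedged illustration only (the error ids below are invented, not real maintainer ids), call sites using these specifiers read like:

	reiserfs_warning(sb, "jdm-20001", "bad key %k of item %h", key, ih);
	reiserfs_warning(sb, "jdm-20002", "block head %z of buffer %b", bh, bh);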
 
 #define do_reiserfs_warning(fmt)\
 {\
@@ -304,50 +306,52 @@
 #endif
 }
 
-/* The format:
-
-           maintainer-errorid: [function-name:] message
-
-    where errorid is unique to the maintainer and function-name is
-    optional, is recommended, so that anyone can easily find the bug
-    with a simple grep for the short to type string
-    maintainer-errorid.  Don't bother with reusing errorids, there are
-    lots of numbers out there.
-
-    Example:
-
-    reiserfs_panic(
-	p_sb, "reiser-29: reiserfs_new_blocknrs: "
-	"one of search_start or rn(%d) is equal to MAX_B_NUM,"
-	"which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
-	rn, bh
-    );
-
-    Regular panic()s sometimes clear the screen before the message can
-    be read, thus the need for the while loop.
-
-    Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it
-    pointless complexity):
-
-    panics in reiserfs.h have numbers from 1000 to 1999
-    super.c				        2000 to 2999
-    preserve.c (unused)			    3000 to 3999
-    bitmap.c				    4000 to 4999
-    stree.c				        5000 to 5999
-    prints.c				    6000 to 6999
-    namei.c                     7000 to 7999
-    fix_nodes.c                 8000 to 8999
-    dir.c                       9000 to 9999
-	lbalance.c					10000 to 10999
-	ibalance.c		11000 to 11999 not ready
-	do_balan.c		12000 to 12999
-	inode.c			13000 to 13999
-	file.c			14000 to 14999
-    objectid.c                       15000 - 15999
-    buffer.c                         16000 - 16999
-    symlink.c                        17000 - 17999
-
-   .  */
+/*
+ * The format:
+ *
+ *          maintainer-errorid: [function-name:] message
+ *
+ *   where errorid is unique to the maintainer and function-name is
+ *   optional, is recommended, so that anyone can easily find the bug
+ *   with a simple grep for the short to type string
+ *   maintainer-errorid.  Don't bother with reusing errorids, there are
+ *   lots of numbers out there.
+ *
+ *   Example:
+ *
+ *   reiserfs_panic(
+ *     p_sb, "reiser-29: reiserfs_new_blocknrs: "
+ *     "one of search_start or rn(%d) is equal to MAX_B_NUM,"
+ *     "which means that we are optimizing location based on the "
+ *     "bogus location of a temp buffer (%p).",
+ *     rn, bh
+ *   );
+ *
+ *   Regular panic()s sometimes clear the screen before the message can
+ *   be read, thus the need for the while loop.
+ *
+ *   Numbering scheme for panic used by Vladimir and Anatoly (Hans completely
+ *   ignores this scheme, and considers it pointless complexity):
+ *
+ *   panics in reiserfs.h have numbers from 1000 to 1999
+ *   super.c			2000 to 2999
+ *   preserve.c (unused)	3000 to 3999
+ *   bitmap.c			4000 to 4999
+ *   stree.c			5000 to 5999
+ *   prints.c			6000 to 6999
+ *   namei.c			7000 to 7999
+ *   fix_nodes.c		8000 to 8999
+ *   dir.c			9000 to 9999
+ *   lbalance.c			10000 to 10999
+ *   ibalance.c			11000 to 11999 not ready
+ *   do_balan.c			12000 to 12999
+ *   inode.c			13000 to 13999
+ *   file.c			14000 to 14999
+ *   objectid.c			15000 to 15999
+ *   buffer.c			16000 to 16999
+ *   symlink.c			17000 to 17999
+ */
 
 void __reiserfs_panic(struct super_block *sb, const char *id,
 		      const char *function, const char *fmt, ...)
@@ -411,9 +415,11 @@
 	reiserfs_abort_journal(sb, errno);
 }
 
-/* this prints internal nodes (4 keys/items in line) (dc_number,
-   dc_size)[k_dirid, k_objectid, k_offset, k_uniqueness](dc_number,
-   dc_size)...*/
+/*
+ * this prints internal nodes (4 keys/items in line) (dc_number,
+ * dc_size)[k_dirid, k_objectid, k_offset, k_uniqueness](dc_number,
+ * dc_size)...
+ */
 static int print_internal(struct buffer_head *bh, int first, int last)
 {
 	struct reiserfs_key *key;
@@ -543,9 +549,11 @@
 	printk("Block count %u\n", sb_block_count(rs));
 	printk("Blocksize %d\n", sb_blocksize(rs));
 	printk("Free blocks %u\n", sb_free_blocks(rs));
-	// FIXME: this would be confusing if
-	// someone stores reiserfs super block in some data block ;)
-//    skipped = (bh->b_blocknr * bh->b_size) / sb_blocksize(rs);
+	/*
+	 * FIXME: this would be confusing if
+	 * someone stores reiserfs super block in some data block ;)
+	 * skipped = (bh->b_blocknr * bh->b_size) / sb_blocksize(rs);
+	 */
 	skipped = bh->b_blocknr;
 	data_blocks = sb_block_count(rs) - skipped - 1 - sb_bmap_nr(rs) -
 	    (!is_reiserfs_jr(rs) ? sb_jp_journal_size(rs) +
@@ -581,8 +589,8 @@
 
 	return 0;
 }
-
-void print_block(struct buffer_head *bh, ...)	//int print_mode, int first, int last)
+/* ..., int print_mode, int first, int last */
+void print_block(struct buffer_head *bh, ...)
 {
 	va_list args;
 	int mode, first, last;
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 35bfde1..2195e7f 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -1,5 +1,6 @@
 /*
- * Copyright 1996, 1997, 1998 Hans Reiser, see reiserfs/README for licensing and copyright details
+ * Copyright 1996, 1997, 1998 Hans Reiser, see reiserfs/README for
+ * licensing and copyright details
  */
 
 #include <linux/reiserfs_fs.h>
@@ -23,52 +24,73 @@
 
 struct reiserfs_journal_list;
 
-/** bitmasks for i_flags field in reiserfs-specific part of inode */
+/* bitmasks for i_flags field in reiserfs-specific part of inode */
 typedef enum {
-    /** this says what format of key do all items (but stat data) of
-      an object have.  If this is set, that format is 3.6 otherwise
-      - 3.5 */
+	/*
+	 * this says what format of key all items (but stat data) of
+	 * an object have.  If this is set, that format is 3.6; otherwise 3.5
+	 */
 	i_item_key_version_mask = 0x0001,
-    /** If this is unset, object has 3.5 stat data, otherwise, it has
-      3.6 stat data with 64bit size, 32bit nlink etc. */
+
+	/*
+	 * If this is unset, object has 3.5 stat data, otherwise,
+	 * it has 3.6 stat data with 64bit size, 32bit nlink etc.
+	 */
 	i_stat_data_version_mask = 0x0002,
-    /** file might need tail packing on close */
+
+	/* file might need tail packing on close */
 	i_pack_on_close_mask = 0x0004,
-    /** don't pack tail of file */
+
+	/* don't pack tail of file */
 	i_nopack_mask = 0x0008,
-    /** If those is set, "safe link" was created for this file during
-      truncate or unlink. Safe link is used to avoid leakage of disk
-      space on crash with some files open, but unlinked. */
+
+	/*
+	 * If either of these is set, "safe link" was created for this
+	 * file during truncate or unlink. Safe link is used to avoid
+	 * leakage of disk space on crash with some files open, but unlinked.
+	 */
 	i_link_saved_unlink_mask = 0x0010,
 	i_link_saved_truncate_mask = 0x0020,
+
 	i_has_xattr_dir = 0x0040,
 	i_data_log = 0x0080,
 } reiserfs_inode_flags;
 
 struct reiserfs_inode_info {
 	__u32 i_key[4];		/* key is still 4 32 bit integers */
-    /** transient inode flags that are never stored on disk. Bitmasks
-      for this field are defined above. */
+
+	/*
+	 * transient inode flags that are never stored on disk. Bitmasks
+	 * for this field are defined above.
+	 */
 	__u32 i_flags;
 
-	__u32 i_first_direct_byte;	// offset of first byte stored in direct item.
+	/* offset of first byte stored in direct item. */
+	__u32 i_first_direct_byte;
 
 	/* copy of persistent inode flags read from sd_attrs. */
 	__u32 i_attrs;
 
-	int i_prealloc_block;	/* first unused block of a sequence of unused blocks */
+	/* first unused block of a sequence of unused blocks */
+	int i_prealloc_block;
 	int i_prealloc_count;	/* length of that sequence */
-	struct list_head i_prealloc_list;	/* per-transaction list of inodes which
-						 * have preallocated blocks */
 
-	unsigned new_packing_locality:1;	/* new_packig_locality is created; new blocks
-						 * for the contents of this directory should be
-						 * displaced */
+	/* per-transaction list of inodes which have preallocated blocks */
+	struct list_head i_prealloc_list;
 
-	/* we use these for fsync or O_SYNC to decide which transaction
-	 ** needs to be committed in order for this inode to be properly
-	 ** flushed */
+	/*
+	 * new_packing_locality is created; new blocks for the contents
+	 * of this directory should be displaced
+	 */
+	unsigned new_packing_locality:1;
+
+	/*
+	 * we use these for fsync or O_SYNC to decide which transaction
+	 * needs to be committed in order for this inode to be properly
+	 * flushed
+	 */
 	unsigned int i_trans_id;
+
 	struct reiserfs_journal_list *i_jl;
 	atomic_t openers;
 	struct mutex tailpack;
@@ -82,9 +104,10 @@
 	reiserfs_attrs_cleared = 0x00000001,
 } reiserfs_super_block_flags;
 
-/* struct reiserfs_super_block accessors/mutators
- * since this is a disk structure, it will always be in
- * little endian format. */
+/*
+ * struct reiserfs_super_block accessors/mutators.  Since this is a
+ * disk structure, it will always be in little endian format.
+ */
 #define sb_block_count(sbp)         (le32_to_cpu((sbp)->s_v1.s_block_count))
 #define set_sb_block_count(sbp,v)   ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
 #define sb_free_blocks(sbp)         (le32_to_cpu((sbp)->s_v1.s_free_blocks))
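As an example of how these are meant to be used, a helper that charges one newly allocated block against the in-core copy of the disk super block might look like this (a sketch; it assumes the matching set_sb_free_blocks() mutator defined alongside the above):

static void account_allocated_block(struct reiserfs_super_block *rs)
{
	/* the accessors convert to/from little endian on every access */
	set_sb_free_blocks(rs, sb_free_blocks(rs) - 1);
}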
@@ -152,48 +175,61 @@
 
 /* LOGGING -- */
 
-/* These all interelate for performance.
-**
-** If the journal block count is smaller than n transactions, you lose speed.
-** I don't know what n is yet, I'm guessing 8-16.
-**
-** typical transaction size depends on the application, how often fsync is
-** called, and how many metadata blocks you dirty in a 30 second period.
-** The more small files (<16k) you use, the larger your transactions will
-** be.
-**
-** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
-** to wrap, which slows things down.  If you need high speed meta data updates, the journal should be big enough
-** to prevent wrapping before dirty meta blocks get to disk.
-**
-** If the batch max is smaller than the transaction max, you'll waste space at the end of the journal
-** because journal_end sets the next transaction to start at 0 if the next transaction has any chance of wrapping.
-**
-** The large the batch max age, the better the speed, and the more meta data changes you'll lose after a crash.
-**
-*/
+/*
+ * These all interrelate for performance.
+ *
+ * If the journal block count is smaller than n transactions, you lose speed.
+ * I don't know what n is yet, I'm guessing 8-16.
+ *
+ * typical transaction size depends on the application, how often fsync is
+ * called, and how many metadata blocks you dirty in a 30 second period.
+ * The more small files (<16k) you use, the larger your transactions will
+ * be.
+ *
+ * If your journal fills faster than dirty buffers get flushed to disk, it
+ * must flush them before allowing the journal to wrap, which slows things
+ * down.  If you need high speed metadata updates, the journal should be
+ * big enough to prevent wrapping before dirty meta blocks get to disk.
+ *
+ * If the batch max is smaller than the transaction max, you'll waste space
+ * at the end of the journal because journal_end sets the next transaction
+ * to start at 0 if the next transaction has any chance of wrapping.
+ *
+ * The larger the batch max age, the better the speed, and the more
+ * metadata changes you'll lose after a crash.
+ */
 
 /* don't mess with these for a while */
-				/* we have a node size define somewhere in reiserfs_fs.h. -Hans */
+/* we have a node size define somewhere in reiserfs_fs.h. -Hans */
 #define JOURNAL_BLOCK_SIZE  4096	/* BUG gotta get rid of this */
 #define JOURNAL_MAX_CNODE   1500	/* max cnodes to allocate. */
 #define JOURNAL_HASH_SIZE 8192
-#define JOURNAL_NUM_BITMAPS 5	/* number of copies of the bitmaps to have floating.  Must be >= 2 */
 
-/* One of these for every block in every transaction
-** Each one is in two hash tables.  First, a hash of the current transaction, and after journal_end, a
-** hash of all the in memory transactions.
-** next and prev are used by the current transaction (journal_hash).
-** hnext and hprev are used by journal_list_hash.  If a block is in more than one transaction, the journal_list_hash
-** links it in multiple times.  This allows flush_journal_list to remove just the cnode belonging
-** to a given transaction.
-*/
+/* number of copies of the bitmaps to have floating.  Must be >= 2 */
+#define JOURNAL_NUM_BITMAPS 5
+
+/*
+ * One of these for every block in every transaction.
+ * Each one is in two hash tables.  First, a hash of the current transaction,
+ * and after journal_end, a hash of all the in memory transactions.
+ * next and prev are used by the current transaction (journal_hash).
+ * hnext and hprev are used by journal_list_hash.  If a block is in more
+ * than one transaction, the journal_list_hash links it in multiple times.
+ * This allows flush_journal_list to remove just the cnode belonging to a
+ * given transaction.
+ */
 struct reiserfs_journal_cnode {
 	struct buffer_head *bh;	/* real buffer head */
 	struct super_block *sb;	/* dev of real buffer head */
-	__u32 blocknr;		/* block number of real buffer head, == 0 when buffer on disk */
+
+	/* block number of real buffer head, == 0 when buffer on disk */
+	__u32 blocknr;
+
 	unsigned long state;
-	struct reiserfs_journal_list *jlist;	/* journal list this cnode lives in */
+
+	/* journal list this cnode lives in */
+	struct reiserfs_journal_list *jlist;
+
 	struct reiserfs_journal_cnode *next;	/* next in transaction list */
 	struct reiserfs_journal_cnode *prev;	/* prev in transaction list */
 	struct reiserfs_journal_cnode *hprev;	/* prev in hash list */
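To make the linkage concrete, here is a sketch of head insertion into one of the chains (hypothetical helper; keyed by block number, as the real tables are, and using next/prev -- the journal_list_hash variant would use hnext/hprev instead):

static void cnode_hash_insert(struct reiserfs_journal_cnode **table,
			      struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode **head;

	head = table + (cn->blocknr % JOURNAL_HASH_SIZE);
	cn->prev = NULL;
	cn->next = *head;
	if (*head)
		(*head)->prev = cn;
	*head = cn;
}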
@@ -212,18 +248,22 @@
 };
 
 /*
-** one of these for each transaction.  The most important part here is the j_realblock.
-** this list of cnodes is used to hash all the blocks in all the commits, to mark all the
-** real buffer heads dirty once all the commits hit the disk,
-** and to make sure every real block in a transaction is on disk before allowing the log area
-** to be overwritten */
+ * one of these for each transaction.  The most important part here is the
+ * j_realblock.  This list of cnodes is used to hash all the blocks in all
+ * the commits, to mark all the real buffer heads dirty once all the commits
+ * hit the disk, and to make sure every real block in a transaction is on
+ * disk before allowing the log area to be overwritten
+ */
 struct reiserfs_journal_list {
 	unsigned long j_start;
 	unsigned long j_state;
 	unsigned long j_len;
 	atomic_t j_nonzerolen;
 	atomic_t j_commit_left;
-	atomic_t j_older_commits_done;	/* all commits older than this on disk */
+
+	/* all commits older than this on disk */
+	atomic_t j_older_commits_done;
+
 	struct mutex j_commit_mutex;
 	unsigned int j_trans_id;
 	time_t j_timestamp;
@@ -234,11 +274,15 @@
 	/* time ordered list of all active transactions */
 	struct list_head j_list;
 
-	/* time ordered list of all transactions we haven't tried to flush yet */
+	/*
+	 * time ordered list of all transactions we haven't tried
+	 * to flush yet
+	 */
 	struct list_head j_working_list;
 
 	/* list of tail conversion targets in need of flush before commit */
 	struct list_head j_tail_bh_list;
+
 	/* list of data=ordered buffers in need of flush before commit */
 	struct list_head j_bh_list;
 	int j_refcount;
@@ -246,46 +290,83 @@
 
 struct reiserfs_journal {
 	struct buffer_head **j_ap_blocks;	/* journal blocks on disk */
-	struct reiserfs_journal_cnode *j_last;	/* newest journal block */
-	struct reiserfs_journal_cnode *j_first;	/*  oldest journal block.  start here for traverse */
+	/* newest journal block */
+	struct reiserfs_journal_cnode *j_last;
+
+	/* oldest journal block.  start here for traverse */
+	struct reiserfs_journal_cnode *j_first;
 
 	struct block_device *j_dev_bd;
 	fmode_t j_dev_mode;
-	int j_1st_reserved_block;	/* first block on s_dev of reserved area journal */
+
+	/* first block on s_dev of reserved area journal */
+	int j_1st_reserved_block;
 
 	unsigned long j_state;
 	unsigned int j_trans_id;
 	unsigned long j_mount_id;
-	unsigned long j_start;	/* start of current waiting commit (index into j_ap_blocks) */
+
+	/* start of current waiting commit (index into j_ap_blocks) */
+	unsigned long j_start;
 	unsigned long j_len;	/* length of current waiting commit */
-	unsigned long j_len_alloc;	/* number of buffers requested by journal_begin() */
+
+	/* number of buffers requested by journal_begin() */
+	unsigned long j_len_alloc;
+
 	atomic_t j_wcount;	/* count of writers for current commit */
-	unsigned long j_bcount;	/* batch count. allows turning X transactions into 1 */
-	unsigned long j_first_unflushed_offset;	/* first unflushed transactions offset */
-	unsigned j_last_flush_trans_id;	/* last fully flushed journal timestamp */
+
+	/* batch count. allows turning X transactions into 1 */
+	unsigned long j_bcount;
+
+	/* first unflushed transaction's offset */
+	unsigned long j_first_unflushed_offset;
+
+	/* last fully flushed journal timestamp */
+	unsigned j_last_flush_trans_id;
+
 	struct buffer_head *j_header_bh;
 
 	time_t j_trans_start_time;	/* time this transaction started */
 	struct mutex j_mutex;
 	struct mutex j_flush_mutex;
-	wait_queue_head_t j_join_wait;	/* wait for current transaction to finish before starting new one */
-	atomic_t j_jlock;	/* lock for j_join_wait */
+
+	/* wait for current transaction to finish before starting new one */
+	wait_queue_head_t j_join_wait;
+
+	atomic_t j_jlock;		/* lock for j_join_wait */
 	int j_list_bitmap_index;	/* number of next list bitmap to use */
-	int j_must_wait;	/* no more journal begins allowed. MUST sleep on j_join_wait */
-	int j_next_full_flush;	/* next journal_end will flush all journal list */
-	int j_next_async_flush;	/* next journal_end will flush all async commits */
+
+	/* no more journal begins allowed. MUST sleep on j_join_wait */
+	int j_must_wait;
+
+	/* next journal_end will flush all journal list */
+	int j_next_full_flush;
+
+	/* next journal_end will flush all async commits */
+	int j_next_async_flush;
 
 	int j_cnode_used;	/* number of cnodes on the used list */
 	int j_cnode_free;	/* number of cnodes on the free list */
 
-	unsigned int j_trans_max;	/* max number of blocks in a transaction.  */
-	unsigned int j_max_batch;	/* max number of blocks to batch into a trans */
-	unsigned int j_max_commit_age;	/* in seconds, how old can an async commit be */
-	unsigned int j_max_trans_age;	/* in seconds, how old can a transaction be */
-	unsigned int j_default_max_commit_age;	/* the default for the max commit age */
+	/* max number of blocks in a transaction */
+	unsigned int j_trans_max;
+
+	/* max number of blocks to batch into a trans */
+	unsigned int j_max_batch;
+
+	/* in seconds, how old can an async commit be */
+	unsigned int j_max_commit_age;
+
+	/* in seconds, how old can a transaction be */
+	unsigned int j_max_trans_age;
+
+	/* the default for the max commit age */
+	unsigned int j_default_max_commit_age;
 
 	struct reiserfs_journal_cnode *j_cnode_free_list;
-	struct reiserfs_journal_cnode *j_cnode_free_orig;	/* orig pointer returned from vmalloc */
+
+	/* orig pointer returned from vmalloc */
+	struct reiserfs_journal_cnode *j_cnode_free_orig;
 
 	struct reiserfs_journal_list *j_current_jl;
 	int j_free_bitmap_nodes;
@@ -306,14 +387,21 @@
 
 	/* list of all active transactions */
 	struct list_head j_journal_list;
+
 	/* lists that haven't been touched by writeback attempts */
 	struct list_head j_working_list;
 
-	struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS];	/* array of bitmaps to record the deleted blocks */
-	struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE];	/* hash table for real buffer heads in current trans */
-	struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE];	/* hash table for all the real buffer heads in all
-										   the transactions */
-	struct list_head j_prealloc_list;	/* list of inodes which have preallocated blocks */
+	/* hash table for real buffer heads in current trans */
+	struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE];
+
+	/* hash table for all the real buffer heads in all the transactions */
+	struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE];
+
+	/* array of bitmaps to record the deleted blocks */
+	struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS];
+
+	/* list of inodes which have preallocated blocks */
+	struct list_head j_prealloc_list;
 	int j_persistent_trans;
 	unsigned long j_max_trans_size;
 	unsigned long j_max_batch_size;
@@ -328,11 +416,12 @@
 
 enum journal_state_bits {
 	J_WRITERS_BLOCKED = 1,	/* set when new writers not allowed */
-	J_WRITERS_QUEUED,	/* set when log is full due to too many writers */
-	J_ABORTED,		/* set when log is aborted */
+	J_WRITERS_QUEUED,    /* set when log is full due to too many writers */
+	J_ABORTED,           /* set when log is aborted */
 };
 
-#define JOURNAL_DESC_MAGIC "ReIsErLB"	/* ick.  magic string to find desc blocks in the journal */
+/* ick.  magic string to find desc blocks in the journal */
+#define JOURNAL_DESC_MAGIC "ReIsErLB"
 
 typedef __u32(*hashf_t) (const signed char *, int);
 
@@ -364,7 +453,10 @@
 	stat_cnt_t leaked_oid;
 	stat_cnt_t leaves_removable;
 
-	/* balances per level. Use explicit 5 as MAX_HEIGHT is not visible yet. */
+	/*
+	 * balances per level.
+	 * Use explicit 5 as MAX_HEIGHT is not visible yet.
+	 */
 	stat_cnt_t balance_at[5];	/* XXX */
 	/* sbk == search_by_key */
 	stat_cnt_t sbk_read_at[5];	/* XXX */
@@ -416,18 +508,24 @@
 
 /* reiserfs union of in-core super block data */
 struct reiserfs_sb_info {
-	struct buffer_head *s_sbh;	/* Buffer containing the super block */
-	/* both the comment and the choice of
-	   name are unclear for s_rs -Hans */
-	struct reiserfs_super_block *s_rs;	/* Pointer to the super block in the buffer */
+	/* Buffer containing the super block */
+	struct buffer_head *s_sbh;
+
+	/* Pointer to the on-disk super block in the buffer */
+	struct reiserfs_super_block *s_rs;
 	struct reiserfs_bitmap_info *s_ap_bitmap;
-	struct reiserfs_journal *s_journal;	/* pointer to journal information */
+
+	/* pointer to journal information */
+	struct reiserfs_journal *s_journal;
+
 	unsigned short s_mount_state;	/* reiserfs state (valid, invalid) */
 
 	/* Serialize writers access, replace the old bkl */
 	struct mutex lock;
+
 	/* Owner of the lock (can be recursive) */
 	struct task_struct *lock_owner;
+
 	/* Depth of the lock, start from -1 like the bkl */
 	int lock_depth;
 
@@ -435,30 +533,50 @@
 
 	/* Comment? -Hans */
 	void (*end_io_handler) (struct buffer_head *, int);
-	hashf_t s_hash_function;	/* pointer to function which is used
-					   to sort names in directory. Set on
-					   mount */
-	unsigned long s_mount_opt;	/* reiserfs's mount options are set
-					   here (currently - NOTAIL, NOLOG,
-					   REPLAYONLY) */
 
-	struct {		/* This is a structure that describes block allocator options */
-		unsigned long bits;	/* Bitfield for enable/disable kind of options */
-		unsigned long large_file_size;	/* size started from which we consider file to be a large one(in blocks) */
+	/*
+	 * pointer to function which is used to sort names in directory.
+	 * Set on mount
+	 */
+	hashf_t s_hash_function;
+
+	/* reiserfs's mount options are set here */
+	unsigned long s_mount_opt;
+
+	/* This is a structure that describes block allocator options */
+	struct {
+		/* Bitfield for enable/disable kind of options */
+		unsigned long bits;
+
+		/*
+		 * size started from which we consider file
+		 * to be a large one (in blocks)
+		 */
+		unsigned long large_file_size;
+
 		int border;	/* percentage of disk, border takes */
-		int preallocmin;	/* Minimal file size (in blocks) starting from which we do preallocations */
-		int preallocsize;	/* Number of blocks we try to prealloc when file
-					   reaches preallocmin size (in blocks) or
-					   prealloc_list is empty. */
+
+		/*
+		 * Minimal file size (in blocks) starting
+		 * from which we do preallocations
+		 */
+		int preallocmin;
+
+		/*
+		 * Number of blocks we try to prealloc when file
+		 * reaches preallocmin size (in blocks) or prealloc_list
+		 * is empty.
+		 */
+		int preallocsize;
 	} s_alloc_options;
 
 	/* Comment? -Hans */
 	wait_queue_head_t s_wait;
-	/* To be obsoleted soon by per buffer seals.. -Hans */
-	atomic_t s_generation_counter;	// increased by one every time the
-	// tree gets re-balanced
-	unsigned long s_properties;	/* File system properties. Currently holds
-					   on-disk FS format */
+	/* increased by one every time the tree gets re-balanced */
+	atomic_t s_generation_counter;
+
+	/* File system properties. Currently holds on-disk FS format */
+	unsigned long s_properties;
 
 	/* session statistics */
 	int s_disk_reads;
@@ -471,14 +589,23 @@
 	int s_bmaps_without_search;
 	int s_direct2indirect;
 	int s_indirect2direct;
-	/* set up when it's ok for reiserfs_read_inode2() to read from
-	   disk inode with nlink==0. Currently this is only used during
-	   finish_unfinished() processing at mount time */
+
+	/*
+	 * set up when it's ok for reiserfs_read_inode2() to read from
+	 * disk inode with nlink==0. Currently this is only used during
+	 * finish_unfinished() processing at mount time
+	 */
 	int s_is_unlinked_ok;
+
 	reiserfs_proc_info_data_t s_proc_info_data;
 	struct proc_dir_entry *procdir;
-	int reserved_blocks;	/* amount of blocks reserved for further allocations */
-	spinlock_t bitmap_lock;	/* this lock on now only used to protect reserved_blocks variable */
+
+	/* number of blocks reserved for further allocations */
+	int reserved_blocks;
+
+	/* this lock is now only used to protect the reserved_blocks variable */
+	spinlock_t bitmap_lock;
 	struct dentry *priv_root;	/* root of /.reiserfs_priv */
 	struct dentry *xattr_root;	/* root of /.reiserfs_priv/xattrs */
 	int j_errno;
@@ -494,14 +621,13 @@
 	char *s_jdev;		/* Stored jdev for mount option showing */
 #ifdef CONFIG_REISERFS_CHECK
 
-	struct tree_balance *cur_tb;	/*
-					 * Detects whether more than one
-					 * copy of tb exists per superblock
-					 * as a means of checking whether
-					 * do_balance is executing concurrently
-					 * against another tree reader/writer
-					 * on a same mount point.
-					 */
+	/*
+	 * Detects whether more than one copy of tb exists per superblock
+	 * as a means of checking whether do_balance is executing
+	 * concurrently against another tree reader/writer on a same
+	 * mount point.
+	 */
+	struct tree_balance *cur_tb;
 #endif
 };
 
@@ -510,25 +636,36 @@
 #define REISERFS_3_6 1
 #define REISERFS_OLD_FORMAT 2
 
-enum reiserfs_mount_options {
 /* Mount options */
-	REISERFS_LARGETAIL,	/* large tails will be created in a session */
-	REISERFS_SMALLTAIL,	/* small (for files less than block size) tails will be created in a session */
-	REPLAYONLY,		/* replay journal and return 0. Use by fsck */
-	REISERFS_CONVERT,	/* -o conv: causes conversion of old
-				   format super block to the new
-				   format. If not specified - old
-				   partition will be dealt with in a
-				   manner of 3.5.x */
+enum reiserfs_mount_options {
+	/* large tails will be created in a session */
+	REISERFS_LARGETAIL,
+	/*
+	 * small (for files less than block size) tails will
+	 * be created in a session
+	 */
+	REISERFS_SMALLTAIL,
 
-/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
-** reiserfs disks from 3.5.19 or earlier.  99% of the time, this option
-** is not required.  If the normal autodection code can't determine which
-** hash to use (because both hashes had the same value for a file)
-** use this option to force a specific hash.  It won't allow you to override
-** the existing hash on the FS, so if you have a tea hash disk, and mount
-** with -o hash=rupasov, the mount will fail.
-*/
+	/* replay journal and return 0. Used by fsck */
+	REPLAYONLY,
+
+	/*
+	 * -o conv: causes conversion of old format super block to the
+	 * new format. If not specified, the old partition will be dealt
+	 * with in the manner of 3.5.x
+	 */
+	REISERFS_CONVERT,
+
+	/*
+	 * -o hash={tea, rupasov, r5, detect} is meant for properly mounting
+	 * reiserfs disks from 3.5.19 or earlier.  99% of the time, this
+	 * option is not required.  If the normal autodetection code can't
+	 * determine which hash to use (because both hashes had the same
+	 * value for a file) use this option to force a specific hash.
+	 * It won't allow you to override the existing hash on the FS, so
+	 * if you have a tea hash disk, and mount with -o hash=rupasov,
+	 * the mount will fail.
+	 */
 	FORCE_TEA_HASH,		/* try to force tea hash on mount */
 	FORCE_RUPASOV_HASH,	/* try to force rupasov hash on mount */
 	FORCE_R5_HASH,		/* try to force rupasov hash on mount */
@@ -538,9 +675,11 @@
 	REISERFS_DATA_ORDERED,
 	REISERFS_DATA_WRITEBACK,
 
-/* used for testing experimental features, makes benchmarking new
-   features with and without more convenient, should never be used by
-   users in any code shipped to users (ideally) */
+	/*
+	 * used for testing experimental features, makes benchmarking new
+	 * features with and without them more convenient; should never be
+	 * used by users in any code shipped to users (ideally)
+	 */
 
 	REISERFS_NO_BORDER,
 	REISERFS_NO_UNHASHED_RELOCATION,
@@ -707,28 +846,28 @@
 
 struct fid;
 
-/* in reading the #defines, it may help to understand that they employ
-   the following abbreviations:
-
-   B = Buffer
-   I = Item header
-   H = Height within the tree (should be changed to LEV)
-   N = Number of the item in the node
-   STAT = stat data
-   DEH = Directory Entry Header
-   EC = Entry Count
-   E = Entry number
-   UL = Unsigned Long
-   BLKH = BLocK Header
-   UNFM = UNForMatted node
-   DC = Disk Child
-   P = Path
-
-   These #defines are named by concatenating these abbreviations,
-   where first comes the arguments, and last comes the return value,
-   of the macro.
-
-*/
+/*
+ *  in reading the #defines, it may help to understand that they employ
+ *  the following abbreviations:
+ *
+ *  B = Buffer
+ *  I = Item header
+ *  H = Height within the tree (should be changed to LEV)
+ *  N = Number of the item in the node
+ *  STAT = stat data
+ *  DEH = Directory Entry Header
+ *  EC = Entry Count
+ *  E = Entry number
+ *  UL = Unsigned Long
+ *  BLKH = BLocK Header
+ *  UNFM = UNForMatted node
+ *  DC = Disk Child
+ *  P = Path
+ *
+ *  These #defines are named by concatenating these abbreviations,
+ *  where first comes the arguments, and last comes the return value,
+ *  of the macro.
+ */
 
 #define USE_INODE_GENERATION_COUNTER
 
@@ -739,14 +878,17 @@
 /* n must be power of 2 */
 #define _ROUND_UP(x,n) (((x)+(n)-1u) & ~((n)-1u))
 
-// to be ok for alpha and others we have to align structures to 8 byte
-// boundary.
-// FIXME: do not change 4 by anything else: there is code which relies on that
+/*
+ * to be ok for alpha and others we have to align structures to 8 byte
+ * boundary.
+ * FIXME: do not change 4 by anything else: there is code which relies on that
+ */
 #define ROUND_UP(x) _ROUND_UP(x,8LL)
 
-/* debug levels.  Right now, CONFIG_REISERFS_CHECK means print all debug
-** messages.
-*/
+/*
+ * debug levels.  Right now, CONFIG_REISERFS_CHECK means print all debug
+ * messages.
+ */
 #define REISERFS_DEBUG_CODE 5	/* extra messages to help find/debug errors */
 
 void __reiserfs_warning(struct super_block *s, const char *id,
@@ -755,7 +897,7 @@
 	 __reiserfs_warning(s, id, __func__, fmt, ##args)
 /* assertions handling */
 
-/** always check a condition and panic if it's false. */
+/* always check a condition and panic if it's false. */
 #define __RASSERT(cond, scond, format, args...)			\
 do {									\
 	if (!(cond))							\
@@ -778,35 +920,48 @@
  * Disk Data Structures
  */
 
-/***************************************************************************/
-/*                             SUPER BLOCK                                 */
-/***************************************************************************/
+/***************************************************************************
+ *                             SUPER BLOCK                                 *
+ ***************************************************************************/
 
 /*
- * Structure of super block on disk, a version of which in RAM is often accessed as REISERFS_SB(s)->s_rs
- * the version in RAM is part of a larger structure containing fields never written to disk.
+ * Structure of super block on disk, a version of which in RAM is often
+ * accessed as REISERFS_SB(s)->s_rs. The version in RAM is part of a larger
+ * structure containing fields never written to disk.
  */
-#define UNSET_HASH 0		// read_super will guess about, what hash names
-		     // in directories were sorted with
+#define UNSET_HASH 0	/* Detect hash on disk */
 #define TEA_HASH  1
 #define YURA_HASH 2
 #define R5_HASH   3
 #define DEFAULT_HASH R5_HASH
 
 struct journal_params {
-	__le32 jp_journal_1st_block;	/* where does journal start from on its
-					 * device */
-	__le32 jp_journal_dev;	/* journal device st_rdev */
-	__le32 jp_journal_size;	/* size of the journal */
-	__le32 jp_journal_trans_max;	/* max number of blocks in a transaction. */
-	__le32 jp_journal_magic;	/* random value made on fs creation (this
-					 * was sb_journal_block_count) */
-	__le32 jp_journal_max_batch;	/* max number of blocks to batch into a
-					 * trans */
-	__le32 jp_journal_max_commit_age;	/* in seconds, how old can an async
-						 * commit be */
-	__le32 jp_journal_max_trans_age;	/* in seconds, how old can a transaction
-						 * be */
+	/* where does journal start from on its device */
+	__le32 jp_journal_1st_block;
+
+	/* journal device st_rdev */
+	__le32 jp_journal_dev;
+
+	/* size of the journal */
+	__le32 jp_journal_size;
+
+	/* max number of blocks in a transaction. */
+	__le32 jp_journal_trans_max;
+
+	/*
+	 * random value made on fs creation
+	 * (this was sb_journal_block_count)
+	 */
+	__le32 jp_journal_magic;
+
+	/* max number of blocks to batch into a trans */
+	__le32 jp_journal_max_batch;
+
+	/* in seconds, how old can an async commit be */
+	__le32 jp_journal_max_commit_age;
+
+	/* in seconds, how old can a transaction be */
+	__le32 jp_journal_max_trans_age;
 };
 
 /* this is the super from 3.5.X, where X >= 10 */
@@ -816,26 +971,48 @@
 	__le32 s_root_block;	/* root block number    */
 	struct journal_params s_journal;
 	__le16 s_blocksize;	/* block size */
-	__le16 s_oid_maxsize;	/* max size of object id array, see
-				 * get_objectid() commentary  */
+
+	/* max size of object id array, see get_objectid() commentary */
+	__le16 s_oid_maxsize;
 	__le16 s_oid_cursize;	/* current size of object id array */
-	__le16 s_umount_state;	/* this is set to 1 when filesystem was
-				 * umounted, to 2 - when not */
-	char s_magic[10];	/* reiserfs magic string indicates that
-				 * file system is reiserfs:
-				 * "ReIsErFs" or "ReIsEr2Fs" or "ReIsEr3Fs" */
-	__le16 s_fs_state;	/* it is set to used by fsck to mark which
-				 * phase of rebuilding is done */
-	__le32 s_hash_function_code;	/* indicate, what hash function is being use
-					 * to sort names in a directory*/
+
+	/* this is set to 1 when filesystem was umounted, to 2 when not */
+	__le16 s_umount_state;
+
+	/*
+	 * reiserfs magic string indicates that file system is reiserfs:
+	 * "ReIsErFs" or "ReIsEr2Fs" or "ReIsEr3Fs"
+	 */
+	char s_magic[10];
+
+	/* set by fsck to mark which phase of rebuilding is done */
+	__le16 s_fs_state;
+
+	/*
+	 * indicates what hash function is being used
+	 * to sort names in a directory
+	 */
+	__le32 s_hash_function_code;
 	__le16 s_tree_height;	/* height of disk tree */
-	__le16 s_bmap_nr;	/* amount of bitmap blocks needed to address
-				 * each block of file system */
-	__le16 s_version;	/* this field is only reliable on filesystem
-				 * with non-standard journal */
-	__le16 s_reserved_for_journal;	/* size in blocks of journal area on main
-					 * device, we need to keep after
-					 * making fs with non-standard journal */
+
+	/*
+	 * number of bitmap blocks needed to address
+	 * each block of file system
+	 */
+	__le16 s_bmap_nr;
+
+	/*
+	 * this field is only reliable on filesystem with non-standard journal
+	 */
+	__le16 s_version;
+
+	/*
+	 * size in blocks of journal area on main device, we need to
+	 * keep after making fs with non-standard journal
+	 */
+	__le16 s_reserved_for_journal;
 } __attribute__ ((__packed__));
 
 #define SB_SIZE_V1 (sizeof(struct reiserfs_super_block_v1))
@@ -844,17 +1021,21 @@
 struct reiserfs_super_block {
 	struct reiserfs_super_block_v1 s_v1;
 	__le32 s_inode_generation;
-	__le32 s_flags;		/* Right now used only by inode-attributes, if enabled */
+
+	/* Right now used only by inode-attributes, if enabled */
+	__le32 s_flags;
+
 	unsigned char s_uuid[16];	/* filesystem unique identifier */
 	unsigned char s_label[16];	/* filesystem volume label */
 	__le16 s_mnt_count;		/* Count of mounts since last fsck */
 	__le16 s_max_mnt_count;		/* Maximum mounts before check */
 	__le32 s_lastcheck;		/* Timestamp of last fsck */
 	__le32 s_check_interval;	/* Interval between checks */
-	char s_unused[76];	/* zero filled by mkreiserfs and
-				 * reiserfs_convert_objectid_map_v1()
-				 * so any additions must be updated
-				 * there as well. */
+
+	/*
+	 * zero filled by mkreiserfs and reiserfs_convert_objectid_map_v1()
+	 * so any additions must be updated there as well.
+	 */
+	char s_unused[76];
 } __attribute__ ((__packed__));
 
 #define SB_SIZE (sizeof(struct reiserfs_super_block))
@@ -862,7 +1043,7 @@
 #define REISERFS_VERSION_1 0
 #define REISERFS_VERSION_2 2
 
-// on-disk super block fields converted to cpu form
+/* on-disk super block fields converted to cpu form */
 #define SB_DISK_SUPER_BLOCK(s) (REISERFS_SB(s)->s_rs)
 #define SB_V1_DISK_SUPER_BLOCK(s) (&(SB_DISK_SUPER_BLOCK(s)->s_v1))
 #define SB_BLOCKSIZE(s) \
@@ -917,11 +1098,13 @@
 int is_reiserfs_3_6(struct reiserfs_super_block *rs);
 int is_reiserfs_jr(struct reiserfs_super_block *rs);
 
-/* ReiserFS leaves the first 64k unused, so that partition labels have
-   enough space.  If someone wants to write a fancy bootloader that
-   needs more than 64k, let us know, and this will be increased in size.
-   This number must be larger than than the largest block size on any
-   platform, or code will break.  -Hans */
+/*
+ * ReiserFS leaves the first 64k unused, so that partition labels have
+ * enough space.  If someone wants to write a fancy bootloader that
+ * needs more than 64k, let us know, and this will be increased in size.
+ * This number must be larger than the largest block size on any
+ * platform, or code will break.  -Hans
+ */
 #define REISERFS_DISK_OFFSET_IN_BYTES (64 * 1024)
 #define REISERFS_FIRST_BLOCK unused_define
 #define REISERFS_JOURNAL_OFFSET_IN_BYTES REISERFS_DISK_OFFSET_IN_BYTES
@@ -946,8 +1129,7 @@
 	unsigned short unfm_freespace;
 };
 
-/* there are two formats of keys: 3.5 and 3.6
- */
+/* there are two formats of keys: 3.5 and 3.6 */
 #define KEY_FORMAT_3_5 0
 #define KEY_FORMAT_3_6 1
 
@@ -965,8 +1147,10 @@
 	return sb->s_fs_info;
 }
 
-/* Don't trust REISERFS_SB(sb)->s_bmap_nr, it's a u16
- * which overflows on large file systems. */
+/*
+ * Don't trust REISERFS_SB(sb)->s_bmap_nr, it's a u16
+ * which overflows on large file systems.
+ */
 static inline __u32 reiserfs_bmap_count(struct super_block *sb)
 {
 	return (SB_BLOCK_COUNT(sb) - 1) / (sb->s_blocksize * 8) + 1;
@@ -977,8 +1161,10 @@
 	return bmap_nr > ((1LL << 16) - 1);
 }
 
-/** this says about version of key of all items (but stat data) the
-    object consists of */
+/*
+ * this tells the key version of all items (but stat data) the
+ * object consists of
+ */
 #define get_inode_item_key_version( inode )                                    \
     ((REISERFS_I(inode)->i_flags & i_item_key_version_mask) ? KEY_FORMAT_3_6 : KEY_FORMAT_3_5)
 
@@ -997,16 +1183,18 @@
             else                                                               \
                 REISERFS_I(inode)->i_flags &= ~i_stat_data_version_mask; })
 
-/* This is an aggressive tail suppression policy, I am hoping it
-   improves our benchmarks. The principle behind it is that percentage
-   space saving is what matters, not absolute space saving.  This is
-   non-intuitive, but it helps to understand it if you consider that the
-   cost to access 4 blocks is not much more than the cost to access 1
-   block, if you have to do a seek and rotate.  A tail risks a
-   non-linear disk access that is significant as a percentage of total
-   time cost for a 4 block file and saves an amount of space that is
-   less significant as a percentage of space, or so goes the hypothesis.
-   -Hans */
+/*
+ * This is an aggressive tail suppression policy, I am hoping it
+ * improves our benchmarks. The principle behind it is that percentage
+ * space saving is what matters, not absolute space saving.  This is
+ * non-intuitive, but it helps to understand it if you consider that the
+ * cost to access 4 blocks is not much more than the cost to access 1
+ * block, if you have to do a seek and rotate.  A tail risks a
+ * non-linear disk access that is significant as a percentage of total
+ * time cost for a 4 block file and saves an amount of space that is
+ * less significant as a percentage of space, or so goes the hypothesis.
+ * -Hans
+ */
 #define STORE_TAIL_IN_UNFM_S1(n_file_size,n_tail_size,n_block_size) \
 (\
   (!(n_tail_size)) || \
@@ -1020,10 +1208,11 @@
      ( (n_tail_size) >=   (MAX_DIRECT_ITEM_LEN(n_block_size) * 3)/4) ) ) \
 )
 
-/* Another strategy for tails, this one means only create a tail if all the
-   file would fit into one DIRECT item.
-   Primary intention for this one is to increase performance by decreasing
-   seeking.
+/*
+ * Another strategy for tails: only create a tail if the whole file
+ * would fit into one DIRECT item.
+ * Primary intention for this one is to increase performance by decreasing
+ * seeking.
 */
 #define STORE_TAIL_IN_UNFM_S2(n_file_size,n_tail_size,n_block_size) \
 (\
@@ -1037,23 +1226,21 @@
 #define REISERFS_VALID_FS    1
 #define REISERFS_ERROR_FS    2
 
-//
-// there are 5 item types currently
-//
+/* there are 5 item types currently */
 #define TYPE_STAT_DATA 0
 #define TYPE_INDIRECT 1
 #define TYPE_DIRECT 2
 #define TYPE_DIRENTRY 3
 #define TYPE_MAXTYPE 3
-#define TYPE_ANY 15		// FIXME: comment is required
+#define TYPE_ANY 15		/* FIXME: comment is required */
 
-/***************************************************************************/
-/*                       KEY & ITEM HEAD                                   */
-/***************************************************************************/
+/***************************************************************************
+ *                       KEY & ITEM HEAD                                   *
+ ***************************************************************************/
 
-//
-// directories use this key as well as old files
-//
+/* directories use this key as well as old files */
 struct offset_v1 {
 	__le32 k_offset;
 	__le32 k_uniqueness;
@@ -1086,11 +1273,14 @@
 	v2->v = (v2->v & cpu_to_le64(15ULL << 60)) | cpu_to_le64(offset);
 }
 
-/* Key of an item determines its location in the S+tree, and
-   is composed of 4 components */
+/*
+ * Key of an item determines its location in the S+tree, and
+ * is composed of 4 components
+ */
 struct reiserfs_key {
-	__le32 k_dir_id;	/* packing locality: by default parent
-				   directory object id */
+	/* packing locality: by default parent directory object id */
+	__le32 k_dir_id;
+
 	__le32 k_objectid;	/* object identifier */
 	union {
 		struct offset_v1 k_offset_v1;
@@ -1099,8 +1289,8 @@
 } __attribute__ ((__packed__));
 
 struct in_core_key {
-	__u32 k_dir_id;		/* packing locality: by default parent
-				   directory object id */
+	/* packing locality: by default parent directory object id */
+	__u32 k_dir_id;
 	__u32 k_objectid;	/* object identifier */
 	__u64 k_offset;
 	__u8 k_type;
@@ -1109,14 +1299,16 @@
 struct cpu_key {
 	struct in_core_key on_disk_key;
 	int version;
-	int key_length;		/* 3 in all cases but direct2indirect and
-				   indirect2direct conversion */
+	/* 3 in all cases but direct2indirect and indirect2direct conversion */
+	int key_length;
 };
 
-/* Our function for comparing keys can compare keys of different
-   lengths.  It takes as a parameter the length of the keys it is to
-   compare.  These defines are used in determining what is to be passed
-   to it as that parameter. */
+/*
+ * Our function for comparing keys can compare keys of different
+ * lengths.  It takes as a parameter the length of the keys it is to
+ * compare.  These defines are used in determining what is to be passed
+ * to it as that parameter.
+ */
 #define REISERFS_FULL_KEY_LEN     4
 #define REISERFS_SHORT_KEY_LEN    2
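Conceptually the comparison just walks the leading key components, which is why the length is passed in; a sketch (not the kernel's actual comparator) under that assumption:

static int comp_key_components(const __u32 *k1, const __u32 *k2, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (k1[i] < k2[i])
			return -1;
		if (k1[i] > k2[i])
			return 1;
	}
	return 0;	/* equal in the first len components */
}

With REISERFS_SHORT_KEY_LEN only the packing locality and objectid would take part in the comparison.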
 
@@ -1145,40 +1337,52 @@
 #define POSITION_FOUND 1
 #define POSITION_NOT_FOUND 0
 
-// return values for reiserfs_find_entry and search_by_entry_key
+/* return values for reiserfs_find_entry and search_by_entry_key */
 #define NAME_FOUND 1
 #define NAME_NOT_FOUND 0
 #define GOTO_PREVIOUS_ITEM 2
 #define NAME_FOUND_INVISIBLE 3
 
-/*  Everything in the filesystem is stored as a set of items.  The
-    item head contains the key of the item, its free space (for
-    indirect items) and specifies the location of the item itself
-    within the block.  */
+/*
+ * Everything in the filesystem is stored as a set of items.  The
+ * item head contains the key of the item, its free space (for
+ * indirect items) and specifies the location of the item itself
+ * within the block.
+ */
 
 struct item_head {
-	/* Everything in the tree is found by searching for it based on
-	 * its key.*/
+	/*
+	 * Everything in the tree is found by searching for it based on
+	 * its key.
+	 */
 	struct reiserfs_key ih_key;
 	union {
-		/* The free space in the last unformatted node of an
-		   indirect item if this is an indirect item.  This
-		   equals 0xFFFF iff this is a direct item or stat data
-		   item. Note that the key, not this field, is used to
-		   determine the item type, and thus which field this
-		   union contains. */
+		/*
+		 * The free space in the last unformatted node of an
+		 * indirect item if this is an indirect item.  This
+		 * equals 0xFFFF iff this is a direct item or stat data
+		 * item. Note that the key, not this field, is used to
+		 * determine the item type, and thus which field this
+		 * union contains.
+		 */
 		__le16 ih_free_space_reserved;
-		/* Iff this is a directory item, this field equals the
-		   number of directory entries in the directory item. */
+
+		/*
+		 * Iff this is a directory item, this field equals the
+		 * number of directory entries in the directory item.
+		 */
 		__le16 ih_entry_count;
 	} __attribute__ ((__packed__)) u;
 	__le16 ih_item_len;	/* total size of the item body */
-	__le16 ih_item_location;	/* an offset to the item body
-					 * within the block */
-	__le16 ih_version;	/* 0 for all old items, 2 for new
-				   ones. Highest bit is set by fsck
-				   temporary, cleaned after all
-				   done */
+
+	/* an offset to the item body within the block */
+	__le16 ih_item_location;
+
+	/*
+	 * 0 for all old items, 2 for new ones. Highest bit is set
+	 * temporarily by fsck and cleaned after it is done
+	 */
+	__le16 ih_version;
 } __attribute__ ((__packed__));
 /* size of item header     */
 #define IH_SIZE (sizeof(struct item_head))
@@ -1200,27 +1404,24 @@
 #define get_ih_free_space(ih) (ih_version (ih) == KEY_FORMAT_3_6 ? 0 : ih_free_space (ih))
 #define set_ih_free_space(ih,val) put_ih_free_space((ih), ((ih_version(ih) == KEY_FORMAT_3_6) ? 0 : (val)))
 
-/* these operate on indirect items, where you've got an array of ints
-** at a possibly unaligned location.  These are a noop on ia32
-** 
-** p is the array of __u32, i is the index into the array, v is the value
-** to store there.
-*/
+/*
+ * these operate on indirect items, where you've got an array of ints
+ * at a possibly unaligned location.  These are a noop on ia32
+ *
+ * p is the array of __u32, i is the index into the array, v is the value
+ * to store there.
+ */
 #define get_block_num(p, i) get_unaligned_le32((p) + (i))
 #define put_block_num(p, i, v) put_unaligned_le32((v), (p) + (i))
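Usage sketch (hypothetical helper): copying block numbers out of an indirect item body without tripping over alignment:

static void copy_unfm_blocks(const __u32 *item_body, int count, __u32 *out)
{
	int i;

	for (i = 0; i < count; i++)
		out[i] = get_block_num(item_body, i);
}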
 
-//
-// in old version uniqueness field shows key type
-//
+/* in old version uniqueness field shows key type */
 #define V1_SD_UNIQUENESS 0
 #define V1_INDIRECT_UNIQUENESS 0xfffffffe
 #define V1_DIRECT_UNIQUENESS 0xffffffff
 #define V1_DIRENTRY_UNIQUENESS 500
-#define V1_ANY_UNIQUENESS 555	// FIXME: comment is required
+#define V1_ANY_UNIQUENESS 555	/* FIXME: comment is required */
 
-//
-// here are conversion routines
-//
+/* here are conversion routines */
 static inline int uniqueness2type(__u32 uniqueness) CONSTF;
 static inline int uniqueness2type(__u32 uniqueness)
 {
@@ -1257,11 +1458,11 @@
 	}
 }
 
-//
-// key is pointer to on disk key which is stored in le, result is cpu,
-// there is no way to get version of object from key, so, provide
-// version to these defines
-//
+/*
+ * key is pointer to on disk key which is stored in le, result is cpu,
+ * there is no way to get version of object from key, so, provide
+ * version to these defines
+ */
 static inline loff_t le_key_k_offset(int version,
 				     const struct reiserfs_key *key)
 {
@@ -1350,9 +1551,7 @@
 	return le_key_k_type(version, key) == TYPE_STAT_DATA;
 }
 
-//
-// item header has version.
-//
+/* item header has version */
 static inline int is_direntry_le_ih(struct item_head *ih)
 {
 	return is_direntry_le_key(ih_version(ih), &ih->ih_key);
@@ -1373,9 +1572,7 @@
 	return is_statdata_le_key(ih_version(ih), &ih->ih_key);
 }
 
-//
-// key is pointer to cpu key, result is cpu
-//
+/* key is pointer to cpu key, result is cpu */
 static inline loff_t cpu_key_k_offset(const struct cpu_key *key)
 {
 	return key->on_disk_key.k_offset;
@@ -1426,7 +1623,7 @@
 
 extern struct reiserfs_key root_key;
 
-/* 
+/*
  * Picture represents a leaf of the S+tree
  *  ______________________________________________________
  * |      |  Array of     |                   |           |
@@ -1435,15 +1632,19 @@
  * |______|_______________|___________________|___________|
  */
 
-/* Header of a disk block.  More precisely, header of a formatted leaf
-   or internal node, and not the header of an unformatted node. */
+/*
+ * Header of a disk block.  More precisely, header of a formatted leaf
+ * or internal node, and not the header of an unformatted node.
+ */
 struct block_head {
 	__le16 blk_level;	/* Level of a block in the tree. */
 	__le16 blk_nr_item;	/* Number of keys/items in a block. */
 	__le16 blk_free_space;	/* Block free space in bytes. */
 	__le16 blk_reserved;
 	/* dump this in v4/planA */
-	struct reiserfs_key blk_right_delim_key;	/* kept only for compatibility */
+
+	/* kept only for compatibility */
+	struct reiserfs_key blk_right_delim_key;
 };
 
 #define BLKH_SIZE                     (sizeof(struct block_head))
@@ -1458,18 +1659,20 @@
 #define blkh_right_delim_key(p_blkh)  ((p_blkh)->blk_right_delim_key)
 #define set_blkh_right_delim_key(p_blkh,val)  ((p_blkh)->blk_right_delim_key = val)
 
-/*
- * values for blk_level field of the struct block_head
- */
+/* values for blk_level field of the struct block_head */
 
-#define FREE_LEVEL 0		/* when node gets removed from the tree its
-				   blk_level is set to FREE_LEVEL. It is then
-				   used to see whether the node is still in the
-				   tree */
+/*
+ * When node gets removed from the tree its blk_level is set to FREE_LEVEL.
+ * It is then used to see whether the node is still in the tree
+ */
+#define FREE_LEVEL 0
 
 #define DISK_LEAF_NODE_LEVEL  1	/* Leaf node level. */
 
-/* Given the buffer head of a formatted node, resolve to the block head of that node. */
+/*
+ * Given the buffer head of a formatted node, resolve to the
+ * block head of that node.
+ */
 #define B_BLK_HEAD(bh)			((struct block_head *)((bh)->b_data))
 /* Number of items that are in buffer. */
 #define B_NR_ITEMS(bh)			(blkh_nr_item(B_BLK_HEAD(bh)))
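Given B_BLK_HEAD() and the level accessors, node classification becomes a one-liner; e.g. this sketch (assuming B_LEVEL() as defined in this header):

static int buffer_is_leaf(struct buffer_head *bh)
{
	return B_LEVEL(bh) == DISK_LEAF_NODE_LEVEL;
}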
@@ -1490,14 +1693,14 @@
 #define B_IS_KEYS_LEVEL(bh)      (B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL \
 					    && B_LEVEL(bh) <= MAX_HEIGHT)
 
-/***************************************************************************/
-/*                             STAT DATA                                   */
-/***************************************************************************/
+/***************************************************************************
+ *                             STAT DATA                                   *
+ ***************************************************************************/
 
-//
-// old stat data is 32 bytes long. We are going to distinguish new one by
-// different size
-//
+/*
+ * old stat data is 32 bytes long. We are going to distinguish new one by
+ * different size
+ */
 struct stat_data_v1 {
 	__le16 sd_mode;		/* file type, permissions */
 	__le16 sd_nlink;	/* number of hard links */
@@ -1506,20 +1709,25 @@
 	__le32 sd_size;		/* file size */
 	__le32 sd_atime;	/* time of last access */
 	__le32 sd_mtime;	/* time file was last modified  */
-	__le32 sd_ctime;	/* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
+
+	/*
+	 * time inode (stat data) was last changed
+	 * (except changes to sd_atime and sd_mtime)
+	 */
+	__le32 sd_ctime;
 	union {
 		__le32 sd_rdev;
 		__le32 sd_blocks;	/* number of blocks file uses */
 	} __attribute__ ((__packed__)) u;
-	__le32 sd_first_direct_byte;	/* first byte of file which is stored
-					   in a direct item: except that if it
-					   equals 1 it is a symlink and if it
-					   equals ~(__u32)0 there is no
-					   direct item.  The existence of this
-					   field really grates on me. Let's
-					   replace it with a macro based on
-					   sd_size and our tail suppression
-					   policy.  Someday.  -Hans */
+
+	/*
+	 * first byte of file which is stored in a direct item: except that if
+	 * it equals 1 it is a symlink and if it equals ~(__u32)0 there is no
+	 * direct item.  The existence of this field really grates on me.
+	 * Let's replace it with a macro based on sd_size and our tail
+	 * suppression policy.  Someday.  -Hans
+	 */
+	__le32 sd_first_direct_byte;
 } __attribute__ ((__packed__));
 
 #define SD_V1_SIZE              (sizeof(struct stat_data_v1))
@@ -1551,8 +1759,10 @@
 
 /* inode flags stored in sd_attrs (nee sd_reserved) */
 
-/* we want common flags to have the same values as in ext2,
-   so chattr(1) will work without problems */
+/*
+ * we want common flags to have the same values as in ext2,
+ * so chattr(1) will work without problems
+ */
 #define REISERFS_IMMUTABLE_FL FS_IMMUTABLE_FL
 #define REISERFS_APPEND_FL    FS_APPEND_FL
 #define REISERFS_SYNC_FL      FS_SYNC_FL
@@ -1572,8 +1782,10 @@
 				REISERFS_COMPR_FL |	\
 				REISERFS_NOTAIL_FL )
 
-/* Stat Data on disk (reiserfs version of UFS disk inode minus the
-   address blocks) */
+/*
+ * Stat Data on disk (reiserfs version of UFS disk inode minus the
+ * address blocks)
+ */
 struct stat_data {
 	__le16 sd_mode;		/* file type, permissions */
 	__le16 sd_attrs;	/* persistent inode flags */
@@ -1583,25 +1795,20 @@
 	__le32 sd_gid;		/* group */
 	__le32 sd_atime;	/* time of last access */
 	__le32 sd_mtime;	/* time file was last modified  */
-	__le32 sd_ctime;	/* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
+
+	/*
+	 * time inode (stat data) was last changed
+	 * (except changes to sd_atime and sd_mtime)
+	 */
+	__le32 sd_ctime;
 	__le32 sd_blocks;
 	union {
 		__le32 sd_rdev;
 		__le32 sd_generation;
-		//__le32 sd_first_direct_byte;
-		/* first byte of file which is stored in a
-		   direct item: except that if it equals 1
-		   it is a symlink and if it equals
-		   ~(__u32)0 there is no direct item.  The
-		   existence of this field really grates
-		   on me. Let's replace it with a macro
-		   based on sd_size and our tail
-		   suppression policy? */
 	} __attribute__ ((__packed__)) u;
 } __attribute__ ((__packed__));
-//
-// this is 44 bytes long
-//
+
+/* this is 44 bytes long */
 #define SD_SIZE (sizeof(struct stat_data))
 #define SD_V2_SIZE              SD_SIZE
 #define stat_data_v2(ih)        (ih_version (ih) == KEY_FORMAT_3_6)
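To illustrate the version split described above, a reader branches on the
item-header format and uses the matching accessor family. A hedged sketch:
sd_v1_mode()/sd_v2_mode() are assumed to exist following the sd_v2_attrs()
pattern shown below, and the helper name is invented.

static umode_t example_mode(struct item_head *ih, void *sd)
{
	if (stat_data_v2(ih))
		/* 44-byte stat_data, KEY_FORMAT_3_6 layout */
		return sd_v2_mode((struct stat_data *)sd);
	/* otherwise the old 32-byte stat_data_v1 layout */
	return sd_v1_mode((struct stat_data_v1 *)sd);
}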
@@ -1632,48 +1839,61 @@
 #define sd_v2_attrs(sdp)         (le16_to_cpu((sdp)->sd_attrs))
 #define set_sd_v2_attrs(sdp,v)   ((sdp)->sd_attrs = cpu_to_le16(v))
 
-/***************************************************************************/
-/*                      DIRECTORY STRUCTURE                                */
-/***************************************************************************/
-/* 
-   Picture represents the structure of directory items
-   ________________________________________________
-   |  Array of     |   |     |        |       |   |
-   | directory     |N-1| N-2 | ....   |   1st |0th|
-   | entry headers |   |     |        |       |   |
-   |_______________|___|_____|________|_______|___|
-                    <----   directory entries         ------>
-
- First directory item has k_offset component 1. We store "." and ".."
- in one item, always, we never split "." and ".." into differing
- items.  This makes, among other things, the code for removing
- directories simpler. */
+/***************************************************************************
+ *                      DIRECTORY STRUCTURE                                *
+ ***************************************************************************/
+/*
+ * Picture represents the structure of directory items
+ * ________________________________________________
+ * |  Array of     |   |     |        |       |   |
+ * | directory     |N-1| N-2 | ....   |   1st |0th|
+ * | entry headers |   |     |        |       |   |
+ * |_______________|___|_____|________|_______|___|
+ *                  <----   directory entries         ------>
+ *
+ * First directory item has k_offset component 1. We store "." and ".."
+ * in one item, always, we never split "." and ".." into differing
+ * items.  This makes, among other things, the code for removing
+ * directories simpler.
+ */
 #define SD_OFFSET  0
 #define SD_UNIQUENESS 0
 #define DOT_OFFSET 1
 #define DOT_DOT_OFFSET 2
 #define DIRENTRY_UNIQUENESS 500
 
-/* */
 #define FIRST_ITEM_OFFSET 1
 
 /*
-   Q: How to get key of object pointed to by entry from entry?  
+ * Q: How to get key of object pointed to by entry from entry?
+ *
+ * A: Each directory entry has its header. This header has deh_dir_id
+ *    and deh_objectid fields, those are key of object, entry points to
+ */
 
-   A: Each directory entry has its header. This header has deh_dir_id and deh_objectid fields, those are key
-      of object, entry points to */
-
-/* NOT IMPLEMENTED:   
-   Directory will someday contain stat data of object */
+/*
+ * NOT IMPLEMENTED:
+ * Directory will someday contain stat data of object
+ */
 
 struct reiserfs_de_head {
 	__le32 deh_offset;	/* third component of the directory entry key */
-	__le32 deh_dir_id;	/* objectid of the parent directory of the object, that is referenced
-				   by directory entry */
-	__le32 deh_objectid;	/* objectid of the object, that is referenced by directory entry */
+
+	/*
+	 * objectid of the parent directory of the object, that is referenced
+	 * by directory entry
+	 */
+	__le32 deh_dir_id;
+
+	/* objectid of the object, that is referenced by directory entry */
+	__le32 deh_objectid;
 	__le16 deh_location;	/* offset of name in the whole item */
-	__le16 deh_state;	/* whether 1) entry contains stat data (for future), and 2) whether
-				   entry is hidden (unlinked) */
+
+	/*
+	 * whether 1) entry contains stat data (for future), and
+	 * 2) entry is hidden (unlinked)
+	 */
+	__le16 deh_state;
 } __attribute__ ((__packed__));
 #define DEH_SIZE                  sizeof(struct reiserfs_de_head)
 #define deh_offset(p_deh)         (le32_to_cpu((p_deh)->deh_offset))
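The Q/A above in code form: the key of the object an entry points to is
just the (deh_dir_id, deh_objectid) pair from its header. A minimal
sketch, assuming deh_dir_id()/deh_objectid() accessors defined alongside
deh_offset(); the helper itself is illustrative.

static void entry_object_key(const struct reiserfs_de_head *deh,
			     __u32 *dir_id, __u32 *objectid)
{
	*dir_id = deh_dir_id(deh);	/* parent directory's objectid */
	*objectid = deh_objectid(deh);	/* objectid of the object itself */
}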
@@ -1703,9 +1923,11 @@
 #   define ADDR_UNALIGNED_BITS  (3)
 #endif
 
-/* These are only used to manipulate deh_state.
+/*
+ * These are only used to manipulate deh_state.
  * Because of this, we'll use the ext2_ bit routines,
- * since they are little endian */
+ * since they are little endian
+ */
 #ifdef ADDR_UNALIGNED_BITS
 
 #   define aligned_address(addr)           ((void *)((long)(addr) & ~((1UL << ADDR_UNALIGNED_BITS) - 1)))
@@ -1740,13 +1962,16 @@
 extern void make_empty_dir_item(char *body, __le32 dirid, __le32 objid,
 				__le32 par_dirid, __le32 par_objid);
 
-// two entries per block (at least)
+/* two entries per block (at least) */
 #define REISERFS_MAX_NAME(block_size) 255
 
-/* this structure is used for operations on directory entries. It is
-   not a disk structure. */
-/* When reiserfs_find_entry or search_by_entry_key find directory
-   entry, they return filled reiserfs_dir_entry structure */
+/*
+ * this structure is used for operations on directory entries. It is
+ * not a disk structure.
+ *
+ * When reiserfs_find_entry or search_by_entry_key find directory
+ * entry, they return filled reiserfs_dir_entry structure
+ */
 struct reiserfs_dir_entry {
 	struct buffer_head *de_bh;
 	int de_item_num;
@@ -1764,7 +1989,10 @@
 	struct cpu_key de_entry_key;
 };
 
-/* these defines are useful when a particular member of a reiserfs_dir_entry is needed */
+/*
+ * these defines are useful when a particular member of
+ * a reiserfs_dir_entry is needed
+ */
 
 /* pointer to file name, stored in entry */
 #define B_I_DEH_ENTRY_FILE_NAME(bh, ih, deh) \
@@ -1791,11 +2019,13 @@
  * |______|_______________|___________________|___________|
  */
 
-/***************************************************************************/
-/*                      DISK CHILD                                         */
-/***************************************************************************/
-/* Disk child pointer: The pointer from an internal node of the tree
-   to a node that is on disk. */
+/***************************************************************************
+ *                      DISK CHILD                                         *
+ ***************************************************************************/
+/*
+ * Disk child pointer:
+ * The pointer from an internal node of the tree to a node that is on disk.
+ */
 struct disk_child {
 	__le32 dc_block_number;	/* Disk child's block number. */
 	__le16 dc_size;		/* Disk child's used space.   */
@@ -1828,47 +2058,66 @@
 #define MAX_NR_KEY(bh) ( (MAX_CHILD_SIZE(bh)-DC_SIZE)/(KEY_SIZE+DC_SIZE) )
 #define MIN_NR_KEY(bh)    (MAX_NR_KEY(bh)/2)
 
-/***************************************************************************/
-/*                      PATH STRUCTURES AND DEFINES                        */
-/***************************************************************************/
+/***************************************************************************
+ *                      PATH STRUCTURES AND DEFINES                        *
+ ***************************************************************************/
 
-/* Search_by_key fills up the path from the root to the leaf as it descends the tree looking for the
-   key.  It uses reiserfs_bread to try to find buffers in the cache given their block number.  If it
-   does not find them in the cache it reads them from disk.  For each node search_by_key finds using
-   reiserfs_bread it then uses bin_search to look through that node.  bin_search will find the
-   position of the block_number of the next node if it is looking through an internal node.  If it
-   is looking through a leaf node bin_search will find the position of the item which has key either
-   equal to given key, or which is the maximal key less than the given key. */
+/*
+ * search_by_key fills up the path from the root to the leaf as it descends
+ * the tree looking for the key.  It uses reiserfs_bread to try to find
+ * buffers in the cache given their block number.  If it does not find
+ * them in the cache it reads them from disk.  For each node search_by_key
+ * finds using reiserfs_bread, it then uses bin_search to look through that
+ * node.  bin_search will find the position of the block_number of the next
+ * node if it is looking through an internal node.  If it is looking through
+ * a leaf node bin_search will find the position of the item which has key
+ * either equal to given key, or which is the maximal key less than the
+ * given key.
+ */
 
 struct path_element {
-	struct buffer_head *pe_buffer;	/* Pointer to the buffer at the path in the tree. */
-	int pe_position;	/* Position in the tree node which is placed in the */
-	/* buffer above.                                  */
+	/* Pointer to the buffer at the path in the tree. */
+	struct buffer_head *pe_buffer;
+	/* Position in the tree node which is placed in the buffer above. */
+	int pe_position;
 };
 
-#define MAX_HEIGHT 5		/* maximal height of a tree. don't change this without changing JOURNAL_PER_BALANCE_CNT */
-#define EXTENDED_MAX_HEIGHT         7	/* Must be equals MAX_HEIGHT + FIRST_PATH_ELEMENT_OFFSET */
-#define FIRST_PATH_ELEMENT_OFFSET   2	/* Must be equal to at least 2. */
+/*
+ * maximal height of a tree. don't change this without
+ * changing JOURNAL_PER_BALANCE_CNT
+ */
+#define MAX_HEIGHT 5
 
-#define ILLEGAL_PATH_ELEMENT_OFFSET 1	/* Must be equal to FIRST_PATH_ELEMENT_OFFSET - 1 */
-#define MAX_FEB_SIZE 6		/* this MUST be MAX_HEIGHT + 1. See about FEB below */
+/* Must be equal to MAX_HEIGHT + FIRST_PATH_ELEMENT_OFFSET */
+#define EXTENDED_MAX_HEIGHT         7
 
-/* We need to keep track of who the ancestors of nodes are.  When we
-   perform a search we record which nodes were visited while
-   descending the tree looking for the node we searched for. This list
-   of nodes is called the path.  This information is used while
-   performing balancing.  Note that this path information may become
-   invalid, and this means we must check it when using it to see if it
-   is still valid. You'll need to read search_by_key and the comments
-   in it, especially about decrement_counters_in_path(), to understand
-   this structure.  
+/* Must be equal to at least 2. */
+#define FIRST_PATH_ELEMENT_OFFSET   2
 
-Paths make the code so much harder to work with and debug.... An
-enormous number of bugs are due to them, and trying to write or modify
-code that uses them just makes my head hurt.  They are based on an
-excessive effort to avoid disturbing the precious VFS code.:-( The
-gods only know how we are going to SMP the code that uses them.
-znodes are the way! */
+/* Must be equal to FIRST_PATH_ELEMENT_OFFSET - 1 */
+#define ILLEGAL_PATH_ELEMENT_OFFSET 1
+
+/* this MUST be MAX_HEIGHT + 1. See about FEB below */
+#define MAX_FEB_SIZE 6
+
+/*
+ * We need to keep track of who the ancestors of nodes are.  When we
+ * perform a search we record which nodes were visited while
+ * descending the tree looking for the node we searched for. This list
+ * of nodes is called the path.  This information is used while
+ * performing balancing.  Note that this path information may become
+ * invalid, and this means we must check it when using it to see if it
+ * is still valid. You'll need to read search_by_key and the comments
+ * in it, especially about decrement_counters_in_path(), to understand
+ * this structure.
+ *
+ * Paths make the code so much harder to work with and debug.... An
+ * enormous number of bugs are due to them, and trying to write or modify
+ * code that uses them just makes my head hurt.  They are based on an
+ * excessive effort to avoid disturbing the precious VFS code.:-( The
+ * gods only know how we are going to SMP the code that uses them.
+ * znodes are the way!
+ */
 
 #define PATH_READA	0x1	/* do read ahead */
 #define PATH_READA_BACK 0x2	/* read backwards */
@@ -1876,7 +2125,8 @@
 struct treepath {
 	int path_length;	/* Length of the array above.   */
 	int reada;
-	struct path_element path_elements[EXTENDED_MAX_HEIGHT];	/* Array of the path elements.  */
+	/* Array of the path elements.  */
+	struct path_element path_elements[EXTENDED_MAX_HEIGHT];
 	int pos_in_item;
 };
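For orientation, the last element of the array is the leaf the descent
ended on, which is what the PATH_PLAST_BUFFER/PATH_LAST_POSITION macros
below expand to. A hand-written equivalent, purely as a sketch:

static struct buffer_head *path_leaf(const struct treepath *path)
{
	/* path_elements[path_length] holds the final (leaf) element */
	return path->path_elements[path->path_length].pe_buffer;
}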
 
@@ -1895,20 +2145,31 @@
 #define PATH_OFFSET_POSITION(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_position)
 
 #define PATH_PLAST_BUFFER(path) (PATH_OFFSET_PBUFFER((path), (path)->path_length))
-				/* you know, to the person who didn't
-				   write this the macro name does not
-				   at first suggest what it does.
-				   Maybe POSITION_FROM_PATH_END? Or
-				   maybe we should just focus on
-				   dumping paths... -Hans */
+
+/*
+ * you know, to the person who didn't write this the macro name does not
+ * at first suggest what it does.  Maybe POSITION_FROM_PATH_END? Or
+ * maybe we should just focus on dumping paths... -Hans
+ */
 #define PATH_LAST_POSITION(path) (PATH_OFFSET_POSITION((path), (path)->path_length))
 
-/* in do_balance leaf has h == 0 in contrast with path structure,
-   where root has level == 0. That is why we need these defines */
-#define PATH_H_PBUFFER(path, h) PATH_OFFSET_PBUFFER (path, path->path_length - (h))	/* tb->S[h] */
-#define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1)	/* tb->F[h] or tb->S[0]->b_parent */
-#define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h))
-#define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1)	/* tb->S[h]->b_item_order */
+/*
+ * in do_balance leaf has h == 0 in contrast with path structure,
+ * where root has level == 0. That is why we need these defines
+ */
+
+/* tb->S[h] */
+#define PATH_H_PBUFFER(path, h) \
+			PATH_OFFSET_PBUFFER(path, path->path_length - (h))
+
+/* tb->F[h] or tb->S[0]->b_parent */
+#define PATH_H_PPARENT(path, h) PATH_H_PBUFFER(path, (h) + 1)
+
+#define PATH_H_POSITION(path, h) \
+			PATH_OFFSET_POSITION(path, path->path_length - (h))
+
+/* tb->S[h]->b_item_order */
+#define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1)
 
 #define PATH_H_PATH_OFFSET(path, n_h) ((path)->path_length - (n_h))
 
@@ -1973,16 +2234,14 @@
  /* get item body */
 #define B_I_DEH(bh, ih) ((struct reiserfs_de_head *)(ih_item_body(bh, ih)))
 
-/* length of the directory entry in directory item. This define
-   calculates length of i-th directory entry using directory entry
-   locations from dir entry head. When it calculates length of 0-th
-   directory entry, it uses length of whole item in place of entry
-   location of the non-existent following entry in the calculation.
-   See picture above.*/
 /*
-#define I_DEH_N_ENTRY_LENGTH(ih,deh,i) \
-((i) ? (deh_location((deh)-1) - deh_location((deh))) : (ih_item_len((ih)) - deh_location((deh))))
-*/
+ * length of the directory entry in directory item. This define
+ * calculates length of i-th directory entry using directory entry
+ * locations from dir entry head. When it calculates length of 0-th
+ * directory entry, it uses length of whole item in place of entry
+ * location of the non-existent following entry in the calculation.
+ * See picture above.
+ */
 static inline int entry_length(const struct buffer_head *bh,
 			       const struct item_head *ih, int pos_in_item)
 {
@@ -1995,15 +2254,15 @@
 	return ih_item_len(ih) - deh_location(deh);
 }
 
-/***************************************************************************/
-/*                       MISC                                              */
-/***************************************************************************/
+/***************************************************************************
+ *                       MISC                                              *
+ ***************************************************************************/
 
 /* Size of pointer to the unformatted node. */
 #define UNFM_P_SIZE (sizeof(unp_t))
 #define UNFM_P_SHIFT 2
 
-// in in-core inode key is stored on le form
+/* in the in-core inode the key is stored in le form */
 #define INODE_PKEY(inode) ((struct reiserfs_key *)(REISERFS_I(inode)->i_key))
 
 #define MAX_UL_INT 0xffffffff
@@ -2019,7 +2278,6 @@
 	return (loff_t) ((~(__u64) 0) >> 4);
 }
 
-/*#define MAX_KEY_UNIQUENESS	MAX_UL_INT*/
 #define MAX_KEY_OBJECTID	MAX_UL_INT
 
 #define MAX_B_NUM  MAX_UL_INT
@@ -2028,9 +2286,12 @@
 /* the purpose is to detect overflow of an unsigned short */
 #define REISERFS_LINK_MAX (MAX_US_INT - 1000)
 
-/* The following defines are used in reiserfs_insert_item and reiserfs_append_item  */
-#define REISERFS_KERNEL_MEM		0	/* reiserfs kernel memory mode  */
-#define REISERFS_USER_MEM		1	/* reiserfs user memory mode            */
+/*
+ * The following defines are used in reiserfs_insert_item
+ * and reiserfs_append_item
+ */
+#define REISERFS_KERNEL_MEM		0	/* kernel memory mode */
+#define REISERFS_USER_MEM		1	/* user memory mode */
 
 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
 #define get_generation(s) atomic_read (&fs_generation(s))
@@ -2042,46 +2303,65 @@
 	__fs_changed(gen, s);		\
 })
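The generation counter supports the research-on-change pattern used
throughout this filesystem: sample the counter, do something that can
block (and drop the write lock), then redo the tree search if the counter
moved. A hedged sketch, with search_by_key() and pathrelse() as declared
elsewhere in this header and the function name invented:

static int lookup_with_retry(struct super_block *sb,
			     const struct cpu_key *key,
			     struct treepath *path)
{
	int ret, gen;

research:
	ret = search_by_key(sb, key, path, DISK_LEAF_NODE_LEVEL);
	gen = get_generation(sb);

	/* ... blocking work that may release the write lock ... */

	if (fs_changed(gen, sb)) {
		pathrelse(path);	/* path may now be stale */
		goto research;
	}
	return ret;
}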
 
-/***************************************************************************/
-/*                  FIXATE NODES                                           */
-/***************************************************************************/
+/***************************************************************************
+ *                  FIXATE NODES                                           *
+ ***************************************************************************/
 
 #define VI_TYPE_LEFT_MERGEABLE 1
 #define VI_TYPE_RIGHT_MERGEABLE 2
 
-/* To make any changes in the tree we always first find node, that
-   contains item to be changed/deleted or place to insert a new
-   item. We call this node S. To do balancing we need to decide what
-   we will shift to left/right neighbor, or to a new node, where new
-   item will be etc. To make this analysis simpler we build virtual
-   node. Virtual node is an array of items, that will replace items of
-   node S. (For instance if we are going to delete an item, virtual
-   node does not contain it). Virtual node keeps information about
-   item sizes and types, mergeability of first and last items, sizes
-   of all entries in directory item. We use this array of items when
-   calculating what we can shift to neighbors and how many nodes we
-   have to have if we do not any shiftings, if we shift to left/right
-   neighbor or to both. */
+/*
+ * To make any changes in the tree we always first find the node that
+ * contains the item to be changed/deleted, or the place to insert a
+ * new item. We call this node S. To do balancing we need to decide
+ * what we will shift to the left/right neighbor, or to a new node
+ * where the new item will go, etc. To make this analysis simpler we
+ * build a virtual node. A virtual node is an array of items that will
+ * replace the items of node S. (For instance, if we are going to
+ * delete an item, the virtual node does not contain it.) The virtual
+ * node keeps information about item sizes and types, mergeability of
+ * the first and last items, and sizes of all entries in a directory
+ * item. We use this array of items when calculating what we can shift
+ * to neighbors and how many nodes we need if we do no shifting, if we
+ * shift to the left/right neighbor, or to both.
+ */
 struct virtual_item {
-	int vi_index;		// index in the array of item operations
-	unsigned short vi_type;	// left/right mergeability
-	unsigned short vi_item_len;	/* length of item that it will have after balancing */
+	int vi_index;		/* index in the array of item operations */
+	unsigned short vi_type;	/* left/right mergeability */
+
+	/* length of item that it will have after balancing */
+	unsigned short vi_item_len;
+
 	struct item_head *vi_ih;
-	const char *vi_item;	// body of item (old or new)
-	const void *vi_new_data;	// 0 always but paste mode
-	void *vi_uarea;		// item specific area
+	const char *vi_item;	/* body of item (old or new) */
+	const void *vi_new_data;	/* 0 always except in paste mode */
+	void *vi_uarea;		/* item specific area */
 };
 
 struct virtual_node {
-	char *vn_free_ptr;	/* this is a pointer to the free space in the buffer */
+	/* this is a pointer to the free space in the buffer */
+	char *vn_free_ptr;
+
 	unsigned short vn_nr_item;	/* number of items in virtual node */
-	short vn_size;		/* size of node , that node would have if it has unlimited size and no balancing is performed */
-	short vn_mode;		/* mode of balancing (paste, insert, delete, cut) */
+
+	/*
+	 * the size the node would have if it had unlimited
+	 * size and no balancing were performed
+	 */
+	short vn_size;
+
+	/* mode of balancing (paste, insert, delete, cut) */
+	short vn_mode;
+
 	short vn_affected_item_num;
 	short vn_pos_in_item;
-	struct item_head *vn_ins_ih;	/* item header of inserted item, 0 for other modes */
+
+	/* item header of inserted item, 0 for other modes */
+	struct item_head *vn_ins_ih;
 	const void *vn_data;
-	struct virtual_item *vn_vi;	/* array of items (including a new one, excluding item to be deleted) */
+
+	/* array of items (including a new one, excluding item to be deleted) */
+	struct virtual_item *vn_vi;
 };
 
 /* used by directory items when creating virtual nodes */
@@ -2091,22 +2371,25 @@
 	__u16 entry_sizes[1];
 } __attribute__ ((__packed__));
 
-/***************************************************************************/
-/*                  TREE BALANCE                                           */
-/***************************************************************************/
+/***************************************************************************
+ *                  TREE BALANCE                                           *
+ ***************************************************************************/
 
-/* This temporary structure is used in tree balance algorithms, and
-   constructed as we go to the extent that its various parts are
-   needed.  It contains arrays of nodes that can potentially be
-   involved in the balancing of node S, and parameters that define how
-   each of the nodes must be balanced.  Note that in these algorithms
-   for balancing the worst case is to need to balance the current node
-   S and the left and right neighbors and all of their parents plus
-   create a new node.  We implement S1 balancing for the leaf nodes
-   and S0 balancing for the internal nodes (S1 and S0 are defined in
-   our papers.)*/
+/*
+ * This temporary structure is used in tree balance algorithms, and
+ * constructed as we go to the extent that its various parts are
+ * needed.  It contains arrays of nodes that can potentially be
+ * involved in the balancing of node S, and parameters that define how
+ * each of the nodes must be balanced.  Note that in these algorithms
+ * for balancing the worst case is to need to balance the current node
+ * S and the left and right neighbors and all of their parents plus
+ * create a new node.  We implement S1 balancing for the leaf nodes
+ * and S0 balancing for the internal nodes (S1 and S0 are defined in
+ * our papers.)
+ */
 
-#define MAX_FREE_BLOCK 7	/* size of the array of buffers to free at end of do_balance */
+/* size of the array of buffers to free at end of do_balance */
+#define MAX_FREE_BLOCK 7
 
 /* maximum number of FEB blocknrs on a single level */
 #define MAX_AMOUNT_NEEDED 2
@@ -2118,64 +2401,133 @@
 	struct super_block *tb_sb;
 	struct reiserfs_transaction_handle *transaction_handle;
 	struct treepath *tb_path;
-	struct buffer_head *L[MAX_HEIGHT];	/* array of left neighbors of nodes in the path */
-	struct buffer_head *R[MAX_HEIGHT];	/* array of right neighbors of nodes in the path */
-	struct buffer_head *FL[MAX_HEIGHT];	/* array of fathers of the left  neighbors      */
-	struct buffer_head *FR[MAX_HEIGHT];	/* array of fathers of the right neighbors      */
-	struct buffer_head *CFL[MAX_HEIGHT];	/* array of common parents of center node and its left neighbor  */
-	struct buffer_head *CFR[MAX_HEIGHT];	/* array of common parents of center node and its right neighbor */
 
-	struct buffer_head *FEB[MAX_FEB_SIZE];	/* array of empty buffers. Number of buffers in array equals
-						   cur_blknum. */
+	/* array of left neighbors of nodes in the path */
+	struct buffer_head *L[MAX_HEIGHT];
+
+	/* array of right neighbors of nodes in the path */
+	struct buffer_head *R[MAX_HEIGHT];
+
+	/* array of fathers of the left neighbors */
+	struct buffer_head *FL[MAX_HEIGHT];
+
+	/* array of fathers of the right neighbors */
+	struct buffer_head *FR[MAX_HEIGHT];
+
+	/* array of common parents of center node and its left neighbor */
+	struct buffer_head *CFL[MAX_HEIGHT];
+
+	/* array of common parents of center node and its right neighbor */
+	struct buffer_head *CFR[MAX_HEIGHT];
+
+	/*
+	 * array of empty buffers. Number of buffers in array equals
+	 * cur_blknum.
+	 */
+	struct buffer_head *FEB[MAX_FEB_SIZE];
 	struct buffer_head *used[MAX_FEB_SIZE];
 	struct buffer_head *thrown[MAX_FEB_SIZE];
-	int lnum[MAX_HEIGHT];	/* array of number of items which must be
-				   shifted to the left in order to balance the
-				   current node; for leaves includes item that
-				   will be partially shifted; for internal
-				   nodes, it is the number of child pointers
-				   rather than items. It includes the new item
-				   being created. The code sometimes subtracts
-				   one to get the number of wholly shifted
-				   items for other purposes. */
-	int rnum[MAX_HEIGHT];	/* substitute right for left in comment above */
-	int lkey[MAX_HEIGHT];	/* array indexed by height h mapping the key delimiting L[h] and
-				   S[h] to its item number within the node CFL[h] */
-	int rkey[MAX_HEIGHT];	/* substitute r for l in comment above */
-	int insert_size[MAX_HEIGHT];	/* the number of bytes by we are trying to add or remove from
-					   S[h]. A negative value means removing.  */
-	int blknum[MAX_HEIGHT];	/* number of nodes that will replace node S[h] after
-				   balancing on the level h of the tree.  If 0 then S is
-				   being deleted, if 1 then S is remaining and no new nodes
-				   are being created, if 2 or 3 then 1 or 2 new nodes is
-				   being created */
+
+	/*
+	 * array of number of items which must be shifted to the left in
+	 * order to balance the current node; for leaves includes item that
+	 * will be partially shifted; for internal nodes, it is the number
+	 * of child pointers rather than items. It includes the new item
+	 * being created. The code sometimes subtracts one to get the
+	 * number of wholly shifted items for other purposes.
+	 */
+	int lnum[MAX_HEIGHT];
+
+	/* substitute right for left in comment above */
+	int rnum[MAX_HEIGHT];
+
+	/*
+	 * array indexed by height h mapping the key delimiting L[h] and
+	 * S[h] to its item number within the node CFL[h]
+	 */
+	int lkey[MAX_HEIGHT];
+
+	/* substitute r for l in comment above */
+	int rkey[MAX_HEIGHT];
+
+	/*
+	 * the number of bytes by which we are trying to add to or remove
+	 * from S[h]. A negative value means removing.
+	 */
+	int insert_size[MAX_HEIGHT];
+
+	/*
+	 * number of nodes that will replace node S[h] after balancing
+	 * on the level h of the tree.  If 0 then S is being deleted,
+	 * if 1 then S is remaining and no new nodes are being created,
+	 * if 2 or 3 then 1 or 2 new nodes are being created
+	 */
+	int blknum[MAX_HEIGHT];
 
 	/* fields that are used only for balancing leaves of the tree */
-	int cur_blknum;		/* number of empty blocks having been already allocated                 */
-	int s0num;		/* number of items that fall into left most  node when S[0] splits     */
-	int s1num;		/* number of items that fall into first  new node when S[0] splits     */
-	int s2num;		/* number of items that fall into second new node when S[0] splits     */
-	int lbytes;		/* number of bytes which can flow to the left neighbor from the        left    */
-	/* most liquid item that cannot be shifted from S[0] entirely         */
-	/* if -1 then nothing will be partially shifted */
-	int rbytes;		/* number of bytes which will flow to the right neighbor from the right        */
-	/* most liquid item that cannot be shifted from S[0] entirely         */
-	/* if -1 then nothing will be partially shifted                           */
-	int s1bytes;		/* number of bytes which flow to the first  new node when S[0] splits   */
-	/* note: if S[0] splits into 3 nodes, then items do not need to be cut  */
-	int s2bytes;
-	struct buffer_head *buf_to_free[MAX_FREE_BLOCK];	/* buffers which are to be freed after do_balance finishes by unfix_nodes */
-	char *vn_buf;		/* kmalloced memory. Used to create
-				   virtual node and keep map of
-				   dirtied bitmap blocks */
-	int vn_buf_size;	/* size of the vn_buf */
-	struct virtual_node *tb_vn;	/* VN starts after bitmap of bitmap blocks */
 
-	int fs_gen;		/* saved value of `reiserfs_generation' counter
-				   see FILESYSTEM_CHANGED() macro in reiserfs_fs.h */
+	/* number of empty blocks having been already allocated */
+	int cur_blknum;
+
+	/* number of items that fall into left most node when S[0] splits */
+	int s0num;
+
+	/* number of items that fall into first new node when S[0] splits */
+	int s1num;
+
+	/* number of items that fall into second new node when S[0] splits */
+	int s2num;
+
+	/*
+	 * number of bytes which can flow to the left neighbor from the
+	 * leftmost liquid item that cannot be shifted from S[0] entirely;
+	 * if -1 then nothing will be partially shifted
+	 */
+	int lbytes;
+
+	/*
+	 * number of bytes which will flow to the right neighbor from the
+	 * rightmost liquid item that cannot be shifted from S[0] entirely;
+	 * if -1 then nothing will be partially shifted
+	 */
+	int rbytes;
+
+	/*
+	 * number of bytes which flow to the first new node when S[0] splits
+	 * note: if S[0] splits into 3 nodes, then items do not need to be cut
+	 */
+	int s1bytes;
+	int s2bytes;
+
+	/*
+	 * buffers which are to be freed after do_balance finishes
+	 * by unfix_nodes
+	 */
+	struct buffer_head *buf_to_free[MAX_FREE_BLOCK];
+
+	/*
+	 * kmalloced memory. Used to create virtual node and keep
+	 * map of dirtied bitmap blocks
+	 */
+	char *vn_buf;
+
+	int vn_buf_size;	/* size of the vn_buf */
+
+	/* VN starts after bitmap of bitmap blocks */
+	struct virtual_node *tb_vn;
+
+	/*
+	 * saved value of `reiserfs_generation' counter; see
+	 * FILESYSTEM_CHANGED() macro in reiserfs_fs.h
+	 */
+	int fs_gen;
+
 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
-	struct in_core_key key;	/* key pointer, to pass to block allocator or
-				   another low-level subsystem */
+	/*
+	 * key pointer, to pass to block allocator or
+	 * another low-level subsystem
+	 */
+	struct in_core_key key;
 #endif
 };
 
@@ -2183,20 +2534,24 @@
 
 /* When inserting an item. */
 #define M_INSERT	'i'
-/* When inserting into (directories only) or appending onto an already
-   existent item. */
+/*
+ * When inserting into (directories only) or appending onto an already
+ * existent item.
+ */
 #define M_PASTE		'p'
 /* When deleting an item. */
 #define M_DELETE	'd'
 /* When truncating an item or removing an entry from a (directory) item. */
-#define M_CUT 		'c'
+#define M_CUT		'c'
 
 /* used when balancing on leaf level skipped (in reiserfsck) */
 #define M_INTERNAL	'n'
 
-/* When further balancing is not needed, then do_balance does not need
-   to be called. */
-#define M_SKIP_BALANCING 		's'
+/*
+ * When further balancing is not needed, then do_balance does not need
+ * to be called.
+ */
+#define M_SKIP_BALANCING		's'
 #define M_CONVERT	'v'
 
 /* modes of leaf_move_items */
@@ -2209,8 +2564,10 @@
 #define FIRST_TO_LAST 0
 #define LAST_TO_FIRST 1
 
-/* used in do_balance for passing parent of node information that has
-   been gotten from tb struct */
+/*
+ * used in do_balance for passing parent-of-node information that has
+ * been taken from the tb struct
+ */
 struct buffer_info {
 	struct tree_balance *tb;
 	struct buffer_head *bi_bh;
@@ -2228,20 +2585,24 @@
 	return bi ? sb_from_tb(bi->tb) : NULL;
 }
 
-/* there are 4 types of items: stat data, directory item, indirect, direct.
-+-------------------+------------+--------------+------------+
-|	            |  k_offset  | k_uniqueness | mergeable? |
-+-------------------+------------+--------------+------------+
-|     stat data     |	0        |      0       |   no       |
-+-------------------+------------+--------------+------------+
-| 1st directory item| DOT_OFFSET |DIRENTRY_UNIQUENESS|   no       | 
-| non 1st directory | hash value |              |   yes      |
-|     item          |            |              |            |
-+-------------------+------------+--------------+------------+
-| indirect item     | offset + 1 |TYPE_INDIRECT |   if this is not the first indirect item of the object
-+-------------------+------------+--------------+------------+
-| direct item       | offset + 1 |TYPE_DIRECT   | if not this is not the first direct item of the object
-+-------------------+------------+--------------+------------+
+/*
+ * there are 4 types of items: stat data, directory item, indirect, direct.
+ * +-------------------+------------+--------------+------------+
+ * |                   |  k_offset  | k_uniqueness | mergeable? |
+ * +-------------------+------------+--------------+------------+
+ * |     stat data     |     0      |      0       |   no       |
+ * +-------------------+------------+--------------+------------+
+ * | 1st directory item| DOT_OFFSET | DIRENTRY_ .. |   no       |
+ * | non 1st directory | hash value | UNIQUENESS   |   yes      |
+ * |     item          |            |              |            |
+ * +-------------------+------------+--------------+------------+
+ * | indirect item     | offset + 1 |TYPE_INDIRECT |    [1]     |
+ * +-------------------+------------+--------------+------------+
+ * | direct item       | offset + 1 |TYPE_DIRECT   |    [2]     |
+ * +-------------------+------------+--------------+------------+
+ *
+ * [1] if this is not the first indirect item of the object
+ * [2] if this is not the first direct item of the object
 */
 
 struct item_operations {
@@ -2280,22 +2641,30 @@
 /* number of blocks pointed to by the indirect item */
 #define I_UNFM_NUM(ih)	(ih_item_len(ih) / UNFM_P_SIZE)
 
-/* the used space within the unformatted node corresponding to pos within the item pointed to by ih */
+/*
+ * the used space within the unformatted node corresponding
+ * to pos within the item pointed to by ih
+ */
 #define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? (size) - ih_free_space(ih) : (size))
 
-/* number of bytes contained by the direct item or the unformatted nodes the indirect item points to */
+/*
+ * number of bytes contained by the direct item or the
+ * unformatted nodes the indirect item points to
+ */
 
-    /* following defines use reiserfs buffer header and item header */
+/* following defines use reiserfs buffer header and item header */
 
 /* get stat-data */
 #define B_I_STAT_DATA(bh, ih) ( (struct stat_data * )((bh)->b_data + ih_location(ih)) )
 
-// this is 3976 for size==4096
+/* this is 3976 for size==4096 */
 #define MAX_DIRECT_ITEM_LEN(size) ((size) - BLKH_SIZE - 2*IH_SIZE - SD_SIZE - UNFM_P_SIZE)
 
-/* indirect items consist of entries which contain blocknrs, pos
-   indicates which entry, and B_I_POS_UNFM_POINTER resolves to the
-   blocknr contained by the entry pos points to */
+/*
+ * indirect items consist of entries which contain blocknrs, pos
+ * indicates which entry, and B_I_POS_UNFM_POINTER resolves to the
+ * blocknr contained by the entry pos points to
+ */
 #define B_I_POS_UNFM_POINTER(bh, ih, pos)				\
 	le32_to_cpu(*(((unp_t *)ih_item_body(bh, ih)) + (pos)))
 #define PUT_B_I_POS_UNFM_POINTER(bh, ih, pos, val)			\
@@ -2306,9 +2675,9 @@
 	__u32 dirid;
 };
 
-/***************************************************************************/
-/*                    FUNCTION DECLARATIONS                                */
-/***************************************************************************/
+/***************************************************************************
+ *                    FUNCTION DECLARATIONS                                *
+ ***************************************************************************/
 
 #define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12)
 
@@ -2320,7 +2689,10 @@
 /* first block written in a commit.  */
 struct reiserfs_journal_desc {
 	__le32 j_trans_id;	/* id of commit */
-	__le32 j_len;		/* length of commit. len +1 is the commit block */
+
+	/* length of commit. len +1 is the commit block */
+	__le32 j_len;
+
 	__le32 j_mount_id;	/* mount id of this trans */
 	__le32 j_realblock[1];	/* real locations for each block */
 };
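So a transaction occupies, in log order, the descriptor block, j_len
copies of real blocks, and then the commit block, wrapping modulo the
journal size. A small sketch of that arithmetic; the names here are
illustrative, not journal.c's:

static unsigned long commit_offset(unsigned long desc_offset,
				   unsigned long j_len,
				   unsigned long journal_size)
{
	/* descriptor + j_len logged blocks; the next slot is the commit */
	return (desc_offset + j_len + 1) % journal_size;
}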
@@ -2347,22 +2719,35 @@
 #define set_commit_trans_id(c,val)     do { (c)->j_trans_id = cpu_to_le32 (val); } while (0)
 #define set_commit_trans_len(c,val)    do { (c)->j_len = cpu_to_le32 (val); } while (0)
 
-/* this header block gets written whenever a transaction is considered fully flushed, and is more recent than the
-** last fully flushed transaction.  fully flushed means all the log blocks and all the real blocks are on disk,
-** and this transaction does not need to be replayed.
-*/
+/*
+ * this header block gets written whenever a transaction is considered
+ * fully flushed, and is more recent than the last fully flushed transaction.
+ * fully flushed means all the log blocks and all the real blocks are on
+ * disk, and this transaction does not need to be replayed.
+ */
 struct reiserfs_journal_header {
-	__le32 j_last_flush_trans_id;	/* id of last fully flushed transaction */
-	__le32 j_first_unflushed_offset;	/* offset in the log of where to start replay after a crash */
+	/* id of last fully flushed transaction */
+	__le32 j_last_flush_trans_id;
+
+	/* offset in the log of where to start replay after a crash */
+	__le32 j_first_unflushed_offset;
+
 	__le32 j_mount_id;
 	/* 12 */ struct journal_params jh_journal;
 };
 
 /* biggest tunable defines are right here */
 #define JOURNAL_BLOCK_COUNT 8192	/* number of blocks in the journal */
-#define JOURNAL_TRANS_MAX_DEFAULT 1024	/* biggest possible single transaction, don't change for now (8/3/99) */
+
+/* biggest possible single transaction, don't change for now (8/3/99) */
+#define JOURNAL_TRANS_MAX_DEFAULT 1024
 #define JOURNAL_TRANS_MIN_DEFAULT 256
-#define JOURNAL_MAX_BATCH_DEFAULT   900	/* max blocks to batch into one transaction, don't make this any bigger than 900 */
+
+/*
+ * max blocks to batch into one transaction,
+ * don't make this any bigger than 900
+ */
+#define JOURNAL_MAX_BATCH_DEFAULT   900
 #define JOURNAL_MIN_RATIO 2
 #define JOURNAL_MAX_COMMIT_AGE 30
 #define JOURNAL_MAX_TRANS_AGE 30
@@ -2387,16 +2772,18 @@
 #define REISERFS_QUOTA_DEL_BLOCKS(s) 0
 #endif
 
-/* both of these can be as low as 1, or as high as you want.  The min is the
-** number of 4k bitmap nodes preallocated on mount. New nodes are allocated
-** as needed, and released when transactions are committed.  On release, if 
-** the current number of nodes is > max, the node is freed, otherwise, 
-** it is put on a free list for faster use later.
+/*
+ * both of these can be as low as 1, or as high as you want.  The min is the
+ * number of 4k bitmap nodes preallocated on mount. New nodes are allocated
+ * as needed, and released when transactions are committed.  On release, if
+ * the current number of nodes is > max, the node is freed, otherwise,
+ * it is put on a free list for faster use later.
 */
 #define REISERFS_MIN_BITMAP_NODES 10
 #define REISERFS_MAX_BITMAP_NODES 100
 
-#define JBH_HASH_SHIFT 13	/* these are based on journal hash size of 8192 */
+/* these are based on journal hash size of 8192 */
+#define JBH_HASH_SHIFT 13
 #define JBH_HASH_MASK 8191
 
 #define _jhashfn(sb,block)	\
@@ -2404,7 +2791,7 @@
 	 (((block)<<(JBH_HASH_SHIFT - 6)) ^ ((block) >> 13) ^ ((block) << (JBH_HASH_SHIFT - 12))))
 #define journal_hash(t,sb,block) ((t)[_jhashfn((sb),(block)) & JBH_HASH_MASK])
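journal_hash() only picks the bucket; the journal code then walks the
cnode chain comparing block number and super block. A sketch of that
lookup, assuming the blocknr/sb/hash_next members of struct
reiserfs_journal_cnode and an invented function name:

static struct reiserfs_journal_cnode *
cnode_lookup(struct reiserfs_journal_cnode **table,
	     struct super_block *sb, b_blocknr_t block)
{
	struct reiserfs_journal_cnode *cn = journal_hash(table, sb, block);

	while (cn && (cn->blocknr != block || cn->sb != sb))
		cn = cn->hash_next;	/* chain within the bucket */
	return cn;
}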
 
-// We need these to make journal.c code more readable
+/* We need these to make journal.c code more readable */
 #define journal_find_get_block(s, block) __find_get_block(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
 #define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
 #define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
@@ -2412,12 +2799,14 @@
 enum reiserfs_bh_state_bits {
 	BH_JDirty = BH_PrivateStart,	/* buffer is in current transaction */
 	BH_JDirty_wait,
-	BH_JNew,		/* disk block was taken off free list before
-				 * being in a finished transaction, or
-				 * written to disk. Can be reused immed. */
+	/*
+	 * disk block was taken off free list before being in a
+	 * finished transaction, or written to disk. Can be reused immed.
+	 */
+	BH_JNew,
 	BH_JPrepared,
 	BH_JRestore_dirty,
-	BH_JTest,		// debugging only will go away
+	BH_JTest,		/* debugging only, will go away */
 };
 
 BUFFER_FNS(JDirty, journaled);
@@ -2433,27 +2822,36 @@
 BUFFER_FNS(JTest, journal_test);
 TAS_BUFFER_FNS(JTest, journal_test);
 
-/*
-** transaction handle which is passed around for all journal calls
-*/
+/* transaction handle which is passed around for all journal calls */
 struct reiserfs_transaction_handle {
-	struct super_block *t_super;	/* super for this FS when journal_begin was
-					   called. saves calls to reiserfs_get_super
-					   also used by nested transactions to make
-					   sure they are nesting on the right FS
-					   _must_ be first in the handle
-					 */
+	/*
+	 * super for this FS when journal_begin was called. saves calls to
+	 * reiserfs_get_super. Also used by nested transactions to make
+	 * sure they are nesting on the right FS. _must_ be first
+	 * in the handle.
+	 */
+	struct super_block *t_super;
+
 	int t_refcount;
 	int t_blocks_logged;	/* number of blocks this writer has logged */
 	int t_blocks_allocated;	/* number of blocks this writer allocated */
-	unsigned int t_trans_id;	/* sanity check, equals the current trans id */
+
+	/* sanity check, equals the current trans id */
+	unsigned int t_trans_id;
+
 	void *t_handle_save;	/* save existing current->journal_info */
-	unsigned displace_new_blocks:1;	/* if new block allocation occurres, that block
-					   should be displaced from others */
+
+	/*
+	 * if new block allocation occurs, that block
+	 * should be displaced from others
+	 */
+	unsigned displace_new_blocks:1;
+
 	struct list_head t_list;
 };
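The usual life cycle of a handle, mirroring the journal_begin(&th, s, 10)
call visible in the resize.c hunk later in this patch; journal_end()'s
matching (th, sb, count) signature is assumed from the same era of this
code, and the function is a sketch, not a real caller:

static int example_transaction(struct super_block *s)
{
	struct reiserfs_transaction_handle th;
	int err;

	err = journal_begin(&th, s, 10);	/* reserve 10 log blocks */
	if (err)
		return err;

	/* ... modify buffers, journal_mark_dirty() each of them ... */

	return journal_end(&th, s, 10);
}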
 
-/* used to keep track of ordered and tail writes, attached to the buffer
+/*
+ * used to keep track of ordered and tail writes, attached to the buffer
  * head through b_journal_head.
  */
 struct reiserfs_jh {
@@ -2550,20 +2948,18 @@
 extern void copy_item_head(struct item_head *to,
 			   const struct item_head *from);
 
-// first key is in cpu form, second - le
+/* first key is in cpu form, second - le */
 extern int comp_short_keys(const struct reiserfs_key *le_key,
 			   const struct cpu_key *cpu_key);
 extern void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from);
 
-// both are in le form
+/* both are in le form */
 extern int comp_le_keys(const struct reiserfs_key *,
 			const struct reiserfs_key *);
 extern int comp_short_le_keys(const struct reiserfs_key *,
 			      const struct reiserfs_key *);
 
-//
-// get key version from on disk key - kludge
-//
+/* get key version from on disk key - kludge */
 static inline int le_key_version(const struct reiserfs_key *key)
 {
 	int type;
@@ -2640,12 +3036,12 @@
 
 /* inode.c */
 /* args for the create parameter of reiserfs_get_block */
-#define GET_BLOCK_NO_CREATE 0	/* don't create new blocks or convert tails */
-#define GET_BLOCK_CREATE 1	/* add anything you need to find block */
-#define GET_BLOCK_NO_HOLE 2	/* return -ENOENT for file holes */
-#define GET_BLOCK_READ_DIRECT 4	/* read the tail if indirect item not found */
-#define GET_BLOCK_NO_IMUX     8	/* i_mutex is not held, don't preallocate */
-#define GET_BLOCK_NO_DANGLE   16	/* don't leave any transactions running */
+#define GET_BLOCK_NO_CREATE 0	 /* don't create new blocks or convert tails */
+#define GET_BLOCK_CREATE 1	 /* add anything you need to find block */
+#define GET_BLOCK_NO_HOLE 2	 /* return -ENOENT for file holes */
+#define GET_BLOCK_READ_DIRECT 4	 /* read the tail if indirect item not found */
+#define GET_BLOCK_NO_IMUX     8	 /* i_mutex is not held, don't preallocate */
+#define GET_BLOCK_NO_DANGLE   16 /* don't leave any transactions running */
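These flags travel in the create argument of reiserfs_get_block() and may
be or-ed together. As a hedged sketch, assuming the usual get_block-style
prototype declared in this header, a plain lookup that refuses to map
holes could be requested like this:

static int map_existing_block(struct inode *inode, sector_t block,
			      struct buffer_head *bh_result)
{
	/* no creation, no tail conversion; -ENOENT on a file hole */
	return reiserfs_get_block(inode, block, bh_result,
				  GET_BLOCK_NO_HOLE);
}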
 
 void reiserfs_read_locked_inode(struct inode *inode,
 				struct reiserfs_iget_args *args);
@@ -2844,25 +3240,49 @@
 
 /* bitmap.c */
 
-/* structure contains hints for block allocator, and it is a container for
- * arguments, such as node, search path, transaction_handle, etc. */
+/*
+ * structure contains hints for block allocator, and it is a container for
+ * arguments, such as node, search path, transaction_handle, etc.
+ */
 struct __reiserfs_blocknr_hint {
-	struct inode *inode;	/* inode passed to allocator, if we allocate unf. nodes */
+	/* inode passed to allocator, if we allocate unf. nodes */
+	struct inode *inode;
+
 	sector_t block;		/* file offset, in blocks */
 	struct in_core_key key;
-	struct treepath *path;	/* search path, used by allocator to deternine search_start by
-				 * various ways */
-	struct reiserfs_transaction_handle *th;	/* transaction handle is needed to log super blocks and
-						 * bitmap blocks changes  */
-	b_blocknr_t beg, end;
-	b_blocknr_t search_start;	/* a field used to transfer search start value (block number)
-					 * between different block allocator procedures
-					 * (determine_search_start() and others) */
-	int prealloc_size;	/* is set in determine_prealloc_size() function, used by underlayed
-				 * function that do actual allocation */
 
-	unsigned formatted_node:1;	/* the allocator uses different polices for getting disk space for
-					 * formatted/unformatted blocks with/without preallocation */
+	/*
+	 * search path, used by allocator to determine search_start
+	 * in various ways
+	 */
+	struct treepath *path;
+
+	/*
+	 * transaction handle is needed to log super blocks
+	 * and bitmap blocks changes
+	 */
+	struct reiserfs_transaction_handle *th;
+
+	b_blocknr_t beg, end;
+
+	/*
+	 * a field used to transfer search start value (block number)
+	 * between different block allocator procedures
+	 * (determine_search_start() and others)
+	 */
+	b_blocknr_t search_start;
+
+	/*
+	 * set in the determine_prealloc_size() function,
+	 * used by the underlying function that does the actual allocation
+	 */
+	int prealloc_size;
+
+	/*
+	 * the allocator uses different policies for getting disk
+	 * space for formatted/unformatted blocks with/without preallocation
+	 */
+	unsigned formatted_node:1;
 	unsigned preallocate:1;
 };
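A sketch of how a caller might populate the hint before asking the
allocator for a single unformatted block. The fields are the ones in the
struct above; the helper name is invented and the flag choices are only
one plausible combination:

static void init_unf_hint(struct __reiserfs_blocknr_hint *hint,
			  struct reiserfs_transaction_handle *th,
			  struct inode *inode, sector_t file_block)
{
	memset(hint, 0, sizeof(*hint));
	hint->th = th;			/* logs super/bitmap changes */
	hint->inode = inode;		/* unformatted node allocation */
	hint->block = file_block;	/* file offset, in blocks */
	hint->formatted_node = 0;	/* data, not a tree node */
	hint->preallocate = 1;		/* ask for preallocation */
}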
 
@@ -2956,13 +3376,15 @@
 #define reiserfs_test_le_bit		test_bit_le
 #define reiserfs_find_next_zero_le_bit	find_next_zero_bit_le
 
-/* sometimes reiserfs_truncate may require to allocate few new blocks
-   to perform indirect2direct conversion. People probably used to
-   think, that truncate should work without problems on a filesystem
-   without free disk space. They may complain that they can not
-   truncate due to lack of free disk space. This spare space allows us
-   to not worry about it. 500 is probably too much, but it should be
-   absolutely safe */
+/*
+ * sometimes reiserfs_truncate may need to allocate a few new blocks
+ * to perform indirect2direct conversion. People probably used to
+ * think that truncate should work without problems on a filesystem
+ * without free disk space. They may complain that they cannot
+ * truncate due to lack of free disk space. This spare space allows us
+ * to not worry about it. 500 is probably too much, but it should be
+ * absolutely safe.
+ */
 #define SPARE_SPACE 500
 
 /* prototypes from ioctl.c */
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index a4ef5cd..037b00c 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -53,8 +53,10 @@
 	}
 	bforget(bh);
 
-	/* old disk layout detection; those partitions can be mounted, but
-	 * cannot be resized */
+	/*
+	 * old disk layout detection; those partitions can be mounted, but
+	 * cannot be resized
+	 */
 	if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
 	    != REISERFS_DISK_OFFSET_IN_BYTES) {
 		printk
@@ -86,12 +88,14 @@
 			    ("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
 			return -ENOMEM;
 		}
-		/* the new journal bitmaps are zero filled, now we copy in the bitmap
-		 ** node pointers from the old journal bitmap structs, and then
-		 ** transfer the new data structures into the journal struct.
-		 **
-		 ** using the copy_size var below allows this code to work for
-		 ** both shrinking and expanding the FS.
+		/*
+		 * the new journal bitmaps are zero filled, now we copy in
+		 * the bitmap node pointers from the old journal bitmap
+		 * structs, and then transfer the new data structures
+		 * into the journal struct.
+		 *
+		 * using the copy_size var below allows this code to work for
+		 * both shrinking and expanding the FS.
 		 */
 		copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
 		copy_size =
@@ -101,36 +105,45 @@
 			jb = SB_JOURNAL(s)->j_list_bitmap + i;
 			memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);
 
-			/* just in case vfree schedules on us, copy the new
-			 ** pointer into the journal struct before freeing the
-			 ** old one
+			/*
+			 * just in case vfree schedules on us, copy the new
+			 * pointer into the journal struct before freeing the
+			 * old one
 			 */
 			node_tmp = jb->bitmaps;
 			jb->bitmaps = jbitmap[i].bitmaps;
 			vfree(node_tmp);
 		}
 
-		/* allocate additional bitmap blocks, reallocate array of bitmap
-		 * block pointers */
+		/*
+		 * allocate additional bitmap blocks, reallocate
+		 * array of bitmap block pointers
+		 */
 		bitmap =
 		    vzalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
 		if (!bitmap) {
-			/* Journal bitmaps are still supersized, but the memory isn't
-			 * leaked, so I guess it's ok */
+			/*
+			 * Journal bitmaps are still supersized, but the
+			 * memory isn't leaked, so I guess it's ok
+			 */
 			printk("reiserfs_resize: unable to allocate memory.\n");
 			return -ENOMEM;
 		}
 		for (i = 0; i < bmap_nr; i++)
 			bitmap[i] = old_bitmap[i];
 
-		/* This doesn't go through the journal, but it doesn't have to.
-		 * The changes are still atomic: We're synced up when the journal
-		 * transaction begins, and the new bitmaps don't matter if the
-		 * transaction fails. */
+		/*
+		 * This doesn't go through the journal, but it doesn't have to.
+		 * The changes are still atomic: We're synced up when the
+		 * journal transaction begins, and the new bitmaps don't
+		 * matter if the transaction fails.
+		 */
 		for (i = bmap_nr; i < bmap_nr_new; i++) {
 			int depth;
-			/* don't use read_bitmap_block since it will cache
-			 * the uninitialized bitmap */
+			/*
+			 * don't use read_bitmap_block since it will cache
+			 * the uninitialized bitmap
+			 */
 			depth = reiserfs_write_unlock_nested(s);
 			bh = sb_bread(s, i * s->s_blocksize * 8);
 			reiserfs_write_lock_nested(s, depth);
@@ -147,7 +160,7 @@
 			depth = reiserfs_write_unlock_nested(s);
 			sync_dirty_buffer(bh);
 			reiserfs_write_lock_nested(s, depth);
-			// update bitmap_info stuff
+			/* update bitmap_info stuff */
 			bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
 			brelse(bh);
 		}
@@ -156,9 +169,11 @@
 		vfree(old_bitmap);
 	}
 
-	/* begin transaction, if there was an error, it's fine. Yes, we have
+	/*
+	 * begin transaction, if there was an error, it's fine. Yes, we have
 	 * incorrect bitmaps now, but none of it is ever going to touch the
-	 * disk anyway. */
+	 * disk anyway.
+	 */
 	err = journal_begin(&th, s, 10);
 	if (err)
 		return err;
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 40b3e77..aa86757 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -8,46 +8,6 @@
  *  Pereslavl-Zalessky Russia
  */
 
-/*
- *  This file contains functions dealing with S+tree
- *
- * B_IS_IN_TREE
- * copy_item_head
- * comp_short_keys
- * comp_keys
- * comp_short_le_keys
- * le_key2cpu_key
- * comp_le_keys
- * bin_search
- * get_lkey
- * get_rkey
- * key_in_buffer
- * decrement_bcount
- * reiserfs_check_path
- * pathrelse_and_restore
- * pathrelse
- * search_by_key_reada
- * search_by_key
- * search_for_position_by_key
- * comp_items
- * prepare_for_direct_item
- * prepare_for_direntry_item
- * prepare_for_delete_or_cut
- * calc_deleted_bytes_number
- * init_tb_struct
- * padd_item
- * reiserfs_delete_item
- * reiserfs_delete_solid_item
- * reiserfs_delete_object
- * maybe_indirect_to_direct
- * indirect_to_direct_roll_back
- * reiserfs_cut_from_item
- * truncate_directory
- * reiserfs_do_truncate
- * reiserfs_paste_into_item
- * reiserfs_insert_item
- */
-
 #include <linux/time.h>
 #include <linux/string.h>
 #include <linux/pagemap.h>
@@ -65,21 +25,21 @@
 	return (B_LEVEL(bh) != FREE_LEVEL);
 }
 
-//
-// to gets item head in le form
-//
+/* to get item head in le form */
 inline void copy_item_head(struct item_head *to,
 			   const struct item_head *from)
 {
 	memcpy(to, from, IH_SIZE);
 }
 
-/* k1 is pointer to on-disk structure which is stored in little-endian
-   form. k2 is pointer to cpu variable. For key of items of the same
-   object this returns 0.
-   Returns: -1 if key1 < key2
-   0 if key1 == key2
-   1 if key1 > key2 */
+/*
+ * k1 is pointer to on-disk structure which is stored in little-endian
+ * form. k2 is pointer to cpu variable. For keys of items of the same
+ * object this returns 0.
+ * Returns: -1 if key1 < key2
+ *           0 if key1 == key2
+ *           1 if key1 > key2
+ */
 inline int comp_short_keys(const struct reiserfs_key *le_key,
 			   const struct cpu_key *cpu_key)
 {
@@ -97,11 +57,13 @@
 	return 0;
 }
 
-/* k1 is pointer to on-disk structure which is stored in little-endian
-   form. k2 is pointer to cpu variable.
-   Compare keys using all 4 key fields.
-   Returns: -1 if key1 < key2 0
-   if key1 = key2 1 if key1 > key2 */
+/*
+ * k1 is pointer to on-disk structure which is stored in little-endian
+ * form. k2 is pointer to cpu variable.
+ * Compare keys using all 4 key fields.
+ * Returns: -1 if key1 < key2
+ *           0 if key1 == key2
+ *           1 if key1 > key2
+ */
 static inline int comp_keys(const struct reiserfs_key *le_key,
 			    const struct cpu_key *cpu_key)
 {
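
Both comparators follow the usual field-by-field three-way scheme: compare the
most significant field first and fall through to the next one only on equality.
A standalone sketch with two hypothetical fields:

	/* Three-way comparison, most significant field first. */
	struct toy_key {
		unsigned int dir_id;
		unsigned int objectid;
	};

	static int comp_toy_keys(const struct toy_key *k1,
				 const struct toy_key *k2)
	{
		if (k1->dir_id != k2->dir_id)
			return k1->dir_id < k2->dir_id ? -1 : 1;
		if (k1->objectid != k2->objectid)
			return k1->objectid < k2->objectid ? -1 : 1;
		return 0;	/* all fields equal */
	}
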
@@ -155,15 +117,17 @@
 	to->on_disk_key.k_dir_id = le32_to_cpu(from->k_dir_id);
 	to->on_disk_key.k_objectid = le32_to_cpu(from->k_objectid);
 
-	// find out version of the key
+	/* find out version of the key */
 	version = le_key_version(from);
 	to->version = version;
 	to->on_disk_key.k_offset = le_key_k_offset(version, from);
 	to->on_disk_key.k_type = le_key_k_type(version, from);
 }
 
-// this does not say which one is bigger, it only returns 1 if keys
-// are not equal, 0 otherwise
+/*
+ * this does not say which one is bigger, it only returns 1 if keys
+ * are not equal, 0 otherwise
+ */
 inline int comp_le_keys(const struct reiserfs_key *k1,
 			const struct reiserfs_key *k2)
 {
@@ -177,24 +141,27 @@
  *        *pos = number of the searched element if found, else the        *
  *        number of the first element that is larger than key.            *
  **************************************************************************/
-/* For those not familiar with binary search: lbound is the leftmost item that it
- could be, rbound the rightmost item that it could be.  We examine the item
- halfway between lbound and rbound, and that tells us either that we can increase
- lbound, or decrease rbound, or that we have found it, or if lbound <= rbound that
- there are no possible items, and we have not found it. With each examination we
- cut the number of possible items it could be by one more than half rounded down,
- or we find it. */
+/*
+ * For those not familiar with binary search: lbound is the leftmost item
+ * that it could be, rbound the rightmost item that it could be.  We examine
+ * the item halfway between lbound and rbound, and that tells us either
+ * that we can increase lbound, or decrease rbound, or that we have found it,
+ * or if lbound > rbound that there are no possible items, and we have not
+ * found it. With each examination we cut the number of possible items it
+ * could be by one more than half rounded down, or we find it.
+ */
 static inline int bin_search(const void *key,	/* Key to search for. */
 			     const void *base,	/* First item in the array. */
 			     int num,	/* Number of items in the array. */
-			     int width,	/* Item size in the array.
-					   searched. Lest the reader be
-					   confused, note that this is crafted
-					   as a general function, and when it
-					   is applied specifically to the array
-					   of item headers in a node, width
-					   is actually the item header size not
-					   the item size. */
+			     /*
+			      * Item size in the array.  Lest the
+			      * reader be confused, note that this is crafted
+			      * as a general function, and when it is applied
+			      * specifically to the array of item headers in a
+			      * node, width is actually the item header size
+			      * not the item size.
+			      */
+			     int width,
 			     int *pos /* Number of the searched for element. */
     )
 {
@@ -216,8 +183,10 @@
 			return ITEM_FOUND;	/* Key found in the array.  */
 		}
 
-	/* bin_search did not find given key, it returns position of key,
-	   that is minimal and greater than the given one. */
+	/*
+	 * bin_search did not find given key, it returns position of key,
+	 * that is minimal and greater than the given one.
+	 */
 	*pos = lbound;
 	return ITEM_NOT_FOUND;
 }
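
The lbound/rbound description above translates almost directly into code. A
standalone sketch over a generic fixed-width array; the names and the
comparator callback are stand-ins, not the kernel function:

	#define TOY_ITEM_FOUND		1
	#define TOY_ITEM_NOT_FOUND	0

	static int toy_bin_search(const void *key, const void *base, int num,
				  int width,
				  int (*cmp)(const void *, const void *),
				  int *pos)
	{
		int lbound = 0, rbound = num - 1;

		while (lbound <= rbound) {
			int j = (lbound + rbound) / 2;
			int diff = cmp((const char *)base + j * width, key);

			if (diff < 0)
				lbound = j + 1;	/* key lies to the right */
			else if (diff > 0)
				rbound = j - 1;	/* key lies to the left */
			else {
				*pos = j;
				return TOY_ITEM_FOUND;
			}
		}
		/* first element greater than the key, as described above */
		*pos = lbound;
		return TOY_ITEM_NOT_FOUND;
	}
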
@@ -234,10 +203,14 @@
 	  __constant_cpu_to_le32(0xffffffff)},}
 };
 
-/* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom
-   of the path, and going upwards.  We must check the path's validity at each step.  If the key is not in
-   the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this
-   case we return a special key, either MIN_KEY or MAX_KEY. */
+/*
+ * Get delimiting key of the buffer by looking for it in the buffers in the
+ * path, starting from the bottom of the path, and going upwards.  We must
+ * check the path's validity at each step.  If the key is not in the path,
+ * there is no delimiting key in the tree (buffer is first or last buffer
+ * in tree), and in this case we return a special key, either MIN_KEY or
+ * MAX_KEY.
+ */
 static inline const struct reiserfs_key *get_lkey(const struct treepath *chk_path,
 						  const struct super_block *sb)
 {
@@ -270,7 +243,10 @@
 		    PATH_OFFSET_PBUFFER(chk_path,
 					path_offset + 1)->b_blocknr)
 			return &MAX_KEY;
-		/* Return delimiting key if position in the parent is not equal to zero. */
+		/*
+		 * Return delimiting key if position in the parent
+		 * is not equal to zero.
+		 */
 		if (position)
 			return internal_key(parent, position - 1);
 	}
@@ -308,15 +284,23 @@
 					  path_offset)) >
 		    B_NR_ITEMS(parent))
 			return &MIN_KEY;
-		/* Check whether parent at the path really points to the child. */
+		/*
+		 * Check whether parent at the path really points
+		 * to the child.
+		 */
 		if (B_N_CHILD_NUM(parent, position) !=
 		    PATH_OFFSET_PBUFFER(chk_path,
 					path_offset + 1)->b_blocknr)
 			return &MIN_KEY;
-		/* Return delimiting key if position in the parent is not the last one. */
+
+		/*
+		 * Return delimiting key if position in the parent
+		 * is not the last one.
+		 */
 		if (position != B_NR_ITEMS(parent))
 			return internal_key(parent, position);
 	}
+
 	/* Return MAX_KEY if we are in the root of the buffer tree. */
 	if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
 	    b_blocknr == SB_ROOT_BLOCK(sb))
@@ -324,13 +308,20 @@
 	return &MIN_KEY;
 }
 
-/* Check whether a key is contained in the tree rooted from a buffer at a path. */
-/* This works by looking at the left and right delimiting keys for the buffer in the last path_element in
-   the path.  These delimiting keys are stored at least one level above that buffer in the tree. If the
-   buffer is the first or last node in the tree order then one of the delimiting keys may be absent, and in
-   this case get_lkey and get_rkey return a special key which is MIN_KEY or MAX_KEY. */
-static inline int key_in_buffer(struct treepath *chk_path,	/* Path which should be checked.  */
-				const struct cpu_key *key,	/* Key which should be checked.   */
+/*
+ * Check whether a key is contained in the tree rooted from a buffer at a path.
+ * This works by looking at the left and right delimiting keys for the buffer
+ * in the last path_element in the path.  These delimiting keys are stored
+ * at least one level above that buffer in the tree. If the buffer is the
+ * first or last node in the tree order then one of the delimiting keys may
+ * be absent, and in this case get_lkey and get_rkey return a special key
+ * which is MIN_KEY or MAX_KEY.
+ */
+static inline int key_in_buffer(
+				/* Path which should be checked. */
+				struct treepath *chk_path,
+				/* Key which should be checked. */
+				const struct cpu_key *key,
 				struct super_block *sb
     )
 {
@@ -359,9 +350,11 @@
 	return 0;
 }
 
-/* Drop the reference to each buffer in a path and restore
+/*
+ * Drop the reference to each buffer in a path and restore
  * dirty bits clean when preparing the buffer for the log.
- * This version should only be called from fix_nodes() */
+ * This version should only be called from fix_nodes()
+ */
 void pathrelse_and_restore(struct super_block *sb,
 			   struct treepath *search_path)
 {
@@ -418,14 +411,17 @@
 	}
 	ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1;
 	used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location(ih));
+
+	/* free space does not match the calculated amount of used space */
 	if (used_space != blocksize - blkh_free_space(blkh)) {
-		/* free space does not match to calculated amount of use space */
 		reiserfs_warning(NULL, "reiserfs-5082",
 				 "free space seems wrong: %z", bh);
 		return 0;
 	}
-	// FIXME: it is_leaf will hit performance too much - we may have
-	// return 1 here
+	/*
+	 * FIXME: calling is_leaf here will hit performance too much -
+	 * we may have to return 1 here
+	 */
 
 	/* check tables of item heads */
 	ih = (struct item_head *)(buf + BLKH_SIZE);
@@ -460,7 +456,7 @@
 		prev_location = ih_location(ih);
 	}
 
-	// one may imagine much more checks
+	/* one may imagine many more checks */
 	return 1;
 }
 
@@ -481,8 +477,8 @@
 	}
 
 	nr = blkh_nr_item(blkh);
+	/* for an internal node that is not the root we might check min keys */
 	if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) {
-		/* for internal which is not root we might check min number of keys */
 		reiserfs_warning(NULL, "reiserfs-5088",
 				 "number of key seems wrong: %z", bh);
 		return 0;
@@ -494,12 +490,15 @@
 				 "free space seems wrong: %z", bh);
 		return 0;
 	}
-	// one may imagine much more checks
+
+	/* one may imagine many more checks */
 	return 1;
 }
 
-// make sure that bh contains formatted node of reiserfs tree of
-// 'level'-th level
+/*
+ * make sure that bh contains a formatted node of the reiserfs tree
+ * at the 'level'-th level
+ */
 static int is_tree_node(struct buffer_head *bh, int level)
 {
 	if (B_LEVEL(bh) != level) {
@@ -546,7 +545,8 @@
 	for (j = 0; j < i; j++) {
 		/*
 		 * note, this needs attention if we are getting rid of the BKL
-		 * you have to make sure the prepared bit isn't set on this buffer
+		 * you have to make sure the prepared bit isn't set on this
+		 * buffer
 		 */
 		if (!buffer_uptodate(bh[j])) {
 			if (depth == -1)
@@ -558,39 +558,34 @@
 	return depth;
 }
 
-/**************************************************************************
- * Algorithm   SearchByKey                                                *
- *             look for item in the Disk S+Tree by its key                *
- * Input:  sb   -  super block                                            *
- *         key  - pointer to the key to search                            *
- * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR                         *
- *         search_path - path from the root to the needed leaf            *
- **************************************************************************/
-
-/* This function fills up the path from the root to the leaf as it
-   descends the tree looking for the key.  It uses reiserfs_bread to
-   try to find buffers in the cache given their block number.  If it
-   does not find them in the cache it reads them from disk.  For each
-   node search_by_key finds using reiserfs_bread it then uses
-   bin_search to look through that node.  bin_search will find the
-   position of the block_number of the next node if it is looking
-   through an internal node.  If it is looking through a leaf node
-   bin_search will find the position of the item which has key either
-   equal to given key, or which is the maximal key less than the given
-   key.  search_by_key returns a path that must be checked for the
-   correctness of the top of the path but need not be checked for the
-   correctness of the bottom of the path */
-/* The function is NOT SCHEDULE-SAFE! */
-int search_by_key(struct super_block *sb, const struct cpu_key *key,	/* Key to search. */
-		  struct treepath *search_path,/* This structure was
-						   allocated and initialized
-						   by the calling
-						   function. It is filled up
-						   by this function.  */
-		  int stop_level	/* How far down the tree to search. To
-					   stop at leaf level - set to
-					   DISK_LEAF_NODE_LEVEL */
-    )
+/*
+ * This function fills up the path from the root to the leaf as it
+ * descends the tree looking for the key.  It uses reiserfs_bread to
+ * try to find buffers in the cache given their block number.  If it
+ * does not find them in the cache it reads them from disk.  For each
+ * node search_by_key finds using reiserfs_bread it then uses
+ * bin_search to look through that node.  bin_search will find the
+ * position of the block_number of the next node if it is looking
+ * through an internal node.  If it is looking through a leaf node
+ * bin_search will find the position of the item which has key either
+ * equal to given key, or which is the maximal key less than the given
+ * key.  search_by_key returns a path that must be checked for the
+ * correctness of the top of the path but need not be checked for the
+ * correctness of the bottom of the path
+ */
+/*
+ * search_by_key - search for key (and item) in stree
+ * @sb: superblock
+ * @key: pointer to key to search for
+ * @search_path: allocated and initialized struct treepath; filled in
+ *		 on success.
+ * @stop_level: how far down the tree to search; use DISK_LEAF_NODE_LEVEL
+ *		to stop at the leaf level.
+ *
+ * The function is NOT SCHEDULE-SAFE!
+ */
+int search_by_key(struct super_block *sb, const struct cpu_key *key,
+		  struct treepath *search_path, int stop_level)
 {
 	b_blocknr_t block_number;
 	int expected_level;
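
A hypothetical caller of the rewritten signature could look as follows;
INITIALIZE_PATH and pathrelse() are the reiserfs helpers used elsewhere in
this file, and the error handling is abbreviated:

	static int toy_lookup(struct super_block *sb, const struct cpu_key *key)
	{
		INITIALIZE_PATH(path);
		int retval;

		retval = search_by_key(sb, key, &path, DISK_LEAF_NODE_LEVEL);
		if (retval == IO_ERROR) {
			pathrelse(&path);
			return -EIO;
		}
		/* ... examine tp_item_head(&path) here ... */
		pathrelse(&path);
		return retval == ITEM_FOUND ? 0 : -ENOENT;
	}

Note that the path holds a reference on every buffer in it, so pathrelse()
must run on every exit, as the comment in the next hunk stresses.
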
@@ -609,17 +604,22 @@
 
 	PROC_INFO_INC(sb, search_by_key);
 
-	/* As we add each node to a path we increase its count.  This means that
-	   we must be careful to release all nodes in a path before we either
-	   discard the path struct or re-use the path struct, as we do here. */
+	/*
+	 * As we add each node to a path we increase its count.  This means
+	 * that we must be careful to release all nodes in a path before we
+	 * either discard the path struct or re-use the path struct, as we
+	 * do here.
+	 */
 
 	pathrelse(search_path);
 
 	right_neighbor_of_leaf_node = 0;
 
-	/* With each iteration of this loop we search through the items in the
-	   current node, and calculate the next current node(next path element)
-	   for the next iteration of this loop.. */
+	/*
+	 * With each iteration of this loop we search through the items in the
+	 * current node, and calculate the next current node (next path
+	 * element) for the next iteration of this loop.
+	 */
 	block_number = SB_ROOT_BLOCK(sb);
 	expected_level = -1;
 	while (1) {
@@ -639,8 +639,10 @@
 					 ++search_path->path_length);
 		fs_gen = get_generation(sb);
 
-		/* Read the next tree node, and set the last element in the path to
-		   have a pointer to it. */
+		/*
+		 * Read the next tree node, and set the last element
+		 * in the path to have a pointer to it.
+		 */
 		if ((bh = last_element->pe_buffer =
 		     sb_getblk(sb, block_number))) {
 
@@ -676,9 +678,12 @@
 			expected_level = SB_TREE_HEIGHT(sb);
 		expected_level--;
 
-		/* It is possible that schedule occurred. We must check whether the key
-		   to search is still in the tree rooted from the current buffer. If
-		   not then repeat search from the root. */
+		/*
+		 * It is possible that schedule occurred. We must check
+		 * whether the key to search is still in the tree rooted
+		 * from the current buffer. If not then repeat search
+		 * from the root.
+		 */
 		if (fs_changed(fs_gen, sb) &&
 		    (!B_IS_IN_TREE(bh) ||
 		     B_LEVEL(bh) != expected_level ||
@@ -689,8 +694,10 @@
 				      sbk_restarted[expected_level - 1]);
 			pathrelse(search_path);
 
-			/* Get the root block number so that we can repeat the search
-			   starting from the root. */
+			/*
+			 * Get the root block number so that we can
+			 * repeat the search starting from the root.
+			 */
 			block_number = SB_ROOT_BLOCK(sb);
 			expected_level = -1;
 			right_neighbor_of_leaf_node = 0;
@@ -699,9 +706,11 @@
 			continue;
 		}
 
-		/* only check that the key is in the buffer if key is not
-		   equal to the MAX_KEY. Latter case is only possible in
-		   "finish_unfinished()" processing during mount. */
+		/*
+		 * only check that the key is in the buffer if key is not
+		 * equal to the MAX_KEY. The latter case is only possible in
+		 * "finish_unfinished()" processing during mount.
+		 */
 		RFALSE(comp_keys(&MAX_KEY, key) &&
 		       !key_in_buffer(search_path, key, sb),
 		       "PAP-5130: key is not in the buffer");
@@ -713,8 +722,10 @@
 		}
 #endif
 
-		// make sure, that the node contents look like a node of
-		// certain level
+		/*
+		 * make sure that the node contents look like a node of
+		 * the expected level
+		 */
 		if (!is_tree_node(bh, expected_level)) {
 			reiserfs_error(sb, "vs-5150",
 				       "invalid format found in block %ld. "
@@ -743,21 +754,31 @@
 		}
 
 		/* we are not in the stop level */
+		/*
+		 * item has been found, so we choose the pointer which
+		 * is to the right of the found one
+		 */
 		if (retval == ITEM_FOUND)
-			/* item has been found, so we choose the pointer which is to the right of the found one */
 			last_element->pe_position++;
 
-		/* if item was not found we choose the position which is to
-		   the left of the found item. This requires no code,
-		   bin_search did it already. */
+		/*
+		 * if item was not found we choose the position which is to
+		 * the left of the found item. This requires no code,
+		 * bin_search did it already.
+		 */
 
-		/* So we have chosen a position in the current node which is
-		   an internal node.  Now we calculate child block number by
-		   position in the node. */
+		/*
+		 * So we have chosen a position in the current node which is
+		 * an internal node.  Now we calculate child block number by
+		 * position in the node.
+		 */
 		block_number =
 		    B_N_CHILD_NUM(bh, last_element->pe_position);
 
-		/* if we are going to read leaf nodes, try for read ahead as well */
+		/*
+		 * if we are going to read leaf nodes, try for read
+		 * ahead as well
+		 */
 		if ((search_path->reada & PATH_READA) &&
 		    node_level == DISK_LEAF_NODE_LEVEL + 1) {
 			int pos = last_element->pe_position;
@@ -789,26 +810,28 @@
 	}
 }
 
-/* Form the path to an item and position in this item which contains
-   file byte defined by key. If there is no such item
-   corresponding to the key, we point the path to the item with
-   maximal key less than key, and *pos_in_item is set to one
-   past the last entry/byte in the item.  If searching for entry in a
-   directory item, and it is not found, *pos_in_item is set to one
-   entry more than the entry with maximal key which is less than the
-   sought key.
-
-   Note that if there is no entry in this same node which is one more,
-   then we point to an imaginary entry.  for direct items, the
-   position is in units of bytes, for indirect items the position is
-   in units of blocknr entries, for directory items the position is in
-   units of directory entries.  */
-
+/*
+ * Form the path to an item and position in this item which contains
+ * the file byte defined by key. If there is no such item
+ * corresponding to the key, we point the path to the item with
+ * the maximal key less than key, and *pos_in_item is set to one
+ * past the last entry/byte in the item.  If searching for an entry
+ * in a directory item, and it is not found, *pos_in_item is set to
+ * one entry more than the entry with the maximal key which is less
+ * than the sought key.
+ *
+ * Note that if there is no entry in this same node which is one more,
+ * then we point to an imaginary entry.  For direct items, the
+ * position is in units of bytes, for indirect items the position is
+ * in units of blocknr entries, for directory items the position is in
+ * units of directory entries.
+ */
 /* The function is NOT SCHEDULE-SAFE! */
-int search_for_position_by_key(struct super_block *sb,	/* Pointer to the super block.          */
-			       const struct cpu_key *p_cpu_key,	/* Key to search (cpu variable)         */
-			       struct treepath *search_path	/* Filled up by this function.          */
-    )
+int search_for_position_by_key(struct super_block *sb,
+			       /* Key to search (cpu variable) */
+			       const struct cpu_key *p_cpu_key,
+			       /* Filled up by this function. */
+			       struct treepath *search_path)
 {
 	struct item_head *p_le_ih;	/* pointer to on-disk structure */
 	int blk_size;
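
As the comment above spells out, the unit of *pos_in_item depends on the item
type. A small hypothetical helper mirroring the conversion done later in this
function, where UNFM_P_SIZE is the size of one unformatted-node pointer:

	/* Position units: bytes for direct items, pointer slots for indirect. */
	static unsigned int toy_pos_in_item(int is_indirect,
					    unsigned int byte_off)
	{
		if (is_indirect)
			return byte_off / UNFM_P_SIZE;	/* blocknr entries */
		return byte_off;			/* plain bytes */
	}
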
@@ -851,7 +874,8 @@
 	if (comp_short_keys(&(p_le_ih->ih_key), p_cpu_key)) {
 		return FILE_NOT_FOUND;
 	}
-	// FIXME: quite ugly this far
+
+	/* FIXME: quite ugly this far */
 
 	item_offset = le_ih_k_offset(p_le_ih);
 	offset = cpu_key_k_offset(p_cpu_key);
@@ -866,8 +890,10 @@
 		return POSITION_FOUND;
 	}
 
-	/* Needed byte is not contained in the item pointed to by the
-	   path. Set pos_in_item out of the item. */
+	/*
+	 * Needed byte is not contained in the item pointed to by the
+	 * path. Set pos_in_item out of the item.
+	 */
 	if (is_indirect_le_ih(p_le_ih))
 		pos_in_item(search_path) =
 		    ih_item_len(p_le_ih) / UNFM_P_SIZE;
@@ -896,15 +922,13 @@
 	return memcmp(stored_ih, ih, IH_SIZE);
 }
 
-/* unformatted nodes are not logged anymore, ever.  This is safe
-** now
-*/
+/* unformatted nodes are not logged anymore, ever.  This is safe now */
 #define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)
 
-// block can not be forgotten as it is in I/O or held by someone
+/* block can not be forgotten as it is in I/O or held by someone */
 #define block_in_use(bh) (buffer_locked(bh) || (held_by_others(bh)))
 
-// prepare for delete or cut of direct item
+/* prepare for delete or cut of direct item */
 static inline int prepare_for_direct_item(struct treepath *path,
 					  struct item_head *le_ih,
 					  struct inode *inode,
@@ -917,9 +941,8 @@
 		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
 		return M_DELETE;
 	}
-	// new file gets truncated
+	/* new file gets truncated */
 	if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
-		//
 		round_len = ROUND_UP(new_file_length);
 		/* this was new_file_length < le_ih ... */
 		if (round_len < le_ih_k_offset(le_ih)) {
@@ -933,12 +956,13 @@
 		return M_CUT;	/* Cut from this item. */
 	}
 
-	// old file: items may have any length
+	/* old file: items may have any length */
 
 	if (new_file_length < le_ih_k_offset(le_ih)) {
 		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
 		return M_DELETE;	/* Delete this item. */
 	}
+
 	/* Calculate first position and size for cutting from item. */
 	*cut_size = -(ih_item_len(le_ih) -
 		      (pos_in_item(path) =
@@ -957,12 +981,15 @@
 		RFALSE(ih_entry_count(le_ih) != 2,
 		       "PAP-5220: incorrect empty directory item (%h)", le_ih);
 		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
-		return M_DELETE;	/* Delete the directory item containing "." and ".." entry. */
+		/* Delete the directory item containing "." and ".." entry. */
+		return M_DELETE;
 	}
 
 	if (ih_entry_count(le_ih) == 1) {
-		/* Delete the directory item such as there is one record only
-		   in this item */
+		/*
+		 * Delete the directory item if there is only one record
+		 * left in this item
+		 */
 		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
 		return M_DELETE;
 	}
@@ -976,14 +1003,30 @@
 
 #define JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD (2 * JOURNAL_PER_BALANCE_CNT + 1)
 
-/*  If the path points to a directory or direct item, calculate mode and the size cut, for balance.
-    If the path points to an indirect item, remove some number of its unformatted nodes.
-    In case of file truncate calculate whether this item must be deleted/truncated or last
-    unformatted node of this item will be converted to a direct item.
-    This function returns a determination of what balance mode the calling function should employ. */
-static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path, const struct cpu_key *item_key, int *removed,	/* Number of unformatted nodes which were removed
-																						   from end of the file. */
-				      int *cut_size, unsigned long long new_file_length	/* MAX_KEY_OFFSET in case of delete. */
+/*
+ * If the path points to a directory or direct item, calculate mode
+ * and the size cut, for balance.
+ * If the path points to an indirect item, remove some number of its
+ * unformatted nodes.
+ * In case of file truncate calculate whether this item must be
+ * deleted/truncated or last unformatted node of this item will be
+ * converted to a direct item.
+ * This function returns a determination of what balance mode the
+ * calling function should employ.
+ */
+static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th,
+				      struct inode *inode,
+				      struct treepath *path,
+				      const struct cpu_key *item_key,
+				      /*
+				       * Number of unformatted nodes
+				       * which were removed from end
+				       * of the file.
+				       */
+				      int *removed,
+				      int *cut_size,
+				      /* MAX_KEY_OFFSET in case of delete. */
+				      unsigned long long new_file_length
     )
 {
 	struct super_block *sb = inode->i_sb;
@@ -1023,8 +1066,10 @@
 	    int pos = 0;
 
 	    if ( new_file_length == max_reiserfs_offset (inode) ) {
-		/* prepare_for_delete_or_cut() is called by
-		 * reiserfs_delete_item() */
+		/*
+		 * prepare_for_delete_or_cut() is called by
+		 * reiserfs_delete_item()
+		 */
 		new_file_length = 0;
 		delete = 1;
 	    }
@@ -1040,9 +1085,12 @@
 		    __le32 *unfm;
 		    __u32 block;
 
-		    /* Each unformatted block deletion may involve one additional
-		     * bitmap block into the transaction, thereby the initial
-		     * journal space reservation might not be enough. */
+		    /*
+		     * Each unformatted block deletion may pull
+		     * one additional bitmap block into the transaction,
+		     * so the initial journal space reservation
+		     * might not be enough.
+		     */
 		    if (!delete && (*cut_size) != 0 &&
 			reiserfs_transaction_free_space(th) < JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD)
 			break;
@@ -1074,17 +1122,21 @@
 			break;
 		    }
 		}
-		/* a trick.  If the buffer has been logged, this will do nothing.  If
-		** we've broken the loop without logging it, it will restore the
-		** buffer */
+		/*
+		 * a trick.  If the buffer has been logged, this will
+		 * do nothing.  If we've broken the loop without logging
+		 * it, it will restore the buffer
+		 */
 		reiserfs_restore_prepared_buffer(sb, bh);
 	    } while (need_re_search &&
 		     search_for_position_by_key(sb, item_key, path) == POSITION_FOUND);
 	    pos_in_item(path) = pos * UNFM_P_SIZE;
 
 	    if (*cut_size == 0) {
-		/* Nothing were cut. maybe convert last unformatted node to the
-		 * direct item? */
+		/*
+		 * Nothing was cut. Maybe convert the last unformatted node
+		 * to a direct item?
+		 */
 		result = M_CONVERT;
 	    }
 	    return result;
@@ -1104,9 +1156,11 @@
 	    (mode ==
 	     M_DELETE) ? ih_item_len(p_le_ih) : -tb->insert_size[0];
 	if (is_direntry_le_ih(p_le_ih)) {
-		/* return EMPTY_DIR_SIZE; We delete emty directoris only.
-		 * we can't use EMPTY_DIR_SIZE, as old format dirs have a different
-		 * empty size.  ick. FIXME, is this right? */
+		/*
+		 * return EMPTY_DIR_SIZE; We delete empty directories only.
+		 * we can't use EMPTY_DIR_SIZE, as old format dirs have a
+		 * different empty size.  ick. FIXME, is this right?
+		 */
 		return del_size;
 	}
 
@@ -1169,7 +1223,8 @@
 }
 #endif
 
-/* Delete object item.
+/*
+ * Delete object item.
  * th       - active transaction handle
  * path     - path to the deleted item
  * item_key - key to search for the deleted item
@@ -1221,7 +1276,7 @@
 
 		PROC_INFO_INC(sb, delete_item_restarted);
 
-		// file system changed, repeat search
+		/* file system changed, repeat search */
 		ret_value =
 		    search_for_position_by_key(sb, item_key, path);
 		if (ret_value == IO_ERROR)
@@ -1238,16 +1293,18 @@
 		unfix_nodes(&s_del_balance);
 		return 0;
 	}
-	// reiserfs_delete_item returns item length when success
+
+	/* reiserfs_delete_item returns item length when success */
 	ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);
 	q_ih = tp_item_head(path);
 	quota_cut_bytes = ih_item_len(q_ih);
 
-	/* hack so the quota code doesn't have to guess if the file
-	 ** has a tail.  On tail insert, we allocate quota for 1 unformatted node.
-	 ** We test the offset because the tail might have been
-	 ** split into multiple items, and we only want to decrement for
-	 ** the unfm node once
+	/*
+	 * hack so the quota code doesn't have to guess if the file has a
+	 * tail.  On tail insert, we allocate quota for 1 unformatted node.
+	 * We test the offset because the tail might have been
+	 * split into multiple items, and we only want to decrement for
+	 * the unfm node once
 	 */
 	if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(q_ih)) {
 		if ((le_ih_k_offset(q_ih) & (sb->s_blocksize - 1)) == 1) {
@@ -1261,24 +1318,28 @@
 		int off;
 		char *data;
 
-		/* We are in direct2indirect conversion, so move tail contents
-		   to the unformatted node */
-		/* note, we do the copy before preparing the buffer because we
-		 ** don't care about the contents of the unformatted node yet.
-		 ** the only thing we really care about is the direct item's data
-		 ** is in the unformatted node.
-		 **
-		 ** Otherwise, we would have to call reiserfs_prepare_for_journal on
-		 ** the unformatted node, which might schedule, meaning we'd have to
-		 ** loop all the way back up to the start of the while loop.
-		 **
-		 ** The unformatted node must be dirtied later on.  We can't be
-		 ** sure here if the entire tail has been deleted yet.
-		 **
-		 ** un_bh is from the page cache (all unformatted nodes are
-		 ** from the page cache) and might be a highmem page.  So, we
-		 ** can't use un_bh->b_data.
-		 ** -clm
+		/*
+		 * We are in direct2indirect conversion, so move tail contents
+		 * to the unformatted node
+		 */
+		/*
+		 * note, we do the copy before preparing the buffer because we
+		 * don't care about the contents of the unformatted node yet.
+		 * the only thing we really care about is the direct item's
+		 * the only thing we really care about is that the direct
+		 * item's data is in the unformatted node.
+		 * Otherwise, we would have to call
+		 * reiserfs_prepare_for_journal on the unformatted node,
+		 * which might schedule, meaning we'd have to loop all the
+		 * way back up to the start of the while loop.
+		 *
+		 * The unformatted node must be dirtied later on.  We can't be
+		 * sure here if the entire tail has been deleted yet.
+		 *
+		 * un_bh is from the page cache (all unformatted nodes are
+		 * from the page cache) and might be a highmem page.  So, we
+		 * can't use un_bh->b_data.
+		 * -clm
 		 */
 
 		data = kmap_atomic(un_bh->b_page);
@@ -1288,6 +1349,7 @@
 		       ret_value);
 		kunmap_atomic(data);
 	}
+
 	/* Perform balancing after all resources have been collected at once. */
 	do_balance(&s_del_balance, NULL, NULL, M_DELETE);
 
@@ -1304,20 +1366,21 @@
 	return ret_value;
 }
 
-/* Summary Of Mechanisms For Handling Collisions Between Processes:
-
- deletion of the body of the object is performed by iput(), with the
- result that if multiple processes are operating on a file, the
- deletion of the body of the file is deferred until the last process
- that has an open inode performs its iput().
-
- writes and truncates are protected from collisions by use of
- semaphores.
-
- creates, linking, and mknod are protected from collisions with other
- processes by making the reiserfs_add_entry() the last step in the
- creation, and then rolling back all changes if there was a collision.
- - Hans
+/*
+ * Summary Of Mechanisms For Handling Collisions Between Processes:
+ *
+ *  deletion of the body of the object is performed by iput(), with the
+ *  result that if multiple processes are operating on a file, the
+ *  deletion of the body of the file is deferred until the last process
+ *  that has an open inode performs its iput().
+ *
+ *  writes and truncates are protected from collisions by use of
+ *  semaphores.
+ *
+ *  creates, linking, and mknod are protected from collisions with other
+ *  processes by making the reiserfs_add_entry() the last step in the
+ *  creation, and then rolling back all changes if there was a collision.
+ *  - Hans
 */
 
 /* this deletes item which never gets split */
@@ -1347,7 +1410,11 @@
 		}
 		if (retval != ITEM_FOUND) {
 			pathrelse(&path);
-			// No need for a warning, if there is just no free space to insert '..' item into the newly-created subdir
+			/*
+			 * No need for a warning if there is just no free
+			 * space to insert the '..' item into the
+			 * newly-created subdir
+			 */
 			if (!
 			    ((unsigned long long)
 			     GET_HASH_VALUE(le_key_k_offset
@@ -1376,7 +1443,11 @@
 
 		if (retval == CARRY_ON) {
 			do_balance(&tb, NULL, NULL, M_DELETE);
-			if (inode) {	/* Should we count quota for item? (we don't count quotas for save-links) */
+			/*
+			 * Should we count quota for item? (we don't
+			 * count quotas for save-links)
+			 */
+			if (inode) {
 				int depth;
 #ifdef REISERQUOTA_DEBUG
 				reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
@@ -1391,7 +1462,8 @@
 			}
 			break;
 		}
-		// IO_ERROR, NO_DISK_SPACE, etc
+
+		/* IO_ERROR, NO_DISK_SPACE, etc */
 		reiserfs_warning(th->t_super, "vs-5360",
 				 "could not delete %K due to fix_nodes failure",
 				 &cpu_key);
@@ -1447,11 +1519,13 @@
 			do {
 				next = bh->b_this_page;
 
-				/* we want to unmap the buffers that contain the tail, and
-				 ** all the buffers after it (since the tail must be at the
-				 ** end of the file).  We don't want to unmap file data
-				 ** before the tail, since it might be dirty and waiting to
-				 ** reach disk
+				/*
+				 * we want to unmap the buffers that contain
+				 * the tail, and all the buffers after it
+				 * (since the tail must be at the end of the
+				 * file).  We don't want to unmap file data
+				 * before the tail, since it might be dirty
+				 * and waiting to reach disk
 				 */
 				cur_index += bh->b_size;
 				if (cur_index > tail_index) {
@@ -1476,9 +1550,10 @@
 	BUG_ON(!th->t_trans_id);
 	BUG_ON(new_file_size != inode->i_size);
 
-	/* the page being sent in could be NULL if there was an i/o error
-	 ** reading in the last block.  The user will hit problems trying to
-	 ** read the file, but for now we just skip the indirect2direct
+	/*
+	 * the page being sent in could be NULL if there was an i/o error
+	 * reading in the last block.  The user will hit problems trying to
+	 * read the file, but for now we just skip the indirect2direct
 	 */
 	if (atomic_read(&inode->i_count) > 1 ||
 	    !tail_has_to_be_packed(inode) ||
@@ -1490,17 +1565,18 @@
 		pathrelse(path);
 		return cut_bytes;
 	}
+
 	/* Perform the conversion to a direct_item. */
-	/* return indirect_to_direct(inode, path, item_key,
-				  new_file_size, mode); */
 	return indirect2direct(th, inode, page, path, item_key,
 			       new_file_size, mode);
 }
 
-/* we did indirect_to_direct conversion. And we have inserted direct
-   item successesfully, but there were no disk space to cut unfm
-   pointer being converted. Therefore we have to delete inserted
-   direct item(s) */
+/*
+ * we did an indirect_to_direct conversion and inserted the direct
+ * item successfully, but there was no disk space to cut the unfm
+ * pointer being converted. Therefore we have to delete the inserted
+ * direct item(s)
+ */
 static void indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th,
 					 struct inode *inode, struct treepath *path)
 {
@@ -1509,7 +1585,7 @@
 	int removed;
 	BUG_ON(!th->t_trans_id);
 
-	make_cpu_key(&tail_key, inode, inode->i_size + 1, TYPE_DIRECT, 4);	// !!!!
+	make_cpu_key(&tail_key, inode, inode->i_size + 1, TYPE_DIRECT, 4);
 	tail_key.key_length = 4;
 
 	tail_len =
@@ -1539,7 +1615,6 @@
 	reiserfs_warning(inode->i_sb, "reiserfs-5091", "indirect_to_direct "
 			 "conversion has been rolled back due to "
 			 "lack of disk space");
-	//mark_file_without_tail (inode);
 	mark_inode_dirty(inode);
 }
 
@@ -1551,15 +1626,18 @@
 			   struct page *page, loff_t new_file_size)
 {
 	struct super_block *sb = inode->i_sb;
-	/* Every function which is going to call do_balance must first
-	   create a tree_balance structure.  Then it must fill up this
-	   structure by using the init_tb_struct and fix_nodes functions.
-	   After that we can make tree balancing. */
+	/*
+	 * Every function which is going to call do_balance must first
+	 * create a tree_balance structure.  Then it must fill up this
+	 * structure by using the init_tb_struct and fix_nodes functions.
+	 * After that we can do the tree balancing.
+	 */
 	struct tree_balance s_cut_balance;
 	struct item_head *p_le_ih;
-	int cut_size = 0,	/* Amount to be cut. */
-	    ret_value = CARRY_ON, removed = 0,	/* Number of the removed unformatted nodes. */
-	    is_inode_locked = 0;
+	int cut_size = 0;	/* Amount to be cut. */
+	int ret_value = CARRY_ON;
+	int removed = 0;	/* Number of the removed unformatted nodes. */
+	int is_inode_locked = 0;
 	char mode;		/* Mode of the balance. */
 	int retval2 = -1;
 	int quota_cut_bytes;
@@ -1571,21 +1649,27 @@
 	init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
 		       cut_size);
 
-	/* Repeat this loop until we either cut the item without needing
-	   to balance, or we fix_nodes without schedule occurring */
+	/*
+	 * Repeat this loop until we either cut the item without needing
+	 * to balance, or we fix_nodes without schedule occurring
+	 */
 	while (1) {
-		/* Determine the balance mode, position of the first byte to
-		   be cut, and size to be cut.  In case of the indirect item
-		   free unformatted nodes which are pointed to by the cut
-		   pointers. */
+		/*
+		 * Determine the balance mode, position of the first byte to
+		 * be cut, and size to be cut.  In case of the indirect item
+		 * free unformatted nodes which are pointed to by the cut
+		 * pointers.
+		 */
 
 		mode =
 		    prepare_for_delete_or_cut(th, inode, path,
 					      item_key, &removed,
 					      &cut_size, new_file_size);
 		if (mode == M_CONVERT) {
-			/* convert last unformatted node to direct item or leave
-			   tail in the unformatted node */
+			/*
+			 * convert last unformatted node to direct item or
+			 * leave tail in the unformatted node
+			 */
 			RFALSE(ret_value != CARRY_ON,
 			       "PAP-5570: can not convert twice");
 
@@ -1599,15 +1683,20 @@
 
 			is_inode_locked = 1;
 
-			/* removing of last unformatted node will change value we
-			   have to return to truncate. Save it */
+			/*
+			 * removing the last unformatted node will
+			 * change the value we have to return to
+			 * truncate.  Save it
+			 */
 			retval2 = ret_value;
-			/*retval2 = sb->s_blocksize - (new_file_size & (sb->s_blocksize - 1)); */
 
-			/* So, we have performed the first part of the conversion:
-			   inserting the new direct item.  Now we are removing the
-			   last unformatted node pointer. Set key to search for
-			   it. */
+			/*
+			 * So, we have performed the first part of the
+			 * conversion: inserting the new direct item.
+			 * Now we are removing the last unformatted node
+			 * pointer.  Set the key to search for it.
+			 */
 			set_cpu_key_k_type(item_key, TYPE_INDIRECT);
 			item_key->key_length = 4;
 			new_file_size -=
@@ -1650,11 +1739,13 @@
 		return (ret_value == IO_ERROR) ? -EIO : -ENOENT;
 	}			/* while */
 
-	// check fix_nodes results (IO_ERROR or NO_DISK_SPACE)
+	/* check fix_nodes results (IO_ERROR or NO_DISK_SPACE) */
 	if (ret_value != CARRY_ON) {
 		if (is_inode_locked) {
-			// FIXME: this seems to be not needed: we are always able
-			// to cut item
+			/*
+			 * FIXME: this seems to be not needed: we are always
+			 * able to cut item
+			 */
 			indirect_to_direct_roll_back(th, inode, path);
 		}
 		if (ret_value == NO_DISK_SPACE)
@@ -1678,15 +1769,16 @@
 	else
 		ret_value = retval2;
 
-	/* For direct items, we only change the quota when deleting the last
-	 ** item.
+	/*
+	 * For direct items, we only change the quota when deleting the last
+	 * item.
 	 */
 	p_le_ih = tp_item_head(s_cut_balance.tb_path);
 	if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(p_le_ih)) {
 		if (mode == M_DELETE &&
 		    (le_ih_k_offset(p_le_ih) & (sb->s_blocksize - 1)) ==
 		    1) {
-			// FIXME: this is to keep 3.5 happy
+			/* FIXME: this is to keep 3.5 happy */
 			REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
 			quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
 		} else {
@@ -1697,9 +1789,11 @@
 	if (is_inode_locked) {
 		struct item_head *le_ih =
 		    tp_item_head(s_cut_balance.tb_path);
-		/* we are going to complete indirect2direct conversion. Make
-		   sure, that we exactly remove last unformatted node pointer
-		   of the item */
+		/*
+		 * we are going to complete the indirect2direct conversion.
+		 * Make sure that we remove exactly the last unformatted node
+		 * pointer of the item
+		 */
 		if (!is_indirect_le_ih(le_ih))
 			reiserfs_panic(sb, "vs-5652",
 				       "item must be indirect %h", le_ih);
@@ -1717,17 +1811,20 @@
 				       "(CUT, insert_size==%d)",
 				       le_ih, s_cut_balance.insert_size[0]);
 		}
-		/* it would be useful to make sure, that right neighboring
-		   item is direct item of this file */
+		/*
+		 * it would be useful to make sure that the right neighboring
+		 * item is a direct item of this file
+		 */
 	}
 #endif
 
 	do_balance(&s_cut_balance, NULL, NULL, mode);
 	if (is_inode_locked) {
-		/* we've done an indirect->direct conversion.  when the data block
-		 ** was freed, it was removed from the list of blocks that must
-		 ** be flushed before the transaction commits, make sure to
-		 ** unmap and invalidate it
+		/*
+		 * we've done an indirect->direct conversion.  when the
+		 * data block was freed, it was removed from the list of
+		 * blocks that must be flushed before the transaction
+		 * commits, make sure to unmap and invalidate it
 		 */
 		unmap_buffers(page, tail_pos);
 		REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
@@ -1758,20 +1855,25 @@
 	set_le_key_k_type(KEY_FORMAT_3_5, INODE_PKEY(inode), TYPE_STAT_DATA);
 }
 
-/* Truncate file to the new size. Note, this must be called with a transaction
-   already started */
+/*
+ * Truncate file to the new size. Note, this must be called with a
+ * transaction already started
+ */
 int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
-			  struct inode *inode,	/* ->i_size contains new size */
+			 struct inode *inode,	/* ->i_size contains new size */
 			 struct page *page,	/* up to date for last block */
-			 int update_timestamps	/* when it is called by
-						   file_release to convert
-						   the tail - no timestamps
-						   should be updated */
+			 /*
+			  * when it is called by file_release to convert
+			  * the tail - no timestamps should be updated
+			  */
+			 int update_timestamps
     )
 {
 	INITIALIZE_PATH(s_search_path);	/* Path to the current object item. */
 	struct item_head *p_le_ih;	/* Pointer to an item header. */
-	struct cpu_key s_item_key;	/* Key to search for a previous file item. */
+
+	/* Key to search for a previous file item. */
+	struct cpu_key s_item_key;
 	loff_t file_size,	/* Old file size. */
 	 new_file_size;	/* New file size. */
 	int deleted;		/* Number of deleted or truncated bytes. */
@@ -1784,8 +1886,8 @@
 	     || S_ISLNK(inode->i_mode)))
 		return 0;
 
+	/* deletion of directory - no need to update timestamps */
 	if (S_ISDIR(inode->i_mode)) {
-		// deletion of directory - no need to update timestamps
 		truncate_directory(th, inode);
 		return 0;
 	}
@@ -1793,7 +1895,7 @@
 	/* Get new file size. */
 	new_file_size = inode->i_size;
 
-	// FIXME: note, that key type is unimportant here
+	/* FIXME: note, that key type is unimportant here */
 	make_cpu_key(&s_item_key, inode, max_reiserfs_offset(inode),
 		     TYPE_DIRECT, 3);
 
@@ -1827,9 +1929,11 @@
 		int bytes =
 		    op_bytes_number(p_le_ih, inode->i_sb->s_blocksize);
 
-		/* this may mismatch with real file size: if last direct item
-		   had no padding zeros and last unformatted node had no free
-		   space, this file would have this file size */
+		/*
+		 * this may mismatch the real file size: if the last direct
+		 * item had no padding zeros and the last unformatted node had
+		 * no free space, this file would have this file size
+		 */
 		file_size = offset + bytes - 1;
 	}
 	/*
@@ -1867,14 +1971,17 @@
 
 		set_cpu_key_k_offset(&s_item_key, file_size);
 
-		/* While there are bytes to truncate and previous file item is presented in the tree. */
+		/*
+		 * While there are bytes to truncate and the previous
+		 * file item is present in the tree.
+		 */
 
 		/*
-		 ** This loop could take a really long time, and could log
-		 ** many more blocks than a transaction can hold.  So, we do a polite
-		 ** journal end here, and if the transaction needs ending, we make
-		 ** sure the file is consistent before ending the current trans
-		 ** and starting a new one
+		 * This loop could take a really long time, and could log
+		 * many more blocks than a transaction can hold.  So, we do
+		 * a polite journal end here, and if the transaction needs
+		 * ending, we make sure the file is consistent before ending
+		 * the current trans and starting a new one
 		 */
 		if (journal_transaction_should_end(th, 0) ||
 		    reiserfs_transaction_free_space(th) <= JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD) {
@@ -1906,7 +2013,7 @@
 
       update_and_out:
 	if (update_timestamps) {
-		// this is truncate, not file closing
+		/* this is truncate, not file closing */
 		inode->i_mtime = CURRENT_TIME_SEC;
 		inode->i_ctime = CURRENT_TIME_SEC;
 	}
@@ -1918,7 +2025,7 @@
 }
 
 #ifdef CONFIG_REISERFS_CHECK
-// this makes sure, that we __append__, not overwrite or add holes
+/* this makes sure that we __append__, not overwrite or add holes */
 static void check_research_for_paste(struct treepath *path,
 				     const struct cpu_key *key)
 {
@@ -1952,13 +2059,22 @@
 }
 #endif				/* config reiserfs check */
 
-/* Paste bytes to the existing item. Returns bytes number pasted into the item. */
-int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct treepath *search_path,	/* Path to the pasted item.	  */
-			     const struct cpu_key *key,	/* Key to search for the needed item. */
-			     struct inode *inode,	/* Inode item belongs to */
-			     const char *body,	/* Pointer to the bytes to paste.    */
+/*
+ * Paste bytes to the existing item.
+ * Returns the number of bytes pasted into the item.
+ */
+int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th,
+			     /* Path to the pasted item. */
+			     struct treepath *search_path,
+			     /* Key to search for the needed item. */
+			     const struct cpu_key *key,
+			     /* Inode item belongs to */
+			     struct inode *inode,
+			     /* Pointer to the bytes to paste. */
+			     const char *body,
+			     /* Size of pasted bytes. */
 			     int pasted_size)
-{				/* Size of pasted bytes.             */
+{
 	struct super_block *sb = inode->i_sb;
 	struct tree_balance s_paste_balance;
 	int retval;
@@ -2019,8 +2135,10 @@
 #endif
 	}
 
-	/* Perform balancing after all resources are collected by fix_nodes, and
-	   accessing them will not risk triggering schedule. */
+	/*
+	 * Perform balancing after all resources are collected by fix_nodes,
+	 * and accessing them will not risk triggering schedule.
+	 */
 	if (retval == CARRY_ON) {
 		do_balance(&s_paste_balance, NULL /*ih */ , body, M_PASTE);
 		return 0;
@@ -2041,7 +2159,8 @@
 	return retval;
 }
 
-/* Insert new item into the buffer at the path.
+/*
+ * Insert new item into the buffer at the path.
  * th   - active transaction handle
  * path - path to the inserted item
  * ih   - pointer to the item header to insert
@@ -2064,8 +2183,10 @@
 		fs_gen = get_generation(inode->i_sb);
 		quota_bytes = ih_item_len(ih);
 
-		/* hack so the quota code doesn't have to guess if the file has
-		 ** a tail, links are always tails, so there's no guessing needed
+		/*
+		 * hack so the quota code doesn't have to guess
+		 * if the file has a tail, links are always tails,
+		 * so there's no guessing needed
 		 */
 		if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(ih))
 			quota_bytes = inode->i_sb->s_blocksize + UNFM_P_SIZE;
@@ -2074,8 +2195,10 @@
 			       "reiserquota insert_item(): allocating %u id=%u type=%c",
 			       quota_bytes, inode->i_uid, head2type(ih));
 #endif
-		/* We can't dirty inode here. It would be immediately written but
-		 * appropriate stat item isn't inserted yet... */
+		/*
+		 * We can't dirty inode here. It would be immediately
+		 * written but appropriate stat item isn't inserted yet...
+		 */
 		depth = reiserfs_write_unlock_nested(inode->i_sb);
 		retval = dquot_alloc_space_nodirty(inode, quota_bytes);
 		reiserfs_write_lock_nested(inode->i_sb, depth);
@@ -2089,7 +2212,10 @@
 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
 	s_ins_balance.key = key->on_disk_key;
 #endif
-	/* DQUOT_* can schedule, must check to be sure calling fix_nodes is safe */
+	/*
+	 * DQUOT_* can schedule, must check to be sure calling
+	 * fix_nodes is safe
+	 */
 	if (inode && fs_changed(fs_gen, inode->i_sb)) {
 		goto search_again;
 	}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index c02b6b0..6268bb8 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -153,13 +153,15 @@
 
 extern const struct in_core_key MAX_IN_CORE_KEY;
 
-/* this is used to delete "save link" when there are no items of a
-   file it points to. It can either happen if unlink is completed but
-   "save unlink" removal, or if file has both unlink and truncate
-   pending and as unlink completes first (because key of "save link"
-   protecting unlink is bigger that a key lf "save link" which
-   protects truncate), so there left no items to make truncate
-   completion on */
+/*
+ * this is used to delete the "save link" when there are no items of
+ * the file it points to. It can happen either if the unlink completed
+ * but the removal of its "save link" did not, or if the file has both
+ * unlink and truncate pending and the unlink completes first (because
+ * the key of the "save link" protecting the unlink is bigger than the
+ * key of the "save link" protecting the truncate), so there are no
+ * items left to complete the truncate on
+ */
 static int remove_save_link_only(struct super_block *s,
 				 struct reiserfs_key *key, int oid_free)
 {
@@ -282,8 +284,10 @@
 
 		inode = reiserfs_iget(s, &obj_key);
 		if (!inode) {
-			/* the unlink almost completed, it just did not manage to remove
-			   "save" link and release objectid */
+			/*
+			 * the unlink almost completed; it just did not
+			 * manage to remove the "save" link and release objectid
+			 */
 			reiserfs_warning(s, "vs-2180", "iget failed for %K",
 					 &obj_key);
 			retval = remove_save_link_only(s, &save_link_key, 1);
@@ -303,10 +307,13 @@
 		reiserfs_write_lock_nested(inode->i_sb, depth);
 
 		if (truncate && S_ISDIR(inode->i_mode)) {
-			/* We got a truncate request for a dir which is impossible.
-			   The only imaginable way is to execute unfinished truncate request
-			   then boot into old kernel, remove the file and create dir with
-			   the same key. */
+			/*
+			 * We got a truncate request for a dir which
+			 * is impossible.  The only imaginable way is to
+			 * execute unfinished truncate request then boot
+			 * into old kernel, remove the file and create dir
+			 * with the same key.
+			 */
 			reiserfs_warning(s, "green-2101",
 					 "impossible truncate on a "
 					 "directory %k. Please report",
@@ -320,14 +327,16 @@
 		if (truncate) {
 			REISERFS_I(inode)->i_flags |=
 			    i_link_saved_truncate_mask;
-			/* not completed truncate found. New size was committed together
-			   with "save" link */
+			/*
+			 * an uncompleted truncate was found. The new size
+			 * was committed together with the "save" link
+			 */
 			reiserfs_info(s, "Truncating %k to %Ld ..",
 				      INODE_PKEY(inode), inode->i_size);
-			reiserfs_truncate_file(inode,
-					       0
-					       /*don't update modification time */
-					       );
+
+			/* don't update modification time */
+			reiserfs_truncate_file(inode, 0);
+
 			retval = remove_save_link(inode, truncate);
 		} else {
 			REISERFS_I(inode)->i_flags |= i_link_saved_unlink_mask;
@@ -373,10 +382,12 @@
 	return retval;
 }
 
-/* to protect file being unlinked from getting lost we "safe" link files
-   being unlinked. This link will be deleted in the same transaction with last
-   item of file. mounting the filesystem we scan all these links and remove
-   files which almost got lost */
+/*
+ * to protect a file being unlinked from getting lost we "safe" link the
+ * files being unlinked. This link will be deleted in the same transaction
+ * as the last item of the file. When mounting the filesystem we scan all
+ * these links and remove files which almost got lost
+ */
 void add_save_link(struct reiserfs_transaction_handle *th,
 		   struct inode *inode, int truncate)
 {
@@ -530,7 +541,10 @@
 
 	reiserfs_write_lock(s);
 
-	/* change file system state to current state if it was mounted with read-write permissions */
+	/*
+	 * change file system state to current state if it was mounted
+	 * with read-write permissions
+	 */
 	if (!(s->s_flags & MS_RDONLY)) {
 		if (!journal_begin(&th, s, 10)) {
 			reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s),
@@ -541,8 +555,9 @@
 		}
 	}
 
-	/* note, journal_release checks for readonly mount, and can decide not
-	 ** to do a journal_end
+	/*
+	 * note, journal_release checks for readonly mount, and can
+	 * decide not to do a journal_end
 	 */
 	journal_release(&th, s);
 
@@ -635,8 +650,9 @@
 	}
 	reiserfs_write_lock(inode->i_sb);
 
-	/* this is really only used for atime updates, so they don't have
-	 ** to be included in O_SYNC or fsync
+	/*
+	 * this is really only used for atime updates, so they don't have
+	 * to be included in O_SYNC or fsync
 	 */
 	err = journal_begin(&th, inode->i_sb, 1);
 	if (err)
@@ -789,31 +805,53 @@
 	.get_parent = reiserfs_get_parent,
 };
 
-/* this struct is used in reiserfs_getopt () for containing the value for those
-   mount options that have values rather than being toggles. */
+/*
+ * this struct is used in reiserfs_getopt() to contain the value of
+ * those mount options that have values rather than being toggles.
+ */
 typedef struct {
 	char *value;
-	int setmask;		/* bitmask which is to set on mount_options bitmask when this
-				   value is found, 0 is no bits are to be changed. */
-	int clrmask;		/* bitmask which is to clear on mount_options bitmask when  this
-				   value is found, 0 is no bits are to be changed. This is
-				   applied BEFORE setmask */
+	/*
+	 * bitmask to set on the mount_options bitmask when this value
+	 * is found; 0 if no bits are to be changed.
+	 */
+	int setmask;
+	/*
+	 * bitmask to clear on the mount_options bitmask when this value
+	 * is found; 0 if no bits are to be changed. This is applied
+	 * BEFORE setmask
+	 */
+	int clrmask;
 } arg_desc_t;
 
 /* Set this bit in arg_required to allow empty arguments */
 #define REISERFS_OPT_ALLOWEMPTY 31
 
-/* this struct is used in reiserfs_getopt() for describing the set of reiserfs
-   mount options */
+/*
+ * this struct is used in reiserfs_getopt() for describing the
+ * set of reiserfs mount options
+ */
 typedef struct {
 	char *option_name;
-	int arg_required;	/* 0 if argument is not required, not 0 otherwise */
-	const arg_desc_t *values;	/* list of values accepted by an option */
-	int setmask;		/* bitmask which is to set on mount_options bitmask when this
-				   value is found, 0 is no bits are to be changed. */
-	int clrmask;		/* bitmask which is to clear on mount_options bitmask when  this
-				   value is found, 0 is no bits are to be changed. This is
-				   applied BEFORE setmask */
+
+	/* 0 if an argument is not required, non-zero otherwise */
+	int arg_required;
+
+	/* list of values accepted by an option */
+	const arg_desc_t *values;
+
+	/*
+	 * bitmask to set on the mount_options bitmask when this value
+	 * is found; 0 if no bits are to be changed.
+	 */
+	int setmask;
+
+	/*
+	 * bitmask to clear on the mount_options bitmask when this value
+	 * is found; 0 if no bits are to be changed. This is applied
+	 * BEFORE setmask
+	 */
+	int clrmask;
 } opt_desc_t;
 
 /* possible values for -o data= */
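
Because clrmask is documented to apply before setmask, a single table entry can
clear a whole group of mutually exclusive bits and then set the chosen one. A
sketch of that application order (hypothetical helper, not the kernel code):

	/* Apply one matched table entry: clrmask first, then setmask. */
	static void toy_apply_option(unsigned long *mount_options,
				     int setmask, int clrmask)
	{
		*mount_options &= ~(unsigned long)clrmask;
		*mount_options |= (unsigned long)setmask;
	}
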
@@ -834,8 +872,10 @@
 	{.value = NULL}
 };
 
-/* possible values for "-o block-allocator=" and bits which are to be set in
-   s_mount_opt of reiserfs specific part of in-core super block */
+/*
+ * possible values for "-o block-allocator=" and bits which are to be set in
+ * s_mount_opt of reiserfs specific part of in-core super block
+ */
 static const arg_desc_t balloc[] = {
 	{"noborder", 1 << REISERFS_NO_BORDER, 0},
 	{"border", 0, 1 << REISERFS_NO_BORDER},
@@ -865,21 +905,25 @@
 	{NULL, 0, 0},
 };
 
-/* proceed only one option from a list *cur - string containing of mount options
-   opts - array of options which are accepted
-   opt_arg - if option is found and requires an argument and if it is specifed
-   in the input - pointer to the argument is stored here
-   bit_flags - if option requires to set a certain bit - it is set here
-   return -1 if unknown option is found, opt->arg_required otherwise */
+/*
+ * process exactly one option from the list
+ * *cur - string containing the mount options
+ * opts - array of options which are accepted
+ * opt_arg - if an option is found that requires an argument and the argument
+ * is specified in the input, a pointer to the argument is stored here
+ * bit_flags - if the option requires a certain bit to be set, it is set here
+ * returns -1 if an unknown option is found, opt->arg_required otherwise
+ */
 static int reiserfs_getopt(struct super_block *s, char **cur, opt_desc_t * opts,
 			   char **opt_arg, unsigned long *bit_flags)
 {
 	char *p;
-	/* foo=bar,
-	   ^   ^  ^
-	   |   |  +-- option_end
-	   |   +-- arg_start
-	   +-- option_start
+	/*
+	 * foo=bar,
+	 * ^   ^  ^
+	 * |   |  +-- option_end
+	 * |   +-- arg_start
+	 * +-- option_start
 	 */
 	const opt_desc_t *opt;
 	const arg_desc_t *arg;
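
The pointer diagram above amounts to an in-place split of the option string
at '=' and ','. A self-contained sketch of that walk follows; the buffer
contents are illustrative only, and the real parser also validates each
token against the opt_desc_t table:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[] = "data=journal,notail";
        char *option_start = buf;

        while (option_start && *option_start) {
                char *option_end = strchr(option_start, ',');
                char *arg_start;

                if (option_end)
                        *option_end++ = '\0';   /* terminate this token */
                arg_start = strchr(option_start, '=');
                if (arg_start)
                        *arg_start++ = '\0';    /* split name from argument */
                printf("option \"%s\" arg \"%s\"\n",
                       option_start, arg_start ? arg_start : "(none)");
                option_start = option_end;      /* advance to next token */
        }
        return 0;
}
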
@@ -894,9 +938,12 @@
 	}
 
 	if (!strncmp(p, "alloc=", 6)) {
-		/* Ugly special case, probably we should redo options parser so that
-		   it can understand several arguments for some options, also so that
-		   it can fill several bitfields with option values. */
+		/*
+		 * Ugly special case; probably we should redo the options
+		 * parser so that it can understand several arguments for
+		 * some options, also so that it can fill several bitfields
+		 * with option values.
+		 */
 		if (reiserfs_parse_alloc_options(s, p + 6)) {
 			return -1;
 		} else {
@@ -959,7 +1006,10 @@
 		return -1;
 	}
 
-	/* move to the argument, or to next option if argument is not required */
+	/*
+	 * move to the argument, or to next option if argument is not
+	 * required
+	 */
 	p++;
 
 	if (opt->arg_required
@@ -996,12 +1046,20 @@
 }
 
 /* returns 0 if something is wrong in option string, 1 - otherwise */
-static int reiserfs_parse_options(struct super_block *s, char *options,	/* string given via mount's -o */
+static int reiserfs_parse_options(struct super_block *s,
+
+				  /* string given via mount's -o */
+				  char *options,
+
+				  /*
+				   * after the parsing phase, contains the
+				   * collection of bitflags defining what
+				   * mount options were selected.
+				   */
 				  unsigned long *mount_options,
-				  /* after the parsing phase, contains the
-				     collection of bitflags defining what
-				     mount options were selected. */
-				  unsigned long *blocks,	/* strtol-ed from NNN of resize=NNN */
+
+				  /* strtol-ed from NNN of resize=NNN */
+				  unsigned long *blocks,
 				  char **jdev_name,
 				  unsigned int *commit_max_age,
 				  char **qf_names,
@@ -1011,7 +1069,10 @@
 	char *arg = NULL;
 	char *pos;
 	opt_desc_t opts[] = {
-		/* Compatibility stuff, so that -o notail for old setups still work */
+		/*
+		 * Compatibility stuff, so that -o notail for old
+		 * setups still works
+		 */
 		{"tails",.arg_required = 't',.values = tails},
 		{"notail",.clrmask =
 		 (1 << REISERFS_LARGETAIL) | (1 << REISERFS_SMALLTAIL)},
@@ -1056,8 +1117,10 @@
 
 	*blocks = 0;
 	if (!options || !*options)
-		/* use default configuration: create tails, journaling on, no
-		   conversion to newest format */
+		/*
+		 * use default configuration: create tails, journaling on, no
+		 * conversion to newest format
+		 */
 		return 1;
 
 	for (pos = options; pos;) {
@@ -1110,7 +1173,8 @@
 
 		if (c == 'j') {
 			if (arg && *arg && jdev_name) {
-				if (*jdev_name) {	//Hm, already assigned?
+				/* Hm, already assigned? */
+				if (*jdev_name) {
 					reiserfs_warning(s, "super-6510",
 							 "journal device was "
 							 "already specified to "
@@ -1363,8 +1427,10 @@
 	safe_mask |= 1 << REISERFS_USRQUOTA;
 	safe_mask |= 1 << REISERFS_GRPQUOTA;
 
-	/* Update the bitmask, taking care to keep
-	 * the bits we're not allowed to change here */
+	/*
+	 * Update the bitmask, taking care to keep
+	 * the bits we're not allowed to change here
+	 */
 	REISERFS_SB(s)->s_mount_opt =
 	    (REISERFS_SB(s)->
 	     s_mount_opt & ~safe_mask) | (mount_options & safe_mask);
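
The mask expression above keeps every protected bit and takes the rest from
the newly requested options; a worked example with made-up bit values:

#include <stdio.h>

int main(void)
{
        unsigned long cur  = 0xf0;      /* options currently in effect */
        unsigned long req  = 0x0f;      /* options requested on remount */
        unsigned long safe = 0x0c;      /* bits the user may change */
        unsigned long res;

        /* keep every bit outside safe_mask, take the rest from req */
        res = (cur & ~safe) | (req & safe);
        printf("%#lx\n", res);          /* 0xf0 | 0x0c = 0xfc */
        return 0;
}
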
@@ -1428,7 +1494,9 @@
 		handle_data_mode(s, mount_options);
 		handle_barrier_mode(s, mount_options);
 		REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
-		s->s_flags &= ~MS_RDONLY;	/* now it is safe to call journal_begin */
+
+		/* now it is safe to call journal_begin */
+		s->s_flags &= ~MS_RDONLY;
 		err = journal_begin(&th, s, 10);
 		if (err)
 			goto out_err_unlock;
@@ -1490,9 +1558,9 @@
 		brelse(bh);
 		return 1;
 	}
-	//
-	// ok, reiserfs signature (old or new) found in at the given offset
-	//
+	/*
+	 * ok, reiserfs signature (old or new) found at the given offset
+	 */
 	fs_blocksize = sb_blocksize(rs);
 	brelse(bh);
 	sb_set_blocksize(s, fs_blocksize);
@@ -1530,9 +1598,11 @@
 	SB_BUFFER_WITH_SB(s) = bh;
 	SB_DISK_SUPER_BLOCK(s) = rs;
 
+	/*
+	 * the magic is that of a filesystem with a non-standard journal;
+	 * look at s_version to find which format is in use
+	 */
 	if (is_reiserfs_jr(rs)) {
-		/* magic is of non-standard journal filesystem, look at s_version to
-		   find which format is in use */
 		if (sb_version(rs) == REISERFS_VERSION_2)
 			reiserfs_info(s, "found reiserfs format \"3.6\""
 				      " with non-standard journal\n");
@@ -1546,8 +1616,10 @@
 			return 1;
 		}
 	} else
-		/* s_version of standard format may contain incorrect information,
-		   so we just look at the magic string */
+		/*
+		 * s_version of standard format may contain incorrect
+		 * information, so we just look at the magic string
+		 */
 		reiserfs_info(s,
 			      "found reiserfs format \"%s\" with standard journal\n",
 			      is_reiserfs_3_5(rs) ? "3.5" : "3.6");
@@ -1559,8 +1631,9 @@
 	s->dq_op = &reiserfs_quota_operations;
 #endif
 
-	/* new format is limited by the 32 bit wide i_blocks field, want to
-	 ** be one full block below that.
+	/*
+	 * new format is limited by the 32 bit wide i_blocks field, want to
+	 * be one full block below that.
 	 */
 	s->s_maxbytes = (512LL << 32) - s->s_blocksize;
 	return 0;
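
That s_maxbytes value follows from i_blocks being a 32-bit count of
512-byte units; a quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long long blocksize = 4096;
        unsigned long long maxbytes;

        /* i_blocks is a 32-bit count of 512-byte units, so the hard
         * ceiling is 512 * 2^32 bytes; stay one full block below it */
        maxbytes = (512ULL << 32) - blocksize;
        printf("%llu bytes, just under 2 TiB\n", maxbytes);
        return 0;
}
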
@@ -1579,14 +1652,15 @@
 	return 0;
 }
 
-/////////////////////////////////////////////////////
-// hash detection stuff
+/* hash detection stuff */
 
-// if root directory is empty - we set default - Yura's - hash and
-// warn about it
-// FIXME: we look for only one name in a directory. If tea and yura
-// bith have the same value - we ask user to send report to the
-// mailing list
+/*
+ * if the root directory is empty we set the default (Yura's) hash
+ * and warn about it
+ * FIXME: we look for only one name in a directory. If tea and yura
+ * both have the same value we ask the user to send a report to the
+ * mailing list
+ */
 static __u32 find_hash_out(struct super_block *s)
 {
 	int retval;
@@ -1598,7 +1672,7 @@
 
 	inode = s->s_root->d_inode;
 
-	do {			// Some serious "goto"-hater was there ;)
+	do {			/* Some serious "goto"-hater was there ;) */
 		u32 teahash, r5hash, yurahash;
 
 		make_cpu_key(&key, inode, ~0, TYPE_DIRENTRY, 3);
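
To make the detection scheme concrete: one real directory entry name is
hashed with every candidate function and the results are compared against
the offset stored on disk. A toy model with stub hashes; every name and
value here is invented for illustration, the real functions live in
fs/reiserfs/hashes.c:

#include <stdio.h>

typedef unsigned int u32;

static u32 tea_stub(const char *s, int len)  { return 0x1111; }
static u32 yura_stub(const char *s, int len) { return 0x2222; }
static u32 r5_stub(const char *s, int len)   { return 0x3333; }

/* hash one on-disk name with every candidate and pick the match */
static const char *detect(const char *name, int len, u32 stored)
{
        if (tea_stub(name, len) == stored)
                return "tea";
        if (yura_stub(name, len) == stored)
                return "yura";
        if (r5_stub(name, len) == stored)
                return "r5";
        return "unknown";
}

int main(void)
{
        printf("%s\n", detect("lost+found", 10, 0x3333));  /* "r5" */
        return 0;
}
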
@@ -1663,23 +1737,25 @@
 	return hash;
 }
 
-// finds out which hash names are sorted with
+/* finds out which hash names are sorted with */
 static int what_hash(struct super_block *s)
 {
 	__u32 code;
 
 	code = sb_hash_function_code(SB_DISK_SUPER_BLOCK(s));
 
-	/* reiserfs_hash_detect() == true if any of the hash mount options
-	 ** were used.  We must check them to make sure the user isn't
-	 ** using a bad hash value
+	/*
+	 * reiserfs_hash_detect() == true if any of the hash mount options
+	 * were used.  We must check them to make sure the user isn't
+	 * using a bad hash value
 	 */
 	if (code == UNSET_HASH || reiserfs_hash_detect(s))
 		code = find_hash_out(s);
 
 	if (code != UNSET_HASH && reiserfs_hash_detect(s)) {
-		/* detection has found the hash, and we must check against the
-		 ** mount options
+		/*
+		 * detection has found the hash, and we must check against the
+		 * mount options
 		 */
 		if (reiserfs_rupasov_hash(s) && code != YURA_HASH) {
 			reiserfs_warning(s, "reiserfs-2507",
@@ -1701,7 +1777,10 @@
 			code = UNSET_HASH;
 		}
 	} else {
-		/* find_hash_out was not called or could not determine the hash */
+		/*
+		 * find_hash_out was not called or
+		 * could not determine the hash
+		 */
 		if (reiserfs_rupasov_hash(s)) {
 			code = YURA_HASH;
 		} else if (reiserfs_tea_hash(s)) {
@@ -1711,8 +1790,9 @@
 		}
 	}
 
-	/* if we are mounted RW, and we have a new valid hash code, update
-	 ** the super
+	/*
+	 * if we are mounted RW, and we have a new valid hash code, update
+	 * the super
 	 */
 	if (code != UNSET_HASH &&
 	    !(s->s_flags & MS_RDONLY) &&
@@ -1722,7 +1802,7 @@
 	return code;
 }
 
-// return pointer to appropriate function
+/* return pointer to appropriate function */
 static hashf_t hash_function(struct super_block *s)
 {
 	switch (what_hash(s)) {
@@ -1739,7 +1819,7 @@
 	return NULL;
 }
 
-// this is used to set up correct value for old partitions
+/* this is used to set up correct value for old partitions */
 static int function2code(hashf_t func)
 {
 	if (func == keyed_hash)
@@ -1749,7 +1829,7 @@
 	if (func == r5_hash)
 		return R5_HASH;
 
-	BUG();			// should never happen
+	BUG();			/* should never happen */
 
 	return 0;
 }
@@ -1784,8 +1864,7 @@
 	sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
 	sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
 	sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
-	/* no preallocation minimum, be smart in
-	   reiserfs_file_write instead */
+	/* no preallocation minimum, be smart in reiserfs_file_write instead */
 	sbi->s_alloc_options.preallocmin = 0;
 	/* Preallocate by 16 blocks (17-1) at once */
 	sbi->s_alloc_options.preallocsize = 17;
@@ -1828,10 +1907,17 @@
 		goto error_unlocked;
 	}
 
-	/* try old format (undistributed bitmap, super block in 8-th 1k block of a device) */
+	/*
+	 * try old format (undistributed bitmap, super block in the 8th 1k
+	 * block of a device)
+	 */
 	if (!read_super_block(s, REISERFS_OLD_DISK_OFFSET_IN_BYTES))
 		old_format = 1;
-	/* try new format (64-th 1k block), which can contain reiserfs super block */
+
+	/*
+	 * try new format (the 64th 1k block), which can contain reiserfs
+	 * super block
+	 */
 	else if (read_super_block(s, REISERFS_DISK_OFFSET_IN_BYTES)) {
 		SWARN(silent, s, "sh-2021", "can not find reiserfs on %s",
 		      s->s_id);
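
The probe order above, old 8KB offset first and the 64KB offset second, can
be sketched on its own; try_super() is a stand-in for read_super_block()
with invented behavior:

#include <stdio.h>

#define OLD_OFFSET (8 * 1024)   /* 8th 1k block, old format */
#define NEW_OFFSET (64 * 1024)  /* 64th 1k block, current format */

/* stub: returns 0 when a super block is "found" at the offset */
static int try_super(long byte_offset)
{
        return byte_offset == NEW_OFFSET ? 0 : -1;  /* pretend new format */
}

int main(void)
{
        int old_format = 0;

        if (!try_super(OLD_OFFSET))
                old_format = 1;          /* old layout found first */
        else if (try_super(NEW_OFFSET)) {
                fprintf(stderr, "can not find reiserfs\n");
                return 1;
        }
        printf("old_format = %d\n", old_format);
        return 0;
}
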
@@ -1839,9 +1925,11 @@
 	}
 
 	rs = SB_DISK_SUPER_BLOCK(s);
-	/* Let's do basic sanity check to verify that underlying device is not
-	   smaller than the filesystem. If the check fails then abort and scream,
-	   because bad stuff will happen otherwise. */
+	/*
+	 * Let's do a basic sanity check to verify that the underlying device
+	 * is not smaller than the filesystem. If the check fails then abort
+	 * and scream, because bad stuff will happen otherwise.
+	 */
 	if (s->s_bdev && s->s_bdev->bd_inode
 	    && i_size_read(s->s_bdev->bd_inode) <
 	    sb_block_count(rs) * sb_blocksize(rs)) {
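
The sanity check is a single comparison of the device size against what the
super block claims; with sample numbers:

#include <stdio.h>

int main(void)
{
        unsigned long long dev_bytes   = 1000ULL * 1024 * 1024; /* 1000 MiB */
        unsigned long long block_count = 262144;  /* from the super block */
        unsigned long long blocksize   = 4096;    /* 262144 * 4096 = 1 GiB */

        /* refuse to mount if the fs claims more space than the device has */
        if (dev_bytes < block_count * blocksize)
                printf("filesystem larger than device, refusing\n");
        else
                printf("size check ok\n");
        return 0;
}
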
@@ -1885,15 +1973,16 @@
 		printk("reiserfs: using flush barriers\n");
 	}
 
-	// set_device_ro(s->s_dev, 1) ;
 	if (journal_init(s, jdev_name, old_format, commit_max_age)) {
 		SWARN(silent, s, "sh-2022",
 		      "unable to initialize journal space");
 		goto error_unlocked;
 	} else {
-		jinit_done = 1;	/* once this is set, journal_release must be called
-				 ** if we error out of the mount
-				 */
+		/*
+		 * once this is set, journal_release must be called
+		 * if we error out of the mount
+		 */
+		jinit_done = 1;
 	}
 
 	if (reread_meta_blocks(s)) {
@@ -1938,7 +2027,7 @@
 	s->s_root = d_make_root(root_inode);
 	if (!s->s_root)
 		goto error;
-	// define and initialize hash function
+	/* define and initialize hash function */
 	sbi->s_hash_function = hash_function(s);
 	if (sbi->s_hash_function == NULL) {
 		dput(s->s_root);
@@ -1967,10 +2056,12 @@
 		set_sb_umount_state(rs, REISERFS_ERROR_FS);
 		set_sb_fs_state(rs, 0);
 
-		/* Clear out s_bmap_nr if it would wrap. We can handle this
+		/*
+		 * Clear out s_bmap_nr if it would wrap. We can handle this
 		 * case, but older revisions can't. This will cause the
 		 * file system to fail mount on those older implementations,
-		 * avoiding corruption. -jeffm */
+		 * avoiding corruption. -jeffm
+		 */
 		if (bmap_would_wrap(reiserfs_bmap_count(s)) &&
 		    sb_bmap_nr(rs) != 0) {
 			reiserfs_warning(s, "super-2030", "This file system "
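
For a sense of when the count would wrap: assuming the on-disk s_bmap_nr
field is 16 bits wide and one bitmap block covers blocksize * 8 blocks
(check the headers before relying on either), the threshold works out as
follows:

#include <stdio.h>

int main(void)
{
        unsigned long long blocks = 3ULL << 31;    /* ~6G blocks, 24 TB fs */
        unsigned long long per_bitmap = 4096 * 8;  /* blocks per bitmap block */
        unsigned long long bmaps = (blocks + per_bitmap - 1) / per_bitmap;

        /* past 65535 a 16-bit count would wrap, so newer kernels store 0
         * on disk and derive the count at mount time instead */
        if (bmaps > 65535)
                printf("%llu bitmaps: would wrap, store 0\n", bmaps);
        return 0;
}
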
@@ -1983,8 +2074,10 @@
 		}
 
 		if (old_format_only(s)) {
-			/* filesystem of format 3.5 either with standard or non-standard
-			   journal */
+			/*
+			 * filesystem of format 3.5 either with standard
+			 * or non-standard journal
+			 */
 			if (convert_reiserfs(s)) {
 				/* and -o conv is given */
 				if (!silent)
@@ -1992,8 +2085,11 @@
 						      "converting 3.5 filesystem to the 3.6 format");
 
 				if (is_reiserfs_3_5(rs))
-					/* put magic string of 3.6 format. 2.2 will not be able to
-					   mount this filesystem anymore */
+					/*
+					 * put magic string of 3.6 format.
+					 * 2.2 will not be able to
+					 * mount this filesystem anymore
+					 */
 					memcpy(rs->s_v1.s_magic,
 					       reiserfs_3_6_magic_string,
 					       sizeof
@@ -2027,7 +2123,9 @@
 		}
 		reiserfs_write_lock(s);
 
-		/* look for files which were to be removed in previous session */
+		/*
+		 * look for files which were to be removed in previous session
+		 */
 		finish_unfinished(s);
 	} else {
 		if (old_format_only(s) && !silent) {
@@ -2043,7 +2141,9 @@
 		}
 		reiserfs_write_lock(s);
 	}
-	// mark hash in super block: it could be unset. overwrite should be ok
+	/*
+	 * mark hash in super block: it could be unset. overwrite should be ok
+	 */
 	set_sb_hash_function_code(rs, function2code(sbi->s_hash_function));
 
 	handle_attrs(s);
@@ -2247,7 +2347,10 @@
 		goto out;
 	}
 	inode = path->dentry->d_inode;
-	/* We must not pack tails for quota files on reiserfs for quota IO to work */
+	/*
+	 * We must not pack tails for quota files on reiserfs for quota
+	 * IO to work
+	 */
 	if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) {
 		err = reiserfs_unpack(inode, NULL);
 		if (err) {
@@ -2288,10 +2391,12 @@
 	return err;
 }
 
-/* Read data from quotafile - avoid pagecache and such because we cannot afford
+/*
+ * Read data from quotafile - avoid pagecache and such because we cannot afford
  * acquiring the locks... As quota files are never truncated and quota code
  * itself serializes the operations (and no one else should touch the files)
- * we don't have to be afraid of races */
+ * we don't have to be afraid of races
+ */
 static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
 				   size_t len, loff_t off)
 {
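
A userspace model of the block-by-block chunking this function performs,
with read_block() standing in for the sb_bread()/memcpy path; sizes and
data are invented:

#include <stdio.h>
#include <string.h>

#define BLKSIZE 4096

/* stub: pretend to read one fs block into buf */
static void read_block(unsigned long blk, char *buf)
{
        memset(buf, 'A' + (blk % 26), BLKSIZE);
}

/* walk the range block by block, clamping each copy to the block edge */
static size_t quota_read(char *data, size_t len, long long off)
{
        unsigned long blk = off / BLKSIZE;
        size_t offset = off % BLKSIZE, toread = len;
        char buf[BLKSIZE];

        while (toread > 0) {
                size_t chunk = BLKSIZE - offset < toread
                             ? BLKSIZE - offset : toread;
                read_block(blk, buf);
                memcpy(data, buf + offset, chunk);
                offset = 0;             /* later blocks start at offset 0 */
                toread -= chunk;
                data += chunk;
                blk++;
        }
        return len;
}

int main(void)
{
        char out[10];

        quota_read(out, sizeof(out) - 1, 4090); /* straddles a block edge */
        out[9] = '\0';
        printf("%s\n", out);                    /* "AAAAAABBB" */
        return 0;
}
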
@@ -2312,7 +2417,10 @@
 		    sb->s_blocksize - offset <
 		    toread ? sb->s_blocksize - offset : toread;
 		tmp_bh.b_state = 0;
-		/* Quota files are without tails so we can safely use this function */
+		/*
+		 * Quota files are without tails so we can safely
+		 * use this function
+		 */
 		reiserfs_write_lock(sb);
 		err = reiserfs_get_block(inode, blk, &tmp_bh, 0);
 		reiserfs_write_unlock(sb);
@@ -2335,8 +2443,10 @@
 	return len;
 }
 
-/* Write to quotafile (we know the transaction is already started and has
- * enough credits) */
+/*
+ * Write to quotafile (we know the transaction is already started and has
+ * enough credits)
+ */
 static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
 				    const char *data, size_t len, loff_t off)
 {
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index fc1981d..f41e19b 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -1,5 +1,6 @@
 /*
- * Copyright 1999 Hans Reiser, see reiserfs/README for licensing and copyright details
+ * Copyright 1999 Hans Reiser, see reiserfs/README for licensing and copyright
+ * details
  */
 
 #include <linux/time.h>
@@ -7,13 +8,19 @@
 #include <linux/buffer_head.h>
 #include "reiserfs.h"
 
-/* access to tail : when one is going to read tail it must make sure, that is not running.
- direct2indirect and indirect2direct can not run concurrently */
+/*
+ * access to tail: when one is going to read the tail it must make sure
+ * that a conversion is not running.  direct2indirect and indirect2direct
+ * can not run concurrently
+ */
 
-/* Converts direct items to an unformatted node. Panics if file has no
-   tail. -ENOSPC if no disk space for conversion */
-/* path points to first direct item of the file regarless of how many of
-   them are there */
+/*
+ * Converts direct items to an unformatted node. Panics if file has no
+ * tail. -ENOSPC if no disk space for conversion
+ */
+/*
+ * path points to first direct item of the file regardless of how many of
+ * them are there
+ */
 int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
 		    struct treepath *path, struct buffer_head *unbh,
 		    loff_t tail_offset)
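
For orientation, a toy model of what "tail" means in these comments: the
trailing sub-block piece of a file that can live in the tree as a direct
item until conversion moves it into an unformatted block. The numbers are
invented:

#include <stdio.h>

#define BLKSIZE 4096

int main(void)
{
        unsigned long long size = 10000;            /* file length */
        unsigned long long tail_len = size % BLKSIZE;

        printf("full blocks (indirect pointers): %llu\n", size / BLKSIZE);
        printf("tail bytes (direct item):        %llu\n", tail_len);
        /* direct2indirect moves those tail bytes into a fresh
         * unformatted block and adds one more pointer for it */
        return 0;
}
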
@@ -22,14 +29,20 @@
 	struct buffer_head *up_to_date_bh;
 	struct item_head *p_le_ih = tp_item_head(path);
 	unsigned long total_tail = 0;
-	struct cpu_key end_key;	/* Key to search for the last byte of the
-				   converted item. */
-	struct item_head ind_ih;	/* new indirect item to be inserted or
-					   key of unfm pointer to be pasted */
-	int blk_size, retval;	/* returned value for reiserfs_insert_item and clones */
-	unp_t unfm_ptr;		/* Handle on an unformatted node
-				   that will be inserted in the
-				   tree. */
+
+	/* Key to search for the last byte of the converted item. */
+	struct cpu_key end_key;
+
+	/*
+	 * new indirect item to be inserted or key
+	 * of unfm pointer to be pasted
+	 */
+	struct item_head ind_ih;
+	int blk_size;
+	/* returned value for reiserfs_insert_item and clones */
+	int retval;
+	/* Handle on an unformatted node that will be inserted in the tree. */
+	unp_t unfm_ptr;
 
 	BUG_ON(!th->t_trans_id);
 
@@ -37,8 +50,10 @@
 
 	blk_size = sb->s_blocksize;
 
-	/* and key to search for append or insert pointer to the new
-	   unformatted node. */
+	/*
+	 * and key to search for append or insert pointer to the new
+	 * unformatted node.
+	 */
 	copy_item_head(&ind_ih, p_le_ih);
 	set_le_ih_k_offset(&ind_ih, tail_offset);
 	set_le_ih_k_type(&ind_ih, TYPE_INDIRECT);
@@ -76,20 +91,26 @@
 	if (retval) {
 		return retval;
 	}
-	// note: from here there are two keys which have matching first
-	// three key components. They only differ by the fourth one.
+	/*
+	 * note: from here there are two keys which have matching first
+	 * three key components. They only differ by the fourth one.
+	 */
 
 	/* Set the key to search for the direct items of the file */
 	make_cpu_key(&end_key, inode, max_reiserfs_offset(inode), TYPE_DIRECT,
 		     4);
 
-	/* Move bytes from the direct items to the new unformatted node
-	   and delete them. */
+	/*
+	 * Move bytes from the direct items to the new unformatted node
+	 * and delete them.
+	 */
 	while (1) {
 		int tail_size;
 
-		/* end_key.k_offset is set so, that we will always have found
-		   last item of the file */
+		/*
+		 * end_key.k_offset is set so that we will always have found
+		 * the last item of the file
+		 */
 		if (search_for_position_by_key(sb, &end_key, path) ==
 		    POSITION_FOUND)
 			reiserfs_panic(sb, "PAP-14050",
@@ -101,11 +122,12 @@
 		tail_size = (le_ih_k_offset(p_le_ih) & (blk_size - 1))
 		    + ih_item_len(p_le_ih) - 1;
 
-		/* we only send the unbh pointer if the buffer is not up to date.
-		 ** this avoids overwriting good data from writepage() with old data
-		 ** from the disk or buffer cache
-		 ** Special case: unbh->b_page will be NULL if we are coming through
-		 ** DIRECT_IO handler here.
+		/*
+		 * we only send the unbh pointer if the buffer is not
+		 * up to date.  This avoids overwriting good data from
+		 * writepage() with old data from the disk or buffer cache.
+		 * Special case: unbh->b_page will be NULL if we are coming
+		 * through the DIRECT_IO handler here.
 		 */
 		if (!unbh->b_page || buffer_uptodate(unbh)
 		    || PageUptodate(unbh->b_page)) {
@@ -117,13 +139,15 @@
 						up_to_date_bh);
 
 		total_tail += retval;
+
+		/* done: file does not have direct items anymore */
 		if (tail_size == retval)
-			// done: file does not have direct items anymore
 			break;
 
 	}
-	/* if we've copied bytes from disk into the page, we need to zero
-	 ** out the unused part of the block (it was not up to date before)
+	/*
+	 * if we've copied bytes from disk into the page, we need to zero
+	 * out the unused part of the block (it was not up to date before)
 	 */
 	if (up_to_date_bh) {
 		unsigned pgoff =
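
The zeroing step this hunk reformats can be modeled standalone: after
copying the tail into the page, everything from the end of the copy to the
end of the block must be cleared so stale bytes cannot leak. Sizes below
are invented:

#include <stdio.h>
#include <string.h>

#define BLKSIZE 4096

int main(void)
{
        char page[BLKSIZE];
        unsigned pgoff = 0;        /* tail starts at page offset 0 here */
        unsigned copied = 1808;    /* bytes actually moved into the page */

        memset(page, 'x', sizeof(page));
        /* the rest of the block was never up to date: zero it */
        memset(page + pgoff + copied, 0, BLKSIZE - pgoff - copied);
        printf("zeroed %u bytes after the tail\n",
               BLKSIZE - pgoff - copied);
        return 0;
}
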
@@ -146,9 +170,11 @@
 		BUG();
 	}
 	clear_buffer_dirty(bh);
-	/* Remove the buffer from whatever list it belongs to. We are mostly
-	   interested in removing it from per-sb j_dirty_buffers list, to avoid
-	   BUG() on attempt to write not mapped buffer */
+	/*
+	 * Remove the buffer from whatever list it belongs to. We are mostly
+	 * interested in removing it from the per-sb j_dirty_buffers list,
+	 * to avoid a BUG() on an attempt to write a buffer that is not mapped
+	 */
 	if ((!list_empty(&bh->b_assoc_buffers) || bh->b_private) && bh->b_page) {
 		struct inode *inode = bh->b_page->mapping->host;
 		struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
@@ -164,12 +190,14 @@
 	unlock_buffer(bh);
 }
 
-/* this first locks inode (neither reads nor sync are permitted),
-   reads tail through page cache, insert direct item. When direct item
-   inserted successfully inode is left locked. Return value is always
-   what we expect from it (number of cut bytes). But when tail remains
-   in the unformatted node, we set mode to SKIP_BALANCING and unlock
-   inode */
+/*
+ * this first locks the inode (neither reads nor sync are permitted),
+ * reads the tail through the page cache and inserts a direct item.
+ * When the direct item is inserted successfully the inode is left
+ * locked. The return value is always what we expect from it (number
+ * of cut bytes). But when the tail remains in the unformatted node,
+ * we set mode to SKIP_BALANCING and unlock the inode
+ */
 int indirect2direct(struct reiserfs_transaction_handle *th,
 		    struct inode *inode, struct page *page,
 		    struct treepath *path,	/* path to the indirect item. */
@@ -207,9 +235,11 @@
 					 1) * sb->s_blocksize;
 	pos1 = pos;
 
-	// we are protected by i_mutex. The tail can not disapper, not
-	// append can be done either
-	// we are in truncate or packing tail in file_release
+	/*
+	 * we are protected by i_mutex. The tail can not disappear, nor
+	 * can an append be done.
+	 * we are in truncate or packing the tail in file_release
+	 */
 
 	tail = (char *)kmap(page);	/* this can schedule */
 
@@ -236,9 +266,10 @@
 			  pos1 + 1, TYPE_DIRECT, round_tail_len,
 			  0xffff /*ih_free_space */ );
 
-	/* we want a pointer to the first byte of the tail in the page.
-	 ** the page was locked and this part of the page was up to date when
-	 ** indirect2direct was called, so we know the bytes are still valid
+	/*
+	 * we want a pointer to the first byte of the tail in the page.
+	 * the page was locked and this part of the page was up to date when
+	 * indirect2direct was called, so we know the bytes are still valid
 	 */
 	tail = tail + (pos & (PAGE_CACHE_SIZE - 1));
 
@@ -250,12 +281,14 @@
 	/* Insert tail as new direct item in the tree */
 	if (reiserfs_insert_item(th, path, &key, &s_ih, inode,
 				 tail ? tail : NULL) < 0) {
-		/* No disk memory. So we can not convert last unformatted node
-		   to the direct item.  In this case we used to adjust
-		   indirect items's ih_free_space. Now ih_free_space is not
-		   used, it would be ideal to write zeros to corresponding
-		   unformatted node. For now i_size is considered as guard for
-		   going out of file size */
+		/*
+		 * No disk space. So we can not convert the last unformatted
+		 * node to the direct item.  In this case we used to adjust
+		 * the indirect item's ih_free_space. Now ih_free_space is
+		 * not used, it would be ideal to write zeros to the
+		 * corresponding unformatted node. For now i_size is
+		 * considered as a guard against going past the file size
+		 */
 		kunmap(page);
 		return block_size - round_tail_len;
 	}
@@ -264,12 +297,16 @@
 	/* make sure to get the i_blocks changes from reiserfs_insert_item */
 	reiserfs_update_sd(th, inode);
 
-	// note: we have now the same as in above direct2indirect
-	// conversion: there are two keys which have matching first three
-	// key components. They only differ by the fouhth one.
+	/*
+	 * note: we have now the same as in above direct2indirect
+	 * conversion: there are two keys which have matching first three
+	 * key components. They only differ by the fourth one.
+	 */
 
-	/* We have inserted new direct item and must remove last
-	   unformatted node. */
+	/*
+	 * We have inserted new direct item and must remove last
+	 * unformatted node.
+	 */
 	*mode = M_CUT;
 
 	/* we store position of first direct item in the in-core inode */
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 5cdfbd6..f669990 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -56,9 +56,11 @@
 #define XAROOT_NAME   "xattrs"
 
 
-/* Helpers for inode ops. We do this so that we don't have all the VFS
+/*
+ * Helpers for inode ops. We do this so that we don't have all the VFS
  * overhead and also for proper i_mutex annotation.
- * dir->i_mutex must be held for all of them. */
+ * dir->i_mutex must be held for all of them.
+ */
 #ifdef CONFIG_REISERFS_FS_XATTR
 static int xattr_create(struct inode *dir, struct dentry *dentry, int mode)
 {
@@ -73,10 +75,12 @@
 	return dir->i_op->mkdir(dir, dentry, mode);
 }
 
-/* We use I_MUTEX_CHILD here to silence lockdep. It's safe because xattr
+/*
+ * We use I_MUTEX_CHILD here to silence lockdep. It's safe because xattr
 * mutation ops aren't called during rename or splice, which are the
  * only other users of I_MUTEX_CHILD. It violates the ordering, but that's
- * better than allocating another subclass just for this code. */
+ * better than allocating another subclass just for this code.
+ */
 static int xattr_unlink(struct inode *dir, struct dentry *dentry)
 {
 	int error;
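
The ordering concern above can be modeled in userspace with pthreads;
lockdep has no userspace equivalent, so this only shows the
parent-before-child rule that the I_MUTEX_CHILD annotation vouches for:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dir_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t child_lock = PTHREAD_MUTEX_INITIALIZER;

/* always take the parent (dir) lock before the child lock so two
 * code paths can never deadlock ABBA-style */
static void remove_xattr_file(void)
{
        pthread_mutex_lock(&dir_lock);    /* parent first... */
        pthread_mutex_lock(&child_lock);  /* ...then child */
        puts("unlinking xattr file");
        pthread_mutex_unlock(&child_lock);
        pthread_mutex_unlock(&dir_lock);
}

int main(void)
{
        remove_xattr_file();
        return 0;
}
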
@@ -166,9 +170,11 @@
 	return xadir;
 }
 
-/* The following are side effects of other operations that aren't explicitly
+/*
+ * The following are side effects of other operations that aren't explicitly
  * modifying extended attributes. This includes operations such as permissions
- * or ownership changes, object deletions, etc. */
+ * or ownership changes, object deletions, etc.
+ */
 struct reiserfs_dentry_buf {
 	struct dir_context ctx;
 	struct dentry *xadir;
@@ -267,11 +273,13 @@
 	cleanup_dentry_buf(&buf);
 
 	if (!err) {
-		/* We start a transaction here to avoid a ABBA situation
+		/*
+		 * We start a transaction here to avoid an ABBA situation
 		 * between the xattr root's i_mutex and the journal lock.
 		 * This doesn't incur much additional overhead since the
 		 * new transaction will just nest inside the
-		 * outer transaction. */
+		 * outer transaction.
+		 */
 		int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 +
 			     4 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
 		struct reiserfs_transaction_handle th;
@@ -349,9 +357,11 @@
 }
 
 #ifdef CONFIG_REISERFS_FS_XATTR
-/* Returns a dentry corresponding to a specific extended attribute file
+/*
+ * Returns a dentry corresponding to a specific extended attribute file
  * for the inode. If flags allow, the file is created. Otherwise, a
- * valid or negative dentry, or an error is returned. */
+ * valid or negative dentry, or an error is returned.
+ */
 static struct dentry *xattr_lookup(struct inode *inode, const char *name,
 				    int flags)
 {
@@ -400,8 +410,10 @@
 {
 	struct address_space *mapping = dir->i_mapping;
 	struct page *page;
-	/* We can deadlock if we try to free dentries,
-	   and an unlink/rmdir has just occurred - GFP_NOFS avoids this */
+	/*
+	 * We can deadlock if we try to free dentries,
+	 * and an unlink/rmdir has just occurred - GFP_NOFS avoids this
+	 */
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
 	page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL);
 	if (!IS_ERR(page)) {
@@ -615,8 +627,10 @@
 	if (name == NULL)
 		return -EINVAL;
 
-	/* We can't have xattrs attached to v1 items since they don't have
-	 * generation numbers */
+	/*
+	 * We can't have xattrs attached to v1 items since they don't have
+	 * generation numbers
+	 */
 	if (get_inode_sd_version(inode) == STAT_DATA_V1)
 		return -EOPNOTSUPP;
 
@@ -913,12 +927,16 @@
 
 static int xattr_mount_check(struct super_block *s)
 {
-	/* We need generation numbers to ensure that the oid mapping is correct
-	 * v3.5 filesystems don't have them. */
+	/*
+	 * We need generation numbers to ensure that the oid mapping is
+	 * correct; v3.5 filesystems don't have them.
+	 */
 	if (old_format_only(s)) {
 		if (reiserfs_xattrs_optional(s)) {
-			/* Old format filesystem, but optional xattrs have
-			 * been enabled. Error out. */
+			/*
+			 * Old format filesystem, but optional xattrs have
+			 * been enabled. Error out.
+			 */
 			reiserfs_warning(s, "jdm-2005",
 					 "xattrs/ACLs not supported "
 					 "on pre-v3.6 format filesystems. "
@@ -972,9 +990,11 @@
 	return err;
 }
 
-/* We need to take a copy of the mount flags since things like
+/*
+ * We need to take a copy of the mount flags since things like
  * MS_RDONLY don't get set until *after* we're called.
- * mount_flags != mount_options */
+ * mount_flags != mount_options
+ */
 int reiserfs_xattr_init(struct super_block *s, int mount_flags)
 {
 	int err = 0;
diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
index f59626c..857ec7e 100644
--- a/fs/reiserfs/xattr.h
+++ b/fs/reiserfs/xattr.h
@@ -61,7 +61,8 @@
 	return ret;
 }
 
-/* We may have to create up to 3 objects: xattr root, xattr dir, xattr file.
+/*
+ * We may have to create up to 3 objects: xattr root, xattr dir, xattr file.
  * Let's try to be smart about it.
  * xattr root: We cache it. If it's not cached, we may need to create it.
  * xattr dir: If anything has been loaded for this inode, we can set a flag
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index a6ce532..a333a07 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -25,8 +25,10 @@
 	int size = acl ? posix_acl_xattr_size(acl->a_count) : 0;
 
 
-	/* Pessimism: We can't assume that anything from the xattr root up
-	 * has been created. */
+	/*
+	 * Pessimism: We can't assume that anything from the xattr root up
+	 * has been created.
+	 */
 
 	jcreate_blocks = reiserfs_xattr_jcreate_nblocks(inode) +
 			 reiserfs_xattr_nblocks(inode, size) * 2;
@@ -208,8 +210,10 @@
 
 	retval = reiserfs_xattr_get(inode, name, value, size);
 	if (retval == -ENODATA || retval == -ENOSYS) {
-		/* This shouldn't actually happen as it should have
-		   been caught above.. but just in case */
+		/*
+		 * This shouldn't actually happen as it should have
+		 * been caught above, but just in case
+		 */
 		acl = NULL;
 	} else if (retval < 0) {
 		acl = ERR_PTR(retval);
@@ -290,8 +294,10 @@
 	return error;
 }
 
-/* dir->i_mutex: locked,
- * inode is new and not released into the wild yet */
+/*
+ * dir->i_mutex: locked,
+ * inode is new and not released into the wild yet
+ */
 int
 reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
 			     struct inode *dir, struct dentry *dentry,
@@ -304,14 +310,18 @@
 	if (S_ISLNK(inode->i_mode))
 		return 0;
 
-	/* ACLs can only be used on "new" objects, so if it's an old object
-	 * there is nothing to inherit from */
+	/*
+	 * ACLs can only be used on "new" objects, so if it's an old object
+	 * there is nothing to inherit from
+	 */
 	if (get_inode_sd_version(dir) == STAT_DATA_V1)
 		goto apply_umask;
 
-	/* Don't apply ACLs to objects in the .reiserfs_priv tree.. This
+	/*
+	 * Don't apply ACLs to objects in the .reiserfs_priv tree. This
 	 * would be useless since permissions are ignored, and a pain because
-	 * it introduces locking cycles */
+	 * it introduces locking cycles
+	 */
 	if (IS_PRIVATE(dir)) {
 		inode->i_flags |= S_PRIVATE;
 		goto apply_umask;