Developed support for overwriting compressed files

PERMISSION_HANDLING_BRANCH
Jean-Pierre André 2010-05-25 10:25:31 +02:00
parent e3537c3376
commit a192775e2a
9 changed files with 913 additions and 174 deletions

View File

@ -189,6 +189,7 @@ struct _ntfs_attr {
u32 compression_block_size;
u8 compression_block_size_bits;
u8 compression_block_clusters;
s8 unused_runs; /* pre-reserved entries available */
};
/**
@ -198,6 +199,8 @@ struct _ntfs_attr {
typedef enum {
NA_Initialized, /* 1: structure is initialized. */
NA_NonResident, /* 1: Attribute is not resident. */
NA_BeingNonResident, /* 1: Attribute is being made not resident. */
NA_FullyMapped, /* 1: Attribute has been fully mapped */
} ntfs_attr_state_bits;
#define test_nattr_flag(na, flag) test_bit(NA_##flag, (na)->state)
@ -212,6 +215,14 @@ typedef enum {
#define NAttrSetNonResident(na) set_nattr_flag(na, NonResident)
#define NAttrClearNonResident(na) clear_nattr_flag(na, NonResident)
#define NAttrBeingNonResident(na) test_nattr_flag(na, BeingNonResident)
#define NAttrSetBeingNonResident(na) set_nattr_flag(na, BeingNonResident)
#define NAttrClearBeingNonResident(na) clear_nattr_flag(na, BeingNonResident)
#define NAttrFullyMapped(na) test_nattr_flag(na, FullyMapped)
#define NAttrSetFullyMapped(na) set_nattr_flag(na, FullyMapped)
#define NAttrClearFullyMapped(na) clear_nattr_flag(na, FullyMapped)
#define GenNAttrIno(func_name, flag) \
extern int NAttr##func_name(ntfs_attr *na); \
extern void NAttrSet##func_name(ntfs_attr *na); \
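For orientation, a minimal sketch of how the new FullyMapped flag is meant to be used (the pattern follows ntfs_attr_map_whole_runlist later in this commit; error handling is elided, and the clearing site is an assumption, not shown in this diff):

    /* Sketch: FullyMapped memoizes a successful whole-runlist mapping. */
    if (!NAttrFullyMapped(na)) {
        if (ntfs_attr_map_whole_runlist(na)) /* sets the flag on success */
            return (-1);
    }
    /* presumably, anything that invalidates the mapping clears it: */
    NAttrClearFullyMapped(na);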

View File

@ -31,9 +31,11 @@ extern s64 ntfs_compressed_attr_pread(ntfs_attr *na, s64 pos, s64 count,
extern s64 ntfs_compressed_pwrite(ntfs_attr *na, runlist_element *brl, s64 wpos,
s64 offs, s64 to_write, s64 rounded,
const void *b, int compressed_part);
const void *b, int compressed_part,
VCN *update_from);
extern int ntfs_compressed_close(ntfs_attr *na, runlist_element *brl, s64 offs);
extern int ntfs_compressed_close(ntfs_attr *na, runlist_element *brl,
s64 offs, VCN *update_from);
#endif /* defined _NTFS_COMPRESS_H */

View File

@ -42,6 +42,7 @@ extern runlist *ntfs_cluster_alloc(ntfs_volume *vol, VCN start_vcn, s64 count,
LCN start_lcn, const NTFS_CLUSTER_ALLOCATION_ZONES zone);
extern int ntfs_cluster_free_from_rl(ntfs_volume *vol, runlist *rl);
extern int ntfs_cluster_free_basic(ntfs_volume *vol, s64 lcn, s64 count);
extern int ntfs_cluster_free(ntfs_volume *vol, ntfs_attr *na, VCN start_vcn,
s64 count);

View File

@ -39,6 +39,15 @@ enum {
DEFSECBASE = 10000
};
/*
* Parameters for compression
*/
/* (log2 of) number of clusters in a compression block for new files */
#define STANDARD_COMPRESSION_UNIT 4
/* maximum cluster size for allowing compression for new files */
#define MAX_COMPRESSION_CLUSTER_SIZE 4096
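As a worked example of these parameters (a sketch; the cluster_size value is assumed, and the shift convention matches the compression_block_size usage elsewhere in this commit):

    /* Sketch: compression block geometry for a new file.           */
    /* 2^STANDARD_COMPRESSION_UNIT = 16 clusters per block; with    */
    /* 4096-byte clusters this yields a 64 KiB compression block.   */
    u32 cluster_size = 4096;                    /* assumed volume value */
    u32 block_clusters = 1 << STANDARD_COMPRESSION_UNIT;    /* 16       */
    u32 block_size = cluster_size << STANDARD_COMPRESSION_UNIT; /* 64 KiB */
    BOOL compressible = cluster_size <= MAX_COMPRESSION_CLUSTER_SIZE;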
/*
* Permission checking modes for high level and low level
*

View File

@ -49,7 +49,8 @@ struct _runlist_element {/* In memory vcn to lcn mapping structure element. */
s64 length; /* Run length in clusters. */
};
extern runlist_element *ntfs_rl_extend(runlist_element *rl, int more_entries);
extern runlist_element *ntfs_rl_extend(ntfs_attr *na, runlist_element *rl,
int more_entries);
extern LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn);

View File

@ -65,9 +65,6 @@
#include "misc.h"
#include "efs.h"
#define STANDARD_COMPRESSION_UNIT 4
#define MAX_COMPRESSION_CLUSTER_SIZE 4096
ntfschar AT_UNNAMED[] = { const_cpu_to_le16('\0') };
ntfschar STREAM_SDS[] = { const_cpu_to_le16('$'),
const_cpu_to_le16('S'),
@ -613,6 +610,11 @@ int ntfs_attr_map_whole_runlist(ntfs_attr *na)
ntfs_log_enter("Entering for inode %llu, attr 0x%x.\n",
(unsigned long long)na->ni->mft_no, na->type);
/* avoid multiple full runlist mappings */
if (NAttrFullyMapped(na)) {
ret = 0;
goto out;
}
ctx = ntfs_attr_get_search_ctx(na->ni, NULL);
if (!ctx)
goto out;
@ -686,8 +688,10 @@ int ntfs_attr_map_whole_runlist(ntfs_attr *na)
(long long)highest_vcn, (long long)last_vcn);
goto err_out;
}
if (errno == ENOENT)
if (errno == ENOENT) {
NAttrSetFullyMapped(na);
ret = 0;
}
err_out:
ntfs_attr_put_search_ctx(ctx);
out:
@ -1207,11 +1211,15 @@ static int ntfs_attr_fill_hole(ntfs_attr *na, s64 count, s64 *ofs,
*rl = ntfs_runlists_merge(na->rl, rlc);
/*
* For a compressed attribute, we must be sure there is an
* available entry, so reserve it before it gets too late.
* For a compressed attribute, we must be sure there are two
* available entries, so reserve them before it gets too late.
*/
if (*rl && (na->data_flags & ATTR_COMPRESSION_MASK))
*rl = ntfs_rl_extend(*rl,1);
if (*rl && (na->data_flags & ATTR_COMPRESSION_MASK)) {
runlist_element *oldrl = na->rl;
na->rl = *rl;
*rl = ntfs_rl_extend(na,*rl,2);
if (!*rl) na->rl = oldrl; /* restore the original runlist on failure */
}
if (!*rl) {
eo = errno;
ntfs_log_perror("Failed to merge runlists");
@ -1222,8 +1230,9 @@ static int ntfs_attr_fill_hole(ntfs_attr *na, s64 count, s64 *ofs,
errno = eo;
goto err_out;
}
na->unused_runs = 2;
na->rl = *rl;
if (*update_from == -1)
if ((*update_from == -1) || (from_vcn < *update_from))
*update_from = from_vcn;
*rl = ntfs_attr_find_vcn(na, cur_vcn);
if (!*rl) {
@ -1273,6 +1282,279 @@ err_out:
static int stuff_hole(ntfs_attr *na, const s64 pos);
/*
* Split an existing hole for overwriting with data
* The hole may have to be split into two or three parts, so
* that the overwritten part fits within a single compression block
*
* No cluster allocation is needed, this will be done later in
* standard hole filling, hence no need to reserve runs for
* future needs.
*
* Returns the number of clusters with existing compressed data
* in the compression block to be written to
* (or the full block, if it was a full hole)
* or -1 if there was an error
*/
static int split_compressed_hole(ntfs_attr *na, runlist_element **prl,
s64 pos, s64 count, VCN *update_from)
{
int compressed_part;
int cluster_size_bits = na->ni->vol->cluster_size_bits;
runlist_element *rl = *prl;
compressed_part
= na->compression_block_clusters;
/* reserve entries in runlist if we have to split */
if (rl->length > na->compression_block_clusters) {
*prl = ntfs_rl_extend(na,*prl,2);
if (!*prl) {
compressed_part = -1;
} else {
rl = *prl;
na->unused_runs = 2;
}
}
if (*prl && (rl->length > na->compression_block_clusters)) {
/*
* Locate the update part relative to beginning of
* current run
*/
int beginwrite = (pos >> cluster_size_bits) - rl->vcn;
s32 endblock = (((pos + count - 1) >> cluster_size_bits)
| (na->compression_block_clusters - 1)) + 1 - rl->vcn;
compressed_part = na->compression_block_clusters
- (rl->length & (na->compression_block_clusters - 1));
if ((beginwrite + compressed_part) >= na->compression_block_clusters)
compressed_part = na->compression_block_clusters;
/*
* if the run ends beyond end of needed block
* we have to split the run
*/
if (endblock < rl[0].length) {
runlist_element *xrl;
int n;
/*
* we have to split into three parts if the run
* does not end within the first compression block.
* This means the hole begins before the
* compression block.
*/
if (endblock > na->compression_block_clusters) {
if (na->unused_runs < 2) {
ntfs_log_error("No free run, case 1\n");
}
na->unused_runs -= 2;
xrl = rl;
n = 0;
while (xrl->length) {
xrl++;
n++;
}
do {
xrl[2] = *xrl;
xrl--;
} while (xrl != rl);
rl[1].length = na->compression_block_clusters;
rl[2].length = rl[0].length - endblock;
rl[0].length = endblock
- na->compression_block_clusters;
rl[1].lcn = LCN_HOLE;
rl[2].lcn = LCN_HOLE;
rl[1].vcn = rl[0].vcn + rl[0].length;
rl[2].vcn = rl[1].vcn
+ na->compression_block_clusters;
rl = ++(*prl);
} else {
/*
* split into two parts and use the
* first one
*/
if (!na->unused_runs) {
ntfs_log_error("No free run, case 2\n");
}
na->unused_runs--;
xrl = rl;
n = 0;
while (xrl->length) {
xrl++;
n++;
}
do {
xrl[1] = *xrl;
xrl--;
} while (xrl != rl);
if (beginwrite < endblock) {
/* we will write into the first part of the hole */
rl[1].length = rl[0].length - endblock;
rl[0].length = endblock;
rl[1].vcn = rl[0].vcn + rl[0].length;
rl[1].lcn = LCN_HOLE;
} else {
/* we will write into the second part of the hole */
// impossible ?
rl[1].length = rl[0].length - endblock;
rl[0].length = endblock;
rl[1].vcn = rl[0].vcn + rl[0].length;
rl[1].lcn = LCN_HOLE;
rl = ++(*prl);
}
}
} else {
if (rl[1].length) {
runlist_element *xrl;
int n;
/*
* split into two parts and use the
* last one
*/
if (!na->unused_runs) {
ntfs_log_error("No free run, case 4\n");
}
na->unused_runs--;
xrl = rl;
n = 0;
while (xrl->length) {
xrl++;
n++;
}
do {
xrl[1] = *xrl;
xrl--;
} while (xrl != rl);
} else {
rl[2].lcn = rl[1].lcn;
rl[2].vcn = rl[1].vcn;
rl[2].length = rl[1].length;
}
rl[1].vcn -= na->compression_block_clusters;
rl[1].lcn = LCN_HOLE;
rl[1].length = na->compression_block_clusters;
rl[0].length -= na->compression_block_clusters;
if (pos >= (rl[1].vcn << cluster_size_bits)) {
rl = ++(*prl);
}
}
if ((*update_from == -1) || ((*prl)->vcn < *update_from))
*update_from = (*prl)->vcn;
}
return (compressed_part);
}
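To make the splitting arithmetic concrete, a worked example assuming 16-cluster compression blocks (compression_block_clusters == 16): a 40-cluster hole starting at vcn 0, with a one-cluster write at vcn 20.

    /* Sketch: three-way split of a 40-cluster hole, write at vcn 20. */
    s32 cbc = 16;                     /* na->compression_block_clusters */
    s32 endblock = (20 | (cbc - 1)) + 1;   /* = 32, end of target block */
    /* endblock > cbc and endblock < 40, so the run splits three ways: */
    /*   rl[0]: vcn  0..15, length 16, still LCN_HOLE (before block)   */
    /*   rl[1]: vcn 16..31, length 16, the block being overwritten     */
    /*   rl[2]: vcn 32..39, length  8, still LCN_HOLE (after block)    */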
/*
* Borrow space from an adjacent hole for appending data
* The hole may have to be split so that the end of the hole is not
* affected by cluster allocation and overwriting
* Cluster allocation is needed for the overwritten compression block
*
* Must always leave two unused entries in the runlist
*
* Returns the number of clusters with existing compressed data
* in the compression block to be written to
* or -1 if there was an error
*/
static int borrow_from_hole(ntfs_attr *na, runlist_element **prl,
s64 pos, s64 count, VCN *update_from, BOOL wasnonresident)
{
int compressed_part = 0;
int cluster_size_bits = na->ni->vol->cluster_size_bits;
runlist_element *rl = *prl;
/* the beginning of needed block is allocated */
s32 endblock;
long long allocated;
runlist_element *zrl;
int irl;
/* check whether the compression block is fully allocated */
endblock = (((pos + count - 1) >> cluster_size_bits) | (na->compression_block_clusters - 1)) + 1 - rl->vcn;
allocated = 0;
zrl = rl;
irl = 0;
while (zrl->length && (zrl->lcn >= 0) && (allocated < endblock)) {
allocated += zrl->length;
zrl++;
irl++;
}
/*
* compression block not fully allocated and followed
* by a hole : we must allocate in the hole.
*/
if ((allocated < endblock) && (zrl->lcn == LCN_HOLE)) {
s64 xofs;
/*
* split the hole if not fully needed
*/
if ((allocated + zrl->length) > endblock) {
runlist_element *xrl;
*prl = ntfs_rl_extend(na,*prl,1);
if (*prl) {
/* beware : rl was reallocated */
rl = *prl;
zrl = &rl[irl];
na->unused_runs = 0;
xrl = zrl;
while (xrl->length) xrl++;
do {
xrl[1] = *xrl;
} while (xrl-- != zrl);
zrl->length = endblock - allocated;
zrl[1].length -= zrl->length;
zrl[1].vcn = zrl->vcn + zrl->length;
}
}
if (*prl) {
if (wasnonresident)
compressed_part = na->compression_block_clusters
- zrl->length;
xofs = 0;
if (ntfs_attr_fill_hole(na,
zrl->length << cluster_size_bits,
&xofs, &zrl, update_from))
compressed_part = -1;
else {
/* go back to initial cluster, now reallocated */
while (zrl->vcn > (pos >> cluster_size_bits))
zrl--;
*prl = zrl;
}
}
} else {
runlist_element *orl = na->rl;
/*
* Map the full runlist (needed to compute the
* compressed size), unless the runlist has not
* yet been created (data just made non-resident)
*/
if (!NAttrBeingNonResident(na)
&& ntfs_attr_map_whole_runlist(na)) {
*prl = (runlist_element*)NULL;
} else {
if (na->rl != orl) {
/* hope this cannot happen */
ntfs_log_error("Unexpected runlist relocation\n");
*prl = (runlist_element*)NULL;
} else {
*prl = ntfs_rl_extend(na,*prl,2);
na->unused_runs = 2;
}
}
}
if (!*prl) {
ntfs_log_error("No elements to borrow from a hole\n");
compressed_part = -1;
} else
if ((*update_from == -1) || ((*prl)->vcn < *update_from))
*update_from = (*prl)->vcn;
return (compressed_part);
}
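The fully-allocated test at the top of this function can be read in isolation; a sketch with hypothetical runlist contents (runs of 10 and 4 clusters, then a hole, for a block ending 16 clusters past rl->vcn):

    /* Sketch: is the target compression block fully allocated? */
    s32 endblock = 16;             /* assumed block end, in clusters */
    s64 allocated = 0;
    runlist_element *zrl = rl;
    while (zrl->length && (zrl->lcn >= 0) && (allocated < endblock)) {
        allocated += zrl->length;      /* 10 + 4 = 14, stops at hole */
        zrl++;
    }
    /* allocated < endblock and zrl->lcn == LCN_HOLE : borrow the */
    /* missing 2 clusters from the hole, splitting it if too big. */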
/**
* ntfs_attr_pwrite - positioned write to an ntfs attribute
* @na: ntfs attribute to write to
@ -1308,9 +1590,9 @@ s64 ntfs_attr_pwrite(ntfs_attr *na, const s64 pos, s64 count, const void *b)
unsigned int undo_initialized_size : 1;
unsigned int undo_data_size : 1;
} need_to = { 0, 0 };
BOOL makingnonresident = FALSE;
BOOL wasnonresident = FALSE;
BOOL compressed;
BOOL updatemap;
ntfs_log_enter("Entering for inode %lld, attr 0x%x, pos 0x%llx, count "
"0x%llx.\n", (long long)na->ni->mft_no, na->type,
@ -1324,6 +1606,7 @@ s64 ntfs_attr_pwrite(ntfs_attr *na, const s64 pos, s64 count, const void *b)
vol = na->ni->vol;
compressed = (na->data_flags & ATTR_COMPRESSION_MASK)
!= const_cpu_to_le16(0);
na->unused_runs = 0; /* prepare overflow checks */
/*
* Encrypted attributes are only supported in raw mode. We return
* access denied, which is what Windows NT4 does, too.
@ -1345,23 +1628,14 @@ s64 ntfs_attr_pwrite(ntfs_attr *na, const s64 pos, s64 count, const void *b)
goto errno_set;
/* If this is a compressed attribute it needs special treatment. */
wasnonresident = NAttrNonResident(na) != 0;
makingnonresident = wasnonresident /* yes : already changed */
&& !pos && (count == na->initialized_size);
/*
* Writing to compressed files is currently restricted
* to appending data. However we have to accept
* recursive write calls to make the attribute non resident.
* These are writing at position 0 up to initialized_size.
* Compression is also restricted to data streams.
* Only ATTR_IS_COMPRESSED compression mode is supported.
* Compression is restricted to data streams and
* only ATTR_IS_COMPRESSED compression mode is supported.
*/
if (compressed
&& ((na->type != AT_DATA)
|| ((na->data_flags & ATTR_COMPRESSION_MASK)
!= ATTR_IS_COMPRESSED)
|| ((pos != na->initialized_size)
&& (pos || (count != na->initialized_size))))) {
// TODO: Implement writing compressed attributes! (AIA)
!= ATTR_IS_COMPRESSED))) {
errno = EOPNOTSUPP;
goto errno_set;
}
@ -1391,7 +1665,7 @@ s64 ntfs_attr_pwrite(ntfs_attr *na, const s64 pos, s64 count, const void *b)
* so truncate the requested count if needed (big buffers).
*/
if (compressed) {
fullcount = na->data_size - pos;
fullcount = (pos | (na->compression_block_size - 1)) + 1 - pos;
if (count > fullcount)
count = fullcount;
}
@ -1435,6 +1709,7 @@ s64 ntfs_attr_pwrite(ntfs_attr *na, const s64 pos, s64 count, const void *b)
}
/* Handle writes beyond initialized_size. */
if (pos + count > na->initialized_size) {
if (ntfs_attr_map_whole_runlist(na))
goto err_out;
@ -1445,9 +1720,10 @@ s64 ntfs_attr_pwrite(ntfs_attr *na, const s64 pos, s64 count, const void *b)
* before it gets too late.
*/
if (compressed) {
na->rl = ntfs_rl_extend(na->rl,2);
na->rl = ntfs_rl_extend(na,na->rl,2);
if (!na->rl)
goto err_out;
na->unused_runs = 2;
}
/* Set initialized_size to @pos + @count. */
ctx = ntfs_attr_get_search_ctx(na->ni, NULL);
@ -1513,7 +1789,6 @@ s64 ntfs_attr_pwrite(ntfs_attr *na, const s64 pos, s64 count, const void *b)
}
goto err_out;
}
ofs = pos - (rl->vcn << vol->cluster_size_bits);
/*
* Determine if there is compressed data in the current
* compression block (when appending to an existing file).
@ -1534,43 +1809,45 @@ s64 ntfs_attr_pwrite(ntfs_attr *na, const s64 pos, s64 count, const void *b)
if ((rl->lcn == (LCN)LCN_HOLE)
&& wasnonresident) {
if (rl->length < na->compression_block_clusters)
/*
* the needed block is in a hole smaller
* than the compression block : we can use
* it fully
*/
compressed_part
= na->compression_block_clusters
- rl->length;
else {
compressed_part
= na->compression_block_clusters;
if (rl->length > na->compression_block_clusters) {
rl[2].lcn = rl[1].lcn;
rl[2].vcn = rl[1].vcn;
rl[2].length = rl[1].length;
rl[1].vcn -= compressed_part;
rl[1].lcn = LCN_HOLE;
rl[1].length = compressed_part;
rl[0].length -= compressed_part;
ofs -= rl->length << vol->cluster_size_bits;
rl++;
}
/*
* the needed block is in a hole bigger
* than the compression block : we must
* split the hole and use it partially
*/
compressed_part = split_compressed_hole(na,
&rl, pos, count, &update_from);
}
/* normal hole filling will do later */
} else
if ((rl->lcn >= 0) && (rl[1].lcn == (LCN)LCN_HOLE)) {
s64 xofs;
} else {
if (rl->lcn >= 0) {
/*
* the needed block contains data, make
* sure the full compression block is
* allocated. Borrow from hole if needed
*/
compressed_part = borrow_from_hole(na,
&rl, pos, count, &update_from,
wasnonresident);
}
}
if (wasnonresident)
compressed_part = na->compression_block_clusters
- rl[1].length;
rl++;
xofs = 0;
if (ntfs_attr_fill_hole(na,
rl->length << vol->cluster_size_bits,
&xofs, &rl, &update_from))
goto err_out;
/* the first allocated cluster was not merged */
if (!xofs)
rl--;
}
if (compressed_part < 0)
goto err_out;
/* just making non-resident, so not yet compressed */
if (NAttrBeingNonResident(na)
&& (compressed_part < na->compression_block_clusters))
compressed_part = 0;
}
ofs = pos - (rl->vcn << vol->cluster_size_bits);
/*
* Scatter the data from the linear data buffer to the volume. Note, a
* partial final vcn is taken care of by the @count capping of write
@ -1657,7 +1934,8 @@ retry:
if (compressed) {
written = ntfs_compressed_pwrite(na,
rl, wpos, ofs, to_write,
rounding, b, compressed_part);
rounding, b, compressed_part,
&update_from);
} else {
written = ntfs_pwrite(vol->dev, wpos,
rounding, cb);
@ -1670,7 +1948,8 @@ retry:
if (compressed) {
written = ntfs_compressed_pwrite(na,
rl, wpos, ofs, to_write,
to_write, b, compressed_part);
to_write, b, compressed_part,
&update_from);
} else
written = ntfs_pwrite(vol->dev, wpos,
to_write, b);
@ -1698,10 +1977,17 @@ retry:
done:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
/* Update mapping pairs if needed. */
if ((update_from != -1)
|| (compressed && !makingnonresident))
if (ntfs_attr_update_mapping_pairs(na, 0 /*update_from*/)) {
/*
* Update mapping pairs if needed.
* For a compressed file, we try to make a partial update
* of the mapping list. This makes a difference only if
* inode extents were needed.
*/
updatemap = (compressed
? NAttrFullyMapped(na) != 0 : update_from != -1);
if (updatemap)
if (ntfs_attr_update_mapping_pairs(na,
(update_from < 0 ? 0 : update_from))) {
/*
* FIXME: trying to recover by goto rl_err_out;
* could cause driver hang by infinite looping.
@ -1765,8 +2051,10 @@ err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
/* Update mapping pairs if needed. */
if (update_from != -1)
ntfs_attr_update_mapping_pairs(na, 0 /*update_from*/);
updatemap = (compressed
? NAttrFullyMapped(na) != 0 : update_from != -1);
if (updatemap)
ntfs_attr_update_mapping_pairs(na, 0);
/* Restore original data_size if needed. */
if (need_to.undo_data_size && ntfs_attr_truncate(na, old_data_size))
ntfs_log_perror("Failed to restore data_size");
@ -1799,6 +2087,7 @@ int ntfs_attr_pclose(ntfs_attr *na)
goto errno_set;
}
vol = na->ni->vol;
na->unused_runs = 0;
compressed = (na->data_flags & ATTR_COMPRESSION_MASK)
!= const_cpu_to_le16(0);
/*
@ -1815,14 +2104,15 @@ int ntfs_attr_pclose(ntfs_attr *na)
goto out;
/*
* For a compressed attribute, we must be sure there is an
* available entry, so reserve it before it gets too late.
* For a compressed attribute, we must be sure there are two
* available entries, so reserve them before it gets too late.
*/
if (ntfs_attr_map_whole_runlist(na))
goto err_out;
na->rl = ntfs_rl_extend(na->rl,1);
na->rl = ntfs_rl_extend(na,na->rl,2);
if (!na->rl)
goto err_out;
na->unused_runs = 2;
/* Find the runlist element containing the terminal vcn. */
rl = ntfs_attr_find_vcn(na, (na->initialized_size - 1) >> vol->cluster_size_bits);
if (!rl) {
@ -1901,8 +2191,9 @@ int ntfs_attr_pclose(ntfs_attr *na)
retry:
failed = 0;
if (update_from < 0) update_from = 0;
if (!NVolReadOnly(vol)) {
failed = ntfs_compressed_close(na, rl, ofs);
failed = ntfs_compressed_close(na, rl, ofs, &update_from);
#if CACHE_NIDATA_SIZE
if (na->ni->mrec->flags & MFT_RECORD_IS_DIRECTORY
? na->type == AT_INDEX_ROOT && na->name == NTFS_INDEX_I30
@ -1923,7 +2214,8 @@ retry:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
/* Update mapping pairs if needed. */
if (ntfs_attr_update_mapping_pairs(na, 0 /*update_from*/)) {
if (NAttrFullyMapped(na))
if (ntfs_attr_update_mapping_pairs(na, update_from)) {
/*
* FIXME: trying to recover by goto rl_err_out;
* could cause driver hang by infinite looping.
@ -1946,7 +2238,8 @@ err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
/* Update mapping pairs if needed. */
ntfs_attr_update_mapping_pairs(na, 0 /*update_from*/);
if (NAttrFullyMapped(na))
ntfs_attr_update_mapping_pairs(na, 0);
errno = eo;
errno_set:
ok = FALSE;
@ -4206,6 +4499,7 @@ int ntfs_attr_make_non_resident(ntfs_attr *na,
* we can use ntfs_attr_pwrite().
*/
NAttrSetNonResident(na);
NAttrSetBeingNonResident(na);
na->rl = rl;
na->allocated_size = new_allocated_size;
na->data_size = na->initialized_size = le32_to_cpu(a->value_length);
@ -4881,7 +5175,7 @@ static int ntfs_attr_update_mapping_pairs_i(ntfs_attr *na, VCN from_vcn)
BOOL finished_build;
retry:
if (!na || !na->rl || from_vcn) {
if (!na || !na->rl) {
errno = EINVAL;
ntfs_log_perror("%s: na=%p", __FUNCTION__, na);
return -1;

View File

@ -5,7 +5,7 @@
* Copyright (c) 2004-2005 Anton Altaparmakov
* Copyright (c) 2004-2006 Szabolcs Szakacsits
* Copyright (c) 2005 Yura Pakhuchiy
* Copyright (c) 2009 Jean-Pierre Andre
* Copyright (c) 2009-2010 Jean-Pierre Andre
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
@ -118,7 +118,7 @@ static void ntfs_new_node (struct COMPRESS_CONTEXT *pctx,
BOOL done;
const unsigned char *key;
int c;
unsigned int mxi;
unsigned long mxi;
unsigned int mxl;
mxl = (1 << (16 - pctx->nbt)) + 2;
@ -147,16 +147,21 @@ static void ntfs_new_node (struct COMPRESS_CONTEXT *pctx,
}
}
if (!done) {
register unsigned int i;
register unsigned long i;
register const unsigned char *p1,*p2;
i = 1;
p1 = key;
p2 = &pctx->inbuf[pp];
mxi = NTFS_SB_SIZE - r;
do {
} while ((p1[i] == p2[i]) && (++i < mxi));
less = (i < mxi) && (p1[i] < p2[i]);
if (mxi < 2)
less = FALSE;
else {
p1 = key;
p2 = &pctx->inbuf[pp];
/* this loop has a significant impact on performance */
do {
} while ((p1[i] == p2[i]) && (++i < mxi));
less = (i < mxi) && (p1[i] < p2[i]);
}
if (i >= THRESHOLD) {
if (i > pctx->match_length) {
pctx->match_position =
@ -174,7 +179,8 @@ static void ntfs_new_node (struct COMPRESS_CONTEXT *pctx,
pctx->rson[i] = r;
else
pctx->lson[i] = r;
pctx->dad[pp] = NIL; /* remove pp */
/* remove pp */
pctx->dad[pp] = NIL;
done = TRUE;
pctx->match_length = mxl;
}
@ -196,7 +202,8 @@ static void ntfs_new_node (struct COMPRESS_CONTEXT *pctx,
* or zero if there was a bug
*/
static unsigned int ntfs_nextmatch(struct COMPRESS_CONTEXT *pctx, unsigned int rr, int dd)
static unsigned int ntfs_nextmatch(struct COMPRESS_CONTEXT *pctx,
unsigned int rr, int dd)
{
unsigned int bestlen = 0;
@ -214,7 +221,8 @@ static unsigned int ntfs_nextmatch(struct COMPRESS_CONTEXT *pctx, unsigned int r
goto bug;
}
if (((rr + bestlen) < NTFS_SB_SIZE)) {
while ((unsigned int)(1 << pctx->nbt) <= (rr - 1))
while ((unsigned int)(1 << pctx->nbt)
<= (rr - 1))
pctx->nbt++;
ntfs_new_node(pctx,rr);
if (pctx->match_length > bestlen)
@ -247,7 +255,8 @@ bug :
* or zero if there was an error
*/
static unsigned int ntfs_compress_block(const char *inbuf, unsigned int size, char *outbuf)
static unsigned int ntfs_compress_block(const char *inbuf,
unsigned int size, char *outbuf)
{
struct COMPRESS_CONTEXT *pctx;
char *ptag;
@ -284,7 +293,8 @@ static unsigned int ntfs_compress_block(const char *inbuf, unsigned int size, ch
outbuf[xout++] = inbuf[rr];
ntag++;
} else {
while ((unsigned int)(1 << pctx->nbt) <= (rr - 1))
while ((unsigned int)(1 << pctx->nbt)
<= (rr - 1))
pctx->nbt++;
q = (pctx->match_position << (16 - pctx->nbt))
+ pctx->match_length - THRESHOLD;
@ -881,11 +891,11 @@ static u32 read_clusters(ntfs_volume *vol, const runlist_element *rl,
* Returns the amount of data written
*/
static int write_clusters(ntfs_volume *vol, const runlist_element *rl,
s64 offs, int to_write, const char *outbuf)
static s32 write_clusters(ntfs_volume *vol, const runlist_element *rl,
s64 offs, s32 to_write, const char *outbuf)
{
int count;
int put, xput;
s32 count;
s32 put, xput;
s64 xpos;
BOOL first;
const char *xoutbuf;
@ -926,17 +936,17 @@ static int write_clusters(ntfs_volume *vol, const runlist_element *rl,
* or -2 if there was an irrecoverable error (errno set)
*/
static int ntfs_comp_set(ntfs_attr *na, runlist_element *rl,
s64 offs, unsigned int insz, const char *inbuf)
static s32 ntfs_comp_set(ntfs_attr *na, runlist_element *rl,
s64 offs, u32 insz, const char *inbuf)
{
ntfs_volume *vol;
char *outbuf;
char *pbuf;
unsigned int compsz;
int written;
int rounded;
u32 compsz;
s32 written;
s32 rounded;
unsigned int clsz;
unsigned int p;
u32 p;
unsigned int sz;
unsigned int bsz;
BOOL fail;
@ -1002,7 +1012,10 @@ static int ntfs_comp_set(ntfs_attr *na, runlist_element *rl,
rounded = ((compsz - 1) | (clsz - 1)) + 1;
written = write_clusters(vol, rl, offs, rounded, outbuf);
if (written != rounded) {
// previously written text has been spoilt, should return a specific error
/*
* TODO : previously written text has been
* spoilt, should return a specific error
*/
ntfs_log_error("error writing compressed data\n");
errno = EIO;
written = -2;
@ -1016,25 +1029,295 @@ static int ntfs_comp_set(ntfs_attr *na, runlist_element *rl,
}
/*
* Free unneeded clusters after compression
* Check the validity of a compressed runlist
* The check starts at the beginning of the current run and ends
* at the end of the runlist.
* errno is set if the runlist is not valid
*/
static BOOL valid_compressed_run(ntfs_attr *na, runlist_element *rl,
BOOL fullcheck, const char *text)
{
runlist_element *xrl;
const char *err;
BOOL ok = TRUE;
xrl = rl;
while (xrl->vcn & (na->compression_block_clusters - 1))
xrl--;
err = (const char*)NULL;
while (xrl->length) {
if ((xrl->vcn + xrl->length) != xrl[1].vcn)
err = "Runs not adjacent";
if (xrl->lcn == LCN_HOLE) {
if ((xrl->vcn + xrl->length)
& (na->compression_block_clusters - 1)) {
err = "Invalid hole";
}
if (fullcheck && (xrl[1].lcn == LCN_HOLE)) {
err = "Adjacent holes";
}
}
if (err) {
ntfs_log_error("%s at %s index %ld inode %lld\n",
err, text, (long)(xrl - na->rl),
(long long)na->ni->mft_no);
errno = EIO;
ok = FALSE;
err = (const char*)NULL;
}
xrl++;
}
return (ok);
}
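A minimal usage sketch, matching the calls added to ntfs_compressed_pwrite further down (the variable names wrl and written are taken from that function): the cheap check brackets the write, and the full check additionally rejects adjacent holes:

    /* Sketch: bracketing a compressed write with validity checks. */
    if (!valid_compressed_run(na, wrl, FALSE, "begin compressed write"))
        return (0);                           /* errno set to EIO */
    /* ... write data and update the runlist ... */
    if (written
        && !valid_compressed_run(na, wrl, TRUE, "end compressed write"))
        written = 0;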
/*
* Free unneeded clusters after overwriting compressed data
*
* This generally requires an empty slot at the end of runlist,
* This generally requires one or two empty slots at the end of runlist,
* but we do not want to reallocate the runlist here because
* there are many pointers to it.
* So the empty slot has to be reserved beforehand
* So the empty slots have to be reserved beforehand
*
* Returns zero unless some error occurred (described by errno)
*
* +======= start of block =====+
* 0 |A chunk may overflow | <-- rl usedcnt : A + B
* |A on previous block | then B
* |A |
* +-- end of allocated chunk --+ freelength : C
* |B | (incl overflow)
* +== end of compressed data ==+
* |C | <-- freerl freecnt : C + D
* |C chunk may overflow |
* |C on next block |
* +-- end of allocated chunk --+
* |D |
* |D chunk may overflow |
* 15 |D on next block |
* +======== end of block ======+
*
*/
static int ntfs_compress_overwr_free(ntfs_attr *na, runlist_element *rl,
s32 usedcnt, s32 freecnt, VCN *update_from)
{
BOOL beginhole;
BOOL mergeholes;
s32 oldlength;
s32 freelength;
s64 freelcn;
s64 freevcn;
runlist_element *freerl;
ntfs_volume *vol;
s32 carry;
int res;
vol = na->ni->vol;
res = 0;
freelcn = rl->lcn + usedcnt;
freevcn = rl->vcn + usedcnt;
freelength = rl->length - usedcnt;
beginhole = !usedcnt && !rl->vcn;
/* can merge with hole before ? */
mergeholes = !usedcnt
&& rl[0].vcn
&& (rl[-1].lcn == LCN_HOLE);
/* truncate current run, carry to subsequent hole */
carry = freelength;
oldlength = rl->length;
if (mergeholes) {
/* merging with a hole before */
freerl = rl;
} else {
rl->length -= freelength;
freerl = ++rl;
}
if (!mergeholes && (usedcnt || beginhole)) {
s32 freed;
runlist_element *frl;
runlist_element *erl;
int holes = 0;
BOOL threeparts;
/* free the unneeded clusters from initial run, then freerl */
freed = freelength;
threeparts = FALSE;
if (freed > freecnt) {
threeparts = TRUE;
freed = freecnt;
}
frl = freerl;
if (freelength) {
res = ntfs_cluster_free_basic(vol,freelcn,
(threeparts ? freecnt : freelength));
if (!usedcnt) {
holes++;
freerl--;
if (freerl->vcn < *update_from)
*update_from = freerl->vcn;
}
}
while (!res && frl->length && (freed < freecnt)) {
if (frl->length <= (freecnt - freed)) {
res = ntfs_cluster_free_basic(vol, frl->lcn,
frl->length);
if (!res) {
freed += frl->length;
frl->lcn = LCN_HOLE;
frl->length += carry;
carry = 0;
holes++;
}
} else {
res = ntfs_cluster_free_basic(vol, frl->lcn,
freecnt - freed);
if (!res) {
frl->lcn += freecnt - freed;
frl->vcn += freecnt - freed;
frl->length -= freecnt - freed;
freed = freecnt;
}
}
frl++;
}
switch (holes) {
case 0 :
/* there is no hole, must insert one */
/* space for hole has been prereserved */
if (freerl->lcn == LCN_HOLE) {
if (threeparts) {
erl = freerl;
while (erl->length)
erl++;
do {
erl[2] = *erl;
} while (erl-- != freerl);
freerl[1].length = freelength - freecnt;
freerl->length = freecnt;
freerl[1].lcn = freelcn + freecnt;
freerl[1].vcn = freevcn + freecnt;
freerl[2].lcn = LCN_HOLE;
freerl[2].vcn = freerl[1].vcn
+ freerl[1].length;
freerl->vcn = freevcn;
} else {
freerl->vcn = freevcn;
freerl->length += freelength;
}
} else {
erl = freerl;
while (erl->length)
erl++;
if (threeparts) {
do {
erl[2] = *erl;
} while (erl-- != freerl);
freerl[1].lcn = freelcn + freecnt;
freerl[1].vcn = freevcn + freecnt;
freerl[1].length = oldlength - usedcnt - freecnt;
} else {
do {
erl[1] = *erl;
} while (erl-- != freerl);
}
freerl->lcn = LCN_HOLE;
freerl->vcn = freevcn;
freerl->length = freecnt;
}
break;
case 1 :
/* there is a single hole, may have to merge */
freerl->vcn = freevcn;
if (freerl[1].lcn == LCN_HOLE) {
freerl->length += freerl[1].length;
erl = freerl;
do {
erl++;
*erl = erl[1];
} while (erl->length);
}
break;
default :
/* there were several holes, must merge them */
freerl->lcn = LCN_HOLE;
freerl->vcn = freevcn;
freerl->length = freecnt;
if (freerl[holes].lcn == LCN_HOLE) {
freerl->length += freerl[holes].length;
holes++;
}
erl = freerl;
do {
erl++;
*erl = erl[holes - 1];
} while (erl->length);
break;
}
} else {
s32 freed;
runlist_element *frl;
runlist_element *xrl;
freed = 0;
frl = freerl--;
if (freerl->vcn < *update_from)
*update_from = freerl->vcn;
while (!res && frl->length && (freed < freecnt)) {
if (frl->length <= (freecnt - freed)) {
freerl->length += frl->length;
freed += frl->length;
res = ntfs_cluster_free_basic(vol, frl->lcn,
frl->length);
frl++;
} else {
freerl->length += freecnt - freed;
res = ntfs_cluster_free_basic(vol, frl->lcn,
freecnt - freed);
frl->lcn += freecnt - freed;
frl->vcn += freecnt - freed;
frl->length -= freecnt - freed;
freed = freecnt;
}
}
/* remove unneeded runlist entries */
xrl = freerl;
/* group with next run if also a hole */
if (frl->length && (frl->lcn == LCN_HOLE)) {
xrl->length += frl->length;
frl++;
}
while (frl->length) {
*++xrl = *frl++;
}
*++xrl = *frl; /* terminator */
}
return (res);
}
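To make the accounting concrete, a small sketch of the used/free split that drives this function and ntfs_compress_free below (hypothetical numbers; the formulas are the ones used in ntfs_compress_free):

    /* Sketch: 16-cluster block (64 KiB reserved, 4096-byte clusters) */
    /* whose data compressed into 5 clusters.                         */
    s64 used     = 5 * 4096;              /* bytes of compressed data */
    s64 reserved = 16 * 4096;             /* bytes reserved for block */
    s32 freecnt  = (reserved - used) >> 12;      /* 11 clusters freed */
    s32 usedcnt  = (reserved >> 12) - freecnt;   /* 5 clusters kept   */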
/*
* Free unneeded clusters after compression
*
* This generally requires one or two empty slots at the end of runlist,
* but we do not want to reallocate the runlist here because
* there are many pointers to it.
* So the empty slots have to be reserved beforehand
*
* Returns zero unless some error occurred (described by errno)
*/
static int ntfs_compress_free(ntfs_attr *na, runlist_element *rl,
s64 used, s64 reserved)
s64 used, s64 reserved, BOOL appending,
VCN *update_from)
{
int freecnt;
int usedcnt;
s32 freecnt;
s32 usedcnt;
int res;
s64 freelcn;
s64 freevcn;
int freelength;
s32 freelength;
BOOL mergeholes;
BOOL beginhole;
ntfs_volume *vol;
@ -1044,9 +1327,11 @@ static int ntfs_compress_free(ntfs_attr *na, runlist_element *rl,
vol = na->ni->vol;
freecnt = (reserved - used) >> vol->cluster_size_bits;
usedcnt = (reserved >> vol->cluster_size_bits) - freecnt;
if (rl->vcn < *update_from)
*update_from = rl->vcn;
/* skip entries fully used, if any */
while (rl->length && (rl->length < usedcnt)) {
usedcnt -= rl->length;
usedcnt -= rl->length; /* must be > 0 */
rl++;
}
if (rl->length) {
@ -1056,62 +1341,81 @@ static int ntfs_compress_free(ntfs_attr *na, runlist_element *rl,
* The required entry has been prereserved when
* mapping the runlist.
*/
/* get the free part in initial run */
freelcn = rl->lcn + usedcnt;
freevcn = rl->vcn + usedcnt;
freelength = rl->length - usedcnt;
/* new count of allocated clusters */
rl->length = usedcnt; /* warning : can be zero */
if (!((freevcn + freecnt)
& (na->compression_block_clusters - 1))) {
beginhole = !usedcnt && !rl->vcn;
mergeholes = !usedcnt
&& rl[0].vcn
&& (rl[-1].lcn == LCN_HOLE);
if (mergeholes) {
freerl = rl;
freerl->length = freecnt;
} else
freerl = ++rl;
if ((freelength > 0)
&& !mergeholes
&& (usedcnt || beginhole)) {
if (!appending)
res = ntfs_compress_overwr_free(na,rl,
usedcnt,freecnt,update_from);
else {
freelength = rl->length - usedcnt;
rl->length = usedcnt; /* warning : can be zero */
beginhole = !usedcnt && !rl->vcn;
mergeholes = !usedcnt
&& rl[0].vcn
&& (rl[-1].lcn == LCN_HOLE);
if (mergeholes) {
freerl = rl;
freerl->length = freecnt;
} else
freerl = ++rl;
if ((freelength > 0)
&& !mergeholes
&& (usedcnt || beginhole)) {
/*
* move the unused part to the end. Doing so,
* the vcn will be out of order. This does
* no harm: the vcn are meaningless now, and
* only the lcn are meaningful for freeing.
*/
/* locate current end */
while (rl->length)
rl++;
/* new terminator relocated */
rl[1].vcn = rl->vcn;
rl[1].lcn = LCN_ENOENT;
rl[1].length = 0;
/* hole, currently allocated */
rl->vcn = freevcn;
rl->lcn = freelcn;
rl->length = freelength;
}
/* free the hole */
res = ntfs_cluster_free_from_rl(vol,freerl);
if (!res) {
if (mergeholes) {
/* merge with adjacent hole */
freerl--;
freerl->length += freecnt;
/* locate current end */
while (rl->length)
rl++;
/* new terminator relocated */
rl[1].vcn = rl->vcn;
rl[1].lcn = LCN_ENOENT;
rl[1].length = 0;
/* hole, currently allocated */
rl->vcn = freevcn;
rl->lcn = freelcn;
rl->length = freelength;
} else {
if (beginhole)
/* why is this different from the begin hole case ? */
if ((freelength > 0)
&& !mergeholes
&& !usedcnt) {
freerl--;
/* mark hole as free */
freerl->lcn = LCN_HOLE;
freerl->vcn = freevcn;
freerl->length = freecnt;
freerl->length = freelength;
if (freerl->vcn < *update_from)
*update_from
= freerl->vcn;
}
}
/* free the hole */
res = ntfs_cluster_free_from_rl(vol,freerl);
if (!res) {
if (mergeholes) {
/* merge with adjacent hole */
freerl--;
freerl->length += freecnt;
} else {
if (beginhole)
freerl--;
/* mark hole as free */
freerl->lcn = LCN_HOLE;
freerl->vcn = freevcn;
freerl->length = freecnt;
}
if (freerl->vcn < *update_from)
*update_from = freerl->vcn;
/* and set up the new end */
freerl[1].lcn = LCN_ENOENT;
freerl[1].vcn = freevcn + freecnt;
freerl[1].length = 0;
}
/* and set up the new end */
freerl[1].lcn = LCN_ENOENT;
freerl[1].vcn = freevcn + freecnt;
freerl[1].length = 0;
}
} else {
ntfs_log_error("Bad end of a compression block set\n");
@ -1130,7 +1434,7 @@ static int ntfs_compress_free(ntfs_attr *na, runlist_element *rl,
*/
static int ntfs_read_append(ntfs_attr *na, const runlist_element *rl,
s64 offs, u32 compsz, int pos,
s64 offs, u32 compsz, s32 pos, BOOL appending,
char *outbuf, s64 to_write, const void *b)
{
int fail = 1;
@ -1147,7 +1451,10 @@ static int ntfs_read_append(ntfs_attr *na, const runlist_element *rl,
compbuf = (char*)ntfs_malloc(compsz);
if (compbuf) {
/* must align to full block for decompression */
decompsz = ((pos - 1) | (NTFS_SB_SIZE - 1)) + 1;
if (appending)
decompsz = ((pos - 1) | (NTFS_SB_SIZE - 1)) + 1;
else
decompsz = na->compression_block_size;
got = read_clusters(na->ni->vol, rl, offs,
compsz, compbuf);
if ((got == compsz)
@ -1171,7 +1478,8 @@ static int ntfs_read_append(ntfs_attr *na, const runlist_element *rl,
*/
static int ntfs_flush(ntfs_attr *na, runlist_element *rl, s64 offs,
const char *outbuf, int count, BOOL compress)
const char *outbuf, s32 count, BOOL compress,
BOOL appending, VCN *update_from)
{
int rounded;
int written;
@ -1183,7 +1491,8 @@ static int ntfs_flush(ntfs_attr *na, runlist_element *rl, s64 offs,
compress = FALSE;
if ((written >= 0)
&& ntfs_compress_free(na,rl,offs + written,
offs + na->compression_block_size))
offs + na->compression_block_size, appending,
update_from))
written = -1;
} else
written = 0;
@ -1212,24 +1521,47 @@ static int ntfs_flush(ntfs_attr *na, runlist_element *rl, s64 offs,
s64 ntfs_compressed_pwrite(ntfs_attr *na, runlist_element *wrl, s64 wpos,
s64 offs, s64 to_write, s64 rounded,
const void *b, int compressed_part)
const void *b, int compressed_part,
VCN *update_from)
{
ntfs_volume *vol;
runlist_element *brl; /* entry containing the beginning of block */
int compression_length;
s64 written;
s64 to_read;
s64 to_flush;
s64 roffs;
s64 got;
s64 start_vcn;
s64 nextblock;
s64 endwrite;
u32 compsz;
char *inbuf;
char *outbuf;
BOOL fail;
BOOL done;
BOOL compress;
BOOL appending;
if (!valid_compressed_run(na,wrl,FALSE,"begin compressed write")) {
return (0);
}
if ((*update_from < 0)
|| (compressed_part < 0)
|| (compressed_part > (int)na->compression_block_size)) {
ntfs_log_error("Bad update vcn or compressed_part %d for compressed write\n",
compressed_part);
errno = EIO;
return (0);
}
/* make sure there are two unused entries in runlist */
if (na->unused_runs < 2) {
ntfs_log_error("No unused runs for compressed write\n");
errno = EIO;
return (0);
}
if (wrl->vcn < *update_from)
*update_from = wrl->vcn;
written = 0; /* default return */
vol = na->ni->vol;
compression_length = na->compression_block_clusters;
@ -1244,8 +1576,10 @@ s64 ntfs_compressed_pwrite(ntfs_attr *na, runlist_element *wrl, s64 wpos,
*/
nextblock = ((offs + (wrl->vcn << vol->cluster_size_bits))
| (na->compression_block_size - 1)) + 1;
if ((offs + to_write + (wrl->vcn << vol->cluster_size_bits))
>= nextblock) {
/* determine whether we are appending to file */
endwrite = offs + to_write + (wrl->vcn << vol->cluster_size_bits);
appending = endwrite >= na->initialized_size;
if (endwrite >= nextblock) {
/* it is time to compress */
compress = TRUE;
/* only process what we can */
@ -1266,6 +1600,8 @@ s64 ntfs_compressed_pwrite(ntfs_attr *na, runlist_element *wrl, s64 wpos,
/* find the beginning of block */
start_vcn = (wrl->vcn + (offs >> vol->cluster_size_bits))
& -compression_length;
if (start_vcn < *update_from)
*update_from = start_vcn;
while (brl->vcn && (brl->vcn > start_vcn)) {
/* jumping back a hole means big trouble */
if (brl->lcn == (LCN)LCN_HOLE) {
@ -1285,14 +1621,24 @@ s64 ntfs_compressed_pwrite(ntfs_attr *na, runlist_element *wrl, s64 wpos,
* Decompress the data and append
*/
compsz = compressed_part << vol->cluster_size_bits;
// improve the needed size
outbuf = (char*)ntfs_malloc(na->compression_block_size);
if (outbuf) {
to_read = offs - roffs;
if (appending) {
to_read = offs - roffs;
to_flush = to_read + to_write;
} else {
to_read = na->data_size
- (brl->vcn << vol->cluster_size_bits);
if (to_read > na->compression_block_size)
to_read = na->compression_block_size;
to_flush = to_read;
}
if (!ntfs_read_append(na, brl, roffs, compsz,
to_read, outbuf, to_write, b)) {
(s32)(offs - roffs), appending,
outbuf, to_write, b)) {
written = ntfs_flush(na, brl, roffs,
outbuf, to_read + to_write, compress);
outbuf, to_flush, compress, appending,
update_from);
if (written >= 0) {
written = to_write;
done = TRUE;
@ -1303,9 +1649,9 @@ s64 ntfs_compressed_pwrite(ntfs_attr *na, runlist_element *wrl, s64 wpos,
} else {
if (compress && !fail) {
/*
* we are filling up a block, read the full set of blocks
* and compress it
*/
* we are filling up a block, read the full set
* of blocks and compress it
*/
inbuf = (char*)ntfs_malloc(na->compression_block_size);
if (inbuf) {
to_read = offs - roffs;
@ -1327,7 +1673,8 @@ s64 ntfs_compressed_pwrite(ntfs_attr *na, runlist_element *wrl, s64 wpos,
&& !ntfs_compress_free(na,brl,
written + roffs,
na->compression_block_size
+ roffs)) {
+ roffs,
appending, update_from)) {
done = TRUE;
written = to_write;
}
@ -1355,18 +1702,22 @@ s64 ntfs_compressed_pwrite(ntfs_attr *na, runlist_element *wrl, s64 wpos,
}
}
}
if (written
&& !valid_compressed_run(na,wrl,TRUE,"end compressed write"))
written = 0;
return (written);
}
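The boundary and append tests near the top of this function reduce to simple arithmetic; a sketch restating them with the names used above (a paraphrase of the logic, not an addition to it):

    /* Sketch: when does a compressed write trigger compression? */
    s64 wbase = wrl->vcn << vol->cluster_size_bits; /* run base, bytes */
    s64 nextblock = ((offs + wbase)
        | (na->compression_block_size - 1)) + 1;  /* next block boundary */
    s64 endwrite = offs + to_write + wbase;           /* end of write */
    BOOL appending = endwrite >= na->initialized_size; /* growing file */
    BOOL compress = endwrite >= nextblock; /* write reaches boundary : */
                                           /* compress the full block  */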
/*
* Close a file written compressed.
* This compresses the last partial compression block of the file.
* An empty runlist slot has to be reserved beforehand.
* Two empty runlist slots have to be reserved beforehand.
*
* Returns zero if closing is successful.
*/
int ntfs_compressed_close(ntfs_attr *na, runlist_element *wrl, s64 offs)
int ntfs_compressed_close(ntfs_attr *na, runlist_element *wrl, s64 offs,
VCN *update_from)
{
ntfs_volume *vol;
runlist_element *brl; /* entry containing the beginning of block */
@ -1380,6 +1731,18 @@ int ntfs_compressed_close(ntfs_attr *na, runlist_element *wrl, s64 offs)
BOOL fail;
BOOL done;
if (na->unused_runs < 2) {
ntfs_log_error("No unused runs for compressed close\n");
errno = EIO;
return (-1);
}
if (*update_from < 0) {
ntfs_log_error("Bad update vcn for compressed close\n");
errno = EIO;
return (-1);
}
if (wrl->vcn < *update_from)
*update_from = wrl->vcn;
vol = na->ni->vol;
compression_length = na->compression_block_clusters;
done = FALSE;
@ -1391,7 +1754,10 @@ int ntfs_compressed_close(ntfs_attr *na, runlist_element *wrl, s64 offs)
if (inbuf) {
start_vcn = (wrl->vcn + (offs >> vol->cluster_size_bits))
& -compression_length;
to_read = offs + ((wrl->vcn - start_vcn) << vol->cluster_size_bits);
if (start_vcn < *update_from)
*update_from = start_vcn;
to_read = offs + ((wrl->vcn - start_vcn)
<< vol->cluster_size_bits);
brl = wrl;
fail = FALSE;
while (brl->vcn && (brl->vcn > start_vcn)) {
@ -1404,7 +1770,8 @@ int ntfs_compressed_close(ntfs_attr *na, runlist_element *wrl, s64 offs)
}
if (!fail) {
/* roffs can be an offset from another uncomp block */
roffs = (start_vcn - brl->vcn) << vol->cluster_size_bits;
roffs = (start_vcn - brl->vcn)
<< vol->cluster_size_bits;
if (to_read) {
got = read_clusters(vol, brl, roffs, to_read,
inbuf);
@ -1415,7 +1782,8 @@ int ntfs_compressed_close(ntfs_attr *na, runlist_element *wrl, s64 offs)
/* free the unused clusters */
&& !ntfs_compress_free(na,brl,
written + roffs,
na->compression_block_size + roffs)) {
na->compression_block_size + roffs,
TRUE, update_from)) {
done = TRUE;
} else
/* if compression failed, leave uncompressed */
@ -1427,5 +1795,7 @@ int ntfs_compressed_close(ntfs_attr *na, runlist_element *wrl, s64 offs)
free(inbuf);
}
}
if (done && !valid_compressed_run(na,wrl,TRUE,"end compressed close"))
done = FALSE;
return (!done);
}

View File

@ -609,6 +609,42 @@ out:
return ret;
}
/*
* Basic cluster run free
* Returns 0 if successful
*/
int ntfs_cluster_free_basic(ntfs_volume *vol, s64 lcn, s64 count)
{
s64 nr_freed = 0;
int ret = -1;
ntfs_log_trace("Entering.\n");
ntfs_log_trace("Dealloc lcn 0x%llx, len 0x%llx.\n",
(long long)lcn, (long long)count);
if (lcn >= 0) {
update_full_status(vol,lcn);
if (ntfs_bitmap_clear_run(vol->lcnbmp_na, lcn,
count)) {
ntfs_log_perror("Cluster deallocation failed "
"(%lld, %lld)",
(long long)lcn,
(long long)count);
goto out;
}
nr_freed += count;
}
ret = 0;
out:
vol->free_clusters += nr_freed;
if (vol->free_clusters > vol->nr_clusters)
ntfs_log_error("Too many free clusters (%lld > %lld)!",
(long long)vol->free_clusters,
(long long)vol->nr_clusters);
return ret;
}
/**
* ntfs_cluster_free - free clusters on an ntfs volume
* @vol: mounted ntfs volume on which to free the clusters

View File

@ -117,18 +117,33 @@ static runlist_element *ntfs_rl_realloc(runlist_element *rl, int old_size,
*
* Returns the reallocated runlist
* or NULL if reallocation was not possible (with errno set)
* The runlist is left unchanged if the reallocation fails
*/
runlist_element *ntfs_rl_extend(runlist_element *rl, int more_entries)
runlist_element *ntfs_rl_extend(ntfs_attr *na, runlist_element *rl,
int more_entries)
{
runlist_element *newrl;
int last;
int irl;
last = 0;
while (rl[last].length)
last++;
rl = ntfs_rl_realloc(rl,last+1,last+more_entries+1);
if (!rl)
errno = ENOMEM;
if (na->rl && rl) {
irl = (int)(rl - na->rl);
last = irl;
while (na->rl[last].length)
last++;
newrl = ntfs_rl_realloc(na->rl,last+1,last+more_entries+1);
if (!newrl) {
errno = ENOMEM;
rl = (runlist_element*)NULL;
} else {
na->rl = newrl;
rl = &newrl[irl];
}
} else {
ntfs_log_error("Cannot extend unmapped runlist");
errno = EIO;
rl = (runlist_element*)NULL;
}
return (rl);
}
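Finally, a minimal caller-side sketch matching the updated call sites earlier in this commit (ntfs_attr_pwrite and ntfs_attr_pclose): the runlist is now extended through the attribute so that na->rl stays in sync, and the spare entries are tracked in the new unused_runs counter:

    /* Sketch: reserve two spare runlist entries before hole splitting. */
    na->rl = ntfs_rl_extend(na, na->rl, 2);   /* may reallocate na->rl */
    if (!na->rl)
        return (-1);        /* runlist left unchanged, errno is set */
    na->unused_runs = 2;    /* entries available for future splits  */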