Commit d4bc30ca authored by Christian Mohrbacher

updated to release 6.17

parent 6b8e7b3a
......@@ -181,7 +181,7 @@ assemble_numeric_version_code()
# build/packaging scripts
# Determine the Debian major release number (e.g. "8" from "8.11") for the
# build/packaging scripts and store it in DEBIAN_VERSION.
# stderr of lsb_release is discarded so that a missing/failing lsb_release
# does not pollute the script output; DEBIAN_VERSION is then simply empty.
define_debian_version()
{
DEBIAN_VERSION=`lsb_release -r -s 2>/dev/null | sed -e 's/\..*$//'`
}
#
......
......@@ -57,6 +57,16 @@ void want_fn(void) {
EOF
}
# Emit a compile-test snippet for a single symbol: the requested headers
# (via _generate_includes) followed by a __typeof__ probe that only
# compiles if the symbol is declared by one of those headers.
# $1: symbol name; remaining args: header paths to include.
_check_symbol() {
   local symbol=$1
   shift
   _generate_includes "$@"
   printf '__typeof__(%s) predicate = %s;\n' "$symbol" "$symbol"
}
check_function() {
local name=$1
local signature=$2
......@@ -74,6 +84,14 @@ check_header() {
_generate_includes "$header" | _marker_if_compiles "$marker"
}
# Test whether a kernel symbol is declared by the given headers and, if so,
# print the marker define for the calling makefile.
# $1: symbol name; $2: marker to emit on success; remaining args: headers.
check_symbol() {
   local symbol=$1 marker=$2
   shift 2
   _check_symbol "$symbol" "$@" | _marker_if_compiles "$marker"
}
check_struct_field \
address_space_operations::launder_page \
KERNEL_HAS_LAUNDER_PAGE \
......@@ -124,6 +142,14 @@ check_function \
KERNEL_HAS_HAVE_SUBMOUNTS \
linux/dcache.h
# kernel 4.9 and newer have the iov_iter flavor ITER_PIPE which we currently cannot handle in our
# buffered read_iter/write_iter functions. until we can handle it those kernels must have their
# buffered read_iter/write_iter disabled, otherwise the kernel may crash.
check_symbol \
ITER_PIPE \
KERNEL_HAS_ITER_PIPE \
linux/uio.h
# we have to communicate with the calling makefile somehow. since we can't really use the return
# code of this script, we'll echo a special string at the end of our output for the caller to
# detect and remove again.
......
......@@ -359,15 +359,16 @@ FhgfsOpsErr __MessagingTk_requestResponseNodeRetry(App* app, RequestResponseNode
// check target state
if (rrNode->peer.isMirrorGroup && rrNode->targetStates)
if (rrNode->targetStates)
{
CombinedTargetState state;
bool getStateRes = TargetStateStore_getState(rrNode->targetStates, nodeID.value,
&state);
if(unlikely( !getStateRes ||
(state.reachabilityState != TargetReachabilityState_ONLINE) ||
(state.consistencyState != TargetConsistencyState_GOOD) ) )
if (!getStateRes ||
state.reachabilityState != TargetReachabilityState_ONLINE ||
(rrNode->peer.isMirrorGroup &&
state.consistencyState != TargetConsistencyState_GOOD))
{
if(state.reachabilityState == TargetReachabilityState_OFFLINE)
{ // no need to wait for offline servers
......
......@@ -1148,7 +1148,11 @@ static ssize_t FhgfsOps_buffered_read_iter(struct kiocb *iocb, struct iov_iter *
FhgfsOpsHelper_logOpDebug(app, file_dentry(iocb->ki_filp), iocb->ki_filp->f_mapping->host,
__func__, "(offset: %lld; nr_segs: %lu)", (long long)iocb->ki_pos, to->nr_segs);
#ifdef KERNEL_HAS_ITER_PIPE
if (!(to->type & (ITER_BVEC | ITER_PIPE)))
#else
if (!(to->type & ITER_BVEC))
#endif
{
struct iovec iov;
struct iov_iter iter = *to;
......@@ -1191,7 +1195,7 @@ static ssize_t FhgfsOps_buffered_read_iter(struct kiocb *iocb, struct iov_iter *
if (!buffer)
return -ENOMEM;
kaddr = kmap_atomic(buffer);
kaddr = kmap(buffer);
{
ssize_t readRes;
size_t copyRes;
......@@ -1208,14 +1212,22 @@ static ssize_t FhgfsOps_buffered_read_iter(struct kiocb *iocb, struct iov_iter *
if (readRes <= 0)
break;
#ifdef KERNEL_HAS_ITER_PIPE
// do not use copy_page_to_iter with pipe targets since that would not actually *copy*
// the page but *link* it instead. our subsequent uses of the page would clobber it
// badly, and us freeing it while the pipe still has a reference would also not be
// very good.
copyRes = copy_to_iter(kaddr, readRes, to);
#else
copyRes = copy_page_to_iter(buffer, 0, readRes, to);
#endif
if (copyRes < readRes)
readRes = copyRes;
totalReadRes += readRes;
}
}
kunmap_atomic(kaddr);
kunmap(buffer);
__free_page(buffer);
}
......@@ -1545,7 +1557,7 @@ static ssize_t FhgfsOps_buffered_write_iter(struct kiocb *iocb, struct iov_iter
if (!buffer)
return -ENOMEM;
kaddr = kmap_atomic(buffer);
kaddr = kmap(buffer);
{
ssize_t writeRes;
size_t copyRes;
......@@ -1569,7 +1581,7 @@ static ssize_t FhgfsOps_buffered_write_iter(struct kiocb *iocb, struct iov_iter
break;
}
}
kunmap_atomic(kaddr);
kunmap(buffer);
__free_page(buffer);
}
......
......@@ -529,7 +529,7 @@ int FhgfsOps_set_acl(struct inode* inode, struct posix_acl* acl, int type)
FhgfsOpsErr remotingRes;
char* xAttrName;
int xAttrBufLen;
void* xAttrBuf;
void* xAttrBuf = NULL;
FhgfsInode* fhgfsInode = BEEGFS_INODE(inode);
const EntryInfo* entryInfo = FhgfsInode_getEntryInfo(fhgfsInode);
......@@ -545,30 +545,44 @@ int FhgfsOps_set_acl(struct inode* inode, struct posix_acl* acl, int type)
else
return -EOPNOTSUPP;
// prepare extended attribute - determine size needed for buffer.
xAttrBufLen = os_posix_acl_to_xattr(acl, NULL, 0);
if (acl)
{
// prepare extended attribute - determine size needed for buffer.
xAttrBufLen = os_posix_acl_to_xattr(acl, NULL, 0);
if (xAttrBufLen < 0)
return xAttrBufLen;
if (xAttrBufLen < 0)
return xAttrBufLen;
xAttrBuf = os_kmalloc(xAttrBufLen);
if (!xAttrBuf)
return -ENOMEM;
xAttrBuf = os_kmalloc(xAttrBufLen);
if (!xAttrBuf)
return -ENOMEM;
res = os_posix_acl_to_xattr(acl, xAttrBuf, xAttrBufLen);
if (res != xAttrBufLen)
goto cleanup;
res = os_posix_acl_to_xattr(acl, xAttrBuf, xAttrBufLen);
if (res != xAttrBufLen)
goto cleanup;
FhgfsInode_entryInfoReadLock(fhgfsInode);
FhgfsInode_entryInfoReadLock(fhgfsInode);
remotingRes = FhgfsOpsRemoting_setXAttr(app, entryInfo, xAttrName, xAttrBuf, xAttrBufLen, 0);
FhgfsInode_entryInfoReadUnlock(fhgfsInode);
}
else
{
FhgfsInode_entryInfoReadLock(fhgfsInode);
remotingRes = FhgfsOpsRemoting_removeXAttr(app, entryInfo, xAttrName);
if (remotingRes == FhgfsOpsErr_NODATA)
remotingRes = FhgfsOpsErr_SUCCESS;
FhgfsInode_entryInfoReadUnlock(fhgfsInode);
}
remotingRes = FhgfsOpsRemoting_setXAttr(app, entryInfo, xAttrName, xAttrBuf, xAttrBufLen, 0);
if (remotingRes != FhgfsOpsErr_SUCCESS)
res = FhgfsOpsErr_toSysErr(remotingRes);
else
res = 0;
FhgfsInode_entryInfoReadUnlock(fhgfsInode);
cleanup:
kfree(xAttrBuf);
......
......@@ -45,6 +45,12 @@ FhgfsOpsErr MirrorBuddyGroupMapper::mapMirrorBuddyGroup(uint16_t buddyGroupID,
return FhgfsOpsErr_EXISTS;
}
// dont allow the same ID for primary and secondary
if (primaryTargetID == secondaryTargetID)
{
return FhgfsOpsErr_INVAL;
}
if (targetMapper)
{
if (! this->targetMapper->targetExists(primaryTargetID) )
......
......@@ -271,16 +271,18 @@ void NodeCapacityPools::chooseStorageTargets(unsigned numTargets, unsigned minNu
// in a pool. our strategy here is to automatically allow non-preferred targets before
// touching the emergency pool.
std::set<uint16_t> chosenTargets;
// try normal and low pool with preferred targets...
chooseStorageNodesWithPref(pools[CapacityPool_NORMAL],
numTargets, preferredTargets, false, outTargets);
chooseStorageNodesWithPref(pools[CapacityPool_NORMAL], numTargets, preferredTargets, false,
outTargets, chosenTargets);
if(outTargets->size() >= minNumRequiredTargets)
goto unlock_and_exit;
chooseStorageNodesWithPref(pools[CapacityPool_LOW],
numTargets - outTargets->size(), preferredTargets, false, outTargets);
chooseStorageNodesWithPref(pools[CapacityPool_LOW], numTargets - outTargets->size(),
preferredTargets, false, outTargets, chosenTargets);
if(!outTargets->empty() )
goto unlock_and_exit;
......@@ -291,14 +293,14 @@ void NodeCapacityPools::chooseStorageTargets(unsigned numTargets, unsigned minNu
// no targets yet - allow non-preferred targets before using emergency pool...
chooseStorageNodesWithPref(pools[CapacityPool_NORMAL],
numTargets, preferredTargets, true, outTargets);
chooseStorageNodesWithPref(pools[CapacityPool_NORMAL], numTargets, preferredTargets, true,
outTargets, chosenTargets);
if(outTargets->size() >= minNumRequiredTargets)
goto unlock_and_exit;
chooseStorageNodesWithPref(pools[CapacityPool_LOW],
numTargets - outTargets->size(), preferredTargets, true, outTargets);
chooseStorageNodesWithPref(pools[CapacityPool_LOW], numTargets - outTargets->size(),
preferredTargets, true, outTargets, chosenTargets);
if(!outTargets->empty() )
goto unlock_and_exit;
......@@ -306,13 +308,13 @@ void NodeCapacityPools::chooseStorageTargets(unsigned numTargets, unsigned minNu
/* still no targets available => we have to try the emergency pool (first with preference,
then without preference) */
chooseStorageNodesWithPref(pools[CapacityPool_EMERGENCY],
numTargets, preferredTargets, false, outTargets);
chooseStorageNodesWithPref(pools[CapacityPool_EMERGENCY], numTargets, preferredTargets, false,
outTargets, chosenTargets);
if(!outTargets->empty() )
goto unlock_and_exit;
chooseStorageNodesWithPref(pools[CapacityPool_EMERGENCY],
numTargets, preferredTargets, true, outTargets);
chooseStorageNodesWithPref(pools[CapacityPool_EMERGENCY], numTargets, preferredTargets, true,
outTargets, chosenTargets);
}
......@@ -457,7 +459,7 @@ void NodeCapacityPools::chooseStorageNodesNoPrefRoundRobin(UInt16Set& activeTarg
*/
void NodeCapacityPools::chooseStorageNodesWithPref(UInt16Set& activeTargets,
unsigned numTargets, const UInt16List* preferredTargets, bool allowNonPreferredTargets,
UInt16Vector* outTargets)
UInt16Vector* outTargets, std::set<uint16_t>& chosenTargets)
{
// note: we use the name "activeTargets" for the pool here to keep the code very similar to the
// nodes chooser in the NodeStore class
......@@ -476,7 +478,6 @@ void NodeCapacityPools::chooseStorageNodesWithPref(UInt16Set& activeTargets,
// note: we use a separate set for the outTargets here to quickly find out (in stage 2) whether
// we already added a certain node from the preferred targets (in stage 1)
UInt16Set outTargetsSet; // (see note above)
UInt16ListConstIter preferredIter;
UInt16SetIter activeTargetsIter; // (will be re-used in stage 2)
......@@ -492,11 +493,9 @@ void NodeCapacityPools::chooseStorageNodesWithPref(UInt16Set& activeTargets,
{
activeTargetsIter = activeTargets.find(*preferredIter);
if(activeTargetsIter != activeTargets.end() )
{ // this preferred node is active => add to outTargets and to outTargetsSet
if(activeTargetsIter != activeTargets.end() && chosenTargets.insert(*preferredIter).second)
{ // this preferred node is active => add to outTargets
outTargets->push_back(*preferredIter);
outTargetsSet.insert(*preferredIter);
numTargets--;
}
......@@ -520,12 +519,9 @@ void NodeCapacityPools::chooseStorageNodesWithPref(UInt16Set& activeTargets,
// while we haven't found the number of requested targets
while(numTargets)
{
outTargetsSetIter = outTargetsSet.find(*activeTargetsIter);
if(outTargetsSetIter == outTargetsSet.end() )
if(chosenTargets.insert(*activeTargetsIter).second)
{
outTargets->push_back(*activeTargetsIter);
outTargetsSet.insert(*activeTargetsIter);
numTargets--;
}
......
......@@ -58,8 +58,8 @@ class NodeCapacityPools
void chooseStorageNodesNoPrefRoundRobin(UInt16Set& activeTargets, unsigned numTargets,
UInt16Vector* outTargets);
void chooseStorageNodesWithPref(UInt16Set& activeTargets, unsigned numTargets,
const UInt16List* preferredTargets, bool allowNonPreferredTargets, UInt16Vector* outTargets);
const UInt16List* preferredTargets, bool allowNonPreferredTargets,
UInt16Vector* outTargets, std::set<uint16_t>& chosenTargets);
public:
// getters & setters
......
......@@ -16,20 +16,20 @@ class NodeStoreClients : public AbstractNodeStore
public:
NodeStoreClients(bool channelsDirectDefault);
virtual bool addOrUpdateNode(NodeHandle node);
virtual bool addOrUpdateNodeEx(NodeHandle node, NumNodeID* outNodeNumID);
virtual bool addOrUpdateNode(NodeHandle node) override;
virtual bool addOrUpdateNodeEx(NodeHandle node, NumNodeID* outNodeNumID) override;
bool updateLastHeartbeatT(NumNodeID numNodeID);
virtual bool deleteNode(NumNodeID numNodeID);
NodeHandle referenceNode(NumNodeID numNodeID);
bool isNodeActive(NumNodeID numNodeID);
virtual size_t getSize();
virtual size_t getSize() override;
virtual NodeHandle referenceFirstNode();
virtual NodeHandle referenceNextNode(const NodeHandle& oldNode);
virtual NodeHandle referenceFirstNode() override;
virtual NodeHandle referenceNextNode(const NodeHandle& oldNode) override;
virtual std::vector<NodeHandle> referenceAllNodes();
virtual std::vector<NodeHandle> referenceAllNodes() override;
void syncNodes(const std::vector<NodeHandle>& masterList, NumNodeIDList* outAddedIDs,
NumNodeIDList* outRemovedIDs, bool updateExisting, Node* appLocalNode=NULL);
......
......@@ -190,6 +190,13 @@ bool ModeCreateFile::initFileSettings(FileSettings* settings)
std::string currentTrimmedTarget = StringTk::trim(*targetsIter);
uint16_t currentTargetID = StringTk::strToUInt(currentTrimmedTarget);
if (std::find(settings->preferredTargets->begin(), settings->preferredTargets->end(),
currentTargetID) != settings->preferredTargets->end())
{
std::cerr << "Each storage target may be used only once in a stripe pattern."
<< std::endl;
return false;
}
settings->preferredTargets->push_back(currentTargetID);
}
}
......
......@@ -164,6 +164,12 @@ int ModeAddMirrorBuddyGroup::execute()
return APPCODE_INVALID_CONFIG;
}
if (cfgPrimaryTargetID == cfgSecondaryTargetID)
{
std::cerr << "Primary and secondary target must be different." << std::endl;
return APPCODE_INVALID_CONFIG;
}
if (cfg->count(MODEADDMIRRORBUDDYGROUP_ARG_GROUPID) > 0 &&
cfg->count(MODEADDMIRRORBUDDYGROUP_ARG_MGROUPID) > 0)
{
......
......@@ -7,7 +7,6 @@
LIB = mongoose
CC = gcc
CFLAGS = -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 \
-pthread -rdynamic -Wall -fmessage-length=0 -fno-strict-aliasing \
-Wunused-variable -Wextra -Wno-unused-parameter -ggdb3
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment