Quote: "Oh sorry guys I should have made it clear that I don't need the calculation itself but the algorithm to find the neighbor vertices.
0x5f3759df is very interesting through."
I use this algorithm to find the vertices that are within a specified distance of each other and weld them.
It is very fast and robust, except it is in C++ and I am not willing to convert it to tier1.
If you look it over, you may glean something from it. If you have any questions about how to convert it, I will do my best to answer them.
P.S. To use this in C++, just paste it into a file called weld.h and include it in your code; there is a short usage sketch after the listing.
#pragma once
#include <vector>     // std::vector<T>
#include <functional> // std::equal_to<T>
#include <cstring>    // memset
#include "LinearMath/btVector3.h" // Bullet's btVector3, used for the distance test below
namespace std {
    // Hash functor for the vertex type. csg_vertex is the caller's vertex type and is
    // assumed to expose a pos member with float x/y/z fields.
    struct hash_Vector3d
    {
        size_t operator()( const csg_vertex& v ) const
        {
            unsigned int f = (unsigned int)( v.pos.x + v.pos.y * 11 - ( v.pos.z * 17 ) ) & 0x7fffffff; // avoid problems with negative hash values
            return (f >> 22) ^ (f >> 12) ^ f;
        }
    };
    // Functor that treats two vertices as matching when they are within the weld tolerance.
    template<class _Ty>
    struct within_tolerance
    {
        bool operator()( const _Ty& v1, const _Ty& v2 ) const
        {
            btVector3 btv1( v1.pos.x, v1.pos.y, v1.pos.z );
            btVector3 btv2( v2.pos.x, v2.pos.y, v2.pos.z );
            float distance = btv1.distance( btv2 );
            //agk::Message( agk::Str( distance ) );
            return distance <= 0.001f; // weld tolerance in world units
        }
    };
}
// Smallest power of two that is >= x (used to size the hash table).
inline size_t NextPowerOfTwo(size_t x)
{
    size_t p = 1;
    while( x > p ) {
        p += p;
    }
    return p;
}
/** Generic welding routine. This function welds the elements of the vector p
 * and returns the cross references in the xrefs array. To compare the elements
 * it uses the supplied hash, equality, and within-tolerance functors.
 *
 * This code is based on the ideas of Ville Miettinen and Pierre Terdiman.
 */
template <class T, class HashFunction, class BinaryPredicate, class BinaryPredicate2>
size_t weld( std::vector<T> & p, std::vector<size_t> & xrefs, HashFunction hash, BinaryPredicate equal, BinaryPredicate2 withinTolerance )
{
    size_t const NIL = size_t(~0);                                     // linked list terminator symbol
    size_t const origVertsCount = p.size();                            // # of input vertices
    size_t outputCount = 0;                                            // # of output vertices
    size_t hashSize = NextPowerOfTwo(origVertsCount);                  // size of the hash table
    size_t * const hashTable = new size_t[hashSize + origVertsCount];  // hash table + linked list
    size_t * const next = hashTable + hashSize;                        // use bottom part as linked list
    memset( hashTable, 0xFF, hashSize*sizeof(size_t) );                // fill with 0xFF bytes so every bucket reads as NIL
    // xrefs and p have the same size.
    xrefs.resize(origVertsCount);
    for (size_t i = 0; i < origVertsCount; ++i)
    {
        const T & e = p[i];
        size_t hashValue = hash(e) & (hashSize-1);
        size_t offset = hashTable[hashValue];
        // traverse the bucket's linked list until an exact or within-tolerance match is found
        while( offset != NIL && !equal(p[offset], e) && !withinTolerance( p[offset], e ) )
        {
            offset = next[offset];
        }
        xrefs[i] = offset;
        // no match found - copy vertex & add to hash
        if( offset == NIL ) {
            // save xref
            xrefs[i] = outputCount;
            // copy vertex
            p[outputCount] = e;
            // link to hash table
            next[outputCount] = hashTable[hashValue];
            // update hash heads and increase output counter
            hashTable[hashValue] = outputCount++;
        }
    }
    // cleanup
    delete [] hashTable;
    // drop duplicates
    p.resize(outputCount);
    // number of output vertices
    return outputCount;
}
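Here is a minimal sketch of how you might call weld() from a standalone program. MyVertex, MyHash, MyEqual and MyTolerance are stand-ins I made up for csg_vertex and the functors above, so you can test the routine without Bullet or AGK; in a real project you would presumably pass std::hash_Vector3d, std::equal_to<csg_vertex> and std::within_tolerance<csg_vertex> instead, which is what the <functional> include suggests. Note that weld.h as posted expects your csg_vertex type (and Bullet) to be visible before it is included; only weld() and NextPowerOfTwo are actually used here.
// weld_example.cpp -- illustrative only: MyVertex, MyHash, MyEqual and MyTolerance
// are hypothetical stand-ins for csg_vertex and the functors in weld.h.
#include "weld.h" // only weld() and NextPowerOfTwo are used by this sketch
#include <cmath>
#include <cstdio>
#include <vector>

struct Float3 { float x, y, z; };
struct MyVertex { Float3 pos; };

// Same hash recipe as hash_Vector3d, but cast through int first so negative
// intermediate values are well defined.
struct MyHash
{
    size_t operator()( const MyVertex& v ) const
    {
        unsigned int f = (unsigned int)(int)( v.pos.x + v.pos.y * 11 - v.pos.z * 17 ) & 0x7fffffff;
        return (f >> 22) ^ (f >> 12) ^ f;
    }
};

// Exact equality test (plays the role of std::equal_to<csg_vertex>).
struct MyEqual
{
    bool operator()( const MyVertex& a, const MyVertex& b ) const
    {
        return a.pos.x == b.pos.x && a.pos.y == b.pos.y && a.pos.z == b.pos.z;
    }
};

// Within-tolerance test, plain math instead of btVector3::distance.
struct MyTolerance
{
    bool operator()( const MyVertex& a, const MyVertex& b ) const
    {
        float dx = a.pos.x - b.pos.x;
        float dy = a.pos.y - b.pos.y;
        float dz = a.pos.z - b.pos.z;
        return std::sqrt( dx*dx + dy*dy + dz*dz ) <= 0.001f; // same 0.001 tolerance
    }
};

int main()
{
    MyVertex v0 = { { 0.0f,    0.0f, 0.0f } };
    MyVertex v1 = { { 0.0005f, 0.0f, 0.0f } }; // within 0.001 of v0, should weld
    MyVertex v2 = { { 1.0f,    2.0f, 3.0f } };
    std::vector<MyVertex> verts;
    verts.push_back( v0 );
    verts.push_back( v1 );
    verts.push_back( v2 );

    std::vector<size_t> xrefs;
    size_t uniqueCount = weld( verts, xrefs, MyHash(), MyEqual(), MyTolerance() );

    // verts now holds the uniqueCount welded vertices; xrefs[i] gives the new index
    // of original vertex i, which is what you use to remap an index buffer.
    printf( "%u unique vertices\n", (unsigned)uniqueCount ); // prints "2 unique vertices"
    for( size_t i = 0; i < xrefs.size(); ++i )
        printf( "old %u -> new %u\n", (unsigned)i, (unsigned)xrefs[i] );
    return 0;
}
One thing to keep in mind: the tolerance test only runs against vertices that land in the same hash bucket, so two vertices within 0.001 of each other that happen to hash to different buckets will not be welded. That is inherent to the posted approach and is usually fine in practice, but it is worth knowing when choosing the hash.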
The coffee is lovely, dark and deep, and I have code to write before I sleep.