Commit 6815bab5 authored by Thomas White

Progress with post refinement...

parent 074034e2
@@ -292,6 +292,34 @@ double integrate_all(struct image *image, RefList *reflections)
 }
 
+
+/* Decide which reflections can be scaled */
+static int select_scalable_reflections(RefList *list)
+{
+	int n_scalable = 0;
+	Reflection *refl;
+	RefListIterator *iter;
+
+	for ( refl = first_refl(list, &iter);
+	      refl != NULL;
+	      refl = next_refl(refl, iter) ) {
+
+		int scalable = 1;
+		double v;
+
+		if ( get_partiality(refl) < 0.1 ) scalable = 0;
+
+		v = fabs(get_intensity(refl));
+		if ( v < 0.1 ) scalable = 0;
+
+		set_scalable(refl, scalable);
+		if ( scalable ) n_scalable++;
+	}
+
+	return n_scalable;
+}
+
+
 /* Calculate partialities and apply them to the image's raw_reflections */
 void update_partialities(struct image *image, const char *sym,
                          int *n_expected, int *n_found, int *n_notfound)
@@ -355,6 +383,7 @@ void update_partialities_and_asymm(struct image *image, const char *sym,
  * to get the list used for scaling and post refinement */
 	image->reflections = asymmetric_indices(image->raw_reflections,
 	                                        sym, obs);
+	select_scalable_reflections(image->reflections);
 
 	/* Need these lists to work fast */
 	optimise_reflist(image->reflections);
@@ -338,12 +338,11 @@ static RefList *lsq_intensities(struct image *images, int n,
 		for ( refl = find_refl(images[m].reflections,
 		                       it->h, it->k, it->l);
 		      refl != NULL;
-		      refl = next_found_refl(refl) ) {
+		      refl = next_found_refl(refl) )
+		{
 			double p;
 
 			if ( !get_scalable(refl) ) continue;
 
 			p = get_partiality(refl);
 			num += get_intensity(refl) * p * G;
@@ -353,8 +352,14 @@ static RefList *lsq_intensities(struct image *images, int n,
 		}
 
-		new = add_refl(full, it->h, it->k, it->l);
-		set_int(new, num/den);
+		if ( !isnan(num/den) ) {
+			new = add_refl(full, it->h, it->k, it->l);
+			set_int(new, num/den);
+		} else {
+			ERROR("Couldn't calculate LSQ full intensity for"
+			      "%3i %3i %3i\n", it->h, it->k, it->l);
+			/* Doom is probably impending */
+		}
 
 	}
@@ -39,10 +39,11 @@ static void mess_up_cell(UnitCell *cell)
 	double cx, cy, cz;
 
 	cell_get_cartesian(cell, &ax, &ay, &az, &bx, &by, &bz, &cx, &cy, &cz);
-	ax += 0.01 * ax;
+	ax += 0.005*ax;
 	cell_set_cartesian(cell, ax, ay, az, bx, by, bz, cx, cy, cz);
 }
 
 
 /* For each reflection in "partial", fill in what the intensity would be
  * according to "full" */
 static void calculate_partials(RefList *partial, double osf,
@@ -112,6 +113,7 @@ int main(int argc, char *argv[])
 	struct quaternion orientation;
 	struct image image;
 	FILE *ofh;
+	UnitCell *new;
 
 	/* Long options */
 	const struct option longopts[] = {
@@ -252,12 +254,17 @@ int main(int argc, char *argv[])
 	reflist_free(image.reflections);
 
-	/* Alter the cell by a tiny amount */
-	mess_up_cell(image.indexed_cell);
 	image.filename = "(simulated 2)";
+	new = rotate_cell(cell, deg2rad(0.001), deg2rad(0.0), 0.0);
+	cell_free(image.indexed_cell);
+	image.indexed_cell = new;
 
-	/* Write another chunk */
+	/* Calculate new partials */
+	image.reflections = find_intersections(&image, image.indexed_cell, 0);
+	calculate_partials(image.reflections, 0.5, full, sym);
+
+	/* Give a slightly incorrect cell in the stream */
+	//mess_up_cell(image.indexed_cell);
+
 	write_chunk(ofh, &image, STREAM_INTEGRATED);
 	reflist_free(image.reflections);
@@ -136,43 +136,12 @@ static void refine_all(struct image *images, int n_total_patterns,
 	qargs.task_defaults = task_defaults;
 	qargs.n = 0;
 	qargs.n_done = 0;
-	qargs.n_total_patterns = n_total_patterns;
-	qargs.images = images;
+
+	/* FIXME: Not refining the first image, for now */
+	qargs.n_total_patterns = n_total_patterns-1;
+	qargs.images = images+1;
 
 	run_threads(nthreads, refine_image, get_image, done_image,
-	            &qargs, n_total_patterns, 0, 0, 0);
-}
-
-
-/* Decide which reflections can be scaled */
-static void select_scalable_reflections(struct image *images, int n)
-{
-	int m;
-	int n_scalable = 0;
-
-	for ( m=0; m<n; m++ ) {
-
-		Reflection *refl;
-		RefListIterator *iter;
-
-		for ( refl = first_refl(images[m].reflections, &iter);
-		      refl != NULL;
-		      refl = next_refl(refl, iter) ) {
-
-			int scalable = 1;
-			double v;
-
-			if ( get_partiality(refl) < 0.1 ) scalable = 0;
-
-			v = fabs(get_intensity(refl));
-			if ( v < 0.1 ) scalable = 0;
-
-			set_scalable(refl, scalable);
-			if ( scalable ) n_scalable++;
-		}
-	}
-
-	STATUS("%i reflections selected as scalable.\n", n_scalable);
+	            &qargs, n_total_patterns-1, 0, 0, 0);
 }
@@ -369,7 +338,6 @@ int main(int argc, char *argv[])
 	/* Make initial estimates */
 	STATUS("Performing initial scaling.\n");
-	select_scalable_reflections(images, n_total_patterns);
 	full = scale_intensities(images, n_usable_patterns, sym, obs, cref);
 
 	/* Iterate */
@@ -401,7 +369,6 @@ int main(int argc, char *argv[])
 		/* Re-estimate all the full intensities */
 		reflist_free(full);
-		select_scalable_reflections(images, n_usable_patterns);
 		full = scale_intensities(images, n_usable_patterns,
 		                         sym, obs, cref);
@@ -29,15 +29,13 @@
 /* Maximum number of iterations of NLSq to do for each image per macrocycle. */
-#define MAX_CYCLES (100)
+#define MAX_CYCLES (10)
 
 
 /* Refineable parameters */
 enum {
-	REF_SCALE,
-	REF_DIV,
-	REF_R,
 	REF_ASX,
+	NUM_PARAMS,
 	REF_BSX,
 	REF_CSX,
 	REF_ASY,
@@ -46,7 +44,9 @@ enum {
 	REF_ASZ,
 	REF_BSZ,
 	REF_CSZ,
-	NUM_PARAMS
+	REF_SCALE,
+	REF_DIV,
+	REF_R,
 };
@@ -321,7 +321,7 @@ static double pr_iterate(struct image *image, const RefList *full,
 		}
 	}
 
-	show_matrix_eqn(M, v, NUM_PARAMS);
+	//show_matrix_eqn(M, v, NUM_PARAMS);
 	shifts = gsl_vector_alloc(NUM_PARAMS);
 	gsl_linalg_HH_solve(M, v, shifts);
@@ -341,10 +341,77 @@
 }
 
 
+static double mean_partial_dev(struct image *image,
+                               const RefList *full, const char *sym)
+{
+	double dev = 0.0;
+
+	/* For each reflection */
+	Reflection *refl;
+	RefListIterator *iter;
+
+	for ( refl = first_refl(image->reflections, &iter);
+	      refl != NULL;
+	      refl = next_refl(refl, iter) ) {
+
+		double G, p;
+		signed int h, k, l;
+		Reflection *full_version;
+		double I_full, I_partial;
+
+		if ( !get_scalable(refl) ) continue;
+
+		get_indices(refl, &h, &k, &l);
+		G = image->osf;
+
+		full_version = find_refl(full, h, k, l);
+		assert(full_version != NULL);
+
+		p = get_partiality(refl);
+		I_partial = get_intensity(refl);
+		I_full = get_intensity(full_version);
+		//STATUS("%3i %3i %3i %5.2f %5.2f %5.2f %5.2f\n",
+		//       h, k, l, G, p, I_partial, I_full);
+
+		dev += pow(I_partial - p*G*I_full, 2.0);
+	}
+
+	return dev;
+}
+
+
 void pr_refine(struct image *image, const RefList *full, const char *sym)
 {
 	double max_shift;
 	int i;
+	double ax, ay, az;
+	double bx, by, bz;
+	double cx, cy, cz;
+	UnitCell *cell = image->indexed_cell;
+	double shval, origval;
+
+	cell_get_cartesian(cell, &ax, &ay, &az, &bx, &by, &bz, &cx, &cy, &cz);
+	shval = 0.001*ax;
+	origval = ax;
+
+	for ( i=-10; i<=10; i++ ) {
+
+		double dev;
+
+		cell_get_cartesian(cell, &ax, &ay, &az, &bx,
+		                   &by, &bz, &cx, &cy, &cz);
+		ax = origval + (double)i*shval;
+		cell_set_cartesian(cell, ax, ay, az, bx, by, bz, cx, cy, cz);
+
+		update_partialities_and_asymm(image, sym,
+		                              NULL, NULL, NULL, NULL);
+
+		dev = mean_partial_dev(image, full, sym);
+		STATUS("%i %e %e\n", i, ax, dev);
+	}
+
+	return;
+
 	i = 0;
 	do {
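For reference, the residual that the new mean_partial_dev() accumulates can be written out as an equation (a restatement of the added code, not part of the commit; G is the image's overall scale factor image->osf, p the partiality, and the sum runs over the scalable reflections only):

$$ D = \sum_{hkl\ \mathrm{scalable}} \left( I^{\mathrm{partial}}_{hkl} - p_{hkl}\,G\,I^{\mathrm{full}}_{hkl} \right)^{2} $$

The new pr_refine() body evaluates this quantity on a 21-point scan of the ax cell component (origval plus i times 0.1% of ax, for i from -10 to 10), prints it via STATUS(), and returns before the pre-existing NLSq do-loop is reached.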