Geometric information parameterization #584

Closed
daxiaojiupang opened this issue Sep 18, 2024 · 15 comments

@daxiaojiupang commented Sep 18, 2024

Expected Behavior

[Two attached figures: the forward projection computed with a single input angle (first) and with an array of angles (second), at the same projection angle]

Actual Behavior

My goal is to enter the detector coordinates directly in the .m file: DetectorXYZ is a one-dimensional array that stores the detector coordinates, and the modified code is below. The first image is the forward projection computed when a single angle is entered; the second is the one computed when the projection angles are entered as an array. The projection angle of the first and second image is the same, yet the computed forward projections are inconsistent. Is there anything in the code that I have not modified? Please help me check.

Code to reproduce the problem (If applicable)

__device__ Point3D projParamsArrayDev[MAX_SIZE*MAX_SIZE];  // Dev means it is on device
__device__ Point3D D_XYZ[MAX_SIZE*MAX_SIZE];
__global__ void vecAddInPlace(float *a, float *b, unsigned long  n)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    // Make sure we do not go out of bounds
    if (idx < n)
        a[idx] = a[idx] + b[idx];
}
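(An illustrative aside, not from the issue: a typical host-side launch for the kernel above, wrapped so it compiles at file scope; d_a and d_b stand for device arrays of length n that the caller has already filled.)

static void launchVecAddInPlace(float* d_a, float* d_b, unsigned long n)
{
    int threads = 256;
    int blocks  = (int)((n + threads - 1) / threads);  // cover all n elements
    vecAddInPlace<<<blocks, threads>>>(d_a, d_b, n);   // tail block handled by the in-kernel bounds check
    cudaDeviceSynchronize();  // block until done so errors surface here
}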

__global__ void kernelPixelDetector(int npixelXYZ,
        Geometry geo,
        float* detector,
        const int currProjSetNumber,
        const int totalNoOfProjections,
        cudaTextureObject_t tex){
    
    unsigned long long u = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long long v = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long long projNumber=threadIdx.z;
    
    
    if (u>= geo.nDetecU || v>= geo.nDetecV || projNumber>=PROJ_PER_BLOCK)
        return;
    
#if IS_FOR_MATLAB_NUCTECH
    size_t idx =  (size_t)(u * (unsigned long long)geo.nDetecV+ v)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU;
#else
    size_t idx =  (size_t)(v * (unsigned long long)geo.nDetecU+ u)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU;
#endif
    unsigned long indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber;  // This is the ABSOLUTE projection number in the projection array (for a given GPU)

    if(indAlpha>=totalNoOfProjections)
        return;
    for (int k = 0; k < npixelXYZ; k++) {
        D_XYZ[k] = projParamsArrayDev[(npixelXYZ+1)*projNumber+k];  // (npixelXYZ+1) entries per projection: npixelXYZ detector points plus the source
    }
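    // NOTE (an observation, not confirmed in the thread): D_XYZ is a single
    // global __device__ array shared by all blocks and threads, yet threads
    // with different projNumber write different projections' points into the
    // same slots, so these copies race whenever more than one projection is
    // processed; with a single angle only projNumber==0 ever writes.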
    Point3D source = projParamsArrayDev[(npixelXYZ+1)*projNumber+npixelXYZ];
    
    /////// Get coordinates XYZ of pixel UV
    unsigned long pixelV = v;
    unsigned long pixelU = u;
    Point3D pixel1D;
    pixel1D.x=D_XYZ[v * (unsigned long long)(geo.nDetecU+1) + u].x;
    pixel1D.y=D_XYZ[v * (unsigned long long)(geo.nDetecU+1) + u].y;
    pixel1D.z=D_XYZ[v * (unsigned long long)(geo.nDetecU+1)+ u].z;
    ///////
}
//***********//
int siddon_ray_projection(float*  img, Geometry geo, float** result,float const * const angles,int nangles,
        int npixelXYZ, float* DetectorXYZ, const GpuIds& gpuids){
 for (dev=0;dev<deviceCount;dev++){
                cudaSetDevice(gpuids[dev]);
                
                for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){
                    proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device;
                    if (proj_global>=nangles)
                        break;
                    if ((i*PROJ_PER_BLOCK+j)>=nangles_device)
                        break;
                    geoArray[sp].alpha=angles[proj_global*3];
                    geoArray[sp].theta=angles[proj_global*3+1];
                    geoArray[sp].psi  =angles[proj_global*3+2];
                    
                   for (int k=0 ; k<npixelXYZ; k++) {
                       DetecXYZ[k].x = DetectorXYZ[k*3];
                       DetecXYZ[k].y = DetectorXYZ[k*3+1];
                       DetecXYZ[k].z = DetectorXYZ[k*3+2];
                   }
                    
                    // Precompute per-angle constants (distances etc.) for faster execution
                    computeDeltas_Siddon(geoArray[sp],proj_global,npixelXYZ,DetecXYZ, &source);
                    // Pack this projection's detector points (and, below, its source) for the kernel
                    for (int k=0; k<npixelXYZ; k++) {
                        projParamsArrayHost[(npixelXYZ+1)*j+k]=DetecXYZ[k];
                    }
                    projParamsArrayHost[(npixelXYZ+1)*j+npixelXYZ]=source;
                    
                }
                cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*(npixelXYZ+1)*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[dev*2]);
                cudaStreamSynchronize(stream[dev*2]);
                cudaCheckErrors("kernel fail");
                kernelPixelDetector<<<grid,block,0,stream[dev*2]>>>(npixelXYZ, geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]);
            }
}
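/* (Editor's aside, hedged: projParamsArrayDev is declared with MAX_SIZE*MAX_SIZE
   entries while the copy above writes (npixelXYZ+1)*PROJ_PER_BLOCK entries, and
   MAX_SIZE is not shown, so a capacity/error check on the copy is worth adding,
   e.g.:

   assert((size_t)(npixelXYZ+1)*PROJ_PER_BLOCK <= (size_t)MAX_SIZE*MAX_SIZE); // needs <cassert>
   cudaError_t err = cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost,
           sizeof(Point3D)*(npixelXYZ+1)*PROJ_PER_BLOCK, 0,
           cudaMemcpyHostToDevice, stream[dev*2]);
   if (err != cudaSuccess)
       printf("copy to projParamsArrayDev failed: %s\n", cudaGetErrorString(err)); // needs <cstdio>
*/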

void computeDeltas_Siddon(Geometry geo,int i, int npixelXYZ,Point3D* DetecXYZ,Point3D* source){
    Point3D S;
    S.x=geo.DSO[i];
    S.y=0;
    S.z=0;
    //End point
    for(int k=0; k<npixelXYZ; k++){
        eulerZYZ(geo,&DetecXYZ[k]);
        // Offset the image, translate (0,0,0) to the image corner, and scale so dVoxel==1
        DetecXYZ[k].x = (DetecXYZ[k].x - geo.offOrigX[i] + geo.sVoxelX/2) / geo.dVoxelX;
        DetecXYZ[k].y = (DetecXYZ[k].y - geo.offOrigY[i] + geo.sVoxelY/2) / geo.dVoxelY;
        DetecXYZ[k].z = (DetecXYZ[k].z - geo.offOrigZ[i] + geo.sVoxelZ/2) / geo.dVoxelZ;
    }
    
    eulerZYZ(geo,&S);
    //2: Offset image (instead of offseting image, -offset everything else)
    // NB: do not apply offsets to Pfinalu0 and Pfinalv0: they are directions, and are invariant through translations
    S.x=S.x-geo.offOrigX[i];               S.y=S.y-geo.offOrigY[i];               S.z=S.z-geo.offOrigZ[i];
    // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
    S.x      =S.x+geo.sVoxelX/2;          S.y      =S.y+geo.sVoxelY/2;              S.z      =S.z      +geo.sVoxelZ/2;
    //4. Scale everything so dVoxel==1
    S.x      =S.x/geo.dVoxelX;          S.y      =S.y/geo.dVoxelY;            S.z      =S.z/geo.dVoxelZ;
    *source=S;
}
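For reference, each detector point (and, after it, the source) goes through the same chain above: rotation by the Euler angles, subtraction of the image offset, translation of (0,0,0) to the image corner, and rescaling to voxel units:

$$p' = \left(R_{zyz}(\alpha,\theta,\psi)\,p - \mathrm{offOrig} + \tfrac{1}{2}\,\mathrm{sVoxel}\right) \oslash \mathrm{dVoxel}$$

where $\oslash$ denotes element-wise division.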

MATLAB code:

clear;
close all;

%% Define Geometry
% 
% Distances
    geo.DSD = 1536;                             % Distance Source Detector      (mm)
    geo.DSO = 1000;                             % Distance Source Origin        (mm)
    % Detector parameters
    geo.nDetector=[128;128];					% number of pixels              (px)
    geo.sDetector=[512*0.8;512*0.8];            % total size of the detector    (mm)
    geo.dDetector=geo.sDetector./geo.nDetector; % size of each pixel (mm): 3.2 mm here

    % Image parameters
    geo.nVoxel=[128;128;128];                          % number of voxels              (vx)
    geo.sVoxel=[256;256;256];                   % total size of the image       (mm)
    geo.dVoxel=geo.sVoxel./geo.nVoxel;          % size of each voxel            (mm)
    % Offsets
    geo.offOrigin =[0;0;0];                     % Offset of image from origin   (mm)
    geo.offDetector=[0; 0];                     % Offset of Detector            (mm)
                  
% For reference, TIGRE's original detector pixel formula (C code):
%   P.y = geo.dDetecU*(-((double)geo.nDetecU/2.0)+0.5);   P.z = geo.dDetecV*(((double)geo.nDetecV/2.0)-0.5);
side_length = geo.sDetector(1);
center_x = -(geo.DSD-geo.DSO);          % x of the detector plane (source sits at x = +DSO)

half_side = side_length/2;
top_left_y = -geo.dDetector(1)*((128/2.0)+0.5);   % computed but not used below
top_left_z =  geo.dDetector(2)*((128/2.0)-0.5);   % computed but not used below

y_range = -half_side:geo.dDetector(1):half_side;    % 129 samples = nDetector(1)+1 (matches the geo.nDetecU+1 stride in the kernel)
z_range =  half_side:-geo.dDetector(2):-half_side;  % 129 samples = nDetector(2)+1

% Total number of detector sample points
num_points = length(y_range) * length(z_range);
x_coords = zeros(1, num_points);
y_coords = zeros(1, num_points);
z_coords = zeros(1, num_points);

% Fill the coordinate arrays row by row (outer loop over z, inner over y)
index = 1;
for i = 1:length(z_range)
    for j = 1:length(y_range)
        x_coords(index) = center_x;
        y_coords(index) = y_range(j);
        z_coords(index) = z_range(i);
        index = index + 1;
    end
end
X=x_coords;
Y=y_coords;
Z=z_coords;
DetectorXYZ=[X;Y;Z];    
angles=linspace(0,2*pi,4);   % [0, 2.0944, 4.1888, 6.2832] rad
%angles=[2.0944];            % single-angle case: identical to the 2nd entry above
shepp=sheppLogan3D(geo.nVoxel); 
FP   =Ax(shepp,geo,angles,DetectorXYZ,'Siddon');
plotProj(FP,angles,'Slice',2);

Specifications

  • MATLAB/python version:
  • OS:
  • CUDA version:
@AnderBiguri (Member)

Heya!

Questions:

1- Why are you doing this?
2- The images you've shown, are they the expected vs. the current result?

@daxiaojiupang (Author) commented Sep 18, 2024 via email

@AnderBiguri (Member)

Hi @daxiaojiupang, you need to explain that in a little more detail; your second point is not very understandable. Can you please explain better what the differences are?

@daxiaojiupang (Author) commented Sep 18, 2024 via email

@AnderBiguri (Member)

So both of the examples here are from your code, not the basic TIGRE code?

@daxiaojiupang (Author) commented Sep 18, 2024 via email

@AnderBiguri (Member)

In any case, these things are not very trivial to debug. I suggest changing the output of your code to e.g. the index of the detector rather than the integral, so you can at least see whether each line is being computed. That way you know if the issue is in the function calls or in the numerical values.

This could be many things. One possibility is e.g. that you are copying the memory out before synchronizing the stream, but you didn't post the entire code, so I can't know for sure.

It could also be npixelXYZ overflowing. I'm not sure what it is supposed to be, but if it's the number of pixels in the detector (you already have access to this value in the geometry!) then an int likely won't cut it. You want a size_t or an unsigned long long.

It could also be bad use of symbols. You seem to be copying all the detector locations into a symbol? Symbols are not designed for this amount of data, even if it may work. I would instead put them in a constant memory array on the device, and then read from that array.

Or it could be something else; who knows, these things are quite hard to debug. Try any of the above and let me know.

However, if you are looking to add support for curved detectors (where the pixel distances to the source are equal), there are easier ways than this. Crucially, it's better to either "flatten" the projections, or to make the projector code assume equal distances and compute the pixel locations accordingly, with no need to pass any extra memory to the GPU.
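(A minimal sketch of the device-array alternative suggested above, not TIGRE code: d_detecXYZ and h_detecXYZ are hypothetical names, the other identifiers are assumed from the snippet in the issue, and the kernel is assumed to gain one extra pointer parameter. Note that __constant__ memory is capped at 64 KB, which a 129x129 grid of Point3D already exceeds, so the sketch uses an ordinary global-memory array.)

// Allocate an ordinary global-memory array once, instead of a __device__ symbol.
Point3D* d_detecXYZ = nullptr;
cudaMalloc(&d_detecXYZ, sizeof(Point3D) * npixelXYZ);

// Copy on the same stream the kernel uses, so ordering is guaranteed.
cudaMemcpyAsync(d_detecXYZ, h_detecXYZ, sizeof(Point3D) * npixelXYZ,
                cudaMemcpyHostToDevice, stream[dev*2]);

// Pass the pointer as a normal kernel argument and index it directly in the
// kernel, e.g. Point3D p = d_detecXYZ[v*(geo.nDetecU+1)+u];, instead of
// staging the points through the D_XYZ symbol in every thread.
kernelPixelDetector<<<grid, block, 0, stream[dev*2]>>>(npixelXYZ, geoArray[sp],
        dProjection[(i%2)+dev*2], i, nangles_device, texImg[dev], d_detecXYZ);

cudaFree(d_detecXYZ);  // after all launches that use it have finished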

@daxiaojiupang (Author) commented Sep 18, 2024 via email

@daxiaojiupang (Author) commented Sep 27, 2024 via email

@AnderBiguri (Member)

Hi @daxiaojiupang, I can't see any figures. Can you also edit the post so it's a bit more readable (code blocks etc.), please?

@AnderBiguri (Member) commented Sep 28, 2024 via email

@AnderBiguri (Member) commented Sep 28, 2024 via email

@daxiaojiupang (Author)

@AnderBiguri
Yes, the complete code is very complex. I would like to ask you to check whether there is anything wrong with my approach. My idea was to find the values of u and v in the voxel backprojection file by defining four arrays in the .m file: two arrays storing the left and right y values of each pixel in the first row of the flat detector, and two arrays storing the top and bottom z values of each pixel in the first column. These four arrays are then used to look up u and v from the y and z values computed by the original code. Is there anything else that needs to be changed?

> However, in any case, I'm not sure I can debug a highly complex code with just a little piece of it and a figure... Ander

On Sat, 28 Sept 2024, 12:46, Ander Biguri wrote:

> Hi, I see. Apologies for insisting, but it would be easier if you put these in the GitHub issue rather than replying to the email, as they do not seem to be getting posted there. I will lose track of the information if it is not all in the same place. Thanks.

On Sat, 28 Sept 2024, 11:52, daxiaojiupang wrote:

> @AnderBiguri
> Thanks for your help; after repeated adjustments I finally solved the problem, which was caused by a memory allocation error in the array passed from the CPU to the GPU.
> However, I have now encountered the following problem and hope you can help. In the backprojection calculation I store the left and right y values of each pixel in the first row in two arrays, and the upper and lower z values of each pixel in the first column in two arrays, and use them to compute u and v. The changes to voxel_backprojection are below. Fig1 is the result of the original code; Fig2 is the result of the modified code. What other aspects have I not considered in the modification? I am looking forward to your reply.
>
> //// Original code //////////////////
> // Get the coordinates in the detector UV where the mid point of the voxel is projected.
> float t=__fdividef(DSO-DSD-S.x,vectX);
> float y,z;
> y=vectY*t+S.y;
> z=vectZ*t+S.z;
> float u,v;
> u=y+(float)geo.nDetecU*0.5f;
> v=z+(float)geo.nDetecV*0.5f;
>
> voxelColumn[colIdx]+=tex3D(tex, v, u, indAlpha+0.5f)*weight;
>
> //// Modified code /////////////////
> // Get the coordinates in the detector UV where the mid point of the voxel is projected.
> float t=__fdividef(DSO-DSD-S.x,vectX);
> float y,z;
> y=vectY*t+S.y;
> z=vectZ*t+S.z;
> int u = -1;
> float uFrac = 0.0f; // floating-point u after interpolation
> // i is the index of the i-th pixel in the first row of pixels:
> // U_left[i] stores the y coordinate of the pixel's left edge,
> // and U_right[i] the y coordinate of its right edge.
> for (int i = 0; i < geo.nDetecU; i++) {
>     if (y >= U_left[i] && y <= U_right[i]) {
>         u = i;
>         // relative position of y within this interval, for interpolation
>         float range = U_right[i] - U_left[i]; // interval length
>         if (range > 0.0f) {
>             uFrac = (y - U_left[i]) / range;  // between 0.0 and 1.0
>         }
>         break;
>     }
> }
> if (u == -1) {
>     continue; // y coordinate is out of range: skip this voxel
> }
> int v = -1;
> float vFrac = 0.0f; // floating-point v after interpolation
> // i is the index of the i-th pixel in the first column of pixels:
> // V_top[i] stores the z coordinate of the pixel's upper edge,
> // and V_bot[i] the z coordinate of its lower edge.
> for (int i = 0; i < geo.nDetecV; i++) {
>     if (z >= V_bot[i] && z <= V_top[i]) {
>         v = i;
>         // relative position of z within this interval, for interpolation
>         float range = V_bot[i] - V_top[i]; // interval length
>         if (range > 0.0f) {
>             vFrac = (z - V_bot[i]) / range; // between 0.0 and 1.0
>         }
>         break;
>     }
> }
> if (v == -1) {
>     continue; // z coordinate is out of range: skip this voxel
> }
> // Compute floating-point u and v for texture sampling
> float u_float = (float)u + uFrac;
> float v_float = (float)v + vFrac;
>
> // Texture sampling using the interpolated u_float and v_float
> voxelColumn[colIdx] += tex3D(tex, u_float, v_float, indAlpha + 0.5f) * weight;
>
> [Fig1: result of the original code]
> [Fig2: result of the modified code]
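(An editor's distillation, not TIGRE code: the lookup quoted above, written as one self-contained device helper. The name findDetectorIndex and the parameters lo/hi are hypothetical; note the interval length is taken as hi[i] - lo[i], in the same order as the containment test lo[i] <= y <= hi[i].)

// Given per-pixel edge arrays with lo[i] <= hi[i] (e.g. U_left/U_right, or
// V_bot/V_top for the z axis), find the pixel containing y and the fractional
// offset inside it; returns false when y lies outside the detector.
__device__ bool findDetectorIndex(const float* lo, const float* hi,
                                  int n, float y, float* out)
{
    for (int i = 0; i < n; i++) {
        if (y >= lo[i] && y <= hi[i]) {
            float range = hi[i] - lo[i];                      // interval length
            float frac  = (range > 0.0f) ? (y - lo[i]) / range : 0.0f;
            *out = (float)i + frac;                           // index + fraction in [0,1]
            return true;
        }
    }
    return false;  // out of range: the caller should skip this voxel
}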

@AnderBiguri (Member)

In principle what you are saying is fine of course... but the devil is in the details I guess...

@daxiaojiupang (Author) commented Sep 30, 2024 via email
