Anyone here familiar with the KiFMM code?

I don't know what's going on. My MWE below runs with no problems, but my actual program segfaults depending on where I set breaks and jumps in the debugger. Even with breaks and jumps at each of the PETSc calls, I still get a segfault at the VecRestoreArray at the end.

#include <vector>
#include <iostream>
#include <cstdlib>
#include "compute.h"
#include <omp.h>
#include <mpi.h>
#include <time.h>
#include <random>
#include <string>
#include <sstream>
#include <fstream>
#include <unistd.h>
#include <petsc.h>
#include <petscsys.h>


using namespace std;


int run(int argc, char* argv[]);

int main(int argc, char* argv[])
{
    cout << "main" << endl;
    run(argc, argv);
    cout << "finished" << endl;
    return 0;
}


void Solve::compute()
{
    Vec x;
    Vec b;
    set(b, x);
}
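For context on the Vec lifetimes: compute() hands both handles to set() uninitialized, set() only ever creates b, and nothing is ever destroyed. As far as I understand PETSc, every VecCreate needs a matching VecDestroy, so I'd expect the lifetime to look roughly like this (untested sketch, assuming set() were changed to create both vectors):

void Solve::compute()
{
    Vec x = NULL;   // start the handles in a known state
    Vec b = NULL;
    set(b, x);      // assumes set() VecCreates both handles
    // ... use the vectors ...
    VecDestroy(&b); // pairs with the VecCreate inside set()
    VecDestroy(&x);
}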


int Solve::computeFor(vector<Ball*> &List, int u, vector<double> &valueVector, double &Ptr)
{
    unsigned int num = List.size();
    // pos is a pointer to a heap-allocated 2 x num array of doubles
    vector< vector<double> > *pos = new vector< vector<double> >(2, vector<double>(num));
    for (unsigned int i = 0; i < num; i++) {
        (*pos)[0][i] = List[i]->mp[0];
        (*pos)[1][i] = List[i]->mp[1];
    }

    // fixed-size buffers: these only hold up to 8 balls (16 doubles)
    double tmp[16] = {0};
    double trg[16] = {0};
    double cnt[16] = {0};
    for (unsigned int i = 0; i < num; i++)
    {
        tmp[i*2]     = (*pos)[0][i];
        tmp[(i*2)+1] = (*pos)[1][i];
    }
    for (unsigned int i = 0; i < num*2; i++)
    {
        cnt[i] = .5 * tmp[i];
    }
    double total = 0.0;
    for (unsigned int i = 0; i < num*2; i++)
    {
        trg[i] += cnt[i];
        total  += trg[i];
    }
    delete pos; // free the heap-allocated array

    // below is just dummy code to make computeFor run long enough to see
    // the speedup when parallelizing the code
    default_random_engine generator(u);
    uniform_int_distribution<int> uDistribution;

    int res = 0;
    for (int i = 0; i < 10000000; i++) {
        res = uDistribution(generator);
    }

    return res;
}
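Side note on those buffers: tmp/trg/cnt are fixed at 16 doubles, so any List with more than 8 balls would write out of bounds. A sketch of the same arithmetic with buffers sized from num (my own restructuring, untested):

vector<double> tmp(2*num, 0.0), trg(2*num, 0.0), cnt(2*num, 0.0);
for (unsigned int i = 0; i < num; i++) {
    tmp[2*i]     = (*pos)[0][i];
    tmp[2*i + 1] = (*pos)[1][i];
}
double total = 0.0;
for (unsigned int i = 0; i < 2*num; i++) {
    cnt[i]  = 0.5 * tmp[i];   // same scaling as above
    trg[i] += cnt[i];
    total  += trg[i];
}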

int run(int argc, char* argv[])
{
    cout << "argc is " << argc << endl;
    for (int i = 0; i < argc; i++)
    {
        cout << i << ":" << argv[i] << "\t";
    }
    cout << endl;


    PetscMPIInt    rank,size;
    PetscInitialize(&argc,&argv,0,0);
    MPI_Comm_size(PETSC_COMM_WORLD,&size);
    MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
    PetscPrintf(PETSC_COMM_WORLD,"Number of processors = %d, rank = %d\n",size,rank);
    PetscSynchronizedPrintf(PETSC_COMM_WORLD,"synchronized rank = %d\n",rank);
    PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
    MPI_Barrier (PETSC_COMM_WORLD);

    Solve* slv = new Solve();
    slv->computeLine(rank + 1);
    return 0;
}
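Also, run() calls PetscInitialize but never PetscFinalize, and slv is never deleted. The skeleton I believe PETSc expects (sketch from my reading of the docs, error checking omitted):

int run(int argc, char* argv[])
{
    PetscMPIInt rank;
    PetscInitialize(&argc, &argv, NULL, NULL);
    MPI_Comm_rank(PETSC_COMM_WORLD, &rank);

    Solve* slv = new Solve();
    slv->computeLine(rank + 1);
    delete slv;        // matches the new above

    PetscFinalize();   // pairs with PetscInitialize, before exit
    return 0;
}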

void Solve::computeLine(int v)
{
    for (int j = 1; j < 3; j++)
    {
        if (j % v == 0)
        {
            compute();
        }
    }
}

void Solve::set(Vec & b, Vec & x) {

    std::vector<Ball*> List;
    for (int u = 0; u < 4; u++)
    {
        vector<double> newsamp;
        default_random_engine generator;
        uniform_real_distribution<double> uDistribution;
        double news;
        cout << "news: " << endl;
        for (int j = 0; j < 16; j++)
        {
            news = uDistribution(generator);
            cout << j << ": " << news << ", ";
            newsamp.push_back(news);
        }

        for (int i = 0; i < 8; i++)
        {
            Ball* dupBall = new Ball;
            List.push_back(dupBall);
            List[i]->mp[0] = newsamp[2*i];
            List[i]->mp[1] = newsamp[2*i + 1];
            //delete dupBall;
        }
        List.clear(); // drops the pointers without deleting the Balls (leak),
                      // and leaves List empty for the code below
        cout << endl << endl;
    }

    unsigned int Num = List.size(); // 0 here, since List was cleared above
    DblNumMat* aPos = new DblNumMat(3, Num); // DblNumMat is a matrix of doubles, from KiFMM
    DblNumMat* aNor = new DblNumMat(3, Num);
    for (unsigned int i = 0; i < Num; i++) {
        (*aPos)(0, i) = List[i]->mp[0];
        (*aPos)(1, i) = List[i]->mp[1];
        (*aPos)(2, i) = List[i]->mp[2]; // mp[2] was never written above
        (*aNor)(0, i) = 0.0;
        (*aNor)(1, i) = 0.0;
        (*aNor)(2, i) = 0.0;
    }
    int NumTrg = 2000;

    PetscScalar* barray;
    VecCreate(PETSC_COMM_WORLD, &b);
    VecSetSizes(b, PETSC_DECIDE, NumTrg);
    VecSetFromOptions(b);
    VecGetArray(b, &barray); // returns a pointer to a contiguous array that
                             // contains this processor's portion of the vector data
}
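Since the segfault shows up at the VecRestoreArray, for reference this is the Get/Restore pairing as I understand it from the PETSc docs (untested sketch; VecGetLocalSize gives the local length, which on multiple ranks is smaller than the global NumTrg):

PetscInt nlocal;
VecGetLocalSize(b, &nlocal);   // local portion, not the global size
PetscScalar* barray;
VecGetArray(b, &barray);
for (PetscInt i = 0; i < nlocal; i++) {
    barray[i] = 0.0;           // only touch this rank's entries
}
VecRestoreArray(b, &barray);   // must get the same Vec and the same pointer
                               // variable; barray is invalid after this call
VecDestroy(&b);                // eventually, once b is no longer needed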